Merge pull request #49 from hashicorp/b-heredoc

Heredoc support
This commit is contained in:
Mitchell Hashimoto 2015-11-10 15:09:49 -08:00
commit fa160f1206
16 changed files with 213 additions and 34 deletions

View File

@ -226,7 +226,7 @@ func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Val
case token.NUMBER:
var result int
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
case token.STRING:
case token.STRING, token.HEREDOC:
set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
default:
return fmt.Errorf(
@ -411,13 +411,13 @@ func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value)
case token.NUMBER:
result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
return nil
case token.STRING:
case token.STRING, token.HEREDOC:
result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
return nil
}
}
return fmt.Errorf("%s: unknown type %T", name, node)
return fmt.Errorf("%s: unknown type for string %T", name, node)
}
func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {

View File

@ -57,18 +57,31 @@ func TestDecode_interface(t *testing.T) {
"a": 1.02,
},
},
/*
{
"multiline_bad.hcl",
false,
map[string]interface{}{"foo": "bar\nbaz\n"},
},
{
"multiline.json",
false,
map[string]interface{}{"foo": "bar\nbaz"},
},
*/
{
"multiline_bad.hcl",
true,
nil,
},
{
"multiline_no_marker.hcl",
true,
nil,
},
{
"multiline.hcl",
false,
map[string]interface{}{"foo": "bar\nbaz\n"},
},
{
"multiline_no_eof.hcl",
false,
map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"},
},
{
"multiline.json",
false,
map[string]interface{}{"foo": "bar\nbaz"},
},
{
"scientific.json",
false,

View File

@ -202,7 +202,7 @@ func (p *Parser) object() (ast.Node, error) {
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.BOOL, token.STRING:
case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
return p.literalType()
case token.LBRACE:
return p.objectType()

View File

@ -22,6 +22,7 @@ func TestType(t *testing.T) {
{token.FLOAT, `foo = 123.12`},
{token.FLOAT, `foo = -123.12`},
{token.BOOL, `foo = true`},
{token.HEREDOC, "foo = <<EOF\nHello\nWorld\nEOF"},
}
for _, l := range literals {

View File

@ -16,6 +16,10 @@ const (
infinity = 1 << 30 // offset or line
)
var (
	// unindent is the sentinel byte sequence inserted before heredoc
	// continuation lines by heredocIndent and stripped back out (together
	// with any indentation added before it) in a later pass. The code point
	// is from the Unicode private use area, so it should not collide with
	// real user input.
	unindent = []byte("\uE123") // in the private use space
)
type printer struct {
cfg Config
prev token.Pos
@ -147,7 +151,7 @@ func (p *printer) output(n interface{}) []byte {
p.prev = t.Pos()
buf.Write(p.objectItem(t))
case *ast.LiteralType:
buf.WriteString(t.Token.Text)
buf.Write(p.literalType(t))
case *ast.ListType:
buf.Write(p.list(t))
case *ast.ObjectType:
@ -159,6 +163,21 @@ func (p *printer) output(n interface{}) []byte {
return buf.Bytes()
}
// literalType returns the printable form of a literal token. Heredoc
// literals get special treatment: the trailing newline is dropped and
// every continuation line is tagged so later passes leave it unindented.
func (p *printer) literalType(lit *ast.LiteralType) []byte {
	text := []byte(lit.Token.Text)
	if lit.Token.Type != token.HEREDOC {
		return text
	}

	// Clear the trailing newline from heredocs
	if n := len(text); text[n-1] == '\n' {
		text = text[:n-1]
	}

	// Poison lines 2+ so that we don't indent them
	return p.heredocIndent(text)
}
// objectItem returns the printable HCL form of an object item. An object type
// starts with one/multiple keys and has a value. The value might be of any
// type.
@ -460,6 +479,51 @@ func (p *printer) indent(buf []byte) []byte {
if bol && c != '\n' {
res = append(res, prefix...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}
// unindent removes the unindent markers inserted by heredocIndent, together
// with any indentation (everything back to the previous newline) that was
// emitted ahead of each marker, and returns the cleaned buffer.
func (p *printer) unindent(buf []byte) []byte {
	var res []byte
	for i := 0; i < len(buf); i++ {
		// Only attempt a marker comparison when enough bytes remain to
		// hold one. Using '<' (not '<=') so a marker that ends exactly at
		// the end of the buffer is still detected.
		skip := len(buf)-i < len(unindent)
		if !skip {
			skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
		}
		if skip {
			res = append(res, buf[i])
			continue
		}

		// We have a marker. We have to backtrack here and clean out
		// any whitespace ahead of our tombstone up to a \n
		for j := len(res) - 1; j >= 0; j-- {
			if res[j] == '\n' {
				break
			}

			res = res[:j]
		}

		// Skip the entire unindent marker
		i += len(unindent) - 1
	}

	return res
}
// heredocIndent marks all the 2nd and further lines as unindentable
func (p *printer) heredocIndent(buf []byte) []byte {
var res []byte
bol := false
for _, c := range buf {
if bol && c != '\n' {
res = append(res, unindent...)
}
res = append(res, c)
bol = c == '\n'
}

View File

@ -29,7 +29,7 @@ func (c *Config) Fprint(output io.Writer, node ast.Node) error {
p.collectComments(node)
if _, err := output.Write(p.output(node)); err != nil {
if _, err := output.Write(p.unindent(p.output(node))); err != nil {
return err
}

View File

@ -33,7 +33,10 @@ resource aws_instance "web" {
network_interface = {
device_index = 1
description = "Another network interface"
description = <<EOF
ANOTHER NETWORK INTERFACE
EOF
}
}
@ -45,5 +48,7 @@ resource "aws_instance" "db" {
}
output "web_ip" {
value = "${aws_instance.web.private_ip}"
value = <<EOF
TUBES
EOF
}

View File

@ -32,7 +32,9 @@ resource "aws_security_group" "firewall" {
network_interface = {
device_index = 1
description = "Another network interface"
description = <<EOF
ANOTHER NETWORK INTERFACE
EOF
}
}
@ -45,5 +47,7 @@ resource "aws_instance" "db" {
output "web_ip" {
value="${aws_instance.web.private_ip}"
value=<<EOF
TUBES
EOF
}

View File

@ -174,6 +174,9 @@ func (s *Scanner) Scan() token.Token {
ch = s.scanMantissa(ch)
ch = s.scanExponent(ch)
}
case '<':
tok = token.HEREDOC
s.scanHeredoc()
case '[':
tok = token.LBRACK
case ']':
@ -371,6 +374,67 @@ func (s *Scanner) scanExponent(ch rune) rune {
return ch
}
// scanHeredoc scans a heredoc string of the form <<IDENT\n...\nIDENT. The
// first '<' has already been consumed by the caller; on malformed or
// unterminated input s.err is invoked and scanning stops.
func (s *Scanner) scanHeredoc() {
	// Scan the second '<' in example: '<<EOF'
	if s.next() != '<' {
		s.err("heredoc expected second '<', didn't see it")
		return
	}

	// Get the original offset so we can read just the heredoc ident
	offs := s.srcPos.Offset

	// Scan the identifier: letters only, terminated by a newline
	ch := s.next()
	for isLetter(ch) {
		ch = s.next()
	}

	// If we reached an EOF then that is not good
	if ch == eof {
		s.err("heredoc not terminated")
		return
	}

	// If we didn't reach a newline then that is also not good
	if ch != '\n' {
		s.err("invalid characters in heredoc anchor")
		return
	}

	// Read the identifier (the anchor text between '<<' and the newline)
	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]

	// Read the actual string value. lineStart tracks the byte offset of the
	// start of the current line so each completed line can be compared
	// against the anchor.
	lineStart := s.srcPos.Offset
	for {
		ch := s.next()

		// Special newline handling.
		if ch == '\n' {
			// Math is fast, so we first compare the byte counts to
			// see if we have a chance of seeing the same identifier. If those
			// match, then we compare the string values directly.
			lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
			if lineBytesLen == len(identBytes) &&
				bytes.Equal(identBytes, s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
				// Found the closing anchor line; the heredoc is complete.
				break
			}

			// Not an anchor match, record the start of a new line
			lineStart = s.srcPos.Offset
		}

		if ch == eof {
			s.err("heredoc not terminated")
			return
		}
	}
	// Note: the redundant trailing bare return was removed (staticcheck S1023).
}
// scanString scans a quoted string
func (s *Scanner) scanString() {
braces := 0

View File

@ -71,6 +71,9 @@ var tokenLists = map[string][]tokenPair{
{token.IDENT, "foo६४"},
{token.IDENT, "bar"},
},
"heredoc": []tokenPair{
{token.HEREDOC, "<<EOF\nhello\nworld\nEOF"},
},
"string": []tokenPair{
{token.STRING, `" "`},
{token.STRING, `"a"`},
@ -229,6 +232,7 @@ var orderedTokenLists = []string{
"operator",
"bool",
"ident",
"heredoc",
"string",
"number",
"float",
@ -327,7 +331,9 @@ func TestRealExample(t *testing.T) {
network_interface {
device_index = 0
description = "Main network interface"
description = <<EOF
Main interface
EOF
}
}`
@ -385,7 +391,7 @@ func TestRealExample(t *testing.T) {
{token.NUMBER, `0`},
{token.IDENT, `description`},
{token.ASSIGN, `=`},
{token.STRING, `"Main network interface"`},
{token.HEREDOC, "<<EOF\nMain interface\nEOF\n"},
{token.RBRACE, `}`},
{token.RBRACE, `}`},
{token.EOF, ``},

View File

@ -5,6 +5,7 @@ package token
import (
"fmt"
"strconv"
"strings"
hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)
@ -29,10 +30,11 @@ const (
identifier_beg
IDENT // literals
literal_beg
NUMBER // 12345
FLOAT // 123.45
BOOL // true,false
STRING // "abc"
NUMBER // 12345
FLOAT // 123.45
BOOL // true,false
STRING // "abc"
HEREDOC // <<FOO\nbar\nFOO
literal_end
identifier_end
@ -63,10 +65,11 @@ var tokens = [...]string{
BOOL: "BOOL",
STRING: "STRING",
LBRACK: "LBRACK",
LBRACE: "LBRACE",
COMMA: "COMMA",
PERIOD: "PERIOD",
LBRACK: "LBRACK",
LBRACE: "LBRACE",
COMMA: "COMMA",
PERIOD: "PERIOD",
HEREDOC: "HEREDOC",
RBRACK: "RBRACK",
RBRACE: "RBRACE",
@ -138,6 +141,14 @@ func (t Token) Value() interface{} {
return int64(v)
case IDENT:
return t.Text
case HEREDOC:
// We need to find the end of the marker
idx := strings.IndexByte(t.Text, '\n')
if idx == -1 {
panic("heredoc doesn't contain newline")
}
return string(t.Text[idx+1 : len(t.Text)-idx+1])
case STRING:
// Determine the Unquote method to use. If it came from JSON,
// then we need to use the built-in unquote since we have to

View File

@ -18,6 +18,7 @@ func TestTypeString(t *testing.T) {
{FLOAT, "FLOAT"},
{BOOL, "BOOL"},
{STRING, "STRING"},
{HEREDOC, "HEREDOC"},
{LBRACK, "LBRACK"},
{LBRACE, "LBRACE"},
{COMMA, "COMMA"},
@ -32,7 +33,6 @@ func TestTypeString(t *testing.T) {
for _, token := range tokens {
if token.tt.String() != token.str {
t.Errorf("want: %q got:%q\n", token.str, token.tt)
}
}
@ -50,6 +50,7 @@ func TestTokenValue(t *testing.T) {
{Token{Type: IDENT, Text: `foo`}, "foo"},
{Token{Type: STRING, Text: `"foo"`}, "foo"},
{Token{Type: STRING, Text: `"foo\nbar"`}, "foo\nbar"},
{Token{Type: HEREDOC, Text: "<<EOF\nfoo\nbar\nEOF"}, "foo\nbar"},
}
for _, token := range tokens {

View File

@ -0,0 +1,4 @@
foo = <<EOF
bar
baz
EOF

View File

@ -1,4 +1,4 @@
foo = <<EOF
foo = <EOF
bar
baz
EOF

View File

@ -0,0 +1,5 @@
foo = <<EOF
bar
baz
EOF
key = "value"

View File

@ -0,0 +1 @@
foo = <<