hclsyntax: emit Newline after a CHeredoc

Previously, due to how heredoc scanning was implemented, the closing
marker for a heredoc would consume the newline that terminated it. This
was problematic in any context that is newline-sensitive, because it
would cause us to skip the TokenNewline that might terminate e.g. an
attribute definition:

    foo = <<EOT
    hello
    EOT
    bar = "hello"

Previously the "foo" attribute would fail to parse properly due to trying
to consume the "bar" definition as part of its expression.

Now we synthetically split the marker token into two parts: the marker
itself and the newline that follows it. This means that using a heredoc
in any context where newlines are sensitive will involuntarily introduce
a newline, but that seems consistent with user expectation based on how
heredocs seem to be used "in the wild".
This commit is contained in:
Martin Atkins 2018-03-08 08:22:32 -08:00
parent be66a72aa8
commit 7d6ed4d8f3
4 changed files with 299 additions and 86 deletions

View File

@ -1380,6 +1380,89 @@ block "valid" {}
},
},
},
{
"a = <<EOT\nHello\nEOT\nb = \"Hi\"",
0,
&Body{
Attributes: Attributes{
"a": {
Name: "a",
Expr: &TemplateExpr{
Parts: []Expression{
&LiteralValueExpr{
Val: cty.StringVal("Hello\n"),
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 2, Column: 1, Byte: 10},
End: hcl.Pos{Line: 3, Column: 1, Byte: 16},
},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 5, Byte: 4},
End: hcl.Pos{Line: 3, Column: 4, Byte: 19},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 3, Column: 4, Byte: 19},
},
NameRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 1, Column: 2, Byte: 1},
},
EqualsRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 3, Byte: 2},
End: hcl.Pos{Line: 1, Column: 4, Byte: 3},
},
},
"b": {
Name: "b",
Expr: &TemplateExpr{
Parts: []Expression{
&LiteralValueExpr{
Val: cty.StringVal("Hi"),
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 4, Column: 6, Byte: 25},
End: hcl.Pos{Line: 4, Column: 8, Byte: 27},
},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 4, Column: 5, Byte: 24},
End: hcl.Pos{Line: 4, Column: 9, Byte: 28},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 4, Column: 1, Byte: 20},
End: hcl.Pos{Line: 4, Column: 9, Byte: 28},
},
NameRange: hcl.Range{
Start: hcl.Pos{Line: 4, Column: 1, Byte: 20},
End: hcl.Pos{Line: 4, Column: 2, Byte: 21},
},
EqualsRange: hcl.Range{
Start: hcl.Pos{Line: 4, Column: 3, Byte: 22},
End: hcl.Pos{Line: 4, Column: 4, Byte: 23},
},
},
},
Blocks: Blocks{},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 4, Column: 9, Byte: 28},
},
EndRange: hcl.Range{
Start: hcl.Pos{Line: 4, Column: 9, Byte: 28},
End: hcl.Pos{Line: 4, Column: 9, Byte: 28},
},
},
},
{
"a = foo.bar\n",
0,

View File

@ -4311,7 +4311,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
Pos: start,
}
// line 276 "scan_tokens.rl"
// line 294 "scan_tokens.rl"
// Ragel state
p := 0 // "Pointer" into data
@ -4339,7 +4339,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
var retBraces []int // stack of brace levels that cause us to use fret
var heredocs []heredocInProgress // stack of heredocs we're currently processing
// line 311 "scan_tokens.rl"
// line 329 "scan_tokens.rl"
// Make Go compiler happy
_ = ts
@ -4464,22 +4464,22 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
_acts++
switch _hcltok_actions[_acts-1] {
case 0:
// line 200 "scan_tokens.rl"
// line 218 "scan_tokens.rl"
p--
case 1:
// line 201 "scan_tokens.rl"
// line 219 "scan_tokens.rl"
p--
case 2:
// line 206 "scan_tokens.rl"
// line 224 "scan_tokens.rl"
p--
case 3:
// line 207 "scan_tokens.rl"
// line 225 "scan_tokens.rl"
p--
@ -4489,7 +4489,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
te = p + 1
case 8:
// line 137 "scan_tokens.rl"
// line 155 "scan_tokens.rl"
te = p + 1
{
@ -4508,7 +4508,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 9:
// line 147 "scan_tokens.rl"
// line 165 "scan_tokens.rl"
te = p + 1
{
@ -4541,21 +4541,21 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
case 11:
// line 221 "scan_tokens.rl"
// line 239 "scan_tokens.rl"
te = p + 1
{
token(TokenInvalid)
}
case 12:
// line 222 "scan_tokens.rl"
// line 240 "scan_tokens.rl"
te = p + 1
{
token(TokenBadUTF8)
}
case 13:
// line 137 "scan_tokens.rl"
// line 155 "scan_tokens.rl"
te = p
p--
@ -4575,7 +4575,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 14:
// line 147 "scan_tokens.rl"
// line 165 "scan_tokens.rl"
te = p
p--
@ -4595,7 +4595,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 15:
// line 220 "scan_tokens.rl"
// line 238 "scan_tokens.rl"
te = p
p--
@ -4603,7 +4603,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenQuotedLit)
}
case 16:
// line 221 "scan_tokens.rl"
// line 239 "scan_tokens.rl"
te = p
p--
@ -4611,7 +4611,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenInvalid)
}
case 17:
// line 222 "scan_tokens.rl"
// line 240 "scan_tokens.rl"
te = p
p--
@ -4619,29 +4619,29 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenBadUTF8)
}
case 18:
// line 220 "scan_tokens.rl"
// line 238 "scan_tokens.rl"
p = (te) - 1
{
token(TokenQuotedLit)
}
case 19:
// line 222 "scan_tokens.rl"
// line 240 "scan_tokens.rl"
p = (te) - 1
{
token(TokenBadUTF8)
}
case 20:
// line 125 "scan_tokens.rl"
// line 143 "scan_tokens.rl"
act = 10
case 21:
// line 230 "scan_tokens.rl"
// line 248 "scan_tokens.rl"
act = 11
case 22:
// line 137 "scan_tokens.rl"
// line 155 "scan_tokens.rl"
te = p + 1
{
@ -4660,7 +4660,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 23:
// line 147 "scan_tokens.rl"
// line 165 "scan_tokens.rl"
te = p + 1
{
@ -4691,7 +4691,25 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
if topdoc.StartOfLine {
maybeMarker := bytes.TrimSpace(data[ts:te])
if bytes.Equal(maybeMarker, topdoc.Marker) {
// We actually emit two tokens here: the end-of-heredoc
// marker first, and then separately the newline that
// follows it. This then avoids issues with the closing
// marker consuming a newline that would normally be used
// to mark the end of an attribute definition.
// We might have either a \n sequence or an \r\n sequence
// here, so we must handle both.
nls := te - 1
nle := te
te--
if data[te-1] == '\r' {
// back up one more byte
nls--
te--
}
token(TokenCHeredoc)
ts = nls
te = nle
token(TokenNewline)
heredocs = heredocs[:len(heredocs)-1]
top--
cs = stack[top]
@ -4707,14 +4725,14 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenStringLit)
}
case 25:
// line 230 "scan_tokens.rl"
// line 248 "scan_tokens.rl"
te = p + 1
{
token(TokenBadUTF8)
}
case 26:
// line 137 "scan_tokens.rl"
// line 155 "scan_tokens.rl"
te = p
p--
@ -4734,7 +4752,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 27:
// line 147 "scan_tokens.rl"
// line 165 "scan_tokens.rl"
te = p
p--
@ -4754,7 +4772,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 28:
// line 125 "scan_tokens.rl"
// line 143 "scan_tokens.rl"
te = p
p--
@ -4766,7 +4784,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenStringLit)
}
case 29:
// line 230 "scan_tokens.rl"
// line 248 "scan_tokens.rl"
te = p
p--
@ -4774,7 +4792,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenBadUTF8)
}
case 30:
// line 125 "scan_tokens.rl"
// line 143 "scan_tokens.rl"
p = (te) - 1
{
@ -4811,15 +4829,15 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
case 32:
// line 133 "scan_tokens.rl"
// line 151 "scan_tokens.rl"
act = 14
case 33:
// line 237 "scan_tokens.rl"
// line 255 "scan_tokens.rl"
act = 15
case 34:
// line 137 "scan_tokens.rl"
// line 155 "scan_tokens.rl"
te = p + 1
{
@ -4838,7 +4856,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 35:
// line 147 "scan_tokens.rl"
// line 165 "scan_tokens.rl"
te = p + 1
{
@ -4857,21 +4875,21 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 36:
// line 133 "scan_tokens.rl"
// line 151 "scan_tokens.rl"
te = p + 1
{
token(TokenStringLit)
}
case 37:
// line 237 "scan_tokens.rl"
// line 255 "scan_tokens.rl"
te = p + 1
{
token(TokenBadUTF8)
}
case 38:
// line 137 "scan_tokens.rl"
// line 155 "scan_tokens.rl"
te = p
p--
@ -4891,7 +4909,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 39:
// line 147 "scan_tokens.rl"
// line 165 "scan_tokens.rl"
te = p
p--
@ -4911,7 +4929,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 40:
// line 133 "scan_tokens.rl"
// line 151 "scan_tokens.rl"
te = p
p--
@ -4919,7 +4937,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenStringLit)
}
case 41:
// line 237 "scan_tokens.rl"
// line 255 "scan_tokens.rl"
te = p
p--
@ -4927,7 +4945,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenBadUTF8)
}
case 42:
// line 133 "scan_tokens.rl"
// line 151 "scan_tokens.rl"
p = (te) - 1
{
@ -4956,29 +4974,29 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
case 44:
// line 241 "scan_tokens.rl"
// line 259 "scan_tokens.rl"
act = 16
case 45:
// line 242 "scan_tokens.rl"
// line 260 "scan_tokens.rl"
act = 17
case 46:
// line 242 "scan_tokens.rl"
// line 260 "scan_tokens.rl"
te = p + 1
{
token(TokenBadUTF8)
}
case 47:
// line 243 "scan_tokens.rl"
// line 261 "scan_tokens.rl"
te = p + 1
{
token(TokenInvalid)
}
case 48:
// line 241 "scan_tokens.rl"
// line 259 "scan_tokens.rl"
te = p
p--
@ -4986,7 +5004,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenIdent)
}
case 49:
// line 242 "scan_tokens.rl"
// line 260 "scan_tokens.rl"
te = p
p--
@ -4994,14 +5012,14 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenBadUTF8)
}
case 50:
// line 241 "scan_tokens.rl"
// line 259 "scan_tokens.rl"
p = (te) - 1
{
token(TokenIdent)
}
case 51:
// line 242 "scan_tokens.rl"
// line 260 "scan_tokens.rl"
p = (te) - 1
{
@ -5024,104 +5042,104 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
case 53:
// line 249 "scan_tokens.rl"
// line 267 "scan_tokens.rl"
act = 21
case 54:
// line 251 "scan_tokens.rl"
// line 269 "scan_tokens.rl"
act = 22
case 55:
// line 262 "scan_tokens.rl"
// line 280 "scan_tokens.rl"
act = 32
case 56:
// line 272 "scan_tokens.rl"
// line 290 "scan_tokens.rl"
act = 38
case 57:
// line 273 "scan_tokens.rl"
// line 291 "scan_tokens.rl"
act = 39
case 58:
// line 251 "scan_tokens.rl"
// line 269 "scan_tokens.rl"
te = p + 1
{
token(TokenComment)
}
case 59:
// line 252 "scan_tokens.rl"
// line 270 "scan_tokens.rl"
te = p + 1
{
token(TokenNewline)
}
case 60:
// line 254 "scan_tokens.rl"
// line 272 "scan_tokens.rl"
te = p + 1
{
token(TokenEqualOp)
}
case 61:
// line 255 "scan_tokens.rl"
// line 273 "scan_tokens.rl"
te = p + 1
{
token(TokenNotEqual)
}
case 62:
// line 256 "scan_tokens.rl"
// line 274 "scan_tokens.rl"
te = p + 1
{
token(TokenGreaterThanEq)
}
case 63:
// line 257 "scan_tokens.rl"
// line 275 "scan_tokens.rl"
te = p + 1
{
token(TokenLessThanEq)
}
case 64:
// line 258 "scan_tokens.rl"
// line 276 "scan_tokens.rl"
te = p + 1
{
token(TokenAnd)
}
case 65:
// line 259 "scan_tokens.rl"
// line 277 "scan_tokens.rl"
te = p + 1
{
token(TokenOr)
}
case 66:
// line 260 "scan_tokens.rl"
// line 278 "scan_tokens.rl"
te = p + 1
{
token(TokenEllipsis)
}
case 67:
// line 261 "scan_tokens.rl"
// line 279 "scan_tokens.rl"
te = p + 1
{
token(TokenFatArrow)
}
case 68:
// line 262 "scan_tokens.rl"
// line 280 "scan_tokens.rl"
te = p + 1
{
selfToken()
}
case 69:
// line 157 "scan_tokens.rl"
// line 175 "scan_tokens.rl"
te = p + 1
{
@ -5129,7 +5147,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
braces++
}
case 70:
// line 162 "scan_tokens.rl"
// line 180 "scan_tokens.rl"
te = p + 1
{
@ -5150,7 +5168,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 71:
// line 174 "scan_tokens.rl"
// line 192 "scan_tokens.rl"
te = p + 1
{
@ -5225,27 +5243,27 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 74:
// line 272 "scan_tokens.rl"
// line 290 "scan_tokens.rl"
te = p + 1
{
token(TokenBadUTF8)
}
case 75:
// line 273 "scan_tokens.rl"
// line 291 "scan_tokens.rl"
te = p + 1
{
token(TokenInvalid)
}
case 76:
// line 247 "scan_tokens.rl"
// line 265 "scan_tokens.rl"
te = p
p--
case 77:
// line 248 "scan_tokens.rl"
// line 266 "scan_tokens.rl"
te = p
p--
@ -5253,7 +5271,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenNumberLit)
}
case 78:
// line 249 "scan_tokens.rl"
// line 267 "scan_tokens.rl"
te = p
p--
@ -5261,7 +5279,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenIdent)
}
case 79:
// line 251 "scan_tokens.rl"
// line 269 "scan_tokens.rl"
te = p
p--
@ -5269,7 +5287,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenComment)
}
case 80:
// line 262 "scan_tokens.rl"
// line 280 "scan_tokens.rl"
te = p
p--
@ -5277,7 +5295,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
selfToken()
}
case 81:
// line 272 "scan_tokens.rl"
// line 290 "scan_tokens.rl"
te = p
p--
@ -5285,7 +5303,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenBadUTF8)
}
case 82:
// line 273 "scan_tokens.rl"
// line 291 "scan_tokens.rl"
te = p
p--
@ -5293,28 +5311,28 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenInvalid)
}
case 83:
// line 248 "scan_tokens.rl"
// line 266 "scan_tokens.rl"
p = (te) - 1
{
token(TokenNumberLit)
}
case 84:
// line 249 "scan_tokens.rl"
// line 267 "scan_tokens.rl"
p = (te) - 1
{
token(TokenIdent)
}
case 85:
// line 262 "scan_tokens.rl"
// line 280 "scan_tokens.rl"
p = (te) - 1
{
selfToken()
}
case 86:
// line 272 "scan_tokens.rl"
// line 290 "scan_tokens.rl"
p = (te) - 1
{
@ -5351,7 +5369,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
// line 5214 "scan_tokens.go"
// line 5232 "scan_tokens.go"
}
}
@ -5372,7 +5390,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
act = 0
// line 5234 "scan_tokens.go"
// line 5252 "scan_tokens.go"
}
}
@ -5398,7 +5416,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
// line 334 "scan_tokens.rl"
// line 352 "scan_tokens.rl"
// If we fall out here without being in a final state then we've
// encountered something that the scanner can't match, which we'll

View File

@ -112,7 +112,25 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
if topdoc.StartOfLine {
maybeMarker := bytes.TrimSpace(data[ts:te])
if bytes.Equal(maybeMarker, topdoc.Marker) {
// We actually emit two tokens here: the end-of-heredoc
// marker first, and then separately the newline that
// follows it. This then avoids issues with the closing
// marker consuming a newline that would normally be used
// to mark the end of an attribute definition.
// We might have either a \n sequence or an \r\n sequence
// here, so we must handle both.
nls := te-1
nle := te
te--
if data[te-1] == '\r' {
// back up one more byte
nls--
te--
}
token(TokenCHeredoc);
ts = nls
te = nle
token(TokenNewline);
heredocs = heredocs[:len(heredocs)-1]
fret;
}

View File

@ -4,6 +4,8 @@ import (
"reflect"
"testing"
"github.com/go-test/deep"
"github.com/hashicorp/hcl2/hcl"
"github.com/kylelemons/godebug/pretty"
)
@ -978,9 +980,17 @@ EOT
},
{
Type: TokenCHeredoc,
Bytes: []byte("EOT\n"),
Bytes: []byte("EOT"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 18, Line: 3, Column: 1},
End: hcl.Pos{Byte: 21, Line: 3, Column: 4},
},
},
{
Type: TokenNewline,
Bytes: []byte("\n"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 21, Line: 3, Column: 4},
End: hcl.Pos{Byte: 22, Line: 4, Column: 1},
},
},
@ -994,6 +1004,51 @@ EOT
},
},
},
{
"<<EOT\r\nhello world\r\nEOT\r\n", // intentional windows-style line endings
[]Token{
{
Type: TokenOHeredoc,
Bytes: []byte("<<EOT\r\n"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 0, Line: 1, Column: 1},
End: hcl.Pos{Byte: 7, Line: 1, Column: 7},
},
},
{
Type: TokenStringLit,
Bytes: []byte("hello world\r\n"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 7, Line: 1, Column: 7},
End: hcl.Pos{Byte: 20, Line: 1, Column: 19},
},
},
{
Type: TokenCHeredoc,
Bytes: []byte("EOT"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 20, Line: 1, Column: 19},
End: hcl.Pos{Byte: 23, Line: 1, Column: 22},
},
},
{
Type: TokenNewline,
Bytes: []byte("\r\n"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 23, Line: 1, Column: 22},
End: hcl.Pos{Byte: 25, Line: 1, Column: 23},
},
},
{
Type: TokenEOF,
Bytes: []byte{},
Range: hcl.Range{
Start: hcl.Pos{Byte: 25, Line: 1, Column: 23},
End: hcl.Pos{Byte: 25, Line: 1, Column: 23},
},
},
},
},
{
`<<EOT
hello ${name}
@ -1050,9 +1105,17 @@ EOT
},
{
Type: TokenCHeredoc,
Bytes: []byte("EOT\n"),
Bytes: []byte("EOT"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 20, Line: 3, Column: 1},
End: hcl.Pos{Byte: 23, Line: 3, Column: 4},
},
},
{
Type: TokenNewline,
Bytes: []byte("\n"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 23, Line: 3, Column: 4},
End: hcl.Pos{Byte: 24, Line: 4, Column: 1},
},
},
@ -1114,9 +1177,17 @@ EOT
},
{
Type: TokenCHeredoc,
Bytes: []byte("EOT\n"),
Bytes: []byte("EOT"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 17, Line: 3, Column: 1},
End: hcl.Pos{Byte: 20, Line: 3, Column: 4},
},
},
{
Type: TokenNewline,
Bytes: []byte("\n"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 20, Line: 3, Column: 4},
End: hcl.Pos{Byte: 21, Line: 4, Column: 1},
},
},
@ -1173,9 +1244,17 @@ EOF
},
{
Type: TokenCHeredoc,
Bytes: []byte("EOF\n"),
Bytes: []byte("EOF"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 21, Line: 4, Column: 1},
End: hcl.Pos{Byte: 24, Line: 4, Column: 4},
},
},
{
Type: TokenNewline,
Bytes: []byte("\n"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 24, Line: 4, Column: 4},
End: hcl.Pos{Byte: 25, Line: 5, Column: 1},
},
},
@ -1197,9 +1276,17 @@ EOF
},
{
Type: TokenCHeredoc,
Bytes: []byte("EOF\n"),
Bytes: []byte("EOF"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 27, Line: 6, Column: 1},
End: hcl.Pos{Byte: 30, Line: 6, Column: 4},
},
},
{
Type: TokenNewline,
Bytes: []byte("\n"),
Range: hcl.Range{
Start: hcl.Pos{Byte: 30, Line: 6, Column: 4},
End: hcl.Pos{Byte: 31, Line: 7, Column: 1},
},
},
@ -1565,6 +1652,13 @@ EOF
test.input, diff,
)
}
// "pretty" diff output is not helpful for all differences, so
// we'll also print out a list of specific differences.
for _, problem := range deep.Equal(got, test.want) {
t.Error(problem)
}
})
}
}