zclsyntax: peeker pretends single-line comments are newlines

When we're skipping comments but retaining newlines, we need to do some
sleight-of-hand because single-line comment tokens contain the newline
that terminates them (for simpler handling of lead doc comments), but our
parsing can be newline-sensitive.

To allow for this, as a special case we transform single-line comment
tokens into newline tokens in this situation, allowing parser code to
worry only about the newlines and skip over the comments.
Martin Atkins 2017-06-09 07:19:25 -07:00
parent c88641b147
commit 755fe38505
3 changed files with 75 additions and 6 deletions
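
As a rough illustration of the idea described in the message above, here is a
minimal, self-contained sketch. It is not the actual zclsyntax peeker API: the
tokenType, token, and filterTokens names below are simplified stand-ins. When
comments are being skipped but newlines are significant, a single-line comment
token whose bytes end in '\n' is replaced by a synthetic newline token so the
parser still sees the line terminator.

```go
// A minimal sketch, assuming a simplified token model (tokenType, token,
// and filterTokens are illustrative stand-ins, not the real zclsyntax types).
package main

import "fmt"

type tokenType int

const (
	tokenComment tokenType = iota
	tokenNewline
	tokenIdent
)

type token struct {
	typ   tokenType
	bytes []byte
}

// filterTokens mimics the special case described above: comment tokens
// are dropped, but a single-line comment that absorbed its terminating
// newline yields a synthetic newline token in its place, so that
// newline-sensitive parsing still sees the end of the line.
func filterTokens(toks []token, includeNewlines bool) []token {
	var out []token
	for _, t := range toks {
		if t.typ == tokenComment {
			b := t.bytes
			if includeNewlines && len(b) > 0 && b[len(b)-1] == '\n' {
				// Keep only the comment's trailing newline.
				out = append(out, token{typ: tokenNewline, bytes: b[len(b)-1:]})
			}
			continue
		}
		out = append(out, t)
	}
	return out
}

func main() {
	toks := []token{
		{tokenIdent, []byte("a")},
		{tokenComment, []byte("# trailing comment\n")},
		{tokenIdent, []byte("b")},
	}
	for _, t := range filterTokens(toks, true) {
		fmt.Printf("%d %q\n", t.typ, t.bytes)
	}
	// Prints:
	//   2 "a"
	//   1 "\n"
	//   2 "b"
	// The comment disappears, but its trailing newline survives as a
	// synthetic newline token.
}
```

The effect is that an input such as "a = 1 # line comment" with a trailing
newline still presents a newline to the parser where the comment token used to
be, which is what the newline-terminated attribute grammar needs.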


@@ -157,11 +157,14 @@ func (p *parser) finishParsingBodyAttribute(ident Token) (Node, zcl.Diagnostics)
panic("finishParsingBodyAttribute called with next not equals")
}
+ var endRange zcl.Range
expr, diags := p.ParseExpression()
if p.recovery && diags.HasErrors() {
// recovery within expressions tends to be tricky, so we've probably
// landed somewhere weird. We'll try to reset to the start of a body
// item so parsing can continue.
+ endRange = p.PrevRange()
p.recoverAfterBodyItem()
} else {
end := p.Peek()
@@ -175,14 +178,14 @@ func (p *parser) finishParsingBodyAttribute(ident Token) (Node, zcl.Diagnostics)
Context: zcl.RangeBetween(ident.Range, end.Range).Ptr(),
})
}
+ endRange = p.PrevRange()
p.recoverAfterBodyItem()
} else {
+ endRange = p.PrevRange()
p.Read() // eat newline
}
}
- endRange := p.PrevRange()
return &Attribute{
Name: string(ident.Bytes),
Expr: expr,


@@ -364,7 +364,7 @@ block "valid" {}
},
{
- `a = 1`,
+ "a = 1\n",
0,
&Body{
Attributes: Attributes{
@@ -396,11 +396,11 @@ block "valid" {}
Blocks: Blocks{},
SrcRange: zcl.Range{
Start: zcl.Pos{Line: 1, Column: 1, Byte: 0},
- End: zcl.Pos{Line: 1, Column: 6, Byte: 5},
+ End: zcl.Pos{Line: 2, Column: 1, Byte: 6},
},
EndRange: zcl.Range{
- Start: zcl.Pos{Line: 1, Column: 6, Byte: 5},
- End: zcl.Pos{Line: 1, Column: 6, Byte: 5},
+ Start: zcl.Pos{Line: 2, Column: 1, Byte: 6},
+ End: zcl.Pos{Line: 2, Column: 1, Byte: 6},
},
},
},
@@ -462,6 +462,47 @@ block "valid" {}
},
},
},
+ {
+ "a = 1 # line comment\n",
+ 0,
+ &Body{
+ Attributes: Attributes{
+ "a": {
+ Name: "a",
+ Expr: &LiteralValueExpr{
+ Val: cty.NumberIntVal(1),
+ SrcRange: zcl.Range{
+ Start: zcl.Pos{Line: 1, Column: 5, Byte: 4},
+ End: zcl.Pos{Line: 1, Column: 6, Byte: 5},
+ },
+ },
+ SrcRange: zcl.Range{
+ Start: zcl.Pos{Line: 1, Column: 1, Byte: 0},
+ End: zcl.Pos{Line: 1, Column: 6, Byte: 5},
+ },
+ NameRange: zcl.Range{
+ Start: zcl.Pos{Line: 1, Column: 1, Byte: 0},
+ End: zcl.Pos{Line: 1, Column: 2, Byte: 1},
+ },
+ EqualsRange: zcl.Range{
+ Start: zcl.Pos{Line: 1, Column: 3, Byte: 2},
+ End: zcl.Pos{Line: 1, Column: 4, Byte: 3},
+ },
+ },
+ },
+ Blocks: Blocks{},
+ SrcRange: zcl.Range{
+ Start: zcl.Pos{Line: 1, Column: 1, Byte: 0},
+ End: zcl.Pos{Line: 2, Column: 1, Byte: 21},
+ },
+ EndRange: zcl.Range{
+ Start: zcl.Pos{Line: 2, Column: 1, Byte: 21},
+ End: zcl.Pos{Line: 2, Column: 1, Byte: 21},
+ },
+ },
+ },
{
` `,


@@ -50,6 +50,31 @@ func (p *peeker) nextToken() (Token, int) {
switch tok.Type {
case TokenComment:
if !p.IncludeComments {
+ // Single-line comment tokens, starting with # or //, absorb
+ // the trailing newline that terminates them as part of their
+ // bytes. When we're filtering out comments, we must as a
+ // special case transform these to newline tokens in order
+ // to properly parse newline-terminated block items.
+ if p.includingNewlines() {
+ if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
+ fakeNewline := Token{
+ Type: TokenNewline,
+ Bytes: tok.Bytes[len(tok.Bytes)-1 : len(tok.Bytes)],
+ // We use the whole token range as the newline
+ // range, even though that's a little... weird,
+ // because otherwise we'd need to go count
+ // characters again in order to figure out the
+ // column of the newline, and that complexity
+ // isn't justified when ranges of newlines are
+ // so rarely printed anyway.
+ Range: tok.Range,
+ }
+ return fakeNewline, i + 1
+ }
+ }
continue
}
case TokenNewline: