2017-05-28 02:00:00 +00:00
|
|
|
|
package zclsyntax
|
|
|
|
|
|
|
|
|
|
import (
|
2017-05-28 14:38:17 +00:00
|
|
|
|
"fmt"
|
|
|
|
|
|
2017-05-28 02:00:00 +00:00
|
|
|
|
"github.com/apparentlymart/go-textseg/textseg"
|
|
|
|
|
"github.com/zclconf/go-zcl/zcl"
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
// Token represents a sequence of bytes from some zcl code that has been
// tagged with a type and its range within the source file.
type Token struct {
	// Type classifies the token; values are the TokenType constants
	// declared below.
	Type TokenType

	// Bytes is the raw source content of the token. NOTE(review): this
	// appears to alias the scanner's backing buffer rather than a copy —
	// see emitToken, which slices f.Bytes directly; confirm callers do
	// not mutate it.
	Bytes []byte

	// Range records where in the source file the token's bytes appear.
	Range zcl.Range
}
|
// TokenType is an enumeration used for the Type field on Token.
//
// Each type's value is a rune chosen to be mnemonic in debug output;
// see the constant block below for the full set.
type TokenType rune

//go:generate stringer -type TokenType -output token_type_string.go
|
const (
|
|
|
|
|
// Single-character tokens are represented by their own character, for
|
|
|
|
|
// convenience in producing these within the scanner. However, the values
|
|
|
|
|
// are otherwise arbitrary and just intended to be mnemonic for humans
|
|
|
|
|
// who might see them in debug output.
|
|
|
|
|
|
2017-05-28 16:36:32 +00:00
|
|
|
|
TokenOBrace TokenType = '{'
|
|
|
|
|
TokenCBrace TokenType = '}'
|
|
|
|
|
TokenOBrack TokenType = '['
|
|
|
|
|
TokenCBrack TokenType = ']'
|
|
|
|
|
TokenOParen TokenType = '('
|
|
|
|
|
TokenCParen TokenType = ')'
|
|
|
|
|
TokenOQuote TokenType = '«'
|
|
|
|
|
TokenCQuote TokenType = '»'
|
|
|
|
|
TokenOHeredoc TokenType = 'H'
|
|
|
|
|
TokenCHeredoc TokenType = 'h'
|
2017-05-28 02:00:00 +00:00
|
|
|
|
|
|
|
|
|
TokenDot TokenType = '.'
|
|
|
|
|
TokenStar TokenType = '*'
|
|
|
|
|
TokenSlash TokenType = '/'
|
|
|
|
|
TokenPlus TokenType = '+'
|
|
|
|
|
TokenMinus TokenType = '-'
|
|
|
|
|
|
|
|
|
|
TokenEqual TokenType = '='
|
|
|
|
|
TokenNotEqual TokenType = '≠'
|
|
|
|
|
TokenLessThan TokenType = '<'
|
|
|
|
|
TokenLessThanEq TokenType = '≤'
|
|
|
|
|
TokenGreaterThan TokenType = '>'
|
|
|
|
|
TokenGreaterThanEq TokenType = '≥'
|
|
|
|
|
|
|
|
|
|
TokenAnd TokenType = '∧'
|
|
|
|
|
TokenOr TokenType = '∨'
|
|
|
|
|
TokenBang TokenType = '!'
|
|
|
|
|
|
|
|
|
|
TokenQuestion TokenType = '?'
|
|
|
|
|
TokenColon TokenType = ':'
|
|
|
|
|
|
|
|
|
|
TokenTemplateInterp TokenType = '∫'
|
|
|
|
|
TokenTemplateControl TokenType = 'λ'
|
2017-05-28 14:20:39 +00:00
|
|
|
|
TokenTemplateSeqEnd TokenType = '∎'
|
2017-05-28 02:00:00 +00:00
|
|
|
|
|
|
|
|
|
TokenStringLit TokenType = 'S'
|
|
|
|
|
TokenNumberLit TokenType = 'N'
|
|
|
|
|
TokenIdent TokenType = 'I'
|
|
|
|
|
|
|
|
|
|
TokenNewline TokenType = '\n'
|
|
|
|
|
TokenEOF TokenType = '␄'
|
|
|
|
|
|
|
|
|
|
// The rest are not used in the language but recognized by the scanner so
|
|
|
|
|
// we can generate good diagnostics in the parser when users try to write
|
|
|
|
|
// things that might work in other languages they are familiar with, or
|
|
|
|
|
// simply make incorrect assumptions about the zcl language.
|
|
|
|
|
|
|
|
|
|
TokenBitwiseAnd TokenType = '&'
|
|
|
|
|
TokenBitwiseOr TokenType = '|'
|
|
|
|
|
TokenBitwiseNot TokenType = '~'
|
|
|
|
|
TokenBitwiseXor TokenType = '^'
|
|
|
|
|
TokenStarStar TokenType = '➚'
|
|
|
|
|
TokenBacktick TokenType = '`'
|
|
|
|
|
TokenSemicolon TokenType = ';'
|
2017-05-28 15:38:13 +00:00
|
|
|
|
TokenTabs TokenType = '␉'
|
2017-05-28 02:00:00 +00:00
|
|
|
|
TokenInvalid TokenType = '<27>'
|
|
|
|
|
TokenBadUTF8 TokenType = '💩'
|
|
|
|
|
)
|
|
|
|
|
|
2017-05-28 14:38:17 +00:00
|
|
|
|
func (t TokenType) GoString() string {
|
|
|
|
|
return fmt.Sprintf("zclsyntax.%s", t.String())
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-28 22:44:22 +00:00
|
|
|
|
// scanMode selects a scanning context for the scanner. NOTE(review): the
// scanner itself is not visible in this chunk — presumably scanNormal is
// for ordinary zcl source and scanTemplate for string-template interiors;
// confirm against the scanner implementation.
type scanMode int

const (
	scanNormal scanMode = iota
	scanTemplate
)
|
2017-05-28 14:11:24 +00:00
|
|
|
|
// tokenAccum accumulates tokens as they are emitted by the scanner,
// maintaining the current source position so each token can be tagged
// with an accurate range (see emitToken).
type tokenAccum struct {
	// Filename is recorded into each emitted token's Range.
	Filename string

	// Bytes is the full source buffer; emitted tokens slice into it
	// rather than copying.
	Bytes []byte

	// Pos is the position reached by the most recent emitToken call;
	// byte offsets passed to emitToken are interpreted relative to it.
	Pos zcl.Pos

	// Tokens is the accumulated result, appended to by emitToken.
	Tokens []Token
}
|
2017-05-28 15:38:13 +00:00
|
|
|
|
// emitToken appends a token of the given type covering the half-open byte
// range [startOfs, endOfs) of f.Bytes, computing the token's line/column
// range by advancing from the accumulator's current position. It updates
// f.Pos to the end of the emitted token, so calls must be made in
// ascending offset order.
func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
	// Walk through our buffer to figure out how much we need to adjust
	// the start pos to get our end pos.

	// Any bytes between f.Pos.Byte and startOfs were skipped (not emitted
	// as a token), so we advance the column by that count directly.
	start := f.Pos
	start.Column += startOfs - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
	start.Byte = startOfs

	end := start
	end.Byte = endOfs
	b := f.Bytes[startOfs:endOfs]
	for len(b) > 0 {
		// Columns count grapheme clusters rather than bytes or runes, so
		// multi-byte and combining sequences advance the column by one.
		advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
		if len(seq) == 1 && seq[0] == '\n' {
			end.Line++
			end.Column = 1
		} else {
			end.Column++
		}
		b = b[advance:]
	}

	f.Pos = end

	f.Tokens = append(f.Tokens, Token{
		Type:  ty,
		Bytes: f.Bytes[startOfs:endOfs],
		Range: zcl.Range{
			Filename: f.Filename,
			Start:    start,
			End:      end,
		},
	})
}