hcl: split into meaningful packages

This commit is contained in:
Fatih Arslan 2015-10-16 23:12:26 +03:00
parent 72f3456c0f
commit e93a8e97ca
10 changed files with 401 additions and 416 deletions

94
ast/ast.go Normal file
View File

@ -0,0 +1,94 @@
package ast
import "github.com/fatih/hcl/token"
// Node is an element in the abstract syntax tree. Every concrete AST
// type in this package implements it via the unexported node() marker
// method and reports its source position through Pos().
type Node interface {
	node()
	Pos() token.Pos
}

// The marker methods below restrict Node implementations to the types
// declared in this package.
func (ObjectList) node()  {}
func (ObjectKey) node()   {}
func (ObjectItem) node()  {}
func (ObjectType) node()  {}
func (LiteralType) node() {}
func (ListType) node()    {}
// ObjectList represents a list of ObjectItems. An HCL file itself is an
// ObjectList.
type ObjectList struct {
	Items []*ObjectItem
}

// Add appends item to the end of the list.
func (o *ObjectList) Add(item *ObjectItem) {
	o.Items = append(o.Items, item)
}

// Pos returns the position of the first item in the list. If the list
// is empty, the zero (uninitialized) position is returned instead of
// panicking.
func (o *ObjectList) Pos() token.Pos {
	if len(o.Items) == 0 {
		// An empty list has no defined position; report the zero value.
		return token.Pos{}
	}
	return o.Items[0].Pos()
}
// ObjectItem represents a HCL Object Item. An item is represented with a key
// (or keys). It can be an assignment or an object (both normal and nested).
type ObjectItem struct {
	// Keys is only one element long if the item is of type assignment. If
	// it's a nested object it can be larger than one. In that case
	// Assign is invalid, as there are no assignments for a nested object.
	Keys []*ObjectKey

	// Assign contains the position of "=", if any.
	Assign token.Pos

	// Val is the item itself. It can be an object, list, number, bool or
	// a string. If the key length is larger than one, Val can only be of
	// type Object.
	Val Node
}

// Pos returns the position of the item's first key. If the item has no
// keys, the zero position is returned instead of panicking.
func (o *ObjectItem) Pos() token.Pos {
	if len(o.Keys) == 0 {
		return token.Pos{}
	}
	return o.Keys[0].Pos()
}
// ObjectKey represents an HCL object key. Its Token is either an
// identifier (token.IDENT) or a string (token.STRING).
type ObjectKey struct {
	Token token.Token
}

// Pos returns the position of the key's token.
func (o *ObjectKey) Pos() token.Pos {
	return o.Token.Pos
}
// LiteralType represents a literal of basic type. Valid types are:
// token.NUMBER, token.FLOAT, token.BOOL and token.STRING.
type LiteralType struct {
	Token token.Token
}

// Pos returns the position of the literal's token.
func (l *LiteralType) Pos() token.Pos {
	return l.Token.Pos
}
// ListType represents a HCL List type.
type ListType struct {
	Lbrack token.Pos // position of "["
	Rbrack token.Pos // position of "]"
	List   []Node    // the elements in lexical order
}

// Pos returns the position of the opening "[".
func (l *ListType) Pos() token.Pos {
	return l.Lbrack
}
// ObjectType represents a HCL Object Type.
type ObjectType struct {
	Lbrace token.Pos // position of "{"
	Rbrace token.Pos // position of "}"
	List   []Node    // the nodes in lexical order
}

// Pos returns the position of the opening "{".
// Receiver renamed b -> o for consistency with the other Object* types
// in this package.
func (o *ObjectType) Pos() token.Pos {
	return o.Lbrace
}

View File

@ -1,4 +1,4 @@
package parser package ast
// Walk traverses an AST in depth-first order: It starts by calling fn(node); // Walk traverses an AST in depth-first order: It starts by calling fn(node);
// node must not be nil. If f returns true, Walk invokes f recursively for // node must not be nil. If f returns true, Walk invokes f recursively for
@ -10,24 +10,24 @@ func Walk(node Node, fn func(Node) bool) {
switch n := node.(type) { switch n := node.(type) {
case *ObjectList: case *ObjectList:
for _, item := range n.items { for _, item := range n.Items {
Walk(item, fn) Walk(item, fn)
} }
case *ObjectKey: case *ObjectKey:
// nothing to do // nothing to do
case *ObjectItem: case *ObjectItem:
for _, k := range n.keys { for _, k := range n.Keys {
Walk(k, fn) Walk(k, fn)
} }
Walk(n.val, fn) Walk(n.Val, fn)
case *LiteralType: case *LiteralType:
// nothing to do // nothing to do
case *ListType: case *ListType:
for _, l := range n.list { for _, l := range n.List {
Walk(l, fn) Walk(l, fn)
} }
case *ObjectType: case *ObjectType:
for _, l := range n.list { for _, l := range n.List {
Walk(l, fn) Walk(l, fn)
} }
} }

View File

@ -1,116 +0,0 @@
package parser
import "github.com/fatih/hcl/scanner"
// Node is an element in the abstract syntax tree.
type Node interface {
node()
Pos() scanner.Pos
}
func (ObjectList) node() {}
func (ObjectKey) node() {}
func (ObjectItem) node() {}
func (ObjectType) node() {}
func (LiteralType) node() {}
func (ListType) node() {}
// ObjectList represents a list of ObjectItems. An HCL file itself is an
// ObjectList.
type ObjectList struct {
items []*ObjectItem
}
func (o *ObjectList) add(item *ObjectItem) {
o.items = append(o.items, item)
}
func (o *ObjectList) Pos() scanner.Pos {
// always returns the uninitiliazed position
return o.items[0].Pos()
}
// ObjectItem represents a HCL Object Item. An item is represented with a key
// (or keys). It can be an assignment or an object (both normal and nested)
type ObjectItem struct {
// keys is only one length long if it's of type assignment. If it's a
// nested object it can be larger than one. In that case "assign" is
// invalid as there is no assignments for a nested object.
keys []*ObjectKey
// assign contains the position of "=", if any
assign scanner.Pos
// val is the item itself. It can be an object,list, number, bool or a
// string. If key length is larger than one, val can be only of type
// Object.
val Node
}
func (o *ObjectItem) Pos() scanner.Pos {
return o.keys[0].Pos()
}
// ObjectKeys are either an identifier or of type string.
type ObjectKey struct {
token scanner.Token
}
func (o *ObjectKey) Pos() scanner.Pos {
return o.token.Pos
}
// isValid() returns true if the underlying identifier satisfies one of the
// valid types (IDENT or STRING)
func (o *ObjectKey) isValid() bool {
switch o.token.Type {
case scanner.IDENT, scanner.STRING:
return true
default:
return false
}
}
// LiteralType represents a literal of basic type. Valid types are:
// scanner.NUMBER, scanner.FLOAT, scanner.BOOL and scanner.STRING
type LiteralType struct {
token scanner.Token
}
// isValid() returns true if the underlying identifier satisfies one of the
// valid types.
func (l *LiteralType) isValid() bool {
switch l.token.Type {
case scanner.NUMBER, scanner.FLOAT, scanner.BOOL, scanner.STRING:
return true
default:
return false
}
}
func (l *LiteralType) Pos() scanner.Pos {
return l.token.Pos
}
// ListStatement represents a HCL List type
type ListType struct {
lbrack scanner.Pos // position of "["
rbrack scanner.Pos // position of "]"
list []Node // the elements in lexical order
}
func (l *ListType) Pos() scanner.Pos {
return l.lbrack
}
// ObjectType represents a HCL Object Type
type ObjectType struct {
lbrace scanner.Pos // position of "{"
rbrace scanner.Pos // position of "}"
list []Node // the nodes in lexical order
}
func (b *ObjectType) Pos() scanner.Pos {
return b.lbrace
}

View File

@ -4,14 +4,16 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/fatih/hcl/ast"
"github.com/fatih/hcl/scanner" "github.com/fatih/hcl/scanner"
"github.com/fatih/hcl/token"
) )
type Parser struct { type Parser struct {
sc *scanner.Scanner sc *scanner.Scanner
tok scanner.Token // last read token tok token.Token // last read token
prevTok scanner.Token // previous read token prevTok token.Token // previous read token
enableTrace bool enableTrace bool
indent int indent int
@ -27,9 +29,9 @@ func New(src []byte) *Parser {
var errEofToken = errors.New("EOF token found") var errEofToken = errors.New("EOF token found")
// Parse returns the fully parsed source and returns the abstract syntax tree. // Parse returns the fully parsed source and returns the abstract syntax tree.
func (p *Parser) Parse() (Node, error) { func (p *Parser) Parse() (ast.Node, error) {
defer un(trace(p, "ParseObjectList")) defer un(trace(p, "ParseObjectList"))
node := &ObjectList{} node := &ast.ObjectList{}
for { for {
n, err := p.parseObjectItem() n, err := p.parseObjectItem()
@ -41,14 +43,14 @@ func (p *Parser) Parse() (Node, error) {
} }
// we successfully parsed a node, add it to the final source node // we successfully parsed a node, add it to the final source node
node.add(n) node.Add(n)
} }
return node, nil return node, nil
} }
// parseObjectItem parses a single object item // parseObjectItem parses a single object item
func (p *Parser) parseObjectItem() (*ObjectItem, error) { func (p *Parser) parseObjectItem() (*ast.ObjectItem, error) {
defer un(trace(p, "ParseObjectItem")) defer un(trace(p, "ParseObjectItem"))
keys, err := p.parseObjectKey() keys, err := p.parseObjectKey()
@ -58,19 +60,19 @@ func (p *Parser) parseObjectItem() (*ObjectItem, error) {
// either an assignment or object // either an assignment or object
switch p.tok.Type { switch p.tok.Type {
case scanner.ASSIGN: case token.ASSIGN:
o := &ObjectItem{ o := &ast.ObjectItem{
keys: keys, Keys: keys,
assign: p.tok.Pos, Assign: p.tok.Pos,
} }
o.val, err = p.parseType() o.Val, err = p.parseType()
if err != nil { if err != nil {
return nil, err return nil, err
} }
return o, nil return o, nil
case scanner.LBRACE: case token.LBRACE:
if len(keys) > 1 { if len(keys) > 1 {
// nested object // nested object
fmt.Println("nested object") fmt.Println("nested object")
@ -85,20 +87,20 @@ func (p *Parser) parseObjectItem() (*ObjectItem, error) {
// parseType parses any type of Type, such as number, bool, string, object or // parseType parses any type of Type, such as number, bool, string, object or
// list. // list.
func (p *Parser) parseType() (Node, error) { func (p *Parser) parseType() (ast.Node, error) {
defer un(trace(p, "ParseType")) defer un(trace(p, "ParseType"))
tok := p.scan() tok := p.scan()
switch tok.Type { switch tok.Type {
case scanner.NUMBER, scanner.FLOAT, scanner.BOOL, scanner.STRING: case token.NUMBER, token.FLOAT, token.BOOL, token.STRING:
return p.parseLiteralType() return p.parseLiteralType()
case scanner.LBRACE: case token.LBRACE:
return p.parseObjectType() return p.parseObjectType()
case scanner.LBRACK: case token.LBRACK:
return p.parseListType() return p.parseListType()
case scanner.COMMENT: case token.COMMENT:
// implement comment // implement comment
case scanner.EOF: case token.EOF:
return nil, errEofToken return nil, errEofToken
} }
@ -106,18 +108,18 @@ func (p *Parser) parseType() (Node, error) {
} }
// parseObjectKey parses an object key and returns a ObjectKey AST // parseObjectKey parses an object key and returns a ObjectKey AST
func (p *Parser) parseObjectKey() ([]*ObjectKey, error) { func (p *Parser) parseObjectKey() ([]*ast.ObjectKey, error) {
tok := p.scan() tok := p.scan()
if tok.Type == scanner.EOF { if tok.Type == token.EOF {
return nil, errEofToken return nil, errEofToken
} }
keys := make([]*ObjectKey, 0) keys := make([]*ast.ObjectKey, 0)
switch tok.Type { switch tok.Type {
case scanner.IDENT, scanner.STRING: case token.IDENT, token.STRING:
// add first found token // add first found token
keys = append(keys, &ObjectKey{token: tok}) keys = append(keys, &ast.ObjectKey{Token: tok})
default: default:
return nil, fmt.Errorf("expected: IDENT | STRING got: %s", tok.Type) return nil, fmt.Errorf("expected: IDENT | STRING got: %s", tok.Type)
} }
@ -131,7 +133,7 @@ func (p *Parser) parseObjectKey() ([]*ObjectKey, error) {
for { for {
tok := p.scan() tok := p.scan()
switch tok.Type { switch tok.Type {
case scanner.ASSIGN: case token.ASSIGN:
// assignment or object only, but not nested objects. this is not // assignment or object only, but not nested objects. this is not
// allowed: `foo bar = {}` // allowed: `foo bar = {}`
if nestedObj { if nestedObj {
@ -139,13 +141,13 @@ func (p *Parser) parseObjectKey() ([]*ObjectKey, error) {
} }
return keys, nil return keys, nil
case scanner.LBRACE: case token.LBRACE:
// object // object
return keys, nil return keys, nil
case scanner.IDENT, scanner.STRING: case token.IDENT, token.STRING:
// nested object // nested object
nestedObj = true nestedObj = true
keys = append(keys, &ObjectKey{token: tok}) keys = append(keys, &ast.ObjectKey{Token: tok})
default: default:
return nil, fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", tok.Type) return nil, fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", tok.Type)
} }
@ -153,23 +155,23 @@ func (p *Parser) parseObjectKey() ([]*ObjectKey, error) {
} }
// parseLiteralType parses a literal type and returns a LiteralType AST // parseLiteralType parses a literal type and returns a LiteralType AST
func (p *Parser) parseLiteralType() (*LiteralType, error) { func (p *Parser) parseLiteralType() (*ast.LiteralType, error) {
defer un(trace(p, "ParseLiteral")) defer un(trace(p, "ParseLiteral"))
return &LiteralType{ return &ast.LiteralType{
token: p.tok, Token: p.tok,
}, nil }, nil
} }
// parseObjectType parses an object type and returns a ObjectType AST // parseObjectType parses an object type and returns a ObjectType AST
func (p *Parser) parseObjectType() (*ObjectType, error) { func (p *Parser) parseObjectType() (*ast.ObjectType, error) {
defer un(trace(p, "ParseObjectYpe")) defer un(trace(p, "ParseObjectYpe"))
return nil, errors.New("ObjectType is not implemented yet") return nil, errors.New("ObjectType is not implemented yet")
} }
// parseListType parses a list type and returns a ListType AST // parseListType parses a list type and returns a ListType AST
func (p *Parser) parseListType() (*ListType, error) { func (p *Parser) parseListType() (*ast.ListType, error) {
defer un(trace(p, "ParseListType")) defer un(trace(p, "ParseListType"))
return nil, errors.New("ListType is not implemented yet") return nil, errors.New("ListType is not implemented yet")
@ -177,7 +179,7 @@ func (p *Parser) parseListType() (*ListType, error) {
// scan returns the next token from the underlying scanner. // scan returns the next token from the underlying scanner.
// If a token has been unscanned then read that instead. // If a token has been unscanned then read that instead.
func (p *Parser) scan() scanner.Token { func (p *Parser) scan() token.Token {
// If we have a token on the buffer, then return it. // If we have a token on the buffer, then return it.
if p.n != 0 { if p.n != 0 {
p.n = 0 p.n = 0

View File

@ -7,7 +7,8 @@ import (
"runtime" "runtime"
"testing" "testing"
"github.com/fatih/hcl/scanner" "github.com/fatih/hcl/ast"
"github.com/fatih/hcl/token"
) )
func TestParseType(t *testing.T) { func TestParseType(t *testing.T) {
@ -22,7 +23,7 @@ func TestParseType(t *testing.T) {
fmt.Printf("n = %+v\n", n) fmt.Printf("n = %+v\n", n)
Walk(n, func(node Node) bool { ast.Walk(n, func(node ast.Node) bool {
fmt.Printf("node = %+v\n", node) fmt.Printf("node = %+v\n", node)
return true return true
}) })
@ -30,21 +31,21 @@ func TestParseType(t *testing.T) {
func TestObjectKey(t *testing.T) { func TestObjectKey(t *testing.T) {
keys := []struct { keys := []struct {
exp []scanner.TokenType exp []token.TokenType
src string src string
}{ }{
{[]scanner.TokenType{scanner.IDENT}, `foo {}`}, {[]token.TokenType{token.IDENT}, `foo {}`},
{[]scanner.TokenType{scanner.IDENT}, `foo = {}`}, {[]token.TokenType{token.IDENT}, `foo = {}`},
{[]scanner.TokenType{scanner.IDENT}, `foo = bar`}, {[]token.TokenType{token.IDENT}, `foo = bar`},
{[]scanner.TokenType{scanner.IDENT}, `foo = 123`}, {[]token.TokenType{token.IDENT}, `foo = 123`},
{[]scanner.TokenType{scanner.IDENT}, `foo = "${var.bar}`}, {[]token.TokenType{token.IDENT}, `foo = "${var.bar}`},
{[]scanner.TokenType{scanner.STRING}, `"foo" {}`}, {[]token.TokenType{token.STRING}, `"foo" {}`},
{[]scanner.TokenType{scanner.STRING}, `"foo" = {}`}, {[]token.TokenType{token.STRING}, `"foo" = {}`},
{[]scanner.TokenType{scanner.STRING}, `"foo" = "${var.bar}`}, {[]token.TokenType{token.STRING}, `"foo" = "${var.bar}`},
{[]scanner.TokenType{scanner.IDENT, scanner.IDENT}, `foo bar {}`}, {[]token.TokenType{token.IDENT, token.IDENT}, `foo bar {}`},
{[]scanner.TokenType{scanner.IDENT, scanner.STRING}, `foo "bar" {}`}, {[]token.TokenType{token.IDENT, token.STRING}, `foo "bar" {}`},
{[]scanner.TokenType{scanner.STRING, scanner.IDENT}, `"foo" bar {}`}, {[]token.TokenType{token.STRING, token.IDENT}, `"foo" bar {}`},
{[]scanner.TokenType{scanner.IDENT, scanner.IDENT, scanner.IDENT}, `foo bar baz {}`}, {[]token.TokenType{token.IDENT, token.IDENT, token.IDENT}, `foo bar baz {}`},
} }
for _, k := range keys { for _, k := range keys {
@ -54,9 +55,9 @@ func TestObjectKey(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
tokens := []scanner.TokenType{} tokens := []token.TokenType{}
for _, o := range keys { for _, o := range keys {
tokens = append(tokens, o.token.Type) tokens = append(tokens, o.Token.Type)
} }
equals(t, k.exp, tokens) equals(t, k.exp, tokens)

View File

@ -8,6 +8,8 @@ import (
"os" "os"
"unicode" "unicode"
"unicode/utf8" "unicode/utf8"
"github.com/fatih/hcl/token"
) )
// eof represents a marker rune for the end of the reader. // eof represents a marker rune for the end of the reader.
@ -19,8 +21,8 @@ type Scanner struct {
src []byte // Source buffer for immutable access src []byte // Source buffer for immutable access
// Source Position // Source Position
srcPos Pos // current position srcPos token.Pos // current position
prevPos Pos // previous position, used for peek() method prevPos token.Pos // previous position, used for peek() method
lastCharLen int // length of last character in bytes lastCharLen int // length of last character in bytes
lastLineLen int // length of last line in characters (for correct column reporting) lastLineLen int // length of last line in characters (for correct column reporting)
@ -30,7 +32,7 @@ type Scanner struct {
// Error is called for each error encountered. If no Error // Error is called for each error encountered. If no Error
// function is set, the error is reported to os.Stderr. // function is set, the error is reported to os.Stderr.
Error func(pos Pos, msg string) Error func(pos token.Pos, msg string)
// ErrorCount is incremented by one for each error encountered. // ErrorCount is incremented by one for each error encountered.
ErrorCount int ErrorCount int
@ -39,7 +41,7 @@ type Scanner struct {
// Scan. The Filename field is always left untouched by the Scanner. If // Scan. The Filename field is always left untouched by the Scanner. If
// an error is reported (via Error) and Position is invalid, the scanner is // an error is reported (via Error) and Position is invalid, the scanner is
// not inside a token. // not inside a token.
tokPos Pos tokPos token.Pos
} }
// New creates and initializes a new instance of Scanner using src as // New creates and initializes a new instance of Scanner using src as
@ -117,7 +119,7 @@ func (s *Scanner) peek() rune {
} }
// Scan scans the next token and returns the token. // Scan scans the next token and returns the token.
func (s *Scanner) Scan() Token { func (s *Scanner) Scan() token.Token {
ch := s.next() ch := s.next()
// skip white space // skip white space
@ -125,7 +127,7 @@ func (s *Scanner) Scan() Token {
ch = s.next() ch = s.next()
} }
var tok TokenType var tok token.TokenType
// token text markings // token text markings
s.tokStart = s.srcPos.Offset - s.lastCharLen s.tokStart = s.srcPos.Offset - s.lastCharLen
@ -147,47 +149,47 @@ func (s *Scanner) Scan() Token {
switch { switch {
case isLetter(ch): case isLetter(ch):
tok = IDENT tok = token.IDENT
lit := s.scanIdentifier() lit := s.scanIdentifier()
if lit == "true" || lit == "false" { if lit == "true" || lit == "false" {
tok = BOOL tok = token.BOOL
} }
case isDecimal(ch): case isDecimal(ch):
tok = s.scanNumber(ch) tok = s.scanNumber(ch)
default: default:
switch ch { switch ch {
case eof: case eof:
tok = EOF tok = token.EOF
case '"': case '"':
tok = STRING tok = token.STRING
s.scanString() s.scanString()
case '#', '/': case '#', '/':
tok = COMMENT tok = token.COMMENT
s.scanComment(ch) s.scanComment(ch)
case '.': case '.':
tok = PERIOD tok = token.PERIOD
ch = s.peek() ch = s.peek()
if isDecimal(ch) { if isDecimal(ch) {
tok = FLOAT tok = token.FLOAT
ch = s.scanMantissa(ch) ch = s.scanMantissa(ch)
ch = s.scanExponent(ch) ch = s.scanExponent(ch)
} }
case '[': case '[':
tok = LBRACK tok = token.LBRACK
case ']': case ']':
tok = RBRACK tok = token.RBRACK
case '{': case '{':
tok = LBRACE tok = token.LBRACE
case '}': case '}':
tok = RBRACE tok = token.RBRACE
case ',': case ',':
tok = COMMA tok = token.COMMA
case '=': case '=':
tok = ASSIGN tok = token.ASSIGN
case '+': case '+':
tok = ADD tok = token.ADD
case '-': case '-':
tok = SUB tok = token.SUB
default: default:
s.err("illegal char") s.err("illegal char")
} }
@ -203,7 +205,7 @@ func (s *Scanner) Scan() Token {
} }
s.tokStart = s.tokEnd // ensure idempotency of tokenText() call s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
return Token{ return token.Token{
Type: tok, Type: tok,
Pos: s.tokPos, Pos: s.tokPos,
Text: tokenText, Text: tokenText,
@ -244,7 +246,7 @@ func (s *Scanner) scanComment(ch rune) {
} }
// scanNumber scans a HCL number definition starting with the given rune // scanNumber scans a HCL number definition starting with the given rune
func (s *Scanner) scanNumber(ch rune) TokenType { func (s *Scanner) scanNumber(ch rune) token.TokenType {
if ch == '0' { if ch == '0' {
// check for hexadecimal, octal or float // check for hexadecimal, octal or float
ch = s.next() ch = s.next()
@ -265,7 +267,7 @@ func (s *Scanner) scanNumber(ch rune) TokenType {
s.unread() s.unread()
} }
return NUMBER return token.NUMBER
} }
// now it's either something like: 0421(octal) or 0.1231(float) // now it's either something like: 0421(octal) or 0.1231(float)
@ -283,7 +285,7 @@ func (s *Scanner) scanNumber(ch rune) TokenType {
// literals of form 01e10 are treates as Numbers in HCL, which differs from Go. // literals of form 01e10 are treates as Numbers in HCL, which differs from Go.
if ch == 'e' || ch == 'E' { if ch == 'e' || ch == 'E' {
ch = s.scanExponent(ch) ch = s.scanExponent(ch)
return NUMBER return token.NUMBER
} }
if ch == '.' { if ch == '.' {
@ -293,7 +295,7 @@ func (s *Scanner) scanNumber(ch rune) TokenType {
ch = s.next() ch = s.next()
ch = s.scanExponent(ch) ch = s.scanExponent(ch)
} }
return FLOAT return token.FLOAT
} }
if illegalOctal { if illegalOctal {
@ -303,7 +305,7 @@ func (s *Scanner) scanNumber(ch rune) TokenType {
if ch != eof { if ch != eof {
s.unread() s.unread()
} }
return NUMBER return token.NUMBER
} }
s.scanMantissa(ch) s.scanMantissa(ch)
@ -311,7 +313,7 @@ func (s *Scanner) scanNumber(ch rune) TokenType {
// literals of form 1e10 are treates as Numbers in HCL, which differs from Go. // literals of form 1e10 are treates as Numbers in HCL, which differs from Go.
if ch == 'e' || ch == 'E' { if ch == 'e' || ch == 'E' {
ch = s.scanExponent(ch) ch = s.scanExponent(ch)
return NUMBER return token.NUMBER
} }
if ch == '.' { if ch == '.' {
@ -320,11 +322,11 @@ func (s *Scanner) scanNumber(ch rune) TokenType {
ch = s.next() ch = s.next()
ch = s.scanExponent(ch) ch = s.scanExponent(ch)
} }
return FLOAT return token.FLOAT
} }
s.unread() s.unread()
return NUMBER return token.NUMBER
} }
// scanMantissa scans the mantissa begining from the rune. It returns the next // scanMantissa scans the mantissa begining from the rune. It returns the next
@ -446,7 +448,7 @@ func (s *Scanner) scanIdentifier() string {
// recentPosition returns the position of the character immediately after the // recentPosition returns the position of the character immediately after the
// character or token returned by the last call to Scan. // character or token returned by the last call to Scan.
func (s *Scanner) recentPosition() (pos Pos) { func (s *Scanner) recentPosition() (pos token.Pos) {
pos.Offset = s.srcPos.Offset - s.lastCharLen pos.Offset = s.srcPos.Offset - s.lastCharLen
switch { switch {
case s.srcPos.Column > 0: case s.srcPos.Column > 0:

View File

@ -4,161 +4,163 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"testing" "testing"
"github.com/fatih/hcl/token"
) )
var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
type tokenPair struct { type tokenPair struct {
tok TokenType tok token.TokenType
text string text string
} }
var tokenLists = map[string][]tokenPair{ var tokenLists = map[string][]tokenPair{
"comment": []tokenPair{ "comment": []tokenPair{
{COMMENT, "//"}, {token.COMMENT, "//"},
{COMMENT, "////"}, {token.COMMENT, "////"},
{COMMENT, "// comment"}, {token.COMMENT, "// comment"},
{COMMENT, "// /* comment */"}, {token.COMMENT, "// /* comment */"},
{COMMENT, "// // comment //"}, {token.COMMENT, "// // comment //"},
{COMMENT, "//" + f100}, {token.COMMENT, "//" + f100},
{COMMENT, "#"}, {token.COMMENT, "#"},
{COMMENT, "##"}, {token.COMMENT, "##"},
{COMMENT, "# comment"}, {token.COMMENT, "# comment"},
{COMMENT, "# /* comment */"}, {token.COMMENT, "# /* comment */"},
{COMMENT, "# # comment #"}, {token.COMMENT, "# # comment #"},
{COMMENT, "#" + f100}, {token.COMMENT, "#" + f100},
{COMMENT, "/**/"}, {token.COMMENT, "/**/"},
{COMMENT, "/***/"}, {token.COMMENT, "/***/"},
{COMMENT, "/* comment */"}, {token.COMMENT, "/* comment */"},
{COMMENT, "/* // comment */"}, {token.COMMENT, "/* // comment */"},
{COMMENT, "/* /* comment */"}, {token.COMMENT, "/* /* comment */"},
{COMMENT, "/*\n comment\n*/"}, {token.COMMENT, "/*\n comment\n*/"},
{COMMENT, "/*" + f100 + "*/"}, {token.COMMENT, "/*" + f100 + "*/"},
}, },
"operator": []tokenPair{ "operator": []tokenPair{
{LBRACK, "["}, {token.LBRACK, "["},
{LBRACE, "{"}, {token.LBRACE, "{"},
{COMMA, ","}, {token.COMMA, ","},
{PERIOD, "."}, {token.PERIOD, "."},
{RBRACK, "]"}, {token.RBRACK, "]"},
{RBRACE, "}"}, {token.RBRACE, "}"},
{ASSIGN, "="}, {token.ASSIGN, "="},
{ADD, "+"}, {token.ADD, "+"},
{SUB, "-"}, {token.SUB, "-"},
}, },
"bool": []tokenPair{ "bool": []tokenPair{
{BOOL, "true"}, {token.BOOL, "true"},
{BOOL, "false"}, {token.BOOL, "false"},
}, },
"ident": []tokenPair{ "identoken.t": []tokenPair{
{IDENT, "a"}, {token.IDENT, "a"},
{IDENT, "a0"}, {token.IDENT, "a0"},
{IDENT, "foobar"}, {token.IDENT, "foobar"},
{IDENT, "abc123"}, {token.IDENT, "abc123"},
{IDENT, "LGTM"}, {token.IDENT, "LGTM"},
{IDENT, "_"}, {token.IDENT, "_"},
{IDENT, "_abc123"}, {token.IDENT, "_abc123"},
{IDENT, "abc123_"}, {token.IDENT, "abc123_"},
{IDENT, "_abc_123_"}, {token.IDENT, "_abc_123_"},
{IDENT, "_äöü"}, {token.IDENT, "_äöü"},
{IDENT, "_本"}, {token.IDENT, "_本"},
{IDENT, "äöü"}, {token.IDENT, "äöü"},
{IDENT, "本"}, {token.IDENT, "本"},
{IDENT, "a۰۱۸"}, {token.IDENT, "a۰۱۸"},
{IDENT, "foo६४"}, {token.IDENT, "foo६४"},
{IDENT, "bar"}, {token.IDENT, "bar"},
}, },
"string": []tokenPair{ "stritoken.ng": []tokenPair{
{STRING, `" "`}, {token.STRING, `" "`},
{STRING, `"a"`}, {token.STRING, `"a"`},
{STRING, `"本"`}, {token.STRING, `"本"`},
{STRING, `"\a"`}, {token.STRING, `"\a"`},
{STRING, `"\b"`}, {token.STRING, `"\b"`},
{STRING, `"\f"`}, {token.STRING, `"\f"`},
{STRING, `"\n"`}, {token.STRING, `"\n"`},
{STRING, `"\r"`}, {token.STRING, `"\r"`},
{STRING, `"\t"`}, {token.STRING, `"\t"`},
{STRING, `"\v"`}, {token.STRING, `"\v"`},
{STRING, `"\""`}, {token.STRING, `"\""`},
{STRING, `"\000"`}, {token.STRING, `"\000"`},
{STRING, `"\777"`}, {token.STRING, `"\777"`},
{STRING, `"\x00"`}, {token.STRING, `"\x00"`},
{STRING, `"\xff"`}, {token.STRING, `"\xff"`},
{STRING, `"\u0000"`}, {token.STRING, `"\u0000"`},
{STRING, `"\ufA16"`}, {token.STRING, `"\ufA16"`},
{STRING, `"\U00000000"`}, {token.STRING, `"\U00000000"`},
{STRING, `"\U0000ffAB"`}, {token.STRING, `"\U0000ffAB"`},
{STRING, `"` + f100 + `"`}, {token.STRING, `"` + f100 + `"`},
}, },
"number": []tokenPair{ "numbtoken.er": []tokenPair{
{NUMBER, "0"}, {token.NUMBER, "0"},
{NUMBER, "1"}, {token.NUMBER, "1"},
{NUMBER, "9"}, {token.NUMBER, "9"},
{NUMBER, "42"}, {token.NUMBER, "42"},
{NUMBER, "1234567890"}, {token.NUMBER, "1234567890"},
{NUMBER, "00"}, {token.NUMBER, "00"},
{NUMBER, "01"}, {token.NUMBER, "01"},
{NUMBER, "07"}, {token.NUMBER, "07"},
{NUMBER, "042"}, {token.NUMBER, "042"},
{NUMBER, "01234567"}, {token.NUMBER, "01234567"},
{NUMBER, "0x0"}, {token.NUMBER, "0x0"},
{NUMBER, "0x1"}, {token.NUMBER, "0x1"},
{NUMBER, "0xf"}, {token.NUMBER, "0xf"},
{NUMBER, "0x42"}, {token.NUMBER, "0x42"},
{NUMBER, "0x123456789abcDEF"}, {token.NUMBER, "0x123456789abcDEF"},
{NUMBER, "0x" + f100}, {token.NUMBER, "0x" + f100},
{NUMBER, "0X0"}, {token.NUMBER, "0X0"},
{NUMBER, "0X1"}, {token.NUMBER, "0X1"},
{NUMBER, "0XF"}, {token.NUMBER, "0XF"},
{NUMBER, "0X42"}, {token.NUMBER, "0X42"},
{NUMBER, "0X123456789abcDEF"}, {token.NUMBER, "0X123456789abcDEF"},
{NUMBER, "0X" + f100}, {token.NUMBER, "0X" + f100},
{NUMBER, "0e0"}, {token.NUMBER, "0e0"},
{NUMBER, "1e0"}, {token.NUMBER, "1e0"},
{NUMBER, "42e0"}, {token.NUMBER, "42e0"},
{NUMBER, "01234567890e0"}, {token.NUMBER, "01234567890e0"},
{NUMBER, "0E0"}, {token.NUMBER, "0E0"},
{NUMBER, "1E0"}, {token.NUMBER, "1E0"},
{NUMBER, "42E0"}, {token.NUMBER, "42E0"},
{NUMBER, "01234567890E0"}, {token.NUMBER, "01234567890E0"},
{NUMBER, "0e+10"}, {token.NUMBER, "0e+10"},
{NUMBER, "1e-10"}, {token.NUMBER, "1e-10"},
{NUMBER, "42e+10"}, {token.NUMBER, "42e+10"},
{NUMBER, "01234567890e-10"}, {token.NUMBER, "01234567890e-10"},
{NUMBER, "0E+10"}, {token.NUMBER, "0E+10"},
{NUMBER, "1E-10"}, {token.NUMBER, "1E-10"},
{NUMBER, "42E+10"}, {token.NUMBER, "42E+10"},
{NUMBER, "01234567890E-10"}, {token.NUMBER, "01234567890E-10"},
}, },
"float": []tokenPair{ "floatoken.t": []tokenPair{
{FLOAT, "0."}, {token.FLOAT, "0."},
{FLOAT, "1."}, {token.FLOAT, "1."},
{FLOAT, "42."}, {token.FLOAT, "42."},
{FLOAT, "01234567890."}, {token.FLOAT, "01234567890."},
{FLOAT, ".0"}, {token.FLOAT, ".0"},
{FLOAT, ".1"}, {token.FLOAT, ".1"},
{FLOAT, ".42"}, {token.FLOAT, ".42"},
{FLOAT, ".0123456789"}, {token.FLOAT, ".0123456789"},
{FLOAT, "0.0"}, {token.FLOAT, "0.0"},
{FLOAT, "1.0"}, {token.FLOAT, "1.0"},
{FLOAT, "42.0"}, {token.FLOAT, "42.0"},
{FLOAT, "01234567890.0"}, {token.FLOAT, "01234567890.0"},
{FLOAT, "01.8e0"}, {token.FLOAT, "01.8e0"},
{FLOAT, "1.4e0"}, {token.FLOAT, "1.4e0"},
{FLOAT, "42.2e0"}, {token.FLOAT, "42.2e0"},
{FLOAT, "01234567890.12e0"}, {token.FLOAT, "01234567890.12e0"},
{FLOAT, "0.E0"}, {token.FLOAT, "0.E0"},
{FLOAT, "1.12E0"}, {token.FLOAT, "1.12E0"},
{FLOAT, "42.123E0"}, {token.FLOAT, "42.123E0"},
{FLOAT, "01234567890.213E0"}, {token.FLOAT, "01234567890.213E0"},
{FLOAT, "0.2e+10"}, {token.FLOAT, "0.2e+10"},
{FLOAT, "1.2e-10"}, {token.FLOAT, "1.2e-10"},
{FLOAT, "42.54e+10"}, {token.FLOAT, "42.54e+10"},
{FLOAT, "01234567890.98e-10"}, {token.FLOAT, "01234567890.98e-10"},
{FLOAT, "0.1E+10"}, {token.FLOAT, "0.1E+10"},
{FLOAT, "1.1E-10"}, {token.FLOAT, "1.1E-10"},
{FLOAT, "42.1E+10"}, {token.FLOAT, "42.1E+10"},
{FLOAT, "01234567890.1E-10"}, {token.FLOAT, "01234567890.1E-10"},
}, },
} }
@ -184,7 +186,7 @@ func TestPosition(t *testing.T) {
s := New(buf.Bytes()) s := New(buf.Bytes())
pos := Pos{"", 4, 1, 5} pos := token.Pos{"", 4, 1, 5}
s.Scan() s.Scan()
for _, listName := range orderedTokenLists { for _, listName := range orderedTokenLists {
@ -270,63 +272,63 @@ func TestRealExample(t *testing.T) {
}` }`
literals := []struct { literals := []struct {
tokenType TokenType tokenType token.TokenType
literal string literal string
}{ }{
{COMMENT, `// This comes from Terraform, as a test`}, {token.COMMENT, `// This comes from Terraform, as a test`},
{IDENT, `variable`}, {token.IDENT, `variable`},
{STRING, `"foo"`}, {token.STRING, `"foo"`},
{LBRACE, `{`}, {token.LBRACE, `{`},
{IDENT, `default`}, {token.IDENT, `default`},
{ASSIGN, `=`}, {token.ASSIGN, `=`},
{STRING, `"bar"`}, {token.STRING, `"bar"`},
{IDENT, `description`}, {token.IDENT, `description`},
{ASSIGN, `=`}, {token.ASSIGN, `=`},
{STRING, `"bar"`}, {token.STRING, `"bar"`},
{RBRACE, `}`}, {token.RBRACE, `}`},
{IDENT, `provider`}, {token.IDENT, `provider`},
{STRING, `"aws"`}, {token.STRING, `"aws"`},
{LBRACE, `{`}, {token.LBRACE, `{`},
{IDENT, `access_key`}, {token.IDENT, `access_key`},
{ASSIGN, `=`}, {token.ASSIGN, `=`},
{STRING, `"foo"`}, {token.STRING, `"foo"`},
{IDENT, `secret_key`}, {token.IDENT, `secret_key`},
{ASSIGN, `=`}, {token.ASSIGN, `=`},
{STRING, `"bar"`}, {token.STRING, `"bar"`},
{RBRACE, `}`}, {token.RBRACE, `}`},
{IDENT, `resource`}, {token.IDENT, `resource`},
{STRING, `"aws_security_group"`}, {token.STRING, `"aws_security_group"`},
{STRING, `"firewall"`}, {token.STRING, `"firewall"`},
{LBRACE, `{`}, {token.LBRACE, `{`},
{IDENT, `count`}, {token.IDENT, `count`},
{ASSIGN, `=`}, {token.ASSIGN, `=`},
{NUMBER, `5`}, {token.NUMBER, `5`},
{RBRACE, `}`}, {token.RBRACE, `}`},
{IDENT, `resource`}, {token.IDENT, `resource`},
{IDENT, `aws_instance`}, {token.IDENT, `aws_instance`},
{STRING, `"web"`}, {token.STRING, `"web"`},
{LBRACE, `{`}, {token.LBRACE, `{`},
{IDENT, `ami`}, {token.IDENT, `ami`},
{ASSIGN, `=`}, {token.ASSIGN, `=`},
{STRING, `"${var.foo}"`}, {token.STRING, `"${var.foo}"`},
{IDENT, `security_groups`}, {token.IDENT, `security_groups`},
{ASSIGN, `=`}, {token.ASSIGN, `=`},
{LBRACK, `[`}, {token.LBRACK, `[`},
{STRING, `"foo"`}, {token.STRING, `"foo"`},
{COMMA, `,`}, {token.COMMA, `,`},
{STRING, `"${aws_security_group.firewall.foo}"`}, {token.STRING, `"${aws_security_group.firewall.foo}"`},
{RBRACK, `]`}, {token.RBRACK, `]`},
{IDENT, `network_interface`}, {token.IDENT, `network_interface`},
{LBRACE, `{`}, {token.LBRACE, `{`},
{IDENT, `device_index`}, {token.IDENT, `device_index`},
{ASSIGN, `=`}, {token.ASSIGN, `=`},
{NUMBER, `0`}, {token.NUMBER, `0`},
{IDENT, `description`}, {token.IDENT, `description`},
{ASSIGN, `=`}, {token.ASSIGN, `=`},
{STRING, `"Main network interface"`}, {token.STRING, `"Main network interface"`},
{RBRACE, `}`}, {token.RBRACE, `}`},
{RBRACE, `}`}, {token.RBRACE, `}`},
{EOF, ``}, {token.EOF, ``},
} }
s := New([]byte(complexHCL)) s := New([]byte(complexHCL))
@ -344,32 +346,32 @@ func TestRealExample(t *testing.T) {
} }
func TestError(t *testing.T) { func TestError(t *testing.T) {
testError(t, "\x80", "1:1", "illegal UTF-8 encoding", ILLEGAL) testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
testError(t, "\xff", "1:1", "illegal UTF-8 encoding", ILLEGAL) testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
testError(t, "ab\x80", "1:3", "illegal UTF-8 encoding", IDENT) testError(t, "ab\x80", "1:3", "illegal UTF-8 encoding", token.IDENT)
testError(t, "abc\xff", "1:4", "illegal UTF-8 encoding", IDENT) testError(t, "abc\xff", "1:4", "illegal UTF-8 encoding", token.IDENT)
testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", STRING) testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING)
testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", STRING) testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING)
testError(t, `01238`, "1:6", "illegal octal number", NUMBER) testError(t, `01238`, "1:6", "illegal octal number", token.NUMBER)
testError(t, `01238123`, "1:9", "illegal octal number", NUMBER) testError(t, `01238123`, "1:9", "illegal octal number", token.NUMBER)
testError(t, `0x`, "1:3", "illegal hexadecimal number", NUMBER) testError(t, `0x`, "1:3", "illegal hexadecimal number", token.NUMBER)
testError(t, `0xg`, "1:3", "illegal hexadecimal number", NUMBER) testError(t, `0xg`, "1:3", "illegal hexadecimal number", token.NUMBER)
testError(t, `'aa'`, "1:1", "illegal char", ILLEGAL) testError(t, `'aa'`, "1:1", "illegal char", token.ILLEGAL)
testError(t, `"`, "1:2", "literal not terminated", STRING) testError(t, `"`, "1:2", "literal not terminated", token.STRING)
testError(t, `"abc`, "1:5", "literal not terminated", STRING) testError(t, `"abc`, "1:5", "literal not terminated", token.STRING)
testError(t, `"abc`+"\n", "1:5", "literal not terminated", STRING) testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING)
testError(t, `/*/`, "1:4", "comment not terminated", COMMENT) testError(t, `/*/`, "1:4", "comment not terminated", token.COMMENT)
} }
func testError(t *testing.T, src, pos, msg string, tok TokenType) { func testError(t *testing.T, src, pos, msg string, tok token.TokenType) {
s := New([]byte(src)) s := New([]byte(src))
errorCalled := false errorCalled := false
s.Error = func(p Pos, m string) { s.Error = func(p token.Pos, m string) {
if !errorCalled { if !errorCalled {
if pos != p.String() { if pos != p.String() {
t.Errorf("pos = %q, want %q for %q", p, pos, src) t.Errorf("pos = %q, want %q for %q", p, pos, src)

View File

@ -1,4 +1,4 @@
package scanner package token
import "fmt" import "fmt"

View File

@ -1,4 +1,4 @@
package scanner package token
import ( import (
"fmt" "fmt"

View File

@ -1,4 +1,4 @@
package scanner package token
import "testing" import "testing"