Merge pull request #44 from hashicorp/f-new-json

Convert JSON parser to pure Go
Mitchell Hashimoto · 2015-11-08 16:29:24 -08:00 · commit 9e5a4f89ab
21 changed files with 1617 additions and 1374 deletions


@@ -14,11 +14,10 @@ type Node interface {
Pos() token.Pos
}
-func (File) node() {}
-func (ObjectList) node() {}
-func (ObjectKey) node() {}
-func (ObjectItem) node() {}
+func (File) node() {}
+func (ObjectList) node() {}
+func (ObjectKey) node() {}
+func (ObjectItem) node() {}
func (Comment) node() {}
func (CommentGroup) node() {}
func (ObjectType) node() {}
@@ -62,7 +61,7 @@ func (o *ObjectList) Filter(keys ...string) *ObjectList {
match := true
for i, key := range item.Keys[:len(keys)] {
-key := key.Token.Text
+key := key.Token.Value().(string)
if key != keys[i] && !strings.EqualFold(key, keys[i]) {
match = false
break


@@ -14,6 +14,7 @@ type Token struct {
Type Type
Pos Pos
Text string
+JSON bool
}
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
@@ -138,7 +139,15 @@ func (t Token) Value() interface{} {
case IDENT:
return t.Text
case STRING:
-v, err := hclstrconv.Unquote(t.Text)
+// Determine the Unquote method to use. If it came from JSON,
+// then we need to use the built-in unquote since we have to
+// escape interpolations there.
+f := hclstrconv.Unquote
+if t.JSON {
+f = strconv.Unquote
+}
+v, err := f(t.Text)
if err != nil {
panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
}
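To illustrate the dispatch above: strings that came from JSON are unquoted with the standard library, while native HCL strings go through hclstrconv, which knows about HCL's interpolation escapes. A minimal sketch of the new JSON path, with a hypothetical input:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Raw token text as the JSON scanner stores it: surrounding quotes are
	// kept, and any interpolation survives as plain characters.
	raw := `"${var.foo}"`

	// The JSON path is a plain Go-style unquote, no interpolation handling.
	v, err := strconv.Unquote(raw)
	fmt.Println(v, err) // ${var.foo} <nil>
}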


@@ -1,4 +0,0 @@
package json
// This is the directory where our test fixtures are.
const fixtureDir = "./test-fixtures"


@@ -1,256 +0,0 @@
package json
import (
"bytes"
"fmt"
"strconv"
"unicode"
"unicode/utf8"
)
//go:generate go tool yacc -p "json" parse.y
// This marks the end of the lexer
const lexEOF = 0
// The parser uses the type <prefix>Lex as a lexer. It must provide
// the methods Lex(*<prefix>SymType) int and Error(string).
type jsonLex struct {
Input string
pos int
width int
col, line int
err error
}
// The parser calls this method to get each new token.
func (x *jsonLex) Lex(yylval *jsonSymType) int {
for {
c := x.next()
if c == lexEOF {
return lexEOF
}
// Ignore all whitespace except a newline which we handle
// specially later.
if unicode.IsSpace(c) {
continue
}
// If it is a number, lex the number
if c >= '0' && c <= '9' {
x.backup()
return x.lexNumber(yylval)
}
switch c {
case 'e':
fallthrough
case 'E':
switch x.next() {
case '+':
return EPLUS
case '-':
return EMINUS
default:
x.backup()
return EPLUS
}
case '.':
return PERIOD
case '-':
return MINUS
case ':':
return COLON
case ',':
return COMMA
case '[':
return LEFTBRACKET
case ']':
return RIGHTBRACKET
case '{':
return LEFTBRACE
case '}':
return RIGHTBRACE
case '"':
return x.lexString(yylval)
default:
x.backup()
return x.lexId(yylval)
}
}
}
// lexId lexes an identifier
func (x *jsonLex) lexId(yylval *jsonSymType) int {
var b bytes.Buffer
first := true
for {
c := x.next()
if c == lexEOF {
break
}
if !unicode.IsDigit(c) && !unicode.IsLetter(c) && c != '_' && c != '-' {
x.backup()
if first {
x.createErr("Invalid identifier")
return lexEOF
}
break
}
first = false
if _, err := b.WriteRune(c); err != nil {
return lexEOF
}
}
switch v := b.String(); v {
case "true":
return TRUE
case "false":
return FALSE
case "null":
return NULL
default:
x.createErr(fmt.Sprintf("Invalid identifier: %s", v))
return lexEOF
}
}
// lexNumber lexes out a number
func (x *jsonLex) lexNumber(yylval *jsonSymType) int {
var b bytes.Buffer
gotPeriod := false
for {
c := x.next()
if c == lexEOF {
break
}
if c == '.' {
if gotPeriod {
x.backup()
break
}
gotPeriod = true
} else if c < '0' || c > '9' {
x.backup()
break
}
if _, err := b.WriteRune(c); err != nil {
x.createErr(fmt.Sprintf("Internal error: %s", err))
return lexEOF
}
}
if !gotPeriod {
v, err := strconv.ParseInt(b.String(), 0, 0)
if err != nil {
x.createErr(fmt.Sprintf("Expected number: %s", err))
return lexEOF
}
yylval.num = int(v)
return NUMBER
}
f, err := strconv.ParseFloat(b.String(), 64)
if err != nil {
x.createErr(fmt.Sprintf("Expected float: %s", err))
return lexEOF
}
yylval.f = float64(f)
return FLOAT
}
// lexString extracts a string from the input
func (x *jsonLex) lexString(yylval *jsonSymType) int {
var b bytes.Buffer
for {
c := x.next()
if c == lexEOF {
break
}
// String end
if c == '"' {
break
}
// If we're escaping a quote, then escape the quote
if c == '\\' {
n := x.next()
switch n {
case '"':
c = n
case 'n':
c = '\n'
case '\\':
c = n
default:
x.backup()
}
}
if _, err := b.WriteRune(c); err != nil {
return lexEOF
}
}
yylval.str = b.String()
return STRING
}
// Return the next rune for the lexer.
func (x *jsonLex) next() rune {
if int(x.pos) >= len(x.Input) {
x.width = 0
return lexEOF
}
r, w := utf8.DecodeRuneInString(x.Input[x.pos:])
x.width = w
x.pos += x.width
x.col += 1
if x.line == 0 {
x.line = 1
}
if r == '\n' {
x.line += 1
x.col = 0
}
return r
}
// peek returns but does not consume the next rune in the input
func (x *jsonLex) peek() rune {
r := x.next()
x.backup()
return r
}
// backup steps back one rune. Can only be called once per next.
func (x *jsonLex) backup() {
x.col -= 1
x.pos -= x.width
}
// createErr records the given error
func (x *jsonLex) createErr(msg string) {
x.err = fmt.Errorf("Line %d, column %d: %s", x.line, x.col, msg)
}
// The parser calls this method on a parse error.
func (x *jsonLex) Error(s string) {
x.createErr(s)
}


@@ -1,78 +0,0 @@
package json
import (
"io/ioutil"
"path/filepath"
"reflect"
"testing"
)
func TestLexJson(t *testing.T) {
cases := []struct {
Input string
Output []int
}{
{
"basic.json",
[]int{
LEFTBRACE,
STRING, COLON, STRING,
RIGHTBRACE,
lexEOF,
},
},
{
"array.json",
[]int{
LEFTBRACE,
STRING, COLON, LEFTBRACKET,
NUMBER, COMMA, NUMBER, COMMA, STRING,
RIGHTBRACKET, COMMA,
STRING, COLON, STRING,
RIGHTBRACE,
lexEOF,
},
},
{
"object.json",
[]int{
LEFTBRACE,
STRING, COLON, LEFTBRACE,
STRING, COLON, LEFTBRACKET,
NUMBER, COMMA, NUMBER,
RIGHTBRACKET,
RIGHTBRACE,
RIGHTBRACE,
lexEOF,
},
},
}
for _, tc := range cases {
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Input))
if err != nil {
t.Fatalf("err: %s", err)
}
l := &jsonLex{Input: string(d)}
var actual []int
for {
token := l.Lex(new(jsonSymType))
actual = append(actual, token)
if token == lexEOF {
break
}
if len(actual) > 500 {
t.Fatalf("Input:%s\n\nExausted.", tc.Input)
}
}
if !reflect.DeepEqual(actual, tc.Output) {
t.Fatalf(
"Input: %s\n\nBad: %#v\n\nExpected: %#v",
tc.Input, actual, tc.Output)
}
}
}


@@ -1,227 +0,0 @@
// This is the yacc input for creating the parser for HCL JSON.
%{
package json
import (
"fmt"
"strconv"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/token"
)
%}
%union {
f float64
list []ast.Node
node ast.Node
num int
str string
obj *ast.ObjectType
objitem *ast.ObjectItem
objlist *ast.ObjectList
}
%type <f> float
%type <list> array elements
%type <node> number value
%type <num> int
%type <obj> object
%type <objitem> pair
%type <objlist> members
%type <str> exp
%token <f> FLOAT
%token <num> NUMBER
%token <str> COLON COMMA IDENTIFIER EQUAL NEWLINE STRING
%token <str> LEFTBRACE RIGHTBRACE LEFTBRACKET RIGHTBRACKET
%token <str> TRUE FALSE NULL MINUS PERIOD EPLUS EMINUS
%%
top:
object
{
jsonResult = &ast.File{
Node: $1.List,
}
}
object:
LEFTBRACE members RIGHTBRACE
{
$$ = &ast.ObjectType{
List: $2,
}
}
| LEFTBRACE RIGHTBRACE
{
$$ = &ast.ObjectType{}
}
members:
pair
{
$$ = &ast.ObjectList{
Items: []*ast.ObjectItem{$1},
}
}
| members COMMA pair
{
$1.Items = append($1.Items, $3)
$$ = $1
}
pair:
STRING COLON value
{
$$ = &ast.ObjectItem{
Keys: []*ast.ObjectKey{
&ast.ObjectKey{
Token: token.Token{
Type: token.IDENT,
Text: $1,
},
},
},
Val: $3,
}
}
value:
STRING
{
$$ = &ast.LiteralType{
Token: token.Token{
Type: token.STRING,
Text: fmt.Sprintf(`"%s"`, $1),
},
}
}
| number
{
$$ = $1
}
| object
{
$$ = $1
}
| array
{
$$ = &ast.ListType{
List: $1,
}
}
| TRUE
{
$$ = &ast.LiteralType{
Token: token.Token{Type: token.BOOL, Text: "true"},
}
}
| FALSE
{
$$ = &ast.LiteralType{
Token: token.Token{Type: token.BOOL, Text: "false"},
}
}
| NULL
{
$$ = &ast.LiteralType{
Token: token.Token{Type: token.STRING, Text: ""},
}
}
array:
LEFTBRACKET RIGHTBRACKET
{
$$ = nil
}
| LEFTBRACKET elements RIGHTBRACKET
{
$$ = $2
}
elements:
value
{
$$ = []ast.Node{$1}
}
| elements COMMA value
{
$$ = append($1, $3)
}
number:
int
{
$$ = &ast.LiteralType{
Token: token.Token{
Type: token.NUMBER,
Text: fmt.Sprintf("%d", $1),
},
}
}
| float
{
$$ = &ast.LiteralType{
Token: token.Token{
Type: token.FLOAT,
Text: fmt.Sprintf("%f", $1),
},
}
}
| int exp
{
fs := fmt.Sprintf("%d%s", $1, $2)
$$ = &ast.LiteralType{
Token: token.Token{
Type: token.FLOAT,
Text: fs,
},
}
}
| float exp
{
fs := fmt.Sprintf("%f%s", $1, $2)
$$ = &ast.LiteralType{
Token: token.Token{
Type: token.FLOAT,
Text: fs,
},
}
}
int:
MINUS int
{
$$ = $2 * -1
}
| NUMBER
{
$$ = $1
}
float:
MINUS float
{
$$ = $2 * -1
}
| FLOAT
{
$$ = $1
}
exp:
EPLUS NUMBER
{
$$ = "e" + strconv.FormatInt(int64($2), 10)
}
| EMINUS NUMBER
{
$$ = "e-" + strconv.FormatInt(int64($2), 10)
}
%%


@@ -1,43 +0,0 @@
package json
import (
"io/ioutil"
"path/filepath"
"testing"
)
func TestParse(t *testing.T) {
cases := []struct {
Name string
Err bool
}{
{
"basic.json",
false,
},
{
"object.json",
false,
},
{
"array.json",
false,
},
{
"types.json",
false,
},
}
for _, tc := range cases {
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
if err != nil {
t.Fatalf("err: %s", err)
}
_, err = Parse(string(d))
if (err != nil) != tc.Err {
t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
}
}
}


@@ -1,54 +1,12 @@
-package json
+package parser
import (
-"sync"
-"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl/hcl/ast"
)
-// jsonErrors are the errors built up from parsing. These should not
-// be accessed directly.
-var jsonErrors []error
-var jsonLock sync.Mutex
-var jsonResult *ast.File
-// Parse parses the given string and returns the result.
-func Parse(v string) (*ast.File, error) {
-jsonLock.Lock()
-defer jsonLock.Unlock()
-jsonErrors = nil
-jsonResult = nil
-// Parse
-lex := &jsonLex{Input: v}
-jsonParse(lex)
-// If we have an error in the lexer itself, return it
-if lex.err != nil {
-return nil, lex.err
-}
-// If we have a result, flatten it. This is an operation we take on
-// to make our AST look more like traditional HCL. This makes parsing
-// it a lot easier later.
-if jsonResult != nil {
-flattenObjects(jsonResult)
-}
-// Build up the errors
-var err error
-if len(jsonErrors) > 0 {
-err = &multierror.Error{Errors: jsonErrors}
-jsonResult = nil
-}
-return jsonResult, err
-}
// flattenObjects takes an AST node, walks it, and flattens
func flattenObjects(node ast.Node) {
-ast.Walk(jsonResult, func(n ast.Node) bool {
+ast.Walk(node, func(n ast.Node) bool {
// We only care about lists, because this is what we modify
list, ok := n.(*ast.ObjectList)
if !ok {

json/parser/parser.go (new file, 297 lines)

@@ -0,0 +1,297 @@
package parser
import (
"errors"
"fmt"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/json/scanner"
"github.com/hashicorp/hcl/json/token"
)
type Parser struct {
sc *scanner.Scanner
// Last read token
tok token.Token
commaPrev token.Token
enableTrace bool
indent int
n int // buffer size (max = 1)
}
func newParser(src []byte) *Parser {
return &Parser{
sc: scanner.New(src),
}
}
// Parse parses the given source and returns the abstract syntax tree.
func Parse(src []byte) (*ast.File, error) {
p := newParser(src)
return p.Parse()
}
var errEofToken = errors.New("EOF token found")
// Parse parses the given source and returns the abstract syntax tree.
func (p *Parser) Parse() (*ast.File, error) {
f := &ast.File{}
var err, scerr error
p.sc.Error = func(pos token.Pos, msg string) {
scerr = fmt.Errorf("%s: %s", pos, msg)
}
// The root must be an object in JSON
object, err := p.object()
if scerr != nil {
return nil, scerr
}
if err != nil {
return nil, err
}
// We make our final node an object list so it is more HCL compatible
f.Node = object.List
// Flatten it, which finds patterns and turns them into more HCL-like
// AST trees.
flattenObjects(f.Node)
return f, nil
}
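For orientation, a minimal usage sketch of the new entry point; the input here is hypothetical, and the import path is the one introduced by this commit:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/json/parser"
)

func main() {
	src := []byte(`{"foo": {"bar": [1, 2]}}`)

	// Parse returns an *ast.File whose Node is an *ast.ObjectList, matching
	// the shape produced by the native HCL parser.
	f, err := parser.Parse(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", f.Node) // *ast.ObjectList
}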
func (p *Parser) objectList() (*ast.ObjectList, error) {
defer un(trace(p, "ParseObjectList"))
node := &ast.ObjectList{}
for {
n, err := p.objectItem()
if err == errEofToken {
break // we are finished
}
// We don't return a nil node here because the caller might want to use
// the already collected items.
if err != nil {
return node, err
}
node.Add(n)
// Check for a followup comma. If it isn't a comma, then we're done
if tok := p.scan(); tok.Type != token.COMMA {
break
}
}
return node, nil
}
// objectItem parses a single object item
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
defer un(trace(p, "ParseObjectItem"))
keys, err := p.objectKey()
if err != nil {
return nil, err
}
o := &ast.ObjectItem{
Keys: keys,
}
switch p.tok.Type {
case token.COLON:
o.Val, err = p.objectValue()
if err != nil {
return nil, err
}
}
return o, nil
}
// objectKey parses an object key and returns an ObjectKey AST
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
keyCount := 0
keys := make([]*ast.ObjectKey, 0)
for {
tok := p.scan()
switch tok.Type {
case token.EOF:
return nil, errEofToken
case token.STRING:
keyCount++
keys = append(keys, &ast.ObjectKey{
Token: p.tok.HCLToken(),
})
case token.COLON:
// Done
return keys, nil
case token.ILLEGAL:
fmt.Println("illegal")
default:
return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
}
}
}
// objectValue parses any type of value, such as a number, bool, string,
// object, or list.
func (p *Parser) objectValue() (ast.Node, error) {
defer un(trace(p, "ParseObjectValue"))
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
return p.literalType()
case token.LBRACE:
return p.objectType()
case token.LBRACK:
return p.listType()
case token.EOF:
return nil, errEofToken
}
return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
}
// object parses a JSON object, which must begin with a left brace.
func (p *Parser) object() (*ast.ObjectType, error) {
defer un(trace(p, "ParseType"))
tok := p.scan()
switch tok.Type {
case token.LBRACE:
return p.objectType()
case token.EOF:
return nil, errEofToken
}
return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
}
// objectType parses an object type and returns an ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
defer un(trace(p, "ParseObjectType"))
// we assume that the currently scanned token is a LBRACE
o := &ast.ObjectType{}
l, err := p.objectList()
// If we hit an RBRACE, we are good to go (it means we parsed all items);
// if it's not an RBRACE, it's a syntax error and we just return it.
if err != nil && p.tok.Type != token.RBRACE {
return nil, err
}
o.List = l
return o, nil
}
// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
defer un(trace(p, "ParseListType"))
// we assume that the currently scanned token is a LBRACK
l := &ast.ListType{}
for {
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.STRING:
node, err := p.literalType()
if err != nil {
return nil, err
}
l.Add(node)
case token.COMMA:
continue
case token.LBRACE:
node, err := p.objectType()
if err != nil {
return nil, err
}
l.Add(node)
case token.BOOL:
// TODO(arslan) should we support? not supported by HCL yet
case token.LBRACK:
// TODO(arslan) should we support nested lists? Even though it's
// written in README of HCL, it's not a part of the grammar
// (not defined in parse.y)
case token.RBRACK:
// finished
return l, nil
default:
return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
}
}
}
// literalType parses a literal type and returns a LiteralType AST
func (p *Parser) literalType() (*ast.LiteralType, error) {
defer un(trace(p, "ParseLiteral"))
return &ast.LiteralType{
Token: p.tok.HCLToken(),
}, nil
}
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead.
func (p *Parser) scan() token.Token {
// If we have a token on the buffer, then return it.
if p.n != 0 {
p.n = 0
return p.tok
}
p.tok = p.sc.Scan()
return p.tok
}
// unscan pushes the previously read token back onto the buffer.
func (p *Parser) unscan() {
p.n = 1
}
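scan and unscan together form a one-token lookahead buffer: unscan sets n to 1, so the next scan replays p.tok instead of pulling a fresh token from the scanner. As a sketch, a hypothetical peek helper (not part of this commit) could be built directly on the pair:

// peekToken looks at the next token without consuming it.
func (p *Parser) peekToken() token.Token {
	tok := p.scan() // fresh token, or the buffered one if unscan was called
	p.unscan()      // re-arm the buffer so the next scan replays tok
	return tok
}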
// ----------------------------------------------------------------------------
// Parsing support
func (p *Parser) printTrace(a ...interface{}) {
if !p.enableTrace {
return
}
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
i := 2 * p.indent
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
func trace(p *Parser, msg string) *Parser {
p.printTrace(msg, "(")
p.indent++
return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
p.indent--
p.printTrace(")")
}

json/parser/parser_test.go (new file, 268 lines)

@@ -0,0 +1,268 @@
package parser
import (
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
"runtime"
"testing"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/token"
)
func TestType(t *testing.T) {
var literals = []struct {
typ token.Type
src string
}{
{token.STRING, `"foo": "bar"`},
{token.NUMBER, `"foo": 123`},
{token.FLOAT, `"foo": 123.12`},
{token.FLOAT, `"foo": -123.12`},
{token.BOOL, `"foo": true`},
{token.STRING, `"foo": null`},
}
for _, l := range literals {
t.Logf("Testing: %s", l.src)
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
lit, ok := item.Val.(*ast.LiteralType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
if lit.Token.Type != l.typ {
t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type)
}
}
}
func TestListType(t *testing.T) {
var literals = []struct {
src string
tokens []token.Type
}{
{
`"foo": ["123", 123]`,
[]token.Type{token.STRING, token.NUMBER},
},
{
`"foo": [123, "123",]`,
[]token.Type{token.NUMBER, token.STRING},
},
{
`"foo": []`,
[]token.Type{},
},
{
`"foo": ["123", 123]`,
[]token.Type{token.STRING, token.NUMBER},
},
{
`"foo": ["123", {}]`,
[]token.Type{token.STRING, token.LBRACE},
},
}
for _, l := range literals {
t.Logf("Testing: %s", l.src)
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
list, ok := item.Val.(*ast.ListType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
tokens := []token.Type{}
for _, li := range list.List {
switch v := li.(type) {
case *ast.LiteralType:
tokens = append(tokens, v.Token.Type)
case *ast.ObjectType:
tokens = append(tokens, token.LBRACE)
}
}
equals(t, l.tokens, tokens)
}
}
func TestObjectType(t *testing.T) {
var literals = []struct {
src string
nodeType []ast.Node
itemLen int
}{
{
`"foo": {}`,
nil,
0,
},
{
`"foo": {
"bar": "fatih"
}`,
[]ast.Node{&ast.LiteralType{}},
1,
},
{
`"foo": {
"bar": "fatih",
"baz": ["arslan"]
}`,
[]ast.Node{
&ast.LiteralType{},
&ast.ListType{},
},
2,
},
{
`"foo": {
"bar": {}
}`,
[]ast.Node{
&ast.ObjectType{},
},
1,
},
{
`"foo": {
"bar": {},
"foo": true
}`,
[]ast.Node{
&ast.ObjectType{},
&ast.LiteralType{},
},
2,
},
}
for _, l := range literals {
t.Logf("Testing:\n%s\n", l.src)
p := newParser([]byte(l.src))
// p.enableTrace = true
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
// we know that the ObjectKey name is foo for all cases, what matters
// is the object
obj, ok := item.Val.(*ast.ObjectType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
// check if the total length of items are correct
equals(t, l.itemLen, len(obj.List.Items))
// check if the types are correct
for i, item := range obj.List.Items {
equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
}
}
}
func TestObjectKey(t *testing.T) {
keys := []struct {
exp []token.Type
src string
}{
{[]token.Type{token.STRING}, `"foo": {}`},
}
for _, k := range keys {
p := newParser([]byte(k.src))
keys, err := p.objectKey()
if err != nil {
t.Fatal(err)
}
tokens := []token.Type{}
for _, o := range keys {
tokens = append(tokens, o.Token.Type)
}
equals(t, k.exp, tokens)
}
errKeys := []struct {
src string
}{
{`foo 12 {}`},
{`foo bar = {}`},
{`foo []`},
{`12 {}`},
}
for _, k := range errKeys {
p := newParser([]byte(k.src))
_, err := p.objectKey()
if err == nil {
t.Errorf("case '%s' should give an error", k.src)
}
}
}
// Official HCL tests
func TestParse(t *testing.T) {
cases := []struct {
Name string
Err bool
}{
{
"basic.json",
false,
},
{
"object.json",
false,
},
{
"array.json",
false,
},
{
"types.json",
false,
},
}
const fixtureDir = "./test-fixtures"
for _, tc := range cases {
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
if err != nil {
t.Fatalf("err: %s", err)
}
_, err = Parse(d)
if (err != nil) != tc.Err {
t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
}
}
}
// equals fails the test if exp is not equal to act.
func equals(tb testing.TB, exp, act interface{}) {
if !reflect.DeepEqual(exp, act) {
_, file, line, _ := runtime.Caller(1)
fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
tb.FailNow()
}
}


@@ -0,0 +1,4 @@
{
"foo": [1, 2, "bar"],
"bar": "baz"
}


@@ -0,0 +1,3 @@
{
"foo": "bar"
}


@@ -0,0 +1,5 @@
{
"foo": {
"bar": [1,2]
}
}


@@ -0,0 +1,10 @@
{
"foo": "bar",
"bar": 7,
"baz": [1,2,3],
"foo": -12,
"bar": 3.14159,
"foo": true,
"bar": false,
"foo": null
}

json/scanner/scanner.go (new file, 451 lines)

@@ -0,0 +1,451 @@
package scanner
import (
"bytes"
"fmt"
"os"
"unicode"
"unicode/utf8"
"github.com/hashicorp/hcl/json/token"
)
// eof represents a marker rune for the end of the reader.
const eof = rune(0)
// Scanner defines a lexical scanner
type Scanner struct {
buf *bytes.Buffer // Source buffer for advancing and scanning
src []byte // Source buffer for immutable access
// Source Position
srcPos token.Pos // current position
prevPos token.Pos // previous position, used for peek() method
lastCharLen int // length of last character in bytes
lastLineLen int // length of last line in characters (for correct column reporting)
tokStart int // token text start position
tokEnd int // token text end position
// Error is called for each error encountered. If no Error
// function is set, the error is reported to os.Stderr.
Error func(pos token.Pos, msg string)
// ErrorCount is incremented by one for each error encountered.
ErrorCount int
// tokPos is the start position of most recently scanned token; set by
// Scan. The Filename field is always left untouched by the Scanner. If
// an error is reported (via Error) and Position is invalid, the scanner is
// not inside a token.
tokPos token.Pos
}
// New creates and initializes a new instance of Scanner using src as
// its source content.
func New(src []byte) *Scanner {
// Even though we accept src directly, we read from an io.Reader-compatible
// type (*bytes.Buffer), so in the future we could easily switch to a
// streaming read.
b := bytes.NewBuffer(src)
s := &Scanner{
buf: b,
src: src,
}
// srcPosition always starts with 1
s.srcPos.Line = 1
return s
}
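A minimal sketch of driving the scanner by hand, assuming this commit's import paths and a hypothetical input:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/scanner"
	"github.com/hashicorp/hcl/json/token"
)

func main() {
	s := scanner.New([]byte(`{"foo": [1, 2.5, true]}`))
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		fmt.Printf("%s %s %q\n", tok.Pos, tok.Type, tok.Text)
	}
}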
// next reads the next rune from the buffered reader. It returns rune(0)
// if an error occurs or io.EOF is reached.
func (s *Scanner) next() rune {
ch, size, err := s.buf.ReadRune()
if err != nil {
// advance for error reporting
s.srcPos.Column++
s.srcPos.Offset += size
s.lastCharLen = size
return eof
}
if ch == utf8.RuneError && size == 1 {
s.srcPos.Column++
s.srcPos.Offset += size
s.lastCharLen = size
s.err("illegal UTF-8 encoding")
return ch
}
// remember last position
s.prevPos = s.srcPos
s.srcPos.Column++
s.lastCharLen = size
s.srcPos.Offset += size
if ch == '\n' {
s.srcPos.Line++
s.lastLineLen = s.srcPos.Column
s.srcPos.Column = 0
}
// debug
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
return ch
}
// unread unreads the previously read rune and updates the source position
func (s *Scanner) unread() {
if err := s.buf.UnreadRune(); err != nil {
panic(err) // this is user fault, we should catch it
}
s.srcPos = s.prevPos // put back last position
}
// peek returns the next rune without advancing the reader.
func (s *Scanner) peek() rune {
peek, _, err := s.buf.ReadRune()
if err != nil {
return eof
}
s.buf.UnreadRune()
return peek
}
// Scan scans the next token and returns the token.
func (s *Scanner) Scan() token.Token {
ch := s.next()
// skip white space
for isWhitespace(ch) {
ch = s.next()
}
var tok token.Type
// token text markings
s.tokStart = s.srcPos.Offset - s.lastCharLen
// Token position: the initial next() has already advanced the offset by
// the size of one rune, but we are interested in the starting point.
s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
if s.srcPos.Column > 0 {
// common case: last character was not a '\n'
s.tokPos.Line = s.srcPos.Line
s.tokPos.Column = s.srcPos.Column
} else {
// last character was a '\n'
// (we cannot be at the beginning of the source
// since we have called next() at least once)
s.tokPos.Line = s.srcPos.Line - 1
s.tokPos.Column = s.lastLineLen
}
switch {
case isLetter(ch):
lit := s.scanIdentifier()
if lit == "true" || lit == "false" {
tok = token.BOOL
} else if lit == "null" {
tok = token.NULL
} else {
s.err("illegal char")
}
case isDecimal(ch):
tok = s.scanNumber(ch)
default:
switch ch {
case eof:
tok = token.EOF
case '"':
tok = token.STRING
s.scanString()
case '.':
tok = token.PERIOD
ch = s.peek()
if isDecimal(ch) {
tok = token.FLOAT
ch = s.scanMantissa(ch)
ch = s.scanExponent(ch)
}
case '[':
tok = token.LBRACK
case ']':
tok = token.RBRACK
case '{':
tok = token.LBRACE
case '}':
tok = token.RBRACE
case ',':
tok = token.COMMA
case ':':
tok = token.COLON
case '-':
if isDecimal(s.peek()) {
ch := s.next()
tok = s.scanNumber(ch)
} else {
s.err("illegal char")
}
default:
s.err("illegal char: " + string(ch))
}
}
// finish token ending
s.tokEnd = s.srcPos.Offset
// create token literal
var tokenText string
if s.tokStart >= 0 {
tokenText = string(s.src[s.tokStart:s.tokEnd])
}
s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
return token.Token{
Type: tok,
Pos: s.tokPos,
Text: tokenText,
}
}
// scanNumber scans an HCL number definition starting with the given rune
func (s *Scanner) scanNumber(ch rune) token.Type {
zero := ch == '0'
pos := s.srcPos
s.scanMantissa(ch)
ch = s.next() // seek forward
if ch == 'e' || ch == 'E' {
ch = s.scanExponent(ch)
return token.FLOAT
}
if ch == '.' {
ch = s.scanFraction(ch)
if ch == 'e' || ch == 'E' {
ch = s.next()
ch = s.scanExponent(ch)
}
return token.FLOAT
}
if ch != eof {
s.unread()
}
// If the number has more than one digit and starts with zero, it's an error
if zero && pos != s.srcPos {
s.err("numbers cannot start with 0")
}
return token.NUMBER
}
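The decision points above, illustrated: a fraction or an exponent upgrades the token to FLOAT, and a multi-digit literal starting with 0 triggers the error. A small sketch whose expected classifications match the scanner tests later in this commit:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/scanner"
)

func main() {
	for _, src := range []string{"42", "42.0", "42e+10", "-12"} {
		s := scanner.New([]byte(src))
		fmt.Printf("%-7s -> %s\n", src, s.Scan().Type)
	}
	// 42      -> NUMBER
	// 42.0    -> FLOAT
	// 42e+10  -> FLOAT
	// -12     -> NUMBER
}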
// scanMantissa scans the mantissa beginning from the given rune. It returns
// the next non-decimal rune. It's used to determine whether a fraction or
// exponent follows.
func (s *Scanner) scanMantissa(ch rune) rune {
scanned := false
for isDecimal(ch) {
ch = s.next()
scanned = true
}
if scanned && ch != eof {
s.unread()
}
return ch
}
// scanFraction scans the fraction after the '.' rune
func (s *Scanner) scanFraction(ch rune) rune {
if ch == '.' {
ch = s.peek() // we peek just to see if we can move forward
ch = s.scanMantissa(ch)
}
return ch
}
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
// rune.
func (s *Scanner) scanExponent(ch rune) rune {
if ch == 'e' || ch == 'E' {
ch = s.next()
if ch == '-' || ch == '+' {
ch = s.next()
}
ch = s.scanMantissa(ch)
}
return ch
}
// scanString scans a quoted string
func (s *Scanner) scanString() {
braces := 0
for {
// '"' opening already consumed
// read character after quote
ch := s.next()
if ch == '\n' || ch < 0 || ch == eof {
s.err("literal not terminated")
return
}
if ch == '"' && braces == 0 {
break
}
// If we're going into a ${} then we can ignore quotes for a while
if braces == 0 && ch == '$' && s.peek() == '{' {
braces++
s.next()
} else if braces > 0 && ch == '{' {
braces++
}
if braces > 0 && ch == '}' {
braces--
}
if ch == '\\' {
s.scanEscape()
}
}
return
}
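The braces counter is what lets a JSON string carry an HCL interpolation containing its own quotes: inside ${...}, a double quote no longer terminates the string. A sketch of the effect, reusing an input from the scanner tests:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/scanner"
)

func main() {
	s := scanner.New([]byte(`"${file("foo")}"`))
	tok := s.Scan()
	// A single STRING token; the inner quotes are preserved in the text.
	fmt.Println(tok.Type, tok.Text) // STRING "${file("foo")}"
}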
// scanEscape scans an escape sequence
func (s *Scanner) scanEscape() rune {
// http://en.cppreference.com/w/cpp/language/escape
ch := s.next() // read character after '\'
switch ch {
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
// nothing to do
case '0', '1', '2', '3', '4', '5', '6', '7':
// octal notation
ch = s.scanDigits(ch, 8, 3)
case 'x':
// hexadecimal notation
ch = s.scanDigits(s.next(), 16, 2)
case 'u':
// universal character name
ch = s.scanDigits(s.next(), 16, 4)
case 'U':
// universal character name
ch = s.scanDigits(s.next(), 16, 8)
default:
s.err("illegal char escape")
}
return ch
}
// scanDigits scans a rune with the given base, n times. For example, an
// octal escape such as \123 would yield scanDigits(ch, 8, 3)
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
for n > 0 && digitVal(ch) < base {
ch = s.next()
n--
}
if n > 0 {
s.err("illegal char escape")
}
// we scanned all digits, put the last non digit char back
s.unread()
return ch
}
// scanIdentifier scans an identifier and returns the literal string
func (s *Scanner) scanIdentifier() string {
offs := s.srcPos.Offset - s.lastCharLen
ch := s.next()
for isLetter(ch) || isDigit(ch) || ch == '-' {
ch = s.next()
}
if ch != eof {
s.unread() // we got identifier, put back latest char
}
return string(s.src[offs:s.srcPos.Offset])
}
// recentPosition returns the position of the character immediately after the
// character or token returned by the last call to Scan.
func (s *Scanner) recentPosition() (pos token.Pos) {
pos.Offset = s.srcPos.Offset - s.lastCharLen
switch {
case s.srcPos.Column > 0:
// common case: last character was not a '\n'
pos.Line = s.srcPos.Line
pos.Column = s.srcPos.Column
case s.lastLineLen > 0:
// last character was a '\n'
// (we cannot be at the beginning of the source
// since we have called next() at least once)
pos.Line = s.srcPos.Line - 1
pos.Column = s.lastLineLen
default:
// at the beginning of the source
pos.Line = 1
pos.Column = 1
}
return
}
// err reports a scanning error to the s.Error function. If that function
// is not defined, it prints the error to os.Stderr by default
func (s *Scanner) err(msg string) {
s.ErrorCount++
pos := s.recentPosition()
if s.Error != nil {
s.Error(pos, msg)
return
}
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
}
// isLetter returns true if the given rune is a letter
func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}
// isDigit returns true if the given rune is a digit
func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
// isDecimal returns true if the given rune is a decimal digit
func isDecimal(ch rune) bool {
return '0' <= ch && ch <= '9'
}
// isHexadecimal returns true if the given rune is a hexadecimal digit
func isHexadecimal(ch rune) bool {
return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
}
// isWhitespace returns true if the rune is a space, tab, newline or carriage return
func isWhitespace(ch rune) bool {
return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
}
// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
func digitVal(ch rune) int {
switch {
case '0' <= ch && ch <= '9':
return int(ch - '0')
case 'a' <= ch && ch <= 'f':
return int(ch - 'a' + 10)
case 'A' <= ch && ch <= 'F':
return int(ch - 'A' + 10)
}
return 16 // larger than any legal digit val
}


@@ -0,0 +1,362 @@
package scanner
import (
"bytes"
"fmt"
"testing"
"github.com/hashicorp/hcl/json/token"
)
var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
type tokenPair struct {
tok token.Type
text string
}
var tokenLists = map[string][]tokenPair{
"operator": []tokenPair{
{token.LBRACK, "["},
{token.LBRACE, "{"},
{token.COMMA, ","},
{token.PERIOD, "."},
{token.RBRACK, "]"},
{token.RBRACE, "}"},
},
"bool": []tokenPair{
{token.BOOL, "true"},
{token.BOOL, "false"},
},
"string": []tokenPair{
{token.STRING, `" "`},
{token.STRING, `"a"`},
{token.STRING, `"本"`},
{token.STRING, `"${file("foo")}"`},
{token.STRING, `"\a"`},
{token.STRING, `"\b"`},
{token.STRING, `"\f"`},
{token.STRING, `"\n"`},
{token.STRING, `"\r"`},
{token.STRING, `"\t"`},
{token.STRING, `"\v"`},
{token.STRING, `"\""`},
{token.STRING, `"\000"`},
{token.STRING, `"\777"`},
{token.STRING, `"\x00"`},
{token.STRING, `"\xff"`},
{token.STRING, `"\u0000"`},
{token.STRING, `"\ufA16"`},
{token.STRING, `"\U00000000"`},
{token.STRING, `"\U0000ffAB"`},
{token.STRING, `"` + f100 + `"`},
},
"number": []tokenPair{
{token.NUMBER, "0"},
{token.NUMBER, "1"},
{token.NUMBER, "9"},
{token.NUMBER, "42"},
{token.NUMBER, "1234567890"},
{token.NUMBER, "-0"},
{token.NUMBER, "-1"},
{token.NUMBER, "-9"},
{token.NUMBER, "-42"},
{token.NUMBER, "-1234567890"},
},
"float": []tokenPair{
{token.FLOAT, "0."},
{token.FLOAT, "1."},
{token.FLOAT, "42."},
{token.FLOAT, "01234567890."},
{token.FLOAT, ".0"},
{token.FLOAT, ".1"},
{token.FLOAT, ".42"},
{token.FLOAT, ".0123456789"},
{token.FLOAT, "0.0"},
{token.FLOAT, "1.0"},
{token.FLOAT, "42.0"},
{token.FLOAT, "01234567890.0"},
{token.FLOAT, "0e0"},
{token.FLOAT, "1e0"},
{token.FLOAT, "42e0"},
{token.FLOAT, "01234567890e0"},
{token.FLOAT, "0E0"},
{token.FLOAT, "1E0"},
{token.FLOAT, "42E0"},
{token.FLOAT, "01234567890E0"},
{token.FLOAT, "0e+10"},
{token.FLOAT, "1e-10"},
{token.FLOAT, "42e+10"},
{token.FLOAT, "01234567890e-10"},
{token.FLOAT, "0E+10"},
{token.FLOAT, "1E-10"},
{token.FLOAT, "42E+10"},
{token.FLOAT, "01234567890E-10"},
{token.FLOAT, "01.8e0"},
{token.FLOAT, "1.4e0"},
{token.FLOAT, "42.2e0"},
{token.FLOAT, "01234567890.12e0"},
{token.FLOAT, "0.E0"},
{token.FLOAT, "1.12E0"},
{token.FLOAT, "42.123E0"},
{token.FLOAT, "01234567890.213E0"},
{token.FLOAT, "0.2e+10"},
{token.FLOAT, "1.2e-10"},
{token.FLOAT, "42.54e+10"},
{token.FLOAT, "01234567890.98e-10"},
{token.FLOAT, "0.1E+10"},
{token.FLOAT, "1.1E-10"},
{token.FLOAT, "42.1E+10"},
{token.FLOAT, "01234567890.1E-10"},
{token.FLOAT, "-0.0"},
{token.FLOAT, "-1.0"},
{token.FLOAT, "-42.0"},
{token.FLOAT, "-01234567890.0"},
{token.FLOAT, "-0e0"},
{token.FLOAT, "-1e0"},
{token.FLOAT, "-42e0"},
{token.FLOAT, "-01234567890e0"},
{token.FLOAT, "-0E0"},
{token.FLOAT, "-1E0"},
{token.FLOAT, "-42E0"},
{token.FLOAT, "-01234567890E0"},
{token.FLOAT, "-0e+10"},
{token.FLOAT, "-1e-10"},
{token.FLOAT, "-42e+10"},
{token.FLOAT, "-01234567890e-10"},
{token.FLOAT, "-0E+10"},
{token.FLOAT, "-1E-10"},
{token.FLOAT, "-42E+10"},
{token.FLOAT, "-01234567890E-10"},
{token.FLOAT, "-01.8e0"},
{token.FLOAT, "-1.4e0"},
{token.FLOAT, "-42.2e0"},
{token.FLOAT, "-01234567890.12e0"},
{token.FLOAT, "-0.E0"},
{token.FLOAT, "-1.12E0"},
{token.FLOAT, "-42.123E0"},
{token.FLOAT, "-01234567890.213E0"},
{token.FLOAT, "-0.2e+10"},
{token.FLOAT, "-1.2e-10"},
{token.FLOAT, "-42.54e+10"},
{token.FLOAT, "-01234567890.98e-10"},
{token.FLOAT, "-0.1E+10"},
{token.FLOAT, "-1.1E-10"},
{token.FLOAT, "-42.1E+10"},
{token.FLOAT, "-01234567890.1E-10"},
},
}
var orderedTokenLists = []string{
"comment",
"operator",
"bool",
"string",
"number",
"float",
}
func TestPosition(t *testing.T) {
// create artificial source code
buf := new(bytes.Buffer)
for _, listName := range orderedTokenLists {
for _, ident := range tokenLists[listName] {
fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text)
}
}
s := New(buf.Bytes())
pos := token.Pos{"", 4, 1, 5}
s.Scan()
for _, listName := range orderedTokenLists {
for _, k := range tokenLists[listName] {
curPos := s.tokPos
// fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column)
if curPos.Offset != pos.Offset {
t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text)
}
if curPos.Line != pos.Line {
t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text)
}
if curPos.Column != pos.Column {
t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text)
}
pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline
pos.Line += countNewlines(k.text) + 1 // each token is on a new line
s.Error = func(pos token.Pos, msg string) {
t.Errorf("error %q for %q", msg, k.text)
}
s.Scan()
}
}
// make sure there were no token-internal errors reported by scanner
if s.ErrorCount != 0 {
t.Errorf("%d errors", s.ErrorCount)
}
}
func TestComment(t *testing.T) {
testTokenList(t, tokenLists["comment"])
}
func TestOperator(t *testing.T) {
testTokenList(t, tokenLists["operator"])
}
func TestBool(t *testing.T) {
testTokenList(t, tokenLists["bool"])
}
func TestIdent(t *testing.T) {
testTokenList(t, tokenLists["ident"])
}
func TestString(t *testing.T) {
testTokenList(t, tokenLists["string"])
}
func TestNumber(t *testing.T) {
testTokenList(t, tokenLists["number"])
}
func TestFloat(t *testing.T) {
testTokenList(t, tokenLists["float"])
}
func TestRealExample(t *testing.T) {
complexReal := `
{
"variable": {
"foo": {
"default": "bar",
"description": "bar",
"depends_on": ["something"]
}
}
}`
literals := []struct {
tokenType token.Type
literal string
}{
{token.LBRACE, `{`},
{token.STRING, `"variable"`},
{token.COLON, `:`},
{token.LBRACE, `{`},
{token.STRING, `"foo"`},
{token.COLON, `:`},
{token.LBRACE, `{`},
{token.STRING, `"default"`},
{token.COLON, `:`},
{token.STRING, `"bar"`},
{token.COMMA, `,`},
{token.STRING, `"description"`},
{token.COLON, `:`},
{token.STRING, `"bar"`},
{token.COMMA, `,`},
{token.STRING, `"depends_on"`},
{token.COLON, `:`},
{token.LBRACK, `[`},
{token.STRING, `"something"`},
{token.RBRACK, `]`},
{token.RBRACE, `}`},
{token.RBRACE, `}`},
{token.RBRACE, `}`},
{token.EOF, ``},
}
s := New([]byte(complexReal))
for _, l := range literals {
tok := s.Scan()
if l.tokenType != tok.Type {
t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
}
if l.literal != tok.Text {
t.Errorf("got: %s want %s\n", tok, l.literal)
}
}
}
func TestError(t *testing.T) {
testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING)
testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING)
testError(t, `01238`, "1:7", "numbers cannot start with 0", token.NUMBER)
testError(t, `01238123`, "1:10", "numbers cannot start with 0", token.NUMBER)
testError(t, `'aa'`, "1:1", "illegal char: '", token.ILLEGAL)
testError(t, `"`, "1:2", "literal not terminated", token.STRING)
testError(t, `"abc`, "1:5", "literal not terminated", token.STRING)
testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING)
}
func testError(t *testing.T, src, pos, msg string, tok token.Type) {
s := New([]byte(src))
errorCalled := false
s.Error = func(p token.Pos, m string) {
if !errorCalled {
if pos != p.String() {
t.Errorf("pos = %q, want %q for %q", p, pos, src)
}
if m != msg {
t.Errorf("msg = %q, want %q for %q", m, msg, src)
}
errorCalled = true
}
}
tk := s.Scan()
if tk.Type != tok {
t.Errorf("tok = %s, want %s for %q", tk, tok, src)
}
if !errorCalled {
t.Errorf("error handler not called for %q", src)
}
if s.ErrorCount == 0 {
t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
}
}
func testTokenList(t *testing.T, tokenList []tokenPair) {
// create artificial source code
buf := new(bytes.Buffer)
for _, ident := range tokenList {
fmt.Fprintf(buf, "%s\n", ident.text)
}
s := New(buf.Bytes())
for _, ident := range tokenList {
tok := s.Scan()
if tok.Type != ident.tok {
t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
}
if tok.Text != ident.text {
t.Errorf("text = %q want %q", tok.String(), ident.text)
}
}
}
func countNewlines(s string) int {
n := 0
for _, ch := range s {
if ch == '\n' {
n++
}
}
return n
}

json/token/position.go (new file, 46 lines)

@@ -0,0 +1,46 @@
package token
import "fmt"
// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
type Pos struct {
Filename string // filename, if any
Offset int // offset, starting at 0
Line int // line number, starting at 1
Column int // column number, starting at 1 (character count)
}
// IsValid returns true if the position is valid.
func (p *Pos) IsValid() bool { return p.Line > 0 }
// String returns a string in one of several forms:
//
// file:line:column valid position with file name
// line:column valid position without file name
// file invalid position with file name
// - invalid position without file name
func (p Pos) String() string {
s := p.Filename
if p.IsValid() {
if s != "" {
s += ":"
}
s += fmt.Sprintf("%d:%d", p.Line, p.Column)
}
if s == "" {
s = "-"
}
return s
}
// Before reports whether the position p is before u.
func (p Pos) Before(u Pos) bool {
return u.Offset > p.Offset || u.Line > p.Line
}
// After reports whether the position p is after u.
func (p Pos) After(u Pos) bool {
return u.Offset < p.Offset || u.Line < p.Line
}

json/token/token.go (new file, 118 lines)

@@ -0,0 +1,118 @@
package token
import (
"fmt"
"strconv"
hcltoken "github.com/hashicorp/hcl/hcl/token"
)
// Token defines a single HCL token which can be obtained via the Scanner
type Token struct {
Type Type
Pos Pos
Text string
}
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
type Type int
const (
// Special tokens
ILLEGAL Type = iota
EOF
identifier_beg
literal_beg
NUMBER // 12345
FLOAT // 123.45
BOOL // true,false
STRING // "abc"
NULL // null
literal_end
identifier_end
operator_beg
LBRACK // [
LBRACE // {
COMMA // ,
PERIOD // .
COLON // :
RBRACK // ]
RBRACE // }
operator_end
)
var tokens = [...]string{
ILLEGAL: "ILLEGAL",
EOF: "EOF",
NUMBER: "NUMBER",
FLOAT: "FLOAT",
BOOL: "BOOL",
STRING: "STRING",
NULL: "NULL",
LBRACK: "LBRACK",
LBRACE: "LBRACE",
COMMA: "COMMA",
PERIOD: "PERIOD",
COLON: "COLON",
RBRACK: "RBRACK",
RBRACE: "RBRACE",
}
// String returns the string corresponding to the token type t.
func (t Type) String() string {
s := ""
if 0 <= t && t < Type(len(tokens)) {
s = tokens[t]
}
if s == "" {
s = "token(" + strconv.Itoa(int(t)) + ")"
}
return s
}
// IsIdentifier returns true for tokens corresponding to identifiers and basic
// type literals; it returns false otherwise.
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
// IsLiteral returns true for tokens corresponding to basic type literals; it
// returns false otherwise.
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
// String returns a human-readable representation of the token, combining
// its position, type, and literal text.
func (t Token) String() string {
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
}
// HCLToken converts this token to an HCL token.
//
// The token type must be a literal type or this will panic.
func (t Token) HCLToken() hcltoken.Token {
switch t.Type {
case BOOL:
return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
case FLOAT:
return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
case NULL:
return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
case NUMBER:
return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
case STRING:
return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
default:
panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
}
}
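A quick sketch of the conversion, assuming this commit's packages: JSON strings become HCL STRING tokens flagged with JSON: true (which is what selects strconv.Unquote in the hcl token package's Value method above), while JSON null maps to an empty HCL string:

package main

import (
	"fmt"

	jsontoken "github.com/hashicorp/hcl/json/token"
)

func main() {
	t := jsontoken.Token{Type: jsontoken.STRING, Text: `"bar"`}
	h := t.HCLToken()
	fmt.Println(h.Type, h.JSON, h.Text) // STRING true "bar"
}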

json/token/token_test.go (new file, 34 lines)

@@ -0,0 +1,34 @@
package token
import (
"testing"
)
func TestTypeString(t *testing.T) {
var tokens = []struct {
tt Type
str string
}{
{ILLEGAL, "ILLEGAL"},
{EOF, "EOF"},
{NUMBER, "NUMBER"},
{FLOAT, "FLOAT"},
{BOOL, "BOOL"},
{STRING, "STRING"},
{NULL, "NULL"},
{LBRACK, "LBRACK"},
{LBRACE, "LBRACE"},
{COMMA, "COMMA"},
{PERIOD, "PERIOD"},
{RBRACK, "RBRACK"},
{RBRACE, "RBRACE"},
}
for _, token := range tokens {
if token.tt.String() != token.str {
t.Errorf("want: %q got:%q\n", token.str, token.tt)
}
}
}

json/y.go (deleted, 713 lines)

@@ -1,713 +0,0 @@
//line parse.y:3
package json
import __yyfmt__ "fmt"
//line parse.y:5
import (
"fmt"
"strconv"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/token"
)
//line parse.y:16
type jsonSymType struct {
yys int
f float64
list []ast.Node
node ast.Node
num int
str string
obj *ast.ObjectType
objitem *ast.ObjectItem
objlist *ast.ObjectList
}
const FLOAT = 57346
const NUMBER = 57347
const COLON = 57348
const COMMA = 57349
const IDENTIFIER = 57350
const EQUAL = 57351
const NEWLINE = 57352
const STRING = 57353
const LEFTBRACE = 57354
const RIGHTBRACE = 57355
const LEFTBRACKET = 57356
const RIGHTBRACKET = 57357
const TRUE = 57358
const FALSE = 57359
const NULL = 57360
const MINUS = 57361
const PERIOD = 57362
const EPLUS = 57363
const EMINUS = 57364
var jsonToknames = [...]string{
"$end",
"error",
"$unk",
"FLOAT",
"NUMBER",
"COLON",
"COMMA",
"IDENTIFIER",
"EQUAL",
"NEWLINE",
"STRING",
"LEFTBRACE",
"RIGHTBRACE",
"LEFTBRACKET",
"RIGHTBRACKET",
"TRUE",
"FALSE",
"NULL",
"MINUS",
"PERIOD",
"EPLUS",
"EMINUS",
}
var jsonStatenames = [...]string{}
const jsonEofCode = 1
const jsonErrCode = 2
const jsonMaxDepth = 200
//line parse.y:227
//line yacctab:1
var jsonExca = [...]int{
-1, 1,
1, -1,
-2, 0,
}
const jsonNprod = 28
const jsonPrivate = 57344
var jsonTokenNames []string
var jsonStates []string
const jsonLast = 53
var jsonAct = [...]int{
12, 25, 24, 3, 20, 27, 28, 7, 13, 3,
21, 22, 30, 17, 18, 19, 23, 25, 24, 26,
25, 24, 36, 32, 13, 3, 10, 22, 33, 17,
18, 19, 23, 35, 34, 23, 38, 9, 7, 39,
5, 29, 6, 8, 37, 15, 2, 1, 4, 14,
31, 16, 11,
}
var jsonPact = [...]int{
-9, -1000, -1000, 27, 30, -1000, -1000, 20, -1000, -4,
13, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-16, -16, -3, 16, -1000, -1000, -1000, 28, 17, -1000,
-1000, 29, -1000, -1000, -1000, -1000, -1000, -1000, 13, -1000,
}
var jsonPgo = [...]int{
0, 10, 51, 50, 49, 0, 4, 45, 42, 48,
19, 47,
}
var jsonR1 = [...]int{
0, 11, 7, 7, 9, 9, 8, 5, 5, 5,
5, 5, 5, 5, 2, 2, 3, 3, 4, 4,
4, 4, 6, 6, 1, 1, 10, 10,
}
var jsonR2 = [...]int{
0, 1, 3, 2, 1, 3, 3, 1, 1, 1,
1, 1, 1, 1, 2, 3, 1, 3, 1, 1,
2, 2, 2, 1, 2, 1, 2, 2,
}
var jsonChk = [...]int{
-1000, -11, -7, 12, -9, 13, -8, 11, 13, 7,
6, -8, -5, 11, -4, -7, -2, 16, 17, 18,
-6, -1, 14, 19, 5, 4, -10, 21, 22, -10,
15, -3, -5, -6, -1, 5, 5, 15, 7, -5,
}
var jsonDef = [...]int{
0, -2, 1, 0, 0, 3, 4, 0, 2, 0,
0, 5, 6, 7, 8, 9, 10, 11, 12, 13,
18, 19, 0, 0, 23, 25, 20, 0, 0, 21,
14, 0, 16, 22, 24, 26, 27, 15, 0, 17,
}
var jsonTok1 = [...]int{
1,
}
var jsonTok2 = [...]int{
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22,
}
var jsonTok3 = [...]int{
0,
}
var jsonErrorMessages = [...]struct {
state int
token int
msg string
}{}
//line yaccpar:1
/* parser for yacc output */
var (
jsonDebug = 0
jsonErrorVerbose = false
)
type jsonLexer interface {
Lex(lval *jsonSymType) int
Error(s string)
}
type jsonParser interface {
Parse(jsonLexer) int
Lookahead() int
}
type jsonParserImpl struct {
lookahead func() int
}
func (p *jsonParserImpl) Lookahead() int {
return p.lookahead()
}
func jsonNewParser() jsonParser {
p := &jsonParserImpl{
lookahead: func() int { return -1 },
}
return p
}
const jsonFlag = -1000
func jsonTokname(c int) string {
if c >= 1 && c-1 < len(jsonToknames) {
if jsonToknames[c-1] != "" {
return jsonToknames[c-1]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func jsonStatname(s int) string {
if s >= 0 && s < len(jsonStatenames) {
if jsonStatenames[s] != "" {
return jsonStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func jsonErrorMessage(state, lookAhead int) string {
const TOKSTART = 4
if !jsonErrorVerbose {
return "syntax error"
}
for _, e := range jsonErrorMessages {
if e.state == state && e.token == lookAhead {
return "syntax error: " + e.msg
}
}
res := "syntax error: unexpected " + jsonTokname(lookAhead)
// To match Bison, suggest at most four expected tokens.
expected := make([]int, 0, 4)
// Look for shiftable tokens.
base := jsonPact[state]
for tok := TOKSTART; tok-1 < len(jsonToknames); tok++ {
if n := base + tok; n >= 0 && n < jsonLast && jsonChk[jsonAct[n]] == tok {
if len(expected) == cap(expected) {
return res
}
expected = append(expected, tok)
}
}
if jsonDef[state] == -2 {
i := 0
for jsonExca[i] != -1 || jsonExca[i+1] != state {
i += 2
}
// Look for tokens that we accept or reduce.
for i += 2; jsonExca[i] >= 0; i += 2 {
tok := jsonExca[i]
if tok < TOKSTART || jsonExca[i+1] == 0 {
continue
}
if len(expected) == cap(expected) {
return res
}
expected = append(expected, tok)
}
// If the default action is to accept or reduce, give up.
if jsonExca[i+1] != 0 {
return res
}
}
for i, tok := range expected {
if i == 0 {
res += ", expecting "
} else {
res += " or "
}
res += jsonTokname(tok)
}
return res
}
func jsonlex1(lex jsonLexer, lval *jsonSymType) (char, token int) {
token = 0
char = lex.Lex(lval)
if char <= 0 {
token = jsonTok1[0]
goto out
}
if char < len(jsonTok1) {
token = jsonTok1[char]
goto out
}
if char >= jsonPrivate {
if char < jsonPrivate+len(jsonTok2) {
token = jsonTok2[char-jsonPrivate]
goto out
}
}
for i := 0; i < len(jsonTok3); i += 2 {
token = jsonTok3[i+0]
if token == char {
token = jsonTok3[i+1]
goto out
}
}
out:
if token == 0 {
token = jsonTok2[1] /* unknown char */
}
if jsonDebug >= 3 {
__yyfmt__.Printf("lex %s(%d)\n", jsonTokname(token), uint(char))
}
return char, token
}
func jsonParse(jsonlex jsonLexer) int {
return jsonNewParser().Parse(jsonlex)
}
func (jsonrcvr *jsonParserImpl) Parse(jsonlex jsonLexer) int {
var jsonn int
var jsonlval jsonSymType
var jsonVAL jsonSymType
var jsonDollar []jsonSymType
_ = jsonDollar // silence set and not used
jsonS := make([]jsonSymType, jsonMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
jsonstate := 0
jsonchar := -1
jsontoken := -1 // jsonchar translated into internal numbering
jsonrcvr.lookahead = func() int { return jsonchar }
defer func() {
// Make sure we report no lookahead when not parsing.
jsonstate = -1
jsonchar = -1
jsontoken = -1
}()
jsonp := -1
goto jsonstack
ret0:
return 0
ret1:
return 1
jsonstack:
/* put a state and value onto the stack */
if jsonDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", jsonTokname(jsontoken), jsonStatname(jsonstate))
}
jsonp++
if jsonp >= len(jsonS) {
nyys := make([]jsonSymType, len(jsonS)*2)
copy(nyys, jsonS)
jsonS = nyys
}
jsonS[jsonp] = jsonVAL
jsonS[jsonp].yys = jsonstate
jsonnewstate:
jsonn = jsonPact[jsonstate]
if jsonn <= jsonFlag {
goto jsondefault /* simple state */
}
if jsonchar < 0 {
jsonchar, jsontoken = jsonlex1(jsonlex, &jsonlval)
}
jsonn += jsontoken
if jsonn < 0 || jsonn >= jsonLast {
goto jsondefault
}
jsonn = jsonAct[jsonn]
if jsonChk[jsonn] == jsontoken { /* valid shift */
jsonchar = -1
jsontoken = -1
jsonVAL = jsonlval
jsonstate = jsonn
if Errflag > 0 {
Errflag--
}
goto jsonstack
}
jsondefault:
/* default state action */
jsonn = jsonDef[jsonstate]
if jsonn == -2 {
if jsonchar < 0 {
jsonchar, jsontoken = jsonlex1(jsonlex, &jsonlval)
}
/* look through exception table */
xi := 0
for {
if jsonExca[xi+0] == -1 && jsonExca[xi+1] == jsonstate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
jsonn = jsonExca[xi+0]
if jsonn < 0 || jsonn == jsontoken {
break
}
}
jsonn = jsonExca[xi+1]
if jsonn < 0 {
goto ret0
}
}
if jsonn == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
jsonlex.Error(jsonErrorMessage(jsonstate, jsontoken))
Nerrs++
if jsonDebug >= 1 {
__yyfmt__.Printf("%s", jsonStatname(jsonstate))
__yyfmt__.Printf(" saw %s\n", jsonTokname(jsontoken))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for jsonp >= 0 {
jsonn = jsonPact[jsonS[jsonp].yys] + jsonErrCode
if jsonn >= 0 && jsonn < jsonLast {
jsonstate = jsonAct[jsonn] /* simulate a shift of "error" */
if jsonChk[jsonstate] == jsonErrCode {
goto jsonstack
}
}
/* the current p has no shift on "error", pop stack */
if jsonDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", jsonS[jsonp].yys)
}
jsonp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if jsonDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", jsonTokname(jsontoken))
}
if jsontoken == jsonEofCode {
goto ret1
}
jsonchar = -1
jsontoken = -1
goto jsonnewstate /* try again in the same state */
}
}
/* reduction by production jsonn */
if jsonDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", jsonn, jsonStatname(jsonstate))
}
jsonnt := jsonn
jsonpt := jsonp
_ = jsonpt // guard against "declared and not used"
jsonp -= jsonR2[jsonn]
// jsonp is now the index of $0. Perform the default action. Iff the
// reduced production is ε, $1 is possibly out of range.
if jsonp+1 >= len(jsonS) {
nyys := make([]jsonSymType, len(jsonS)*2)
copy(nyys, jsonS)
jsonS = nyys
}
jsonVAL = jsonS[jsonp+1]
/* consult goto table to find next state */
jsonn = jsonR1[jsonn]
jsong := jsonPgo[jsonn]
jsonj := jsong + jsonS[jsonp].yys + 1
if jsonj >= jsonLast {
jsonstate = jsonAct[jsong]
} else {
jsonstate = jsonAct[jsonj]
if jsonChk[jsonstate] != -jsonn {
jsonstate = jsonAct[jsong]
}
}
// dummy call; replaced with literal code
switch jsonnt {
case 1:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:46
{
jsonResult = &ast.File{
Node: jsonDollar[1].obj.List,
}
}
case 2:
jsonDollar = jsonS[jsonpt-3 : jsonpt+1]
//line parse.y:54
{
jsonVAL.obj = &ast.ObjectType{
List: jsonDollar[2].objlist,
}
}
case 3:
jsonDollar = jsonS[jsonpt-2 : jsonpt+1]
//line parse.y:60
{
jsonVAL.obj = &ast.ObjectType{}
}
case 4:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:66
{
jsonVAL.objlist = &ast.ObjectList{
Items: []*ast.ObjectItem{jsonDollar[1].objitem},
}
}
case 5:
jsonDollar = jsonS[jsonpt-3 : jsonpt+1]
//line parse.y:72
{
jsonDollar[1].objlist.Items = append(jsonDollar[1].objlist.Items, jsonDollar[3].objitem)
jsonVAL.objlist = jsonDollar[1].objlist
}
case 6:
jsonDollar = jsonS[jsonpt-3 : jsonpt+1]
//line parse.y:79
{
jsonVAL.objitem = &ast.ObjectItem{
Keys: []*ast.ObjectKey{
&ast.ObjectKey{
Token: token.Token{
Type: token.IDENT,
Text: jsonDollar[1].str,
},
},
},
Val: jsonDollar[3].node,
}
}
case 7:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:96
{
jsonVAL.node = &ast.LiteralType{
Token: token.Token{
Type: token.STRING,
Text: fmt.Sprintf(`"%s"`, jsonDollar[1].str),
},
}
}
case 8:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:105
{
jsonVAL.node = jsonDollar[1].node
}
case 9:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:109
{
jsonVAL.node = jsonDollar[1].obj
}
case 10:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:113
{
jsonVAL.node = &ast.ListType{
List: jsonDollar[1].list,
}
}
case 11:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:119
{
jsonVAL.node = &ast.LiteralType{
Token: token.Token{Type: token.BOOL, Text: "true"},
}
}
case 12:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:125
{
jsonVAL.node = &ast.LiteralType{
Token: token.Token{Type: token.BOOL, Text: "false"},
}
}
case 13:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:131
{
jsonVAL.node = &ast.LiteralType{
Token: token.Token{Type: token.STRING, Text: ""},
}
}
case 14:
jsonDollar = jsonS[jsonpt-2 : jsonpt+1]
//line parse.y:139
{
jsonVAL.list = nil
}
case 15:
jsonDollar = jsonS[jsonpt-3 : jsonpt+1]
//line parse.y:143
{
jsonVAL.list = jsonDollar[2].list
}
case 16:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:149
{
jsonVAL.list = []ast.Node{jsonDollar[1].node}
}
case 17:
jsonDollar = jsonS[jsonpt-3 : jsonpt+1]
//line parse.y:153
{
jsonVAL.list = append(jsonDollar[1].list, jsonDollar[3].node)
}
case 18:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:159
{
jsonVAL.node = &ast.LiteralType{
Token: token.Token{
Type: token.NUMBER,
Text: fmt.Sprintf("%d", jsonDollar[1].num),
},
}
}
case 19:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:168
{
jsonVAL.node = &ast.LiteralType{
Token: token.Token{
Type: token.FLOAT,
Text: fmt.Sprintf("%f", jsonDollar[1].f),
},
}
}
case 20:
jsonDollar = jsonS[jsonpt-2 : jsonpt+1]
//line parse.y:177
{
fs := fmt.Sprintf("%d%s", jsonDollar[1].num, jsonDollar[2].str)
jsonVAL.node = &ast.LiteralType{
Token: token.Token{
Type: token.FLOAT,
Text: fs,
},
}
}
case 21:
jsonDollar = jsonS[jsonpt-2 : jsonpt+1]
//line parse.y:187
{
fs := fmt.Sprintf("%f%s", jsonDollar[1].f, jsonDollar[2].str)
jsonVAL.node = &ast.LiteralType{
Token: token.Token{
Type: token.FLOAT,
Text: fs,
},
}
}
case 22:
jsonDollar = jsonS[jsonpt-2 : jsonpt+1]
//line parse.y:199
{
jsonVAL.num = jsonDollar[2].num * -1
}
case 23:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:203
{
jsonVAL.num = jsonDollar[1].num
}
case 24:
jsonDollar = jsonS[jsonpt-2 : jsonpt+1]
//line parse.y:209
{
jsonVAL.f = jsonDollar[2].f * -1
}
case 25:
jsonDollar = jsonS[jsonpt-1 : jsonpt+1]
//line parse.y:213
{
jsonVAL.f = jsonDollar[1].f
}
case 26:
jsonDollar = jsonS[jsonpt-2 : jsonpt+1]
//line parse.y:219
{
jsonVAL.str = "e" + strconv.FormatInt(int64(jsonDollar[2].num), 10)
}
case 27:
jsonDollar = jsonS[jsonpt-2 : jsonpt+1]
//line parse.y:223
{
jsonVAL.str = "e-" + strconv.FormatInt(int64(jsonDollar[2].num), 10)
}
}
goto jsonstack /* stack new state and value */
}


@@ -5,7 +5,7 @@ import (
"github.com/hashicorp/hcl/hcl/ast"
hclParser "github.com/hashicorp/hcl/hcl/parser"
"github.com/hashicorp/hcl/json"
jsonParser "github.com/hashicorp/hcl/json/parser"
)
// Parse parses the given input and returns the root object.
@@ -16,7 +16,7 @@ func Parse(input string) (*ast.File, error) {
case lexModeHcl:
return hclParser.Parse([]byte(input))
case lexModeJson:
-return json.Parse(input)
+return jsonParser.Parse([]byte(input))
}
return nil, fmt.Errorf("unknown config format")