hclpack: Body can now unmarshal from JSON
This allows us to round-trip a Body to JSON and back without any loss, as long as the expression source code is always valid UTF-8. We already require that during expression parsing, so it is a reasonable restriction. The JSON encoding is a little noisy to read because of the extra annotations required to be lossless (including source ranges), but it remains relatively compact thanks to the base64-VLQ encoding of the source location information.
commit dcefc5ca24
parent 30da06ec3f
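
For context, here is a minimal sketch of the round trip this commit enables, mirroring the new test further down. The import path github.com/hashicorp/hcl2/hclpack, the example.hcl filename, and the HCL source are illustrative assumptions rather than part of the commit:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclpack"
)

func main() {
	// Pack some native-syntax HCL source (must be valid UTF-8) into a Body.
	src := `foo = "bar"`
	body, diags := hclpack.PackNativeFile([]byte(src), "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// Marshal to the compact JSON form (including base64-VLQ position info)...
	buf, err := body.MarshalJSON()
	if err != nil {
		panic(err)
	}

	// ...and unmarshal it back into an equivalent Body without loss.
	var back hclpack.Body
	if err := back.UnmarshalJSON(buf); err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped %d bytes of JSON\n", len(buf))
}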
@@ -87,7 +87,18 @@ func (b *Block) forJSON(pos map[string]map[hcl.Pos]posOfs) blockJSON {
 
 // UnmarshalJSON is an implementation of Unmarshaler from encoding/json,
 // allowing bodies to be included in other types that are JSON-unmarshalable.
-func (b *Body) UnmarshalJSON([]byte) error {
+func (b *Body) UnmarshalJSON(data []byte) error {
+	var head jsonHeader
+	err := json.Unmarshal(data, &head)
+	if err != nil {
+		return err
+	}
+
+	fns := head.Sources
+	positions := head.Pos.Unpack()
+
+	*b = head.Body.decode(fns, positions)
+
 	return nil
 }
 
@@ -107,6 +118,28 @@ type bodyJSON struct {
 	Ranges rangesPacked `json:"r,omitempty"`
 }
 
+func (bj *bodyJSON) decode(fns []string, positions []position) Body {
+	var ret Body
+
+	if len(bj.Attrs) > 0 {
+		ret.Attributes = make(map[string]Attribute, len(bj.Attrs))
+		for name, aj := range bj.Attrs {
+			ret.Attributes[name] = aj.decode(fns, positions)
+		}
+	}
+
+	if len(bj.Blocks) > 0 {
+		ret.ChildBlocks = make([]Block, len(bj.Blocks))
+		for i, blj := range bj.Blocks {
+			ret.ChildBlocks[i] = blj.decode(fns, positions)
+		}
+	}
+
+	ret.MissingItemRange_ = bj.Ranges.UnpackIdx(fns, positions, 0)
+
+	return ret
+}
+
 type attrJSON struct {
 	// To keep things compact, in the JSON encoding we flatten the
 	// expression down into the attribute object, since overhead
@@ -118,6 +151,31 @@ type attrJSON struct {
 	Ranges rangesPacked `json:"r,omitempty"`
 }
 
+func (aj *attrJSON) decode(fns []string, positions []position) Attribute {
+	var ret Attribute
+
+	ret.Expr.Source = []byte(aj.Source)
+	switch aj.Syntax {
+	case 0:
+		ret.Expr.SourceType = ExprNative
+	case 1:
+		ret.Expr.SourceType = ExprTemplate
+	case 2:
+		ret.Expr.SourceType = ExprLiteralJSON
+	}
+
+	ret.Range = aj.Ranges.UnpackIdx(fns, positions, 0)
+	ret.NameRange = aj.Ranges.UnpackIdx(fns, positions, 1)
+	ret.Expr.Range_ = aj.Ranges.UnpackIdx(fns, positions, 2)
+	ret.Expr.StartRange_ = aj.Ranges.UnpackIdx(fns, positions, 3)
+	if ret.Expr.StartRange_ == (hcl.Range{}) {
+		// If the start range wasn't present then we'll just use the Range
+		ret.Expr.StartRange_ = ret.Expr.Range_
+	}
+
+	return ret
+}
+
 type blockJSON struct {
 	// Header is the type followed by any labels. We flatten this here
 	// to keep the JSON encoding compact.
@@ -128,3 +186,26 @@ type blockJSON struct {
 	// each of the label ranges in turn.
 	Ranges rangesPacked `json:"r,omitempty"`
 }
+
+func (blj *blockJSON) decode(fns []string, positions []position) Block {
+	var ret Block
+
+	if len(blj.Header) > 0 { // If the header is invalid then we'll end up with an empty type
+		ret.Type = blj.Header[0]
+	}
+	if len(blj.Header) > 1 {
+		ret.Labels = blj.Header[1:]
+	}
+	ret.Body = blj.Body.decode(fns, positions)
+
+	ret.DefRange = blj.Ranges.UnpackIdx(fns, positions, 0)
+	ret.TypeRange = blj.Ranges.UnpackIdx(fns, positions, 1)
+	if len(ret.Labels) > 0 {
+		ret.LabelRanges = make([]hcl.Range, len(ret.Labels))
+		for i := range ret.Labels {
+			ret.LabelRanges[i] = blj.Ranges.UnpackIdx(fns, positions, i+2)
+		}
+	}
+
+	return ret
+}
@@ -2,8 +2,46 @@ package hclpack
 
 import (
 	"testing"
+
+	"github.com/google/go-cmp/cmp"
+
+	"github.com/hashicorp/hcl2/hcl"
 )
 
 func TestJSONRoundTrip(t *testing.T) {
+	src := `
+service "example" {
+  priority = 2
+  platform {
+    os = "linux"
+    arch = "amd64"
+  }
+  process "web" {
+    exec = ["./webapp"]
+  }
+  process "worker" {
+    exec = ["./worker"]
+  }
+}
+`
+
+	startBody, diags := PackNativeFile([]byte(src), "example.svc", hcl.Pos{Line: 1, Column: 1})
+	if diags.HasErrors() {
+		t.Fatalf("Failed to parse: %s", diags.Error())
+	}
+
+	jb, err := startBody.MarshalJSON()
+	if err != nil {
+		t.Fatalf("Failed to marshal: %s", err)
+	}
+
+	endBody := &Body{}
+	err = endBody.UnmarshalJSON(jb)
+	if err != nil {
+		t.Fatalf("Failed to unmarshal: %s", err)
+	}
+
+	if !cmp.Equal(startBody, endBody) {
+		t.Errorf("incorrect result\n%s", cmp.Diff(startBody, endBody))
+	}
 }
@@ -51,6 +51,48 @@ func (pp positionsPacked) MarshalText() ([]byte, error) {
 	return ret, nil
 }
 
+func (pp *positionsPacked) UnmarshalBinary(data []byte) error {
+	buf := vlqBuf(data)
+	var ret positionsPacked
+	fileIdx := 0
+	for len(buf) > 0 {
+		if buf[0] == ';' {
+			// Starting a new file, then.
+			fileIdx++
+			buf = buf[1:]
+			continue
+		}
+
+		var ppr positionPacked
+		var err error
+		ppr.LineDelta, buf, err = buf.ReadInt()
+		if err != nil {
+			return err
+		}
+		ppr.ColumnDelta, buf, err = buf.ReadInt()
+		if err != nil {
+			return err
+		}
+		ppr.ByteDelta, buf, err = buf.ReadInt()
+		if err != nil {
+			return err
+		}
+		ret = append(ret, ppr)
+	}
+	*pp = ret
+	return nil
+}
+
+func (pp *positionsPacked) UnmarshalText(data []byte) error {
+	maxL := base64.RawStdEncoding.DecodedLen(len(data))
+	into := make([]byte, maxL)
+	realL, err := base64.RawStdEncoding.Decode(into, data)
+	if err != nil {
+		return err
+	}
+	return pp.UnmarshalBinary(into[:realL])
+}
+
 type position struct {
 	FileIdx int
 	Pos     hcl.Pos
@@ -177,6 +219,39 @@ func (rp rangesPacked) MarshalText() ([]byte, error) {
 	return ret, nil
 }
 
+func (rp *rangesPacked) UnmarshalBinary(data []byte) error {
+	buf := vlqBuf(data)
+	var ret rangesPacked
+	for len(buf) > 0 {
+		var startInt, endInt int
+		var err error
+		startInt, buf, err = buf.ReadInt()
+		if err != nil {
+			return err
+		}
+		endInt, buf, err = buf.ReadInt()
+		if err != nil {
+			return err
+		}
+		ret = append(ret, rangePacked{
+			Start: posOfs(startInt), // these are stored as 1-based offsets, so safe to convert directly
+			End:   posOfs(endInt),
+		})
+	}
+	*rp = ret
+	return nil
+}
+
+func (rp *rangesPacked) UnmarshalText(data []byte) error {
+	maxL := base64.RawStdEncoding.DecodedLen(len(data))
+	into := make([]byte, maxL)
+	realL, err := base64.RawStdEncoding.Decode(into, data)
+	if err != nil {
+		return err
+	}
+	return rp.UnmarshalBinary(into[:realL])
+}
+
 func (rps rangesPacked) UnpackIdx(fns []string, poss []position, idx int) hcl.Range {
 	if idx < 0 || idx >= len(rps) {
 		return hcl.Range{} // out of bounds, so invalid
@@ -1,6 +1,8 @@
 package hclpack
 
 import (
+	"errors"
+
 	"github.com/bsm/go-vlq"
 )
 
@@ -24,6 +26,21 @@ func (b vlqBuf) AppendInt(i int) vlqBuf {
 	return b
 }
 
+func (b vlqBuf) ReadInt() (int, vlqBuf, error) {
+	v, adv := vlq.Int([]byte(b))
+	if adv <= 0 {
+		if adv == 0 {
+			return 0, b, errors.New("missing expected VLQ value")
+		} else {
+			return 0, b, errors.New("invalid VLQ value")
+		}
+	}
+	if int64(int(v)) != v {
+		return 0, b, errors.New("VLQ value too big for integer on this platform")
+	}
+	return int(v), b[adv:], nil
+}
+
 func (b vlqBuf) AppendRawByte(by byte) vlqBuf {
 	return append(b, by)
 }
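
As a quick illustration of how the new ReadInt pairs with the existing AppendInt, here is a hypothetical in-package snippet; it is not part of the commit, and the function name and values are made up:

// vlqBufExample is a hypothetical helper, not part of this commit: it
// encodes two integers with AppendInt and then reads them back with
// ReadInt, which returns the decoded value plus the remaining buffer.
func vlqBufExample() (int, int, error) {
	var buf vlqBuf
	buf = buf.AppendInt(12)
	buf = buf.AppendInt(345)

	a, rest, err := buf.ReadInt()
	if err != nil {
		return 0, 0, err
	}
	b, _, err := rest.ReadInt()
	if err != nil {
		return 0, 0, err
	}
	return a, b, nil // expected: 12, 345
}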