Improve test coverage, add README badges.
commit cdbee43fa4
parent a04a6cf2e3
.travis.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
+language: go
+sudo: false
+go:
+  - 1.4
+  - 1.5
+  - tip
+before_install:
+  - go get github.com/axw/gocov/gocov
+  - go get github.com/mattn/goveralls
+  - if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+script:
+  - $HOME/gopath/bin/goveralls -service=travis-ci
README.md
@@ -1,5 +1,11 @@
 # go-jsonnet
 
+[![GoDoc Widget]][GoDoc] [![Coverage Status Widget]][Coverage Status]
+[GoDoc]: https://godoc.org/github.com/jbeda/go-jsonnet
+[GoDoc Widget]: https://godoc.org/github.com/jbeda/go-jsonnet?status.png
+[Coverage Status Widget]: https://coveralls.io/repos/github/jbeda/go-jsonnet/badge.svg?branch=master
+[Coverage Status]: https://coveralls.io/github/jbeda/go-jsonnet?branch=master
+
 This is a port of [jsonnet](http://jsonnet.org/) to go. It is very much a work in progress.
 
 This implementation is largely based on the the [jsonnet C++ implementation](https://github.com/google/jsonnet).
lexer.go (22 changed lines)
@@ -203,7 +203,7 @@ func (l *lexer) next() rune {
         if r == '\n' {
             l.prevLineNumber = l.lineNumber
             l.prevLineStart = l.lineStart
-            l.lineNumber += 1
+            l.lineNumber++
             l.lineStart = l.pos
         }
         return r
@@ -313,6 +313,8 @@ func (l *lexer) lexNumber() error {
     )
 
     state := numBegin
+
+outerLoop:
     for true {
         r := l.next()
         switch state {
@@ -323,8 +325,8 @@ func (l *lexer) lexNumber() error {
             case r >= '1' && r <= '9':
                 state = numAfterOneToNine
             default:
-                return makeStaticErrorPoint(
-                    "Couldn't lex number", l.fileName, l.prevLocation())
+                // The caller should ensure the first rune is a digit.
+                panic("Couldn't lex number")
             }
         case numAfterZero:
             switch r {
@@ -333,7 +335,7 @@ func (l *lexer) lexNumber() error {
             case 'e', 'E':
                 state = numAfterE
             default:
-                goto end
+                break outerLoop
             }
         case numAfterOneToNine:
             switch {
@@ -344,7 +346,7 @@ func (l *lexer) lexNumber() error {
            case r >= '0' && r <= '9':
                state = numAfterOneToNine
            default:
-               goto end
+               break outerLoop
            }
        case numAfterDot:
            switch {
@@ -362,7 +364,7 @@ func (l *lexer) lexNumber() error {
            case r >= '0' && r <= '9':
                state = numAfterDigit
            default:
-               goto end
+               break outerLoop
            }
        case numAfterE:
            switch {
@@ -388,11 +390,11 @@ func (l *lexer) lexNumber() error {
            if r >= '0' && r <= '9' {
                state = numAfterExpDigit
            } else {
-               goto end
+               break outerLoop
            }
        }
    }
-end:
+
    l.backup()
    l.emitToken(tokenNumber)
    return nil
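Note on the goto-to-labeled-break change above: in Go, a bare break inside a switch only exits the switch, so leaving the enclosing for loop from a case requires either a goto past the loop (the old code) or a label on the loop itself (the new code). A minimal, self-contained sketch of the labeled-break pattern follows; the names are illustrative only, not code from lexer.go:

package main

import "fmt"

func main() {
    digits := 0
scan: // the label targets the for loop, not the switch inside it
    for _, r := range "123x456" {
        switch {
        case r >= '0' && r <= '9':
            digits++
        default:
            break scan // a bare break here would only leave the switch
        }
    }
    fmt.Println(digits) // prints 3: scanning stops at 'x'
}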
@@ -488,7 +490,7 @@ func (l *lexer) lexSymbol() error {
        l.resetTokenStart() // Throw out the leading /*
        for r = l.next(); ; r = l.next() {
            if r == lexEOF {
-               return makeStaticErrorPoint("Multi-line comment has no terminating */.",
+               return makeStaticErrorPoint("Multi-line comment has no terminating */",
                    l.fileName, commentStartLoc)
            }
            if r == '*' && l.peek() == '/' {
@@ -514,7 +516,7 @@ func (l *lexer) lexSymbol() error {
        numWhiteSpace := checkWhitespace(l.input[l.pos:], l.input[l.pos:])
        stringBlockIndent := l.input[l.pos : l.pos+numWhiteSpace]
        if numWhiteSpace == 0 {
-           return makeStaticErrorPoint("Text block's first line must start with whitespace.",
+           return makeStaticErrorPoint("Text block's first line must start with whitespace",
                l.fileName, commentStartLoc)
        }
 
lexer_test.go
@@ -41,7 +41,7 @@ var lexTests = []lexTest{
    {"number 0", "0", tokens{{kind: tokenNumber, data: "0"}}, ""},
    {"number 1", "1", tokens{{kind: tokenNumber, data: "1"}}, ""},
    {"number 1.0", "1.0", tokens{{kind: tokenNumber, data: "1.0"}}, ""},
-   {"number 0.1", "0.1", tokens{{kind: tokenNumber, data: "0.1"}}, ""},
+   {"number 0.10", "0.10", tokens{{kind: tokenNumber, data: "0.10"}}, ""},
    {"number 0e100", "0e100", tokens{{kind: tokenNumber, data: "0e100"}}, ""},
    {"number 1e100", "1e100", tokens{{kind: tokenNumber, data: "1e100"}}, ""},
    {"number 1.1e100", "1.1e100", tokens{{kind: tokenNumber, data: "1.1e100"}}, ""},
@@ -90,11 +90,13 @@ var lexTests = []lexTest{
    {"then", "then", tokens{{kind: tokenThen, data: "then"}}, ""},
    {"true", "true", tokens{{kind: tokenTrue, data: "true"}}, ""},
 
-   {"identifier", "foobar", tokens{{kind: tokenIdentifier, data: "foobar"}}, ""},
+   {"identifier", "foobar123", tokens{{kind: tokenIdentifier, data: "foobar123"}}, ""},
+   {"identifier", "foo bar123", tokens{{kind: tokenIdentifier, data: "foo"}, {kind: tokenIdentifier, data: "bar123"}}, ""},
 
    {"c++ comment", "// hi", tokens{}, ""}, // This test doesn't look at fodder (yet?)
    {"hash comment", "# hi", tokens{}, ""}, // This test doesn't look at fodder (yet?)
    {"c comment", "/* hi */", tokens{}, ""}, // This test doesn't look at fodder (yet?)
+   {"c comment no term", "/* hi", tokens{}, "c comment no term:1:1 Multi-line comment has no terminating */"}, // This test doesn't look at fodder (yet?)
 
    {
        "block string spaces",
@@ -195,6 +197,14 @@ var lexTests = []lexTest{
        tokens{},
        "block string not term:1:1 Text block not terminated with |||",
    },
+   {
+       "block string no ws",
+       `|||
+test
+|||`,
+       tokens{},
+       "block string no ws:1:1 Text block's first line must start with whitespace",
+   },
 
    {"op *", "*", tokens{{kind: tokenOperator, data: "*"}}, ""},
    {"op /", "/", tokens{{kind: tokenOperator, data: "/"}}, ""},
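The new cases above extend the existing lexTests table; the test runner itself is unchanged and not shown in this diff. For context, a hedged sketch of how a table-driven runner over such a table typically looks — the type declarations and the lex stand-in below are illustrative assumptions, not the repo's actual definitions:

package main

import (
    "fmt"
    "reflect"
)

// Stand-in shapes mirroring the fields used by the table entries; the real
// declarations live in the lexer package and may differ.
type tokenKind int

const tokenNumber tokenKind = iota

type token struct {
    kind tokenKind
    data string
}

type tokens []token

type lexTest struct {
    name      string
    input     string
    tokens    tokens
    errString string
}

// lex is a placeholder for the lexer entry point, which this diff does not show.
func lex(name, input string) (tokens, error) {
    return tokens{{kind: tokenNumber, data: input}}, nil
}

func main() {
    cases := []lexTest{
        {"number 0.10", "0.10", tokens{{kind: tokenNumber, data: "0.10"}}, ""},
    }
    // Run every case, comparing the emitted tokens and the formatted error string.
    for _, c := range cases {
        got, err := lex(c.name, c.input)
        errString := ""
        if err != nil {
            errString = err.Error()
        }
        if errString != c.errString || !reflect.DeepEqual(got, c.tokens) {
            fmt.Printf("FAIL %s: got %v, err %q\n", c.name, got, errString)
            continue
        }
        fmt.Printf("ok   %s\n", c.name)
    }
}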