Improve test coverage, add README badges.

This commit is contained in:
Joe Beda 2016-01-21 13:40:20 -08:00
parent a04a6cf2e3
commit cdbee43fa4
4 changed files with 45 additions and 15 deletions

12
.travis.yml Normal file
View File

@ -0,0 +1,12 @@
language: go
sudo: false
go:
- 1.4
- 1.5
- tip
before_install:
- go get github.com/axw/gocov/gocov
- go get github.com/mattn/goveralls
- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
script:
- $HOME/gopath/bin/goveralls -service=travis-ci

View File

@ -1,5 +1,11 @@
# go-jsonnet
[![GoDoc Widget]][GoDoc] [![Coverage Status Widget]][Coverage Status]
[GoDoc]: https://godoc.org/github.com/jbeda/go-jsonnet
[GoDoc Widget]: https://godoc.org/github.com/jbeda/go-jsonnet?status.png
[Coverage Status Widget]: https://coveralls.io/repos/github/jbeda/go-jsonnet/badge.svg?branch=master
[Coverage Status]: https://coveralls.io/github/jbeda/go-jsonnet?branch=master
This is a port of [jsonnet](http://jsonnet.org/) to Go. It is very much a work in progress.
This implementation is largely based on the [jsonnet C++ implementation](https://github.com/google/jsonnet).

View File

@ -203,7 +203,7 @@ func (l *lexer) next() rune {
if r == '\n' {
l.prevLineNumber = l.lineNumber
l.prevLineStart = l.lineStart
l.lineNumber += 1
l.lineNumber++
l.lineStart = l.pos
}
return r
@ -313,6 +313,8 @@ func (l *lexer) lexNumber() error {
)
state := numBegin
outerLoop:
for true {
r := l.next()
switch state {
@ -323,8 +325,8 @@ func (l *lexer) lexNumber() error {
case r >= '1' && r <= '9':
state = numAfterOneToNine
default:
return makeStaticErrorPoint(
"Couldn't lex number", l.fileName, l.prevLocation())
// The caller should ensure the first rune is a digit.
panic("Couldn't lex number")
}
case numAfterZero:
switch r {
@ -333,7 +335,7 @@ func (l *lexer) lexNumber() error {
case 'e', 'E':
state = numAfterE
default:
goto end
break outerLoop
}
case numAfterOneToNine:
switch {
@ -344,7 +346,7 @@ func (l *lexer) lexNumber() error {
case r >= '0' && r <= '9':
state = numAfterOneToNine
default:
goto end
break outerLoop
}
case numAfterDot:
switch {
@ -362,7 +364,7 @@ func (l *lexer) lexNumber() error {
case r >= '0' && r <= '9':
state = numAfterDigit
default:
goto end
break outerLoop
}
case numAfterE:
switch {
@ -388,11 +390,11 @@ func (l *lexer) lexNumber() error {
if r >= '0' && r <= '9' {
state = numAfterExpDigit
} else {
goto end
break outerLoop
}
}
}
end:
l.backup()
l.emitToken(tokenNumber)
return nil
@ -488,7 +490,7 @@ func (l *lexer) lexSymbol() error {
l.resetTokenStart() // Throw out the leading /*
for r = l.next(); ; r = l.next() {
if r == lexEOF {
return makeStaticErrorPoint("Multi-line comment has no terminating */.",
return makeStaticErrorPoint("Multi-line comment has no terminating */",
l.fileName, commentStartLoc)
}
if r == '*' && l.peek() == '/' {
@ -514,7 +516,7 @@ func (l *lexer) lexSymbol() error {
numWhiteSpace := checkWhitespace(l.input[l.pos:], l.input[l.pos:])
stringBlockIndent := l.input[l.pos : l.pos+numWhiteSpace]
if numWhiteSpace == 0 {
return makeStaticErrorPoint("Text block's first line must start with whitespace.",
return makeStaticErrorPoint("Text block's first line must start with whitespace",
l.fileName, commentStartLoc)
}

View File

@ -41,7 +41,7 @@ var lexTests = []lexTest{
{"number 0", "0", tokens{{kind: tokenNumber, data: "0"}}, ""},
{"number 1", "1", tokens{{kind: tokenNumber, data: "1"}}, ""},
{"number 1.0", "1.0", tokens{{kind: tokenNumber, data: "1.0"}}, ""},
{"number 0.1", "0.1", tokens{{kind: tokenNumber, data: "0.1"}}, ""},
{"number 0.10", "0.10", tokens{{kind: tokenNumber, data: "0.10"}}, ""},
{"number 0e100", "0e100", tokens{{kind: tokenNumber, data: "0e100"}}, ""},
{"number 1e100", "1e100", tokens{{kind: tokenNumber, data: "1e100"}}, ""},
{"number 1.1e100", "1.1e100", tokens{{kind: tokenNumber, data: "1.1e100"}}, ""},
@ -90,11 +90,13 @@ var lexTests = []lexTest{
{"then", "then", tokens{{kind: tokenThen, data: "then"}}, ""},
{"true", "true", tokens{{kind: tokenTrue, data: "true"}}, ""},
{"identifier", "foobar", tokens{{kind: tokenIdentifier, data: "foobar"}}, ""},
{"identifier", "foobar123", tokens{{kind: tokenIdentifier, data: "foobar123"}}, ""},
{"identifier", "foo bar123", tokens{{kind: tokenIdentifier, data: "foo"}, {kind: tokenIdentifier, data: "bar123"}}, ""},
{"c++ comment", "// hi", tokens{}, ""}, // This test doesn't look at fodder (yet?)
{"hash comment", "# hi", tokens{}, ""}, // This test doesn't look at fodder (yet?)
{"c comment", "/* hi */", tokens{}, ""}, // This test doesn't look at fodder (yet?)
{"c comment no term", "/* hi", tokens{}, "c comment no term:1:1 Multi-line comment has no terminating */"}, // This test doesn't look at fodder (yet?)
{
"block string spaces",
@ -195,6 +197,14 @@ var lexTests = []lexTest{
tokens{},
"block string not term:1:1 Text block not terminated with |||",
},
{
"block string no ws",
`|||
test
|||`,
tokens{},
"block string no ws:1:1 Text block's first line must start with whitespace",
},
{"op *", "*", tokens{{kind: tokenOperator, data: "*"}}, ""},
{"op /", "/", tokens{{kind: tokenOperator, data: "/"}}, ""},