From af627bb2b986b9afac54689ce1360054f337e0e2 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Fri, 13 Feb 2015 20:24:17 +0100 Subject: [PATCH 1/6] Copy vendored deps manually instead of using Godeps. We were using Godep incorrectly (cloning repos from the internet during build time instead of including Godeps/_workspace in the GOPATH via "godep go"). However, to avoid even having to fetch "godeps" from the internet during build, this now just copies the vendored files into the GOPATH. Also, the protocol buffer library moved from Google Code to GitHub, which is reflected in these updates. This fixes https://github.com/prometheus/prometheus/issues/525 --- Godeps/Godeps.json | 9 ++++----- .../golang_protobuf_extensions/ext/all_test.go | 4 ++-- .../matttproud/golang_protobuf_extensions/ext/decode.go | 2 +- .../matttproud/golang_protobuf_extensions/ext/encode.go | 2 +- .../golang_protobuf_extensions/ext/fixtures_test.go | 8 ++++---- Makefile | 3 +-- Makefile.INCLUDE | 1 - NOTICE | 2 +- config/Makefile | 2 +- config/config.go | 2 +- config/generated/config.pb.go | 2 +- config/load.go | 2 +- retrieval/targetmanager_test.go | 2 +- 13 files changed, 19 insertions(+), 22 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 774f7caf8a..e40323763c 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,11 +1,10 @@ { "ImportPath": "github.com/prometheus/prometheus", - "GoVersion": "go1.4", + "GoVersion": "go1.4.1", "Deps": [ { - "ImportPath": "code.google.com/p/goprotobuf/proto", - "Comment": "go.r60-152", - "Rev": "36be16571e14f67e114bb0af619e5de2c1591679" + "ImportPath": "github.com/golang/protobuf/proto", + "Rev": "5677a0e3d5e89854c9974e1256839ee23f8233ca" }, { "ImportPath": "github.com/golang/glog", @@ -13,7 +12,7 @@ }, { "ImportPath": "github.com/matttproud/golang_protobuf_extensions/ext", - "Rev": "7a864a042e844af638df17ebbabf8183dace556a" + "Rev": "ba7d65ac66e9da93a714ca18f6d1bc7a0c09100c" }, { "ImportPath": "github.com/miekg/dns", diff --git 
a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/all_test.go b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/all_test.go index 98d4e2d818..7270b67a38 100644 --- a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/all_test.go +++ b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/all_test.go @@ -21,8 +21,8 @@ import ( "testing" "testing/quick" - . "code.google.com/p/goprotobuf/proto" - . "code.google.com/p/goprotobuf/proto/testdata" + . "github.com/golang/protobuf/proto" + . "github.com/golang/protobuf/proto/testdata" ) func TestWriteDelimited(t *testing.T) { diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/decode.go b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/decode.go index a9af23333c..28b520e4bb 100644 --- a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/decode.go +++ b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/decode.go @@ -19,7 +19,7 @@ import ( "errors" "io" - "code.google.com/p/goprotobuf/proto" + "github.com/golang/protobuf/proto" ) var errInvalidVarint = errors.New("invalid varint32 encountered") diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/encode.go b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/encode.go index a7a9345615..473b31dcca 100644 --- a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/encode.go +++ b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/encode.go @@ -18,7 +18,7 @@ import ( "encoding/binary" "io" - "code.google.com/p/goprotobuf/proto" + "github.com/golang/protobuf/proto" ) // WriteDelimited encodes and dumps a message to the provided writer prefixed diff --git a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/fixtures_test.go 
b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/fixtures_test.go index 9cfcbbf585..07e75c54da 100644 --- a/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/fixtures_test.go +++ b/Godeps/_workspace/src/github.com/matttproud/golang_protobuf_extensions/ext/fixtures_test.go @@ -1,5 +1,5 @@ // Copyright 2010 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// http://github.com/golang/protobuf/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -30,11 +30,11 @@ package ext import ( - . "code.google.com/p/goprotobuf/proto" - . "code.google.com/p/goprotobuf/proto/testdata" + . "github.com/golang/protobuf/proto" + . "github.com/golang/protobuf/proto/testdata" ) -// FROM https://code.google.com/p/goprotobuf/source/browse/proto/all_test.go. +// FROM https://github.com/golang/protobuf/blob/master/proto/all_test.go. func initGoTestField() *GoTestField { f := new(GoTestField) diff --git a/Makefile b/Makefile index e63876742a..dce56e777f 100644 --- a/Makefile +++ b/Makefile @@ -66,8 +66,7 @@ config: dependencies $(MAKE) -C config dependencies: $(GOCC) $(FULL_GOPATH) - $(GO) get github.com/tools/godep - $(GODEP) restore + cp -a $(CURDIR)/Godeps/_workspace/src/* $(GOPATH)/src $(GO) get -d documentation: search_index diff --git a/Makefile.INCLUDE b/Makefile.INCLUDE index 02c6f476b4..43eb8cf86d 100644 --- a/Makefile.INCLUDE +++ b/Makefile.INCLUDE @@ -47,7 +47,6 @@ GOCC = $(GOROOT)/bin/go TMPDIR = /tmp GOENV = TMPDIR=$(TMPDIR) GOROOT=$(GOROOT) GOPATH=$(GOPATH) GO = $(GOENV) $(GOCC) -GODEP = $(GOENV) $(GOPATH)/bin/godep GOFMT = $(GOROOT)/bin/gofmt UNAME := $(shell uname) diff --git a/NOTICE b/NOTICE index 6d8d2b8d83..a57f7990c6 100644 --- a/NOTICE +++ b/NOTICE @@ -38,7 +38,7 @@ Copyright jQuery Foundation and other contributors Licensed under the MIT License Go support for Protocol Buffers - Google's data 
interchange format -http://code.google.com/p/goprotobuf/ +http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. diff --git a/config/Makefile b/config/Makefile index 972d550037..983c0a05dd 100644 --- a/config/Makefile +++ b/config/Makefile @@ -18,5 +18,5 @@ SUFFIXES: include ../Makefile.INCLUDE generated/config.pb.go: config.proto - $(GO_GET) code.google.com/p/goprotobuf/protoc-gen-go + $(GO) get github.com/golang/protobuf/protoc-gen-go $(PROTOC) --proto_path=$(PREFIX)/include:. --go_out=generated/ config.proto diff --git a/config/config.go b/config/config.go index afb8f05cf3..1292f6c7a7 100644 --- a/config/config.go +++ b/config/config.go @@ -18,7 +18,7 @@ import ( "regexp" "time" - "code.google.com/p/goprotobuf/proto" + "github.com/golang/protobuf/proto" clientmodel "github.com/prometheus/client_golang/model" diff --git a/config/generated/config.pb.go b/config/generated/config.pb.go index 82a649cff4..adc7dd272e 100644 --- a/config/generated/config.pb.go +++ b/config/generated/config.pb.go @@ -18,7 +18,7 @@ It has these top-level messages: */ package io_prometheus -import proto "code.google.com/p/goprotobuf/proto" +import proto "github.com/golang/protobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/config/load.go b/config/load.go index f007ab5a84..195226aeed 100644 --- a/config/load.go +++ b/config/load.go @@ -16,7 +16,7 @@ package config import ( "io/ioutil" - "code.google.com/p/goprotobuf/proto" + "github.com/golang/protobuf/proto" pb "github.com/prometheus/prometheus/config/generated" ) diff --git a/retrieval/targetmanager_test.go b/retrieval/targetmanager_test.go index 52c28c47f3..fe1e97d498 100644 --- a/retrieval/targetmanager_test.go +++ b/retrieval/targetmanager_test.go @@ -17,7 +17,7 @@ import ( "testing" "time" - "code.google.com/p/goprotobuf/proto" + "github.com/golang/protobuf/proto" clientmodel "github.com/prometheus/client_golang/model" From 740a99a9accb7796355814acb8468dc49ce73429 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Sun, 15 Feb 2015 13:54:43 +0100 Subject: [PATCH 2/6] Remove Google Code files, add GitHub files for protobuf lib. --- .../golang/protobuf}/proto/Makefile | 7 +- .../golang/protobuf}/proto/all_test.go | 84 +++++- .../golang/protobuf}/proto/clone.go | 38 ++- .../golang/protobuf}/proto/clone_test.go | 45 ++- .../golang/protobuf}/proto/decode.go | 112 +++++++- .../golang/protobuf}/proto/encode.go | 261 ++++++++++++++++-- .../golang/protobuf}/proto/equal.go | 19 +- .../golang/protobuf}/proto/equal_test.go | 29 +- .../golang/protobuf}/proto/extensions.go | 6 +- .../golang/protobuf/proto/extensions_test.go | 137 +++++++++ .../golang/protobuf}/proto/lib.go | 91 +++--- .../golang/protobuf}/proto/message_set.go | 66 ++++- .../protobuf}/proto/message_set_test.go | 2 +- .../golang/protobuf}/proto/pointer_reflect.go | 99 ++++++- .../golang/protobuf}/proto/pointer_unsafe.go | 52 +++- .../golang/protobuf}/proto/properties.go | 76 ++++- .../protobuf/proto/proto3_proto/Makefile | 44 +++ .../protobuf/proto/proto3_proto/proto3.proto} | 50 ++-- .../golang/protobuf/proto/proto3_test.go | 93 +++++++ .../golang/protobuf}/proto/size2_test.go | 2 +- .../golang/protobuf}/proto/size_test.go | 19 +- 
.../golang/protobuf}/proto/testdata/Makefile | 6 +- .../protobuf}/proto/testdata/golden_test.go | 2 +- .../protobuf}/proto/testdata/test.pb.go | 41 ++- .../protobuf}/proto/testdata/test.proto | 8 +- .../golang/protobuf}/proto/text.go | 108 +++++++- .../golang/protobuf}/proto/text_parser.go | 127 +++++++-- .../protobuf}/proto/text_parser_test.go | 57 +++- .../golang/protobuf}/proto/text_test.go | 32 ++- 29 files changed, 1546 insertions(+), 167 deletions(-) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/Makefile (94%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/all_test.go (96%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/clone.go (83%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/clone_test.go (78%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/decode.go (85%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/encode.go (78%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/equal.go (94%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/equal_test.go (90%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/extensions.go (98%) create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/lib.go (89%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/message_set.go (79%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/message_set_test.go (98%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => 
github.com/golang/protobuf}/proto/pointer_reflect.go (82%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/pointer_unsafe.go (84%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/properties.go (88%) create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile rename Godeps/_workspace/src/{code.google.com/p/goprotobuf/proto/extensions_test.go => github.com/golang/protobuf/proto/proto3_proto/proto3.proto} (69%) create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/size2_test.go (98%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/size_test.go (83%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/testdata/Makefile (96%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/testdata/golden_test.go (98%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/testdata/test.pb.go (97%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/testdata/test.proto (98%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/text.go (87%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/text_parser.go (84%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/text_parser_test.go (87%) rename Godeps/_workspace/src/{code.google.com/p/goprotobuf => github.com/golang/protobuf}/proto/text_test.go (92%) diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile similarity index 94% rename from 
Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/Makefile rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile index e99b839a7d..fb838ed2d7 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/Makefile +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile @@ -1,7 +1,7 @@ # Go support for Protocol Buffers - Google's data interchange format # # Copyright 2010 The Go Authors. All rights reserved. -# http://code.google.com/p/goprotobuf/ +# https://github.com/golang/protobuf # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -37,4 +37,7 @@ test: install generate-test-pbs generate-test-pbs: - make install && cd testdata && make + make install + make -C testdata + make -C proto3_proto + make diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/all_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go similarity index 96% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/all_test.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go index b2a0191fcd..3fade175f7 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/all_test.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -45,7 +45,7 @@ import ( "time" . "./testdata" - . "code.google.com/p/goprotobuf/proto" + . 
"github.com/golang/protobuf/proto" ) var globalO *Buffer @@ -1833,6 +1833,86 @@ func fuzzUnmarshal(t *testing.T, data []byte) { Unmarshal(data, pb) } +func TestMapFieldMarshal(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // b should be the concatenation of these three byte sequences in some order. + parts := []string{ + "\n\a\b\x01\x12\x03Rob", + "\n\a\b\x04\x12\x03Ian", + "\n\b\b\x08\x12\x04Dave", + } + ok := false + for i := range parts { + for j := range parts { + if j == i { + continue + } + for k := range parts { + if k == i || k == j { + continue + } + try := parts[i] + parts[j] + parts[k] + if bytes.Equal(b, []byte(try)) { + ok = true + break + } + } + } + } + if !ok { + t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) + } + t.Logf("FYI b: %q", b) + + (new(Buffer)).DebugPrint("Dump of b", b) +} + +func TestMapFieldRoundTrips(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + MsgMapping: map[int64]*FloatingPoint{ + 0x7001: &FloatingPoint{F: Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{ + false: []byte("that's not right!"), + true: []byte("aye, 'tis true!"), + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("FYI b: %q", b) + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + for _, pair := range [][2]interface{}{ + {m.NameMapping, m2.NameMapping}, + {m.MsgMapping, m2.MsgMapping}, + {m.ByteMapping, m2.ByteMapping}, + } { + if !reflect.DeepEqual(pair[0], pair[1]) { + t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) + } + } +} + // Benchmarks func testMsg() *GoTest { diff --git 
a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go similarity index 83% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go index 8b94d9e186..6c6a7d95f2 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2011 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// Protocol buffer deep copy. +// Protocol buffer deep copy and merge. // TODO: MessageSet and RawMessage. package proto @@ -113,6 +113,29 @@ func mergeAny(out, in reflect.Value) { case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: out.Set(in) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. 
+ elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key)) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } case reflect.Ptr: if in.IsNil() { return @@ -125,6 +148,14 @@ func mergeAny(out, in reflect.Value) { if in.IsNil() { return } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } n := in.Len() if out.IsNil() { out.Set(reflect.MakeSlice(in.Type(), 0, n)) @@ -133,9 +164,6 @@ func mergeAny(out, in reflect.Value) { case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: out.Set(reflect.AppendSlice(out, in)) - case reflect.Uint8: - // []byte is a scalar bytes field. - out.Set(in) default: for i := 0; i < n; i++ { x := reflect.Indirect(reflect.New(in.Type().Elem())) diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go similarity index 78% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone_test.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go index 39aaaf7bda..1ac177d216 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/clone_test.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2011 The Go Authors. All rights reserved. 
-// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -34,7 +34,7 @@ package proto_test import ( "testing" - "code.google.com/p/goprotobuf/proto" + "github.com/golang/protobuf/proto" pb "./testdata" ) @@ -79,6 +79,22 @@ func TestClone(t *testing.T) { if proto.Equal(m, cloneTestMessage) { t.Error("Mutating clone changed the original") } + // Byte fields and repeated fields should be copied. + if &m.Pet[0] == &cloneTestMessage.Pet[0] { + t.Error("Pet: repeated field not copied") + } + if &m.Others[0] == &cloneTestMessage.Others[0] { + t.Error("Others: repeated field not copied") + } + if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { + t.Error("Others[0].Value: bytes field not copied") + } + if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { + t.Error("RepBytes: repeated field not copied") + } + if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { + t.Error("RepBytes[0]: bytes field not copied") + } } func TestCloneNil(t *testing.T) { @@ -173,6 +189,31 @@ var mergeTests = []struct { dst: &pb.OtherMessage{Value: []byte("bar")}, want: &pb.OtherMessage{Value: []byte("foo")}, }, + { + src: &pb.MessageWithMap{ + NameMapping: map[int32]string{6: "Nigel"}, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + dst: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Bruce", // should be overwritten + 7: "Andrew", + }, + }, + want: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Nigel", + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + }, } func TestMerge(t *testing.T) { diff --git 
a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/decode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go similarity index 85% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/decode.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go index 49a487a375..129792ed12 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/decode.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -178,7 +178,7 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { n, err := p.DecodeVarint() if err != nil { - return + return nil, err } nb := int(n) @@ -362,7 +362,7 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group } tag := int(u >> 3) if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d", st, tag) + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) } fieldnum, ok := prop.decoderTags.get(tag) if !ok { @@ -465,6 +465,15 @@ func (o *Buffer) dec_bool(p *Properties, base structPointer) error { return nil } +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + // Decode an int32. 
func (o *Buffer) dec_int32(p *Properties, base structPointer) error { u, err := p.valDec(o) @@ -475,6 +484,15 @@ func (o *Buffer) dec_int32(p *Properties, base structPointer) error { return nil } +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + // Decode an int64. func (o *Buffer) dec_int64(p *Properties, base structPointer) error { u, err := p.valDec(o) @@ -485,15 +503,31 @@ func (o *Buffer) dec_int64(p *Properties, base structPointer) error { return nil } +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + // Decode a string. func (o *Buffer) dec_string(p *Properties, base structPointer) error { s, err := o.DecodeStringBytes() if err != nil { return err } - sp := new(string) - *sp = s - *structPointer_String(base, p.field) = sp + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s return nil } @@ -632,6 +666,72 @@ func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { return nil } +// Decode a map field. 
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? 
+ return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + + v.SetMapIndex(keyptr.Elem(), valptr.Elem()) + return nil +} + // Decode a group. func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { bas := structPointer_GetStructPointer(base, p.field) diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/encode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go similarity index 78% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/encode.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go index 35e5ad56ef..1512d605b2 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/encode.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -247,7 +247,7 @@ func (p *Buffer) Marshal(pb Message) error { return ErrNil } if err == nil { - err = p.enc_struct(t.Elem(), GetProperties(t.Elem()), base) + err = p.enc_struct(GetProperties(t.Elem()), base) } if collectStats { @@ -271,7 +271,7 @@ func Size(pb Message) (n int) { return 0 } if err == nil { - n = size_struct(t.Elem(), GetProperties(t.Elem()), base) + n = size_struct(GetProperties(t.Elem()), base) } if collectStats { @@ -298,6 +298,16 @@ func (o *Buffer) enc_bool(p *Properties, base structPointer) error { return nil } +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, 1) + return nil +} + func size_bool(p *Properties, base structPointer) int { v := *structPointer_Bool(base, p.field) if v == nil { @@ -306,6 +316,14 @@ func size_bool(p *Properties, base structPointer) int { return len(p.tagcode) + 1 // each bool takes exactly one byte } +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + // Encode an int32. func (o *Buffer) enc_int32(p *Properties, base structPointer) error { v := structPointer_Word32(base, p.field) @@ -318,6 +336,17 @@ func (o *Buffer) enc_int32(p *Properties, base structPointer) error { return nil } +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + func size_int32(p *Properties, base structPointer) (n int) { v := structPointer_Word32(base, p.field) if word32_IsNil(v) { @@ -329,6 +358,17 @@ func size_int32(p *Properties, base structPointer) (n int) { return } +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + // Encode a uint32. // Exactly the same as int32, except for no sign extension. 
func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { @@ -342,6 +382,17 @@ func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { return nil } +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + func size_uint32(p *Properties, base structPointer) (n int) { v := structPointer_Word32(base, p.field) if word32_IsNil(v) { @@ -353,6 +404,17 @@ func size_uint32(p *Properties, base structPointer) (n int) { return } +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + // Encode an int64. func (o *Buffer) enc_int64(p *Properties, base structPointer) error { v := structPointer_Word64(base, p.field) @@ -365,6 +427,17 @@ func (o *Buffer) enc_int64(p *Properties, base structPointer) error { return nil } +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + func size_int64(p *Properties, base structPointer) (n int) { v := structPointer_Word64(base, p.field) if word64_IsNil(v) { @@ -376,6 +449,17 @@ func size_int64(p *Properties, base structPointer) (n int) { return } +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + // Encode a string. 
func (o *Buffer) enc_string(p *Properties, base structPointer) error { v := *structPointer_String(base, p.field) @@ -388,6 +472,16 @@ func (o *Buffer) enc_string(p *Properties, base structPointer) error { return nil } +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + func size_string(p *Properties, base structPointer) (n int) { v := *structPointer_String(base, p.field) if v == nil { @@ -399,6 +493,16 @@ func size_string(p *Properties, base structPointer) (n int) { return } +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + // All protocol buffer fields are nillable, but be careful. func isNil(v reflect.Value) bool { switch v.Kind() { @@ -429,7 +533,7 @@ func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { } o.buf = append(o.buf, p.tagcode...) 
- return o.enc_len_struct(p.stype, p.sprop, structp, &state) + return o.enc_len_struct(p.sprop, structp, &state) } func size_struct_message(p *Properties, base structPointer) int { @@ -448,7 +552,7 @@ func size_struct_message(p *Properties, base structPointer) int { } n0 := len(p.tagcode) - n1 := size_struct(p.stype, p.sprop, structp) + n1 := size_struct(p.sprop, structp) n2 := sizeVarint(uint64(n1)) // size of encoded length return n0 + n1 + n2 } @@ -462,7 +566,7 @@ func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { } o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.stype, p.sprop, b) + err := o.enc_struct(p.sprop, b) if err != nil && !state.shouldContinue(err, nil) { return err } @@ -477,7 +581,7 @@ func size_struct_group(p *Properties, base structPointer) (n int) { } n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.stype, p.sprop, b) + n += size_struct(p.sprop, b) n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) return } @@ -551,6 +655,16 @@ func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { return nil } +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + func size_slice_byte(p *Properties, base structPointer) (n int) { s := *structPointer_Bytes(base, p.field) if s == nil { @@ -561,6 +675,16 @@ func size_slice_byte(p *Properties, base structPointer) (n int) { return } +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + // Encode a slice of int32s ([]int32). 
func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { s := structPointer_Word32Slice(base, p.field) @@ -831,7 +955,7 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err } o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.stype, p.sprop, structp, &state) + err := o.enc_len_struct(p.sprop, structp, &state) if err != nil && !state.shouldContinue(err, nil) { if err == ErrNil { return ErrRepeatedHasNil @@ -861,7 +985,7 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) { continue } - n0 := size_struct(p.stype, p.sprop, structp) + n0 := size_struct(p.sprop, structp) n1 := sizeVarint(uint64(n0)) // size of encoded length n += n0 + n1 } @@ -882,7 +1006,7 @@ func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.stype, p.sprop, b) + err := o.enc_struct(p.sprop, b) if err != nil && !state.shouldContinue(err, nil) { if err == ErrNil { @@ -908,7 +1032,7 @@ func size_slice_struct_group(p *Properties, base structPointer) (n int) { return // return size up to this point } - n += size_struct(p.stype, p.sprop, b) + n += size_struct(p.sprop, b) } return } @@ -945,12 +1069,112 @@ func size_map(p *Properties, base structPointer) int { return sizeExtensionMap(v) } +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? 
+ + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + return err + } + return nil + } + + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := v.MapIndex(key) + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes are two bytes per map entry. + n += 2 + n += p.mkeyprop.size(p.mkeyprop, keybase) + n += p.mvalprop.size(p.mvalprop, valbase) + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
+ + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + // Encode a struct. -func (o *Buffer) enc_struct(t reflect.Type, prop *StructProperties, base structPointer) error { +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { var state errorState // Encode fields in tag order so that decoders may use optimizations // that depend on the ordering. 
- // http://code.google.com/apis/protocolbuffers/docs/encoding.html#order + // https://developers.google.com/protocol-buffers/docs/encoding#order for _, i := range prop.order { p := prop.Prop[i] if p.enc != nil { @@ -978,7 +1202,7 @@ func (o *Buffer) enc_struct(t reflect.Type, prop *StructProperties, base structP return state.err } -func size_struct(t reflect.Type, prop *StructProperties, base structPointer) (n int) { +func size_struct(prop *StructProperties, base structPointer) (n int) { for _, i := range prop.order { p := prop.Prop[i] if p.size != nil { @@ -998,11 +1222,16 @@ func size_struct(t reflect.Type, prop *StructProperties, base structPointer) (n var zeroes [20]byte // longer than any conceivable sizeVarint // Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(t reflect.Type, prop *StructProperties, base structPointer, state *errorState) error { +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). 
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { iLen := len(o.buf) o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length iMsg := len(o.buf) - err := o.enc_struct(t, prop, base) + err := enc() if err != nil && !state.shouldContinue(err, nil) { return err } diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go similarity index 94% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go index 42542e6928..d8673a3e97 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2011 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -57,7 +57,7 @@ Equality is defined in this way: although represented by []byte, is not a repeated field) - Two unset fields are equal. - Two unknown field sets are equal if their current - encoded state is equal. (TODO) + encoded state is equal. - Two extension sets are equal iff they have corresponding elements that are pairwise equal. - Every other combination of things are not equal. @@ -154,6 +154,21 @@ func equalAny(v1, v2 reflect.Value) bool { return v1.Float() == v2.Float() case reflect.Int32, reflect.Int64: return v1.Int() == v2.Int() + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. 
+ return false + } + if !equalAny(v1.MapIndex(key), val2) { + return false + } + } + return true case reflect.Ptr: return equalAny(v1.Elem(), v2.Elem()) case reflect.Slice: diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go similarity index 90% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal_test.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go index cdf3fd9c24..cc25833ca4 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/equal_test.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2011 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -35,7 +35,7 @@ import ( "testing" pb "./testdata" - . "code.google.com/p/goprotobuf/proto" + . "github.com/golang/protobuf/proto" ) // Four identical base messages. 
@@ -155,6 +155,31 @@ var EqualTests = []struct { }, true, }, + + { + "map same", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + true, + }, + { + "map different entry", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, + false, + }, + { + "map different key only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, + false, + }, + { + "map different value only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, + false, + }, } func TestEqual(t *testing.T) { diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go similarity index 98% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go index e592053c51..f7667fab48 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. 
-// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -227,7 +227,8 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er return nil, err } - e, ok := pb.ExtensionMap()[extension.Field] + emap := pb.ExtensionMap() + e, ok := emap[extension.Field] if !ok { return nil, ErrMissingExtension } @@ -252,6 +253,7 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er e.value = v e.desc = extension e.enc = nil + emap[extension.Field] = e return e.value, nil } diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go new file mode 100644 index 0000000000..451ad871a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go @@ -0,0 +1,137 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + pb "./testdata" + "github.com/golang/protobuf/proto" +) + +func TestGetExtensionsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", ext1) + } + exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ + pb.E_Ext_More, + pb.E_Ext_Text, + }) + if err != nil { + t.Fatalf("GetExtensions() failed: %s", err) + } + if exts[0] != ext1 { + t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) + } + if exts[1] != nil { + t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) + } +} + +func TestGetExtensionStability(t *testing.T) { + check := func(m *pb.MyMessage) bool { + ext1, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + ext2, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + return ext1 == ext2 + } + msg := &pb.MyMessage{Count: proto.Int32(4)} + ext0 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { + 
t.Fatalf("Could not set ext1: %s", ext0) + } + if !check(msg) { + t.Errorf("GetExtension() not stable before marshaling") + } + bb, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Marshal() failed: %s", err) + } + msg1 := &pb.MyMessage{} + err = proto.Unmarshal(bb, msg1) + if err != nil { + t.Fatalf("Unmarshal() failed: %s", err) + } + if !check(msg1) { + t.Errorf("GetExtension() not stable after unmarshaling") + } +} + +func TestExtensionsRoundTrip(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{ + Data: proto.String("hi"), + } + ext2 := &pb.Ext{ + Data: proto.String("there"), + } + exists := proto.HasExtension(msg, pb.E_Ext_More) + if exists { + t.Error("Extension More present unexpectedly") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Error(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { + t.Error(err) + } + e, err := proto.GetExtension(msg, pb.E_Ext_More) + if err != nil { + t.Error(err) + } + x, ok := e.(*pb.Ext) + if !ok { + t.Errorf("e has type %T, expected testdata.Ext", e) + } else if *x.Data != "there" { + t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) + } + proto.ClearExtension(msg, pb.E_Ext_More) + if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { + t.Errorf("got %v, expected ErrMissingExtension", e) + } + if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { + t.Error("expected bad extension error, got nil") + } + if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { + t.Error("expected extension err") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { + t.Error("expected some sort of type mismatch error, got nil") + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/lib.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go similarity index 89% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/lib.go rename to 
Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go index 46a4416fdf..f81052f2ae 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/lib.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -39,7 +39,7 @@ - Names are turned from camel_case to CamelCase for export. - There are no methods on v to set fields; just treat - them as structure fields. + them as structure fields. - There are getters that return a field's value if set, and return the field's default value if unset. The getters work even if the receiver is a nil message. @@ -50,17 +50,16 @@ That is, optional or required field int32 f becomes F *int32. - Repeated fields are slices. - Helper functions are available to aid the setting of fields. - Helpers for getting values are superseded by the - GetFoo methods and their use is deprecated. - msg.Foo = proto.String("hello") // set field + msg.Foo = proto.String("hello") // set field - Constants are defined to hold the default values of all fields that have them. They have the form Default_StructName_FieldName. Because the getter methods handle defaulted values, direct use of these constants should be rare. - Enums are given type names and maps from names to values. - Enum values are prefixed with the enum's type name. Enum types have - a String method, and a Enum method to assist in message construction. - - Nested groups and enums have type names prefixed with the name of + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. 
Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of the surrounding message type. - Extensions are given descriptor names that start with E_, followed by an underscore-delimited list of the nested messages @@ -74,7 +73,7 @@ package example; - enum FOO { X = 17; }; + enum FOO { X = 17; } message Test { required string label = 1; @@ -89,7 +88,8 @@ package example - import "code.google.com/p/goprotobuf/proto" + import proto "github.com/golang/protobuf/proto" + import math "math" type FOO int32 const ( @@ -110,6 +110,14 @@ func (x FOO) String() string { return proto.EnumName(FOO_name, int32(x)) } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } type Test struct { Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` @@ -118,41 +126,41 @@ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` XXX_unrecognized []byte `json:"-"` } - func (this *Test) Reset() { *this = Test{} } - func (this *Test) String() string { return proto.CompactTextString(this) } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} const Default_Test_Type int32 = 77 - func (this *Test) GetLabel() string { - if this != nil && this.Label != nil { - return *this.Label + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label } return "" } - func (this *Test) GetType() int32 { - if this != nil && this.Type != nil { - return *this.Type + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type } return Default_Test_Type } - func (this *Test) GetOptionalgroup() *Test_OptionalGroup { - if this != nil { - return this.Optionalgroup + func (m *Test) 
GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup } return nil } type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` } - func (this *Test_OptionalGroup) Reset() { *this = Test_OptionalGroup{} } - func (this *Test_OptionalGroup) String() string { return proto.CompactTextString(this) } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - func (this *Test_OptionalGroup) GetRequiredField() string { - if this != nil && this.RequiredField != nil { - return *this.RequiredField + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField } return "" } @@ -168,15 +176,15 @@ import ( "log" - "code.google.com/p/goprotobuf/proto" - "./example.pb" + "github.com/golang/protobuf/proto" + pb "./example.pb" ) func main() { - test := &example.Test{ + test := &pb.Test{ Label: proto.String("hello"), Type: proto.Int32(17), - Optionalgroup: &example.Test_OptionalGroup{ + Optionalgroup: &pb.Test_OptionalGroup{ RequiredField: proto.String("good bye"), }, } @@ -184,7 +192,7 @@ if err != nil { log.Fatal("marshaling error: ", err) } - newTest := new(example.Test) + newTest := &pb.Test{} err = proto.Unmarshal(data, newTest) if err != nil { log.Fatal("unmarshaling error: ", err) @@ -323,9 +331,7 @@ func Float64(v float64) *float64 { // Uint32 is a helper routine that allocates a new uint32 value // to store v and returns a pointer to it. 
func Uint32(v uint32) *uint32 { - p := new(uint32) - *p = v - return p + return &v } // Uint64 is a helper routine that allocates a new uint64 value @@ -738,3 +744,16 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { return dm } + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go similarity index 79% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go index 1a17809830..9d912bce19 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -36,7 +36,10 @@ package proto */ import ( + "bytes" + "encoding/json" "errors" + "fmt" "reflect" "sort" ) @@ -211,6 +214,61 @@ func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { return nil } +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. 
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + // A global registry of types that can be used in a MessageSet. var messageSetMap = make(map[int32]messageSetDesc) @@ -221,9 +279,9 @@ type messageSetDesc struct { } // RegisterMessageSetType is called from the generated code. 
-func RegisterMessageSetType(i messageTypeIder, name string) { - messageSetMap[i.MessageTypeId()] = messageSetDesc{ - t: reflect.TypeOf(i), +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), name: name, } } diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go similarity index 98% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set_test.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go index bb311bcccf..7c29bccf4b 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/message_set_test.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2014 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go similarity index 82% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_reflect.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go index 61141ba858..c68b12525c 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_reflect.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2012 The Go Authors. All rights reserved. 
-// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine,!appenginevm +// +build appengine // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -114,6 +114,11 @@ func structPointer_Bool(p structPointer, f field) **bool { return structPointer_ifield(p, f).(**bool) } +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + // BoolSlice returns the address of a []bool field in the struct. func structPointer_BoolSlice(p structPointer, f field) *[]bool { return structPointer_ifield(p, f).(*[]bool) @@ -124,6 +129,11 @@ func structPointer_String(p structPointer, f field) **string { return structPointer_ifield(p, f).(**string) } +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + // StringSlice returns the address of a []string field in the struct. func structPointer_StringSlice(p structPointer, f field) *[]string { return structPointer_ifield(p, f).(*[]string) @@ -134,6 +144,11 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return structPointer_ifield(p, f).(*map[int32]Extension) } +// Map returns the reflect.Value for the address of a map field in the struct. 
+func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + // SetStructPointer writes a *struct field in the struct. func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { structPointer_field(p, f).Set(q.v) @@ -235,6 +250,49 @@ func structPointer_Word32(p structPointer, f field) word32 { return word32{structPointer_field(p, f)} } +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + // A word32Slice is a slice of 32-bit values. // That is, v.Type() is []int32, []uint32, []float32, or []enum. type word32Slice struct { @@ -339,6 +397,43 @@ func structPointer_Word64(p structPointer, f field) word64 { return word64{structPointer_field(p, f)} } +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + type word64Slice struct { v reflect.Value } diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go similarity index 84% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_unsafe.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go index 27a536c88d..48bc0fa482 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/pointer_unsafe.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2012 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine appenginevm +// +build !appengine // This file contains the implementation of the proto field accesses using package unsafe. 
@@ -100,6 +100,11 @@ func structPointer_Bool(p structPointer, f field) **bool { return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) } +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + // BoolSlice returns the address of a []bool field in the struct. func structPointer_BoolSlice(p structPointer, f field) *[]bool { return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) @@ -110,6 +115,11 @@ func structPointer_String(p structPointer, f field) **string { return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) } +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + // StringSlice returns the address of a []string field in the struct. func structPointer_StringSlice(p structPointer, f field) *[]string { return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) @@ -120,6 +130,11 @@ func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) } +// Map returns the reflect.Value for the address of a map field in the struct. +func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + // SetStructPointer writes a *struct field in the struct. func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q @@ -170,6 +185,24 @@ func structPointer_Word32(p structPointer, f field) word32 { return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) } +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. 
+func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + // A word32Slice is a slice of 32-bit values. type word32Slice []uint32 @@ -206,6 +239,21 @@ func structPointer_Word64(p structPointer, f field) word64 { return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) } +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + // word64Slice is like word32Slice but for 64-bit values. type word64Slice []uint64 diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/properties.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go similarity index 88% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/properties.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go index f2c1e861cd..730a595797 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/properties.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. 
-// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -155,6 +155,7 @@ type Properties struct { Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only Default string // default value HasDefault bool // whether an explicit default was provided @@ -170,6 +171,10 @@ type Properties struct { isMarshaler bool isUnmarshaler bool + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + size sizer valSize valueSizer // set for bool and numeric types only @@ -200,6 +205,9 @@ func (p *Properties) String() string { if p.OrigName != p.Name { s += ",name=" + p.OrigName } + if p.proto3 { + s += ",proto3" + } if len(p.Enum) > 0 { s += ",enum=" + p.Enum } @@ -274,6 +282,8 @@ func (p *Properties) Parse(s string) { p.OrigName = f[5:] case strings.HasPrefix(f, "enum="): p.Enum = f[5:] + case f == "proto3": + p.proto3 = true case strings.HasPrefix(f, "def="): p.HasDefault = true p.Default = f[4:] // rest of string @@ -293,19 +303,50 @@ func logNoSliceEnc(t1, t2 reflect.Type) { var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() // Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, lockGetProp bool) { +func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { p.enc = nil p.dec = nil p.size = nil switch t1 := typ; t1.Kind() { default: - fmt.Fprintf(os.Stderr, "proto: no coders for %T\n", t1) + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string case reflect.Ptr: switch t2 := t1.Elem(); t2.Kind() { default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %T -> %T\n", t1, t2) + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) break case reflect.Bool: p.enc = (*Buffer).enc_bool @@ -399,6 +440,10 @@ func (p *Properties) setEncAndDec(typ reflect.Type, lockGetProp bool) { p.enc = (*Buffer).enc_slice_byte p.dec = (*Buffer).dec_slice_byte p.size = size_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } case reflect.Float32, reflect.Float64: switch t2.Bits() { case 32: @@ -461,6 
+506,23 @@ func (p *Properties) setEncAndDec(typ reflect.Type, lockGetProp bool) { p.size = size_slice_slice_byte } } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } // precalculate tag code @@ -529,7 +591,7 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF return } p.Parse(tag) - p.setEncAndDec(typ, lockGetProp) + p.setEncAndDec(typ, f, lockGetProp) } var ( @@ -538,7 +600,11 @@ var ( ) // GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } mutex.Lock() sprop := getPropertiesLocked(t) mutex.Unlock() diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile new file mode 100644 index 0000000000..75144b582e --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile @@ -0,0 +1,44 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2014 The Go Authors. All rights reserved. 
+# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +include ../../Make.protobuf + +all: regenerate + +regenerate: + rm -f proto3.pb.go + make proto3.pb.go + +# The following rules are just aids to development. Not needed for typical testing. 
+ +diff: regenerate + git diff proto3.pb.go diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto similarity index 69% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions_test.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto index 03f756b6f8..3e327ded1d 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/extensions_test.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2014 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -29,32 +29,30 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-package proto_test +syntax = "proto3"; -import ( - "testing" +package proto3_proto; - pb "./testdata" - "code.google.com/p/goprotobuf/proto" -) +message Message { + enum Humour { + UNKNOWN = 0; + PUNS = 1; + SLAPSTICK = 2; + BILL_BAILEY = 3; + } -func TestGetExtensionsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", ext1) - } - exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ - pb.E_Ext_More, - pb.E_Ext_Text, - }) - if err != nil { - t.Fatalf("GetExtensions() failed: %s", err) - } - if exts[0] != ext1 { - t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) - } - if exts[1] != nil { - t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) - } + string name = 1; + Humour hilarity = 2; + uint32 height_in_cm = 3; + bytes data = 4; + int64 result_count = 7; + bool true_scotsman = 8; + float score = 9; + + repeated uint64 key = 5; + Nested nested = 6; +} + +message Nested { + string bunny = 1; } diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go new file mode 100644 index 0000000000..d4c96a9e73 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go @@ -0,0 +1,93 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto_test + +import ( + "testing" + + pb "./proto3_proto" + "github.com/golang/protobuf/proto" +) + +func TestProto3ZeroValues(t *testing.T) { + tests := []struct { + desc string + m proto.Message + }{ + {"zero message", &pb.Message{}}, + {"empty bytes field", &pb.Message{Data: []byte{}}}, + } + for _, test := range tests { + b, err := proto.Marshal(test.m) + if err != nil { + t.Errorf("%s: proto.Marshal: %v", test.desc, err) + continue + } + if len(b) > 0 { + t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) + } + } +} + +func TestRoundTripProto3(t *testing.T) { + m := &pb.Message{ + Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" + Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 + HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 + Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" + ResultCount: 47, // (0 | 7<<3): 0x38 0x2f + TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 + Score: 8.1, // (5 | 9<<3): 0x4d <8.1> + + Key: []uint64{1, 0xdeadbeef}, + Nested: &pb.Nested{ + Bunny: "Monty", + }, + } + t.Logf(" m: %v", m) + + b, err := proto.Marshal(m) + if err != nil { + t.Fatalf("proto.Marshal: %v", err) + } + t.Logf(" b: %q", b) + + m2 := new(pb.Message) + if err := proto.Unmarshal(b, m2); err != nil { + t.Fatalf("proto.Unmarshal: %v", err) + } + t.Logf("m2: %v", m2) + + if !proto.Equal(m, m2) { + t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size2_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go similarity index 98% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size2_test.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go index 55902a4a9c..a2729c39a1 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size2_test.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's 
data interchange format // // Copyright 2012 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go similarity index 83% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size_test.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go index 6cb9de6d1d..e5f92d6b90 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/size_test.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2012 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -35,8 +35,9 @@ import ( "log" "testing" + proto3pb "./proto3_proto" pb "./testdata" - . "code.google.com/p/goprotobuf/proto" + . 
"github.com/golang/protobuf/proto" ) var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} @@ -102,6 +103,20 @@ var SizeTests = []struct { {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, {"extension (unencoded)", messageWithExtension1}, {"extension (encoded)", messageWithExtension3}, + // proto3 message + {"proto3 empty", &proto3pb.Message{}}, + {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, + {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, + {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, + {"proto3 float", &proto3pb.Message{Score: 12.6}}, + {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, + {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, + {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, + {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + + {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, + {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, + {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, } func TestSize(t *testing.T) { diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile similarity index 96% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/Makefile rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile index 9fa10e4c69..fc288628a7 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/Makefile +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile @@ -1,7 +1,7 @@ # Go support for Protocol Buffers - Google's data interchange format # # Copyright 2010 The Go Authors. All rights reserved. 
-# http://code.google.com/p/goprotobuf/ +# https://github.com/golang/protobuf # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -37,11 +37,11 @@ all: regenerate regenerate: rm -f test.pb.go make test.pb.go - + # The following rules are just aids to development. Not needed for typical testing. diff: regenerate - hg diff test.pb.go + git diff test.pb.go restore: cp test.pb.go.golden test.pb.go diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go similarity index 98% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/golden_test.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go index f614aa1b73..7172d0e969 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/golden_test.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2012 The Go Authors. All rights reserved. 
-// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go similarity index 97% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.pb.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go index 5a9a603662..f47d9e0e39 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.pb.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go @@ -33,10 +33,11 @@ It has these top-level messages: GroupOld GroupNew FloatingPoint + MessageWithMap */ package testdata -import proto "code.google.com/p/goprotobuf/proto" +import proto "github.com/golang/protobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
@@ -1416,6 +1417,12 @@ func (m *MyMessageSet) Marshal() ([]byte, error) { func (m *MyMessageSet) Unmarshal(buf []byte) error { return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) } +func (m *MyMessageSet) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(m.XXX_extensions) +} +func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) +} // ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler var _ proto.Marshaler = (*MyMessageSet)(nil) @@ -1879,6 +1886,38 @@ func (m *FloatingPoint) GetF() float64 { return 0 } +type MessageWithMap struct { + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} + +func (m *MessageWithMap) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + var E_Greeting = &proto.ExtensionDesc{ ExtendedType: (*MyMessage)(nil), ExtensionType: ([]string)(nil), diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.proto 
b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto similarity index 98% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.proto rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto index 9c0c4c8970..6cc755baef 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/testdata/test.proto +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -426,3 +426,9 @@ message GroupNew { message FloatingPoint { required double f = 1; } + +message MessageWithMap { + map name_mapping = 1; + map msg_mapping = 2; + map byte_mapping = 3; +} diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go similarity index 87% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go index 5063009c27..720eac4705 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. 
-// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -36,6 +36,7 @@ package proto import ( "bufio" "bytes" + "encoding" "fmt" "io" "log" @@ -74,13 +75,6 @@ type textWriter struct { w writer } -// textMarshaler is implemented by Messages that can marshal themsleves. -// It is identical to encoding.TextMarshaler, introduced in go 1.2, -// which will eventually replace it. -type textMarshaler interface { - MarshalText() (text []byte, err error) -} - func (w *textWriter) WriteString(s string) (n int, err error) { if !strings.Contains(s, "\n") { if !w.compact && w.complete { @@ -250,6 +244,100 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } continue } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. + keys := fv.MapKeys() // TODO: should we sort these for deterministic output? 
+ sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + switch fv.Kind() { + case reflect.Bool: + if !fv.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if fv.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if fv.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if fv.Float() == 0 { + continue + } + case reflect.String: + if fv.String() == "" { + continue + } + } + } if err := writeName(w, props); err != nil { return err @@ -358,7 +446,7 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error { } } w.indent() - if tm, ok := v.Interface().(textMarshaler); ok { + if tm, ok := 
v.Interface().(encoding.TextMarshaler); ok { text, err := tm.MarshalText() if err != nil { return err @@ -653,7 +741,7 @@ func marshalText(w io.Writer, pb Message, compact bool) error { compact: compact, } - if tm, ok := pb.(textMarshaler); ok { + if tm, ok := pb.(encoding.TextMarshaler); ok { text, err := tm.MarshalText() if err != nil { return err diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go similarity index 84% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go index d235ca9bc8..d1caeff5d9 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -35,6 +35,7 @@ package proto // TODO: message sets. import ( + "encoding" "errors" "fmt" "reflect" @@ -43,13 +44,6 @@ import ( "unicode/utf8" ) -// textUnmarshaler is implemented by Messages that can unmarshal themsleves. -// It is identical to encoding.TextUnmarshaler, introduced in go 1.2, -// which will eventually replace it. -type textUnmarshaler interface { - UnmarshalText(text []byte) error -} - type ParseError struct { Message string Line int // 1-based line number @@ -361,8 +355,20 @@ func (p *textParser) next() *token { return &p.cur } -// Return an error indicating which required field was not set. 
-func (p *textParser) missingRequiredFieldError(sv reflect.Value) *ParseError { +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { st := sv.Type() sprops := GetProperties(st) for i := 0; i < st.NumField(); i++ { @@ -372,10 +378,10 @@ func (p *textParser) missingRequiredFieldError(sv reflect.Value) *ParseError { props := sprops.Prop[i] if props.Required { - return p.errorf("message %v missing required field %q", st, props.OrigName) + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} } } - return p.errorf("message %v missing required field", st) // should not happen + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen } // Returns the index in the struct for the named field, as well as the parsed tag properties. @@ -415,6 +421,10 @@ func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseEr if typ.Elem().Kind() != reflect.Ptr { break } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break } needColon = false } @@ -426,9 +436,11 @@ func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseEr return nil } -func (p *textParser) readStruct(sv reflect.Value, terminator string) *ParseError { +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { st := sv.Type() reqCount := GetProperties(st).reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. A name may also be // "[extension]". 
@@ -489,7 +501,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) *ParseError ext = reflect.New(typ.Elem()).Elem() } if err := p.readAny(ext, props); err != nil { - return err + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err } ep := sv.Addr().Interface().(extendableProto) if !rep { @@ -507,17 +522,71 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) *ParseError } } else { // This is a normal, non-extension field. - fi, props, ok := structFieldByName(st, tok.value) + name := tok.value + fi, props, ok := structFieldByName(st, name) if !ok { - return p.errorf("unknown field name %q in %v", tok.value, st) + return p.errorf("unknown field name %q in %v", name, st) } dst := sv.Field(fi) - isDstNil := isNil(dst) + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. 
+ + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { + return err + } + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { + return err + } + + dst.SetMapIndex(key, val) + continue + } // Check that it's not already set if it's not a repeated field. - if !props.Repeated && !isDstNil { - return p.errorf("non-repeated field %q was repeated", tok.value) + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) } if err := p.checkForColon(props, st.Field(fi).Type); err != nil { @@ -525,11 +594,13 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) *ParseError } // Parse into the field. 
+ fieldSet[name] = true if err := p.readAny(dst, props); err != nil { - return err - } - - if props.Required { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } else if props.Required { reqCount-- } } @@ -547,10 +618,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) *ParseError if reqCount > 0 { return p.missingRequiredFieldError(sv) } - return nil + return reqFieldErr } -func (p *textParser) readAny(v reflect.Value, props *Properties) *ParseError { +func (p *textParser) readAny(v reflect.Value, props *Properties) error { tok := p.next() if tok.err != nil { return tok.err @@ -652,7 +723,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) *ParseError { default: return p.errorf("expected '{' or '<', found %q", tok.value) } - // TODO: Handle nested messages which implement textUnmarshaler. + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { @@ -670,8 +741,10 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) *ParseError { // UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb // before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(textUnmarshaler); ok { + if um, ok := pb.(encoding.TextUnmarshaler); ok { err := um.UnmarshalText([]byte(s)) return err } diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go similarity index 87% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser_test.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go index d043769ec1..1360e8e8b7 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_parser_test.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -36,8 +36,9 @@ import ( "reflect" "testing" + proto3pb "./proto3_proto" . "./testdata" - . "code.google.com/p/goprotobuf/proto" + . 
"github.com/golang/protobuf/proto" ) type UnmarshalTextTest struct { @@ -294,8 +295,11 @@ var unMarshalTextTests = []UnmarshalTextTest{ // Missing required field { - in: ``, - err: `line 1.0: message testdata.MyMessage missing required field "count"`, + in: `name: "Pawel"`, + err: `proto: required field "testdata.MyMessage.count" not set`, + out: &MyMessage{ + Name: String("Pawel"), + }, }, // Repeated non-repeated field @@ -408,6 +412,9 @@ func TestUnmarshalText(t *testing.T) { } else if err.Error() != test.err { t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", i, err.Error(), test.err) + } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) } } } @@ -437,6 +444,48 @@ func TestRepeatedEnum(t *testing.T) { } } +func TestProto3TextParsing(t *testing.T) { + m := new(proto3pb.Message) + const in = `name: "Wallace" true_scotsman: true` + want := &proto3pb.Message{ + Name: "Wallace", + TrueScotsman: true, + } + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + +func TestMapParsing(t *testing.T) { + m := new(MessageWithMap) + const in = `name_mapping: name_mapping:` + + `msg_mapping:>` + + `msg_mapping>` + // no colon after "value" + `byte_mapping:` + want := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Beatles", + 1234: "Feist", + }, + MsgMapping: map[int64]*FloatingPoint{ + -4: {F: Float64(2.0)}, + -2: {F: Float64(4.0)}, + }, + ByteMapping: map[bool][]byte{ + true: []byte("so be it"), + }, + } + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + var benchInput string func init() { diff --git a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go similarity 
index 92% rename from Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_test.go rename to Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go index 224a484e50..707bedd000 100644 --- a/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto/text_test.go +++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go @@ -1,7 +1,7 @@ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. -// http://code.google.com/p/goprotobuf/ +// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -39,8 +39,9 @@ import ( "strings" "testing" - "code.google.com/p/goprotobuf/proto" + "github.com/golang/protobuf/proto" + proto3pb "./proto3_proto" pb "./testdata" ) @@ -406,3 +407,30 @@ Message t.Errorf(" got: %s\nwant: %s", s, want) } } + +func TestProto3Text(t *testing.T) { + tests := []struct { + m proto.Message + want string + }{ + // zero message + {&proto3pb.Message{}, ``}, + // zero message except for an empty byte slice + {&proto3pb.Message{Data: []byte{}}, ``}, + // trivial case + {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`}, + // empty map + {&pb.MessageWithMap{}, ``}, + // non-empty map; current map format is the same as a repeated struct + { + &pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}}, + `name_mapping:`, + }, + } + for _, test := range tests { + got := strings.TrimSpace(test.m.String()) + if got != test.want { + t.Errorf("\n got %s\nwant %s", got, test.want) + } + } +} From b85c72bc50bcc3b8728be409100f19b39d077f79 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 16 Feb 2015 23:38:35 +0100 Subject: [PATCH 3/6] Get rid of unnecessary tabs in Makefile.INCLUDE. 
--- Makefile.INCLUDE | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/Makefile.INCLUDE b/Makefile.INCLUDE index 43eb8cf86d..6ea78da00e 100644 --- a/Makefile.INCLUDE +++ b/Makefile.INCLUDE @@ -26,8 +26,8 @@ MAC_OS_X_VERSION ?= 10.8 BUILD_PATH = $(PWD)/.build -GO_VERSION := 1.4 -GOOS = $(subst Darwin,darwin,$(subst Linux,linux,$(subst FreeBSD,freebsd,$(OS)))) +GO_VERSION := 1.4 +GOOS = $(subst Darwin,darwin,$(subst Linux,linux,$(subst FreeBSD,freebsd,$(OS)))) ifeq ($(GOOS),darwin) RELEASE_SUFFIX ?= -osx$(MAC_OS_X_VERSION) @@ -38,16 +38,16 @@ endif # Never honor GOBIN, should it be set at all. unexport GOBIN -GOARCH = $(subst x86_64,amd64,$(patsubst i%86,386,$(ARCH))) -GOPKG ?= go$(GO_VERSION).$(GOOS)-$(GOARCH)$(RELEASE_SUFFIX).tar.gz -GOURL ?= https://golang.org/dl -GOROOT = $(BUILD_PATH)/root/go -GOPATH = $(BUILD_PATH)/root/gopath -GOCC = $(GOROOT)/bin/go -TMPDIR = /tmp -GOENV = TMPDIR=$(TMPDIR) GOROOT=$(GOROOT) GOPATH=$(GOPATH) -GO = $(GOENV) $(GOCC) -GOFMT = $(GOROOT)/bin/gofmt +GOARCH = $(subst x86_64,amd64,$(patsubst i%86,386,$(ARCH))) +GOPKG ?= go$(GO_VERSION).$(GOOS)-$(GOARCH)$(RELEASE_SUFFIX).tar.gz +GOURL ?= https://golang.org/dl +GOROOT = $(BUILD_PATH)/root/go +GOPATH = $(BUILD_PATH)/root/gopath +GOCC = $(GOROOT)/bin/go +TMPDIR = /tmp +GOENV = TMPDIR=$(TMPDIR) GOROOT=$(GOROOT) GOPATH=$(GOPATH) +GO = $(GOENV) $(GOCC) +GOFMT = $(GOROOT)/bin/gofmt UNAME := $(shell uname) FULL_GOPATH := $(GOPATH)/src/github.com/prometheus/prometheus @@ -66,7 +66,7 @@ BRANCH := $(shell git rev-parse --abbrev-ref HEAD) HOSTNAME := $(shell hostname -f) BUILD_DATE := $(shell date +%Y%m%d-%H:%M:%S) BUILDFLAGS := -ldflags \ - " -X main.buildVersion $(VERSION)\ + " -X main.buildVersion $(VERSION)\ -X main.buildRevision $(REV)\ -X main.buildBranch $(BRANCH)\ -X main.buildUser $(USER)@$(HOSTNAME)\ From 5643587cc95d45bf3e5e4d0835ef98dafb687163 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 17 Feb 2015 02:02:26 +0100 Subject: [PATCH 
4/6] Also vendor Prometheus-internal deps and update existing ones. --- .gitignore | 2 +- Godeps/Godeps.json | 51 +- .../src/github.com/miekg/dns/.travis.yml | 4 +- .../src/github.com/miekg/dns/README.md | 11 +- .../src/github.com/miekg/dns/client.go | 8 +- .../src/github.com/miekg/dns/client_test.go | 49 +- .../src/github.com/miekg/dns/clientconfig.go | 9 +- .../github.com/miekg/dns/clientconfig_test.go | 55 + .../src/github.com/miekg/dns/defaults.go | 6 +- .../src/github.com/miekg/dns/dns.go | 6 +- .../src/github.com/miekg/dns/dns_test.go | 81 +- .../src/github.com/miekg/dns/dnssec.go | 235 +- .../src/github.com/miekg/dns/dnssec_keygen.go | 155 ++ .../miekg/dns/{kscan.go => dnssec_keyscan.go} | 61 +- .../github.com/miekg/dns/dnssec_privkey.go | 143 + .../src/github.com/miekg/dns/dnssec_test.go | 60 +- .../src/github.com/miekg/dns/edns.go | 4 +- .../src/github.com/miekg/dns/example_test.go | 2 +- .../src/github.com/miekg/dns/format.go | 96 + .../src/github.com/miekg/dns/keygen.go | 149 - .../src/github.com/miekg/dns/msg.go | 207 +- .../src/github.com/miekg/dns/nsecx.go | 2 +- .../src/github.com/miekg/dns/parse_test.go | 172 +- .../src/github.com/miekg/dns/server.go | 30 +- .../src/github.com/miekg/dns/server_test.go | 84 +- .../src/github.com/miekg/dns/sig0.go | 236 ++ .../src/github.com/miekg/dns/sig0_test.go | 96 + .../src/github.com/miekg/dns/tsig.go | 21 +- .../src/github.com/miekg/dns/types.go | 275 +- .../src/github.com/miekg/dns/types_test.go | 42 + .../src/github.com/miekg/dns/update.go | 5 +- .../src/github.com/miekg/dns/update_test.go | 89 + .../src/github.com/miekg/dns/xfr.go | 32 +- .../src/github.com/miekg/dns/xfr_test.go | 99 + .../src/github.com/miekg/dns/zscan.go | 23 +- .../src/github.com/miekg/dns/zscan_rr.go | 246 +- .../client_golang/_vendor/goautoneg/MANIFEST | 1 + .../client_golang/_vendor/goautoneg/Makefile | 13 + .../_vendor/goautoneg/README.txt | 67 + .../_vendor/goautoneg/autoneg.go | 162 ++ .../_vendor/goautoneg/autoneg_test.go | 33 + 
.../_vendor/perks/quantile/bench_test.go | 63 + .../_vendor/perks/quantile/example_test.go | 112 + .../_vendor/perks/quantile/exampledata.txt | 2388 +++++++++++++++++ .../_vendor/perks/quantile/stream.go | 292 ++ .../_vendor/perks/quantile/stream_test.go | 185 ++ .../client_golang/extraction/discriminator.go | 74 + .../extraction/discriminator_test.go | 126 + .../client_golang/extraction/extraction.go | 15 + .../extraction/fixtures/empty.json | 0 .../fixtures/test0_0_1-0_0_2-large.json | 1032 +++++++ .../extraction/fixtures/test0_0_1-0_0_2.json | 79 + .../extraction/metricfamilyprocessor.go | 295 ++ .../extraction/metricfamilyprocessor_test.go | 153 ++ .../client_golang/extraction/processor.go | 84 + .../extraction/processor0_0_1.go | 127 + .../extraction/processor0_0_1_test.go | 186 ++ .../extraction/processor0_0_2.go | 106 + .../extraction/processor0_0_2_test.go | 226 ++ .../client_golang/extraction/textprocessor.go | 40 + .../extraction/textprocessor_test.go | 100 + .../client_golang/model/fingerprinting.go | 110 + .../client_golang/model/labelname.go | 63 + .../client_golang/model/labelname_test.go | 55 + .../client_golang/model/labelset.go | 64 + .../client_golang/model/labelvalue.go | 36 + .../client_golang/model/labelvalue_test.go | 55 + .../prometheus/client_golang/model/metric.go | 164 ++ .../client_golang/model/metric_test.go | 76 + .../prometheus/client_golang/model/model.go | 15 + .../prometheus/client_golang/model/sample.go | 79 + .../client_golang/model/sample_test.go | 126 + .../client_golang/model/samplevalue.go | 37 + .../client_golang/model/signature.go | 97 + .../client_golang/model/signature_test.go | 120 + .../client_golang/model/timestamp.go | 115 + .../client_golang/model/timestamp_test.go | 86 + .../client_golang/prometheus/.gitignore | 1 + .../client_golang/prometheus/README.md | 53 + .../prometheus/benchmark_test.go | 131 + .../client_golang/prometheus/collector.go | 75 + .../client_golang/prometheus/counter.go | 175 ++ 
.../client_golang/prometheus/counter_test.go | 58 + .../client_golang/prometheus/desc.go | 199 ++ .../client_golang/prometheus/doc.go | 108 + .../prometheus/example_clustermanager_test.go | 130 + .../prometheus/example_memstats_test.go | 87 + .../prometheus/example_selfcollector_test.go | 69 + .../client_golang/prometheus/examples_test.go | 454 ++++ .../client_golang/prometheus/expvar.go | 119 + .../client_golang/prometheus/expvar_test.go | 97 + .../client_golang/prometheus/gauge.go | 147 + .../client_golang/prometheus/gauge_test.go | 182 ++ .../client_golang/prometheus/go_collector.go | 31 + .../prometheus/go_collector_test.go | 58 + .../client_golang/prometheus/http.go | 322 +++ .../client_golang/prometheus/http_test.go | 121 + .../client_golang/prometheus/metric.go | 164 ++ .../client_golang/prometheus/metric_test.go | 35 + .../prometheus/process_collector.go | 102 + .../prometheus/process_collector_procfs.go | 63 + .../prometheus/process_collector_rest.go | 24 + .../prometheus/process_collector_test.go | 54 + .../client_golang/prometheus/registry.go | 721 +++++ .../client_golang/prometheus/registry_test.go | 489 ++++ .../client_golang/prometheus/summary.go | 424 +++ .../client_golang/prometheus/summary_test.go | 328 +++ .../client_golang/prometheus/untyped.go | 145 + .../client_golang/prometheus/value.go | 230 ++ .../client_golang/prometheus/vec.go | 241 ++ .../client_golang/prometheus/vec_test.go | 91 + .../client_golang/text/bench_test.go | 168 ++ .../prometheus/client_golang/text/create.go | 298 ++ .../client_golang/text/create_test.go | 395 +++ .../prometheus/client_golang/text/parse.go | 739 +++++ .../client_golang/text/parse_test.go | 588 ++++ .../prometheus/client_golang/text/proto.go | 43 + .../client_golang/text/testdata/protobuf | Bin 0 -> 8243 bytes .../client_golang/text/testdata/protobuf.gz | Bin 0 -> 2053 bytes .../client_golang/text/testdata/text | 322 +++ .../client_golang/text/testdata/text.gz | Bin 0 -> 2595 bytes 
.../prometheus/client_model/go/metrics.pb.go | 364 +++ .../github.com/prometheus/procfs/AUTHORS.md | 11 + .../prometheus/procfs/CONTRIBUTING.md | 18 + .../src/github.com/prometheus/procfs/LICENSE | 201 ++ .../src/github.com/prometheus/procfs/NOTICE | 7 + .../github.com/prometheus/procfs/README.md | 11 + .../src/github.com/prometheus/procfs/doc.go | 45 + .../prometheus/procfs/fixtures/26231/cmdline | Bin 0 -> 16 bytes .../prometheus/procfs/fixtures/26231/fd/0 | 0 .../prometheus/procfs/fixtures/26231/fd/1 | 0 .../prometheus/procfs/fixtures/26231/fd/2 | 0 .../prometheus/procfs/fixtures/26231/fd/3 | 0 .../prometheus/procfs/fixtures/26231/fd/4 | 0 .../prometheus/procfs/fixtures/26231/limits | 17 + .../prometheus/procfs/fixtures/26231/stat | 1 + .../prometheus/procfs/fixtures/584/stat | 2 + .../prometheus/procfs/fixtures/stat | 16 + .../src/github.com/prometheus/procfs/fs.go | 36 + .../github.com/prometheus/procfs/fs_test.go | 13 + .../src/github.com/prometheus/procfs/proc.go | 149 + .../prometheus/procfs/proc_limits.go | 111 + .../prometheus/procfs/proc_limits_test.go | 36 + .../github.com/prometheus/procfs/proc_stat.go | 165 ++ .../prometheus/procfs/proc_stat_test.go | 112 + .../github.com/prometheus/procfs/proc_test.go | 123 + .../src/github.com/prometheus/procfs/stat.go | 55 + .../github.com/prometheus/procfs/stat_test.go | 19 + .../{go13_bench_test.go => bench2_test.go} | 2 +- .../goleveldb/leveldb/cache/bench2_test.go | 30 + .../goleveldb/leveldb/cache/cache_test.go | 16 - 151 files changed, 19101 insertions(+), 843 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/miekg/dns/clientconfig_test.go create mode 100644 Godeps/_workspace/src/github.com/miekg/dns/dnssec_keygen.go rename Godeps/_workspace/src/github.com/miekg/dns/{kscan.go => dnssec_keyscan.go} (81%) create mode 100644 Godeps/_workspace/src/github.com/miekg/dns/dnssec_privkey.go create mode 100644 Godeps/_workspace/src/github.com/miekg/dns/format.go delete mode 100644 
Godeps/_workspace/src/github.com/miekg/dns/keygen.go create mode 100644 Godeps/_workspace/src/github.com/miekg/dns/sig0.go create mode 100644 Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go create mode 100644 Godeps/_workspace/src/github.com/miekg/dns/types_test.go create mode 100644 Godeps/_workspace/src/github.com/miekg/dns/update_test.go create mode 100644 Godeps/_workspace/src/github.com/miekg/dns/xfr_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/MANIFEST create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/Makefile create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/README.txt create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/autoneg.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/autoneg_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/example_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/exampledata.txt create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/stream.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/stream_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/extraction.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/empty.json create mode 100644 
Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2-large.json create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2.json create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/fingerprinting.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelset.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric_test.go create mode 
100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/model.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/samplevalue.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/.gitignore create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go create mode 100644 
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/examples_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/metric_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector_procfs.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector_rest.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary_test.go create mode 100644 
Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/create.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/create_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/proto.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf.gz create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text.gz create mode 100644 Godeps/_workspace/src/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/AUTHORS.md create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/LICENSE create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/NOTICE create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/README.md create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/doc.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/cmdline 
create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/0 create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/1 create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/2 create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/3 create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/4 create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/limits create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/stat create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/584/stat create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/stat create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fs.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/fs_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/proc.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/proc_test.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/stat.go create mode 100644 Godeps/_workspace/src/github.com/prometheus/procfs/stat_test.go rename Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/{go13_bench_test.go => bench2_test.go} (98%) create mode 100644 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go diff --git a/.gitignore b/.gitignore index 748aea229b..59ce6935c6 100644 --- a/.gitignore +++ b/.gitignore @@ -26,7 +26,7 @@ _cgo_* core *-stamp -prometheus 
+/prometheus benchmark.txt .#* diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index e40323763c..078e4b4b17 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -2,25 +2,64 @@ "ImportPath": "github.com/prometheus/prometheus", "GoVersion": "go1.4.1", "Deps": [ - { - "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "5677a0e3d5e89854c9974e1256839ee23f8233ca" - }, { "ImportPath": "github.com/golang/glog", "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, + { + "ImportPath": "github.com/golang/protobuf/proto", + "Rev": "5677a0e3d5e89854c9974e1256839ee23f8233ca" + }, { "ImportPath": "github.com/matttproud/golang_protobuf_extensions/ext", "Rev": "ba7d65ac66e9da93a714ca18f6d1bc7a0c09100c" }, { "ImportPath": "github.com/miekg/dns", - "Rev": "6b75215519f9916839204d80413bb178b94ef769" + "Rev": "b65f52f3f0dd1afa25cbbf63f8e7eb15fb5c0641" + }, + { + "ImportPath": "github.com/prometheus/client_golang/_vendor/goautoneg", + "Comment": "0.1.0-24-g4627d59", + "Rev": "4627d59e8a09c330c5ccfe7414baca28d8df847d" + }, + { + "ImportPath": "github.com/prometheus/client_golang/_vendor/perks/quantile", + "Comment": "0.1.0-24-g4627d59", + "Rev": "4627d59e8a09c330c5ccfe7414baca28d8df847d" + }, + { + "ImportPath": "github.com/prometheus/client_golang/extraction", + "Comment": "0.1.0-24-g4627d59", + "Rev": "4627d59e8a09c330c5ccfe7414baca28d8df847d" + }, + { + "ImportPath": "github.com/prometheus/client_golang/model", + "Comment": "0.1.0-24-g4627d59", + "Rev": "4627d59e8a09c330c5ccfe7414baca28d8df847d" + }, + { + "ImportPath": "github.com/prometheus/client_golang/prometheus", + "Comment": "0.1.0-24-g4627d59", + "Rev": "4627d59e8a09c330c5ccfe7414baca28d8df847d" + }, + { + "ImportPath": "github.com/prometheus/client_golang/text", + "Comment": "0.1.0-24-g4627d59", + "Rev": "4627d59e8a09c330c5ccfe7414baca28d8df847d" + }, + { + "ImportPath": "github.com/prometheus/client_model/go", + "Comment": "model-0.0.2-12-gfa8ad6f", + "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" + }, 
+ { + "ImportPath": "github.com/prometheus/procfs", + "Rev": "92faa308558161acab0ada1db048e9996ecec160" }, { "ImportPath": "github.com/syndtr/goleveldb/leveldb", - "Rev": "63c9e642efad852f49e20a6f90194cae112fd2ac" + "Rev": "e9e2c8f6d3b9c313fb4acaac5ab06285bcf30b04" }, { "ImportPath": "github.com/syndtr/gosnappy/snappy", diff --git a/Godeps/_workspace/src/github.com/miekg/dns/.travis.yml b/Godeps/_workspace/src/github.com/miekg/dns/.travis.yml index acf314c6df..4485679769 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/.travis.yml +++ b/Godeps/_workspace/src/github.com/miekg/dns/.travis.yml @@ -16,4 +16,6 @@ script: - GOARCH=$BUILD_GOARCH GOOS=$BUILD_GOOS go build # only test on linux - - if [ $BUILD_GOOS == "linux" ]; then GOARCH=$BUILD_GOARCH GOOS=$BUILD_GOOS go test -bench=.; fi + # also specify -short; the crypto tests fail in weird ways *sometimes* + # See issue #151 + - if [ $BUILD_GOOS == "linux" ]; then GOARCH=$BUILD_GOARCH GOOS=$BUILD_GOOS go test -short -bench=.; fi diff --git a/Godeps/_workspace/src/github.com/miekg/dns/README.md b/Godeps/_workspace/src/github.com/miekg/dns/README.md index 20fe64ba26..86624c621d 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/README.md +++ b/Godeps/_workspace/src/github.com/miekg/dns/README.md @@ -34,6 +34,7 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/skynetservices/skydns * https://github.com/DevelopersPL/godnsagent * https://github.com/duedil-ltd/discodns +* https://github.com/StalkR/dns-reverse-proxy Send pull request if you want to be listed here. @@ -49,7 +50,7 @@ Send pull request if you want to be listed here. * DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA; * EDNS0, NSID; * AXFR/IXFR; -* TSIG; +* TSIG, SIG(0); * DNS name compression; * Depends only on the standard library. @@ -95,7 +96,8 @@ Example programs can be found in the `github.com/miekg/exdns` repository. 
* 3225 - DO bit (DNSSEC OK) * 340{1,2,3} - NAPTR record * 3445 - Limiting the scope of (DNS)KEY -* 3597 - Unkown RRs +* 3597 - Unknown RRs +* 4025 - IPSECKEY * 403{3,4,5} - DNSSEC + validation functions * 4255 - SSHFP record * 4343 - Case insensitivity @@ -112,6 +114,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 5936 - AXFR * 5966 - TCP implementation recommendations * 6605 - ECDSA +* 6725 - IANA Registry Update * 6742 - ILNP DNS * 6891 - EDNS0 update * 6895 - DNS IANA considerations @@ -131,10 +134,8 @@ Example programs can be found in the `github.com/miekg/exdns` repository. ## TODO * privatekey.Precompute() when signing? -* Last remaining RRs: APL, ATMA, A6 and NXT; +* Last remaining RRs: APL, ATMA, A6 and NXT and IPSECKEY; * Missing in parsing: ISDN, UNSPEC, ATMA; * CAA parsing is broken; * NSEC(3) cover/match/closest enclose; * Replies with TC bit are not parsed to the end; -* SIG(0); -* Create IsMsg to validate a message before fully parsing it. diff --git a/Godeps/_workspace/src/github.com/miekg/dns/client.go b/Godeps/_workspace/src/github.com/miekg/dns/client.go index ee8e22333b..76767522e9 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/client.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/client.go @@ -9,7 +9,7 @@ import ( "time" ) -const dnsTimeout time.Duration = 2 * 1e9 +const dnsTimeout time.Duration = 2 * time.Second const tcpIdleTimeout time.Duration = 8 * time.Second // A Conn represents a connection to a DNS server. 
@@ -26,9 +26,9 @@ type Conn struct { type Client struct { Net string // if "tcp" a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) UDPSize uint16 // minimum receive buffer for UDP messages - DialTimeout time.Duration // net.DialTimeout (ns), defaults to 2 * 1e9 - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections (ns), defaults to 2 * 1e9 - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections (ns), defaults to 2 * 1e9 + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass group singleflight diff --git a/Godeps/_workspace/src/github.com/miekg/dns/client_test.go b/Godeps/_workspace/src/github.com/miekg/dns/client_test.go index 9691a9cd2a..1f4fd0f317 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/client_test.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/client_test.go @@ -118,54 +118,7 @@ Loop: } } -/* -func TestClientTsigAXFR(t *testing.T) { - m := new(Msg) - m.SetAxfr("example.nl.") - m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix()) - - tr := new(Transfer) - tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - - if a, err := tr.In(m, "176.58.119.54:53"); err != nil { - t.Log("failed to setup axfr: " + err.Error()) - t.Fatal() - } else { - for ex := range a { - if ex.Error != nil { - t.Logf("error %s\n", ex.Error.Error()) - t.Fail() - break - } - for _, rr := range ex.RR { - t.Logf("%s\n", rr.String()) - } - } - } -} - -func TestClientAXFRMultipleEnvelopes(t *testing.T) { - m := new(Msg) - m.SetAxfr("nlnetlabs.nl.") - - tr := new(Transfer) - if a, err 
:= tr.In(m, "213.154.224.1:53"); err != nil { - t.Log("Failed to setup axfr" + err.Error()) - t.Fail() - return - } else { - for ex := range a { - if ex.Error != nil { - t.Logf("Error %s\n", ex.Error.Error()) - t.Fail() - break - } - } - } -} -*/ - -// ExapleUpdateLeaseTSIG shows how to update a lease signed with TSIG. +// ExampleUpdateLeaseTSIG shows how to update a lease signed with TSIG. func ExampleUpdateLeaseTSIG(t *testing.T) { m := new(Msg) m.SetUpdate("t.local.ip6.io.") diff --git a/Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go b/Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go index 87cf896186..0681e49166 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go @@ -26,14 +26,19 @@ func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { } defer file.Close() c := new(ClientConfig) - b := bufio.NewReader(file) + scanner := bufio.NewScanner(file) c.Servers = make([]string, 0) c.Search = make([]string, 0) c.Port = "53" c.Ndots = 1 c.Timeout = 5 c.Attempts = 2 - for line, ok := b.ReadString('\n'); ok == nil; line, ok = b.ReadString('\n') { + + for scanner.Scan() { + if err := scanner.Err(); err != nil { + return nil, err + } + line := scanner.Text() f := strings.Fields(line) if len(f) < 1 { continue diff --git a/Godeps/_workspace/src/github.com/miekg/dns/clientconfig_test.go b/Godeps/_workspace/src/github.com/miekg/dns/clientconfig_test.go new file mode 100644 index 0000000000..a261ff0e5b --- /dev/null +++ b/Godeps/_workspace/src/github.com/miekg/dns/clientconfig_test.go @@ -0,0 +1,55 @@ +package dns + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const normal string = ` +# Comment +domain somedomain.com +nameserver 10.28.10.2 +nameserver 11.28.10.1 +` + +const missingNewline string = ` +domain somedomain.com +nameserver 10.28.10.2 +nameserver 11.28.10.1` // <- NOTE: NO newline. 
+ +func testConfig(t *testing.T, data string) { + tempDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("TempDir: %v", err) + } + defer os.RemoveAll(tempDir) + + path := filepath.Join(tempDir, "resolv.conf") + if err := ioutil.WriteFile(path, []byte(data), 0644); err != nil { + t.Fatalf("WriteFile: %v", err) + } + cc, err := ClientConfigFromFile(path) + if err != nil { + t.Errorf("error parsing resolv.conf: %s", err) + } + if l := len(cc.Servers); l != 2 { + t.Errorf("incorrect number of nameservers detected: %d", l) + } + if l := len(cc.Search); l != 1 { + t.Errorf("domain directive not parsed correctly: %v", cc.Search) + } else { + if cc.Search[0] != "somedomain.com" { + t.Errorf("domain is unexpected: %v", cc.Search[0]) + } + } +} + +func TestNameserver(t *testing.T) { + testConfig(t, normal) +} + +func TestMissingFinalNewLine(t *testing.T) { + testConfig(t, missingNewline) +} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/defaults.go b/Godeps/_workspace/src/github.com/miekg/dns/defaults.go index f483d4a342..d77cb01d69 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/defaults.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/defaults.go @@ -73,13 +73,15 @@ func (dns *Msg) SetUpdate(z string) *Msg { } // SetIxfr creates message for requesting an IXFR. -func (dns *Msg) SetIxfr(z string, serial uint32) *Msg { +func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { dns.Id = Id() dns.Question = make([]Question, 1) dns.Ns = make([]RR, 1) s := new(SOA) s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} s.Serial = serial + s.Ns = ns + s.Mbox = mbox dns.Question[0] = Question{z, TypeIXFR, ClassINET} dns.Ns[0] = s return dns @@ -169,7 +171,7 @@ func IsMsg(buf []byte) error { return errors.New("dns: bad message header") } // Header: Opcode - + // TODO(miek): more checks here, e.g. check all header bits. 
return nil } diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dns.go b/Godeps/_workspace/src/github.com/miekg/dns/dns.go index 7540c0d5bc..1ad78b4e61 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/dns.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/dns.go @@ -93,9 +93,7 @@ // spaces, semicolons and the at symbol are escaped. package dns -import ( - "strconv" -) +import "strconv" const ( year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. @@ -124,7 +122,7 @@ type RR interface { String() string // copy returns a copy of the RR copy() RR - // len returns the length (in octects) of the uncompressed RR in wire format. + // len returns the length (in octets) of the uncompressed RR in wire format. len() int } diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dns_test.go b/Godeps/_workspace/src/github.com/miekg/dns/dns_test.go index b3063c90dd..0b30af6fd1 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/dns_test.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/dns_test.go @@ -426,11 +426,21 @@ func BenchmarkUnpackDomainName(b *testing.B) { } } +func BenchmarkUnpackDomainNameUnprintable(b *testing.B) { + name1 := "\x02\x02\x02\x025\x02\x02\x02\x02.12345678.123." + buf := make([]byte, len(name1)+1) + _, _ = PackDomainName(name1, buf, 0, nil, false) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = UnpackDomainName(buf, 0) + } +} + func TestToRFC3597(t *testing.T) { a, _ := NewRR("miek.nl. IN A 10.0.1.1") x := new(RFC3597) x.ToRFC3597(a) - if x.String() != `miek.nl. 3600 IN A \# 4 0a000101` { + if x.String() != `miek.nl. 3600 CLASS1 TYPE1 \# 4 0a000101` { t.Fail() } } @@ -499,3 +509,72 @@ func TestCopy(t *testing.T) { t.Fatalf("Copy() failed %s != %s", rr.String(), rr1.String()) } } + +func TestMsgCopy(t *testing.T) { + m := new(Msg) + m.SetQuestion("miek.nl.", TypeA) + rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") + m.Answer = []RR{rr} + rr, _ = NewRR("miek.nl. 
2311 IN NS 127.0.0.1") + m.Ns = []RR{rr} + + m1 := m.Copy() + if m.String() != m1.String() { + t.Fatalf("Msg.Copy() failed %s != %s", m.String(), m1.String()) + } + + m1.Answer[0], _ = NewRR("somethingelse.nl. 2311 IN A 127.0.0.1") + if m.String() == m1.String() { + t.Fatalf("Msg.Copy() failed; change to copy changed template %s", m.String()) + } + + rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.2") + m1.Answer = append(m1.Answer, rr) + if m1.Ns[0].String() == m1.Answer[1].String() { + t.Fatalf("Msg.Copy() failed; append changed underlying array %s", m1.Ns[0].String()) + } +} + +func BenchmarkCopy(b *testing.B) { + b.ReportAllocs() + m := new(Msg) + m.SetQuestion("miek.nl.", TypeA) + rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") + m.Answer = []RR{rr} + rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") + m.Ns = []RR{rr} + rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.1") + m.Extra = []RR{rr} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Copy() + } +} + +func TestPackIPSECKEY(t *testing.T) { + tests := []string{ + "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 1 2 192.0.2.3 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.1.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 3 2 mygateway.example.com. 
AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "0.d.4.0.3.0.e.f.f.f.3.f.0.1.2.0 7200 IN IPSECKEY ( 10 2 2 2001:0DB8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + } + buf := make([]byte, 1024) + for _, t1 := range tests { + rr, _ := NewRR(t1) + off, e := PackRR(rr, buf, 0, nil, false) + if e != nil { + t.Logf("failed to pack IPSECKEY %s: %s\n", e, t1) + t.Fail() + continue + } + + rr, _, e = UnpackRR(buf[:off], 0) + if e != nil { + t.Logf("failed to unpack IPSECKEY %s: %s\n", e, t1) + t.Fail() + } + t.Logf("%s\n", rr) + } +} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dnssec.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec.go index db32fe5544..4af0faeb40 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/dnssec.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/dnssec.go @@ -20,7 +20,6 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/md5" - "crypto/rand" "crypto/rsa" "crypto/sha1" "crypto/sha256" @@ -36,31 +35,34 @@ import ( // DNSSEC encryption algorithm codes. const ( - RSAMD5 = 1 - DH = 2 - DSA = 3 - ECC = 4 - RSASHA1 = 5 - DSANSEC3SHA1 = 6 - RSASHA1NSEC3SHA1 = 7 - RSASHA256 = 8 - RSASHA512 = 10 - ECCGOST = 12 - ECDSAP256SHA256 = 13 - ECDSAP384SHA384 = 14 - INDIRECT = 252 - PRIVATEDNS = 253 // Private (experimental keys) - PRIVATEOID = 254 + _ uint8 = iota + RSAMD5 + DH + DSA + _ // Skip 4, RFC 6725, section 2.1 + RSASHA1 + DSANSEC3SHA1 + RSASHA1NSEC3SHA1 + RSASHA256 + _ // Skip 9, RFC 6725, section 2.1 + RSASHA512 + _ // Skip 11, RFC 6725, section 2.1 + ECCGOST + ECDSAP256SHA256 + ECDSAP384SHA384 + INDIRECT uint8 = 252 + PRIVATEDNS uint8 = 253 // Private (experimental keys) + PRIVATEOID uint8 = 254 ) // DNSSEC hashing algorithm codes. 
const ( - _ = iota - SHA1 // RFC 4034 - SHA256 // RFC 4509 - GOST94 // RFC 5933 - SHA384 // Experimental - SHA512 // Experimental + _ uint8 = iota + SHA1 // RFC 4034 + SHA256 // RFC 4509 + GOST94 // RFC 5933 + SHA384 // Experimental + SHA512 // Experimental ) // DNSKEY flag values. @@ -94,6 +96,10 @@ type dnskeyWireFmt struct { /* Nothing is left out */ } +func divRoundUp(a, b int) int { + return (a + b - 1) / b +} + // KeyTag calculates the keytag (or key-id) of the DNSKEY. func (k *DNSKEY) KeyTag() uint16 { if k == nil { @@ -105,7 +111,7 @@ func (k *DNSKEY) KeyTag() uint16 { // Look at the bottom two bytes of the modules, which the last // item in the pubkey. We could do this faster by looking directly // at the base64 values. But I'm lazy. - modulus, _ := packBase64([]byte(k.PublicKey)) + modulus, _ := fromBase64([]byte(k.PublicKey)) if len(modulus) > 1 { x, _ := unpackUint16(modulus, len(modulus)-2) keytag = int(x) @@ -136,7 +142,7 @@ func (k *DNSKEY) KeyTag() uint16 { } // ToDS converts a DNSKEY record to a DS record. -func (k *DNSKEY) ToDS(h int) *DS { +func (k *DNSKEY) ToDS(h uint8) *DS { if k == nil { return nil } @@ -146,7 +152,7 @@ func (k *DNSKEY) ToDS(h int) *DS { ds.Hdr.Rrtype = TypeDS ds.Hdr.Ttl = k.Hdr.Ttl ds.Algorithm = k.Algorithm - ds.DigestType = uint8(h) + ds.DigestType = h ds.KeyTag = k.KeyTag() keywire := new(dnskeyWireFmt) @@ -249,60 +255,36 @@ func (rr *RRSIG) Sign(k PrivateKey, rrset []RR) error { } signdata = append(signdata, wire...) 
- var sighash []byte var h hash.Hash - var ch crypto.Hash // Only need for RSA switch rr.Algorithm { case DSA, DSANSEC3SHA1: - // Implicit in the ParameterSizes + // TODO: this seems bugged, will panic case RSASHA1, RSASHA1NSEC3SHA1: h = sha1.New() - ch = crypto.SHA1 case RSASHA256, ECDSAP256SHA256: h = sha256.New() - ch = crypto.SHA256 case ECDSAP384SHA384: h = sha512.New384() case RSASHA512: h = sha512.New() - ch = crypto.SHA512 case RSAMD5: fallthrough // Deprecated in RFC 6725 default: return ErrAlg } - io.WriteString(h, string(signdata)) - sighash = h.Sum(nil) - switch p := k.(type) { - case *dsa.PrivateKey: - r1, s1, err := dsa.Sign(rand.Reader, p, sighash) - if err != nil { - return err - } - signature := []byte{0x4D} // T value, here the ASCII M for Miek (not used in DNSSEC) - signature = append(signature, r1.Bytes()...) - signature = append(signature, s1.Bytes()...) - rr.Signature = unpackBase64(signature) - case *rsa.PrivateKey: - // We can use nil as rand.Reader here (says AGL) - signature, err := rsa.SignPKCS1v15(nil, p, ch, sighash) - if err != nil { - return err - } - rr.Signature = unpackBase64(signature) - case *ecdsa.PrivateKey: - r1, s1, err := ecdsa.Sign(rand.Reader, p, sighash) - if err != nil { - return err - } - signature := r1.Bytes() - signature = append(signature, s1.Bytes()...) 
- rr.Signature = unpackBase64(signature) - default: - // Not given the correct key - return ErrKeyAlg + _, err = h.Write(signdata) + if err != nil { + return err } + sighash := h.Sum(nil) + + signature, err := k.Sign(sighash, rr.Algorithm) + if err != nil { + return err + } + rr.Signature = toBase64(signature) + return nil } @@ -395,7 +377,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { sighash := h.Sum(nil) return rsa.VerifyPKCS1v15(pubkey, ch, sighash, sigbuf) case ECDSAP256SHA256, ECDSAP384SHA384: - pubkey := k.publicKeyCurve() + pubkey := k.publicKeyECDSA() if pubkey == nil { return ErrKey } @@ -441,41 +423,16 @@ func (rr *RRSIG) ValidityPeriod(t time.Time) bool { // Return the signatures base64 encodedig sigdata as a byte slice. func (s *RRSIG) sigBuf() []byte { - sigbuf, err := packBase64([]byte(s.Signature)) + sigbuf, err := fromBase64([]byte(s.Signature)) if err != nil { return nil } return sigbuf } -// setPublicKeyInPrivate sets the public key in the private key. -func (k *DNSKEY) setPublicKeyInPrivate(p PrivateKey) bool { - switch t := p.(type) { - case *dsa.PrivateKey: - x := k.publicKeyDSA() - if x == nil { - return false - } - t.PublicKey = *x - case *rsa.PrivateKey: - x := k.publicKeyRSA() - if x == nil { - return false - } - t.PublicKey = *x - case *ecdsa.PrivateKey: - x := k.publicKeyCurve() - if x == nil { - return false - } - t.PublicKey = *x - } - return true -} - // publicKeyRSA returns the RSA public key from a DNSKEY record. func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { - keybuf, err := packBase64([]byte(k.PublicKey)) + keybuf, err := fromBase64([]byte(k.PublicKey)) if err != nil { return nil } @@ -511,9 +468,9 @@ func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { return pubkey } -// publicKeyCurve returns the Curve public key from the DNSKEY record. -func (k *DNSKEY) publicKeyCurve() *ecdsa.PublicKey { - keybuf, err := packBase64([]byte(k.PublicKey)) +// publicKeyECDSA returns the Curve public key from the DNSKEY record. 
+func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) if err != nil { return nil } @@ -540,97 +497,29 @@ func (k *DNSKEY) publicKeyCurve() *ecdsa.PublicKey { } func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { - keybuf, err := packBase64([]byte(k.PublicKey)) + keybuf, err := fromBase64([]byte(k.PublicKey)) if err != nil { return nil } - if len(keybuf) < 22 { // TODO: check + if len(keybuf) < 22 { return nil } - t := int(keybuf[0]) + t, keybuf := int(keybuf[0]), keybuf[1:] size := 64 + t*8 + q, keybuf := keybuf[:20], keybuf[20:] + if len(keybuf) != 3*size { + return nil + } + p, keybuf := keybuf[:size], keybuf[size:] + g, y := keybuf[:size], keybuf[size:] pubkey := new(dsa.PublicKey) - pubkey.Parameters.Q = big.NewInt(0) - pubkey.Parameters.Q.SetBytes(keybuf[1:21]) // +/- 1 ? - pubkey.Parameters.P = big.NewInt(0) - pubkey.Parameters.P.SetBytes(keybuf[22 : 22+size]) - pubkey.Parameters.G = big.NewInt(0) - pubkey.Parameters.G.SetBytes(keybuf[22+size+1 : 22+size*2]) - pubkey.Y = big.NewInt(0) - pubkey.Y.SetBytes(keybuf[22+size*2+1 : 22+size*3]) + pubkey.Parameters.Q = big.NewInt(0).SetBytes(q) + pubkey.Parameters.P = big.NewInt(0).SetBytes(p) + pubkey.Parameters.G = big.NewInt(0).SetBytes(g) + pubkey.Y = big.NewInt(0).SetBytes(y) return pubkey } -// Set the public key (the value E and N) -func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { - if _E == 0 || _N == nil { - return false - } - buf := exponentToBuf(_E) - buf = append(buf, _N.Bytes()...) 
- k.PublicKey = unpackBase64(buf) - return true -} - -// Set the public key for Elliptic Curves -func (k *DNSKEY) setPublicKeyCurve(_X, _Y *big.Int) bool { - if _X == nil || _Y == nil { - return false - } - buf := curveToBuf(_X, _Y) - // Check the length of the buffer, either 64 or 92 bytes - k.PublicKey = unpackBase64(buf) - return true -} - -// Set the public key for DSA -func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { - if _Q == nil || _P == nil || _G == nil || _Y == nil { - return false - } - buf := dsaToBuf(_Q, _P, _G, _Y) - k.PublicKey = unpackBase64(buf) - return true -} - -// Set the public key (the values E and N) for RSA -// RFC 3110: Section 2. RSA Public KEY Resource Records -func exponentToBuf(_E int) []byte { - var buf []byte - i := big.NewInt(int64(_E)) - if len(i.Bytes()) < 256 { - buf = make([]byte, 1) - buf[0] = uint8(len(i.Bytes())) - } else { - buf = make([]byte, 3) - buf[0] = 0 - buf[1] = uint8(len(i.Bytes()) >> 8) - buf[2] = uint8(len(i.Bytes())) - } - buf = append(buf, i.Bytes()...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func curveToBuf(_X, _Y *big.Int) []byte { - buf := _X.Bytes() - buf = append(buf, _Y.Bytes()...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { - t := byte((len(_G.Bytes()) - 64) / 8) - buf := []byte{t} - buf = append(buf, _Q.Bytes()...) - buf = append(buf, _P.Bytes()...) - buf = append(buf, _G.Bytes()...) - buf = append(buf, _Y.Bytes()...) 
- return buf -} - type wireSlice [][]byte func (p wireSlice) Len() int { return len(p) } diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_keygen.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_keygen.go new file mode 100644 index 0000000000..f49018c577 --- /dev/null +++ b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_keygen.go @@ -0,0 +1,155 @@ +package dns + +import ( + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "math/big" +) + +// Generate generates a DNSKEY of the given bit size. +// The public part is put inside the DNSKEY record. +// The Algorithm in the key must be set as this will define +// what kind of DNSKEY will be generated. +// The ECDSA algorithms imply a fixed keysize, in that case +// bits should be set to the size of the algorithm. +func (r *DNSKEY) Generate(bits int) (PrivateKey, error) { + switch r.Algorithm { + case DSA, DSANSEC3SHA1: + if bits != 1024 { + return nil, ErrKeySize + } + case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: + if bits < 512 || bits > 4096 { + return nil, ErrKeySize + } + case RSASHA512: + if bits < 1024 || bits > 4096 { + return nil, ErrKeySize + } + case ECDSAP256SHA256: + if bits != 256 { + return nil, ErrKeySize + } + case ECDSAP384SHA384: + if bits != 384 { + return nil, ErrKeySize + } + } + + switch r.Algorithm { + case DSA, DSANSEC3SHA1: + params := new(dsa.Parameters) + if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { + return nil, err + } + priv := new(dsa.PrivateKey) + priv.PublicKey.Parameters = *params + err := dsa.GenerateKey(priv, rand.Reader) + if err != nil { + return nil, err + } + r.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) + return (*DSAPrivateKey)(priv), nil + case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: + priv, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + r.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) + return 
(*RSAPrivateKey)(priv), nil + case ECDSAP256SHA256, ECDSAP384SHA384: + var c elliptic.Curve + switch r.Algorithm { + case ECDSAP256SHA256: + c = elliptic.P256() + case ECDSAP384SHA384: + c = elliptic.P384() + } + priv, err := ecdsa.GenerateKey(c, rand.Reader) + if err != nil { + return nil, err + } + r.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) + return (*ECDSAPrivateKey)(priv), nil + default: + return nil, ErrAlg + } +} + +// Set the public key (the value E and N) +func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { + if _E == 0 || _N == nil { + return false + } + buf := exponentToBuf(_E) + buf = append(buf, _N.Bytes()...) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key for Elliptic Curves +func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { + if _X == nil || _Y == nil { + return false + } + var intlen int + switch k.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) + return true +} + +// Set the public key for DSA +func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { + if _Q == nil || _P == nil || _G == nil || _Y == nil { + return false + } + buf := dsaToBuf(_Q, _P, _G, _Y) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key (the values E and N) for RSA +// RFC 3110: Section 2. RSA Public KEY Resource Records +func exponentToBuf(_E int) []byte { + var buf []byte + i := big.NewInt(int64(_E)) + if len(i.Bytes()) < 256 { + buf = make([]byte, 1) + buf[0] = uint8(len(i.Bytes())) + } else { + buf = make([]byte, 3) + buf[0] = 0 + buf[1] = uint8(len(i.Bytes()) >> 8) + buf[2] = uint8(len(i.Bytes())) + } + buf = append(buf, i.Bytes()...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func curveToBuf(_X, _Y *big.Int, intlen int) []byte { + buf := intToBytes(_X, intlen) + buf = append(buf, intToBytes(_Y, intlen)...) 
+ return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { + t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) + buf := []byte{byte(t)} + buf = append(buf, intToBytes(_Q, 20)...) + buf = append(buf, intToBytes(_P, 64+t*8)...) + buf = append(buf, intToBytes(_G, 64+t*8)...) + buf = append(buf, intToBytes(_Y, 64+t*8)...) + return buf +} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/kscan.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_keyscan.go similarity index 81% rename from Godeps/_workspace/src/github.com/miekg/dns/kscan.go rename to Godeps/_workspace/src/github.com/miekg/dns/dnssec_keyscan.go index 8cc729abce..ce52b877ba 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/kscan.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_keyscan.go @@ -18,8 +18,8 @@ func (k *DNSKEY) NewPrivateKey(s string) (PrivateKey, error) { // ReadPrivateKey reads a private key from the io.Reader q. The string file is // only used in error reporting. -// The public key must be -// known, because some cryptographic algorithms embed the public inside the privatekey. +// The public key must be known, because some cryptographic algorithms embed +// the public inside the privatekey. 
func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (PrivateKey, error) { m, e := parseKey(q, file) if m == nil { @@ -34,14 +34,16 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (PrivateKey, error) { // TODO(mg): check if the pubkey matches the private key switch m["algorithm"] { case "3 (DSA)": - p, e := readPrivateKeyDSA(m) + priv, e := readPrivateKeyDSA(m) if e != nil { return nil, e } - if !k.setPublicKeyInPrivate(p) { - return nil, ErrPrivKey + pub := k.publicKeyDSA() + if pub == nil { + return nil, ErrKey } - return p, e + priv.PublicKey = *pub + return (*DSAPrivateKey)(priv), e case "1 (RSAMD5)": fallthrough case "5 (RSASHA1)": @@ -51,44 +53,44 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (PrivateKey, error) { case "8 (RSASHA256)": fallthrough case "10 (RSASHA512)": - p, e := readPrivateKeyRSA(m) + priv, e := readPrivateKeyRSA(m) if e != nil { return nil, e } - if !k.setPublicKeyInPrivate(p) { - return nil, ErrPrivKey + pub := k.publicKeyRSA() + if pub == nil { + return nil, ErrKey } - return p, e + priv.PublicKey = *pub + return (*RSAPrivateKey)(priv), e case "12 (ECC-GOST)": - p, e := readPrivateKeyGOST(m) - if e != nil { - return nil, e - } - // setPublicKeyInPrivate(p) - return p, e + return nil, ErrPrivKey case "13 (ECDSAP256SHA256)": fallthrough case "14 (ECDSAP384SHA384)": - p, e := readPrivateKeyECDSA(m) + priv, e := readPrivateKeyECDSA(m) if e != nil { return nil, e } - if !k.setPublicKeyInPrivate(p) { - return nil, ErrPrivKey + pub := k.publicKeyECDSA() + if pub == nil { + return nil, ErrKey } - return p, e + priv.PublicKey = *pub + return (*ECDSAPrivateKey)(priv), e + default: + return nil, ErrPrivKey } - return nil, ErrPrivKey } // Read a private key (file) string and create a public key. Return the private key. 
-func readPrivateKeyRSA(m map[string]string) (PrivateKey, error) { +func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { p := new(rsa.PrivateKey) p.Primes = []*big.Int{nil, nil} for k, v := range m { switch k { case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": - v1, err := packBase64([]byte(v)) + v1, err := fromBase64([]byte(v)) if err != nil { return nil, err } @@ -119,13 +121,13 @@ func readPrivateKeyRSA(m map[string]string) (PrivateKey, error) { return p, nil } -func readPrivateKeyDSA(m map[string]string) (PrivateKey, error) { +func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { p := new(dsa.PrivateKey) p.X = big.NewInt(0) for k, v := range m { switch k { case "private_value(x)": - v1, err := packBase64([]byte(v)) + v1, err := fromBase64([]byte(v)) if err != nil { return nil, err } @@ -137,14 +139,14 @@ func readPrivateKeyDSA(m map[string]string) (PrivateKey, error) { return p, nil } -func readPrivateKeyECDSA(m map[string]string) (PrivateKey, error) { +func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { p := new(ecdsa.PrivateKey) p.D = big.NewInt(0) // TODO: validate that the required flags are present for k, v := range m { switch k { case "privatekey": - v1, err := packBase64([]byte(v)) + v1, err := fromBase64([]byte(v)) if err != nil { return nil, err } @@ -156,11 +158,6 @@ func readPrivateKeyECDSA(m map[string]string) (PrivateKey, error) { return p, nil } -func readPrivateKeyGOST(m map[string]string) (PrivateKey, error) { - // TODO(miek) - return nil, nil -} - // parseKey reads a private key from r. It returns a map[string]string, // with the key-value pairs, or an error when the file is not correct. 
func parseKey(r io.Reader, file string) (map[string]string, error) { diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_privkey.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_privkey.go new file mode 100644 index 0000000000..955798a78e --- /dev/null +++ b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_privkey.go @@ -0,0 +1,143 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "math/big" + "strconv" +) + +const _FORMAT = "Private-key-format: v1.3\n" + +type PrivateKey interface { + Sign([]byte, uint8) ([]byte, error) + String(uint8) string +} + +// PrivateKeyString converts a PrivateKey to a string. This string has the same +// format as the private-key-file of BIND9 (Private-key-format: v1.3). +// It needs some info from the key (the algorithm), so its a method of the +// DNSKEY and calls PrivateKey.String(alg). +func (r *DNSKEY) PrivateKeyString(p PrivateKey) string { + return p.String(r.Algorithm) +} + +type RSAPrivateKey rsa.PrivateKey + +func (p *RSAPrivateKey) Sign(hashed []byte, alg uint8) ([]byte, error) { + var hash crypto.Hash + switch alg { + case RSASHA1, RSASHA1NSEC3SHA1: + hash = crypto.SHA1 + case RSASHA256: + hash = crypto.SHA256 + case RSASHA512: + hash = crypto.SHA512 + default: + return nil, ErrAlg + } + return rsa.SignPKCS1v15(nil, (*rsa.PrivateKey)(p), hash, hashed) +} + +func (p *RSAPrivateKey) String(alg uint8) string { + algorithm := strconv.Itoa(int(alg)) + " (" + AlgorithmToString[alg] + ")" + modulus := toBase64(p.PublicKey.N.Bytes()) + e := big.NewInt(int64(p.PublicKey.E)) + publicExponent := toBase64(e.Bytes()) + privateExponent := toBase64(p.D.Bytes()) + prime1 := toBase64(p.Primes[0].Bytes()) + prime2 := toBase64(p.Primes[1].Bytes()) + // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm + // and from: http://code.google.com/p/go/issues/detail?id=987 + one := big.NewInt(1) + p_1 := 
big.NewInt(0).Sub(p.Primes[0], one) + q_1 := big.NewInt(0).Sub(p.Primes[1], one) + exp1 := big.NewInt(0).Mod(p.D, p_1) + exp2 := big.NewInt(0).Mod(p.D, q_1) + coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0]) + + exponent1 := toBase64(exp1.Bytes()) + exponent2 := toBase64(exp2.Bytes()) + coefficient := toBase64(coeff.Bytes()) + + return _FORMAT + + "Algorithm: " + algorithm + "\n" + + "Modulus: " + modulus + "\n" + + "PublicExponent: " + publicExponent + "\n" + + "PrivateExponent: " + privateExponent + "\n" + + "Prime1: " + prime1 + "\n" + + "Prime2: " + prime2 + "\n" + + "Exponent1: " + exponent1 + "\n" + + "Exponent2: " + exponent2 + "\n" + + "Coefficient: " + coefficient + "\n" +} + +type ECDSAPrivateKey ecdsa.PrivateKey + +func (p *ECDSAPrivateKey) Sign(hashed []byte, alg uint8) ([]byte, error) { + var intlen int + switch alg { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + default: + return nil, ErrAlg + } + r1, s1, err := ecdsa.Sign(rand.Reader, (*ecdsa.PrivateKey)(p), hashed) + if err != nil { + return nil, err + } + signature := intToBytes(r1, intlen) + signature = append(signature, intToBytes(s1, intlen)...) + return signature, nil +} + +func (p *ECDSAPrivateKey) String(alg uint8) string { + algorithm := strconv.Itoa(int(alg)) + " (" + AlgorithmToString[alg] + ")" + var intlen int + switch alg { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + private := toBase64(intToBytes(p.D, intlen)) + return _FORMAT + + "Algorithm: " + algorithm + "\n" + + "PrivateKey: " + private + "\n" +} + +type DSAPrivateKey dsa.PrivateKey + +func (p *DSAPrivateKey) Sign(hashed []byte, alg uint8) ([]byte, error) { + r1, s1, err := dsa.Sign(rand.Reader, (*dsa.PrivateKey)(p), hashed) + if err != nil { + return nil, err + } + t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) + signature := []byte{byte(t)} + signature = append(signature, intToBytes(r1, 20)...) 
+ signature = append(signature, intToBytes(s1, 20)...) + return signature, nil +} + +func (p *DSAPrivateKey) String(alg uint8) string { + algorithm := strconv.Itoa(int(alg)) + " (" + AlgorithmToString[alg] + ")" + T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) + prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) + subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) + base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) + priv := toBase64(intToBytes(p.X, 20)) + pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) + return _FORMAT + + "Algorithm: " + algorithm + "\n" + + "Prime(p): " + prime + "\n" + + "Subprime(q): " + subprime + "\n" + + "Base(g): " + base + "\n" + + "Private_value(x): " + priv + "\n" + + "Public_value(y): " + pub + "\n" +} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go index 484367cf1e..76f56c242d 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go @@ -1,7 +1,6 @@ package dns import ( - "crypto/rsa" "reflect" "strings" "testing" @@ -34,6 +33,9 @@ func getSoa() *SOA { } func TestGenerateEC(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } key := new(DNSKEY) key.Hdr.Rrtype = TypeDNSKEY key.Hdr.Name = "miek.nl." @@ -48,6 +50,9 @@ func TestGenerateEC(t *testing.T) { } func TestGenerateDSA(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } key := new(DNSKEY) key.Hdr.Rrtype = TypeDNSKEY key.Hdr.Name = "miek.nl." @@ -62,6 +67,9 @@ func TestGenerateDSA(t *testing.T) { } func TestGenerateRSA(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } key := new(DNSKEY) key.Hdr.Rrtype = TypeDNSKEY key.Hdr.Name = "miek.nl." 
@@ -240,12 +248,13 @@ func Test65534(t *testing.T) { } func TestDnskey(t *testing.T) { - // f, _ := os.Open("t/Kmiek.nl.+010+05240.key") - pubkey, _ := ReadRR(strings.NewReader(` + pubkey, err := ReadRR(strings.NewReader(` miek.nl. IN DNSKEY 256 3 10 AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL ;{id = 5240 (zsk), size = 1024b} `), "Kmiek.nl.+010+05240.key") - privkey, _ := pubkey.(*DNSKEY).ReadPrivateKey(strings.NewReader(` -Private-key-format: v1.2 + if err != nil { + t.Fatal(err) + } + privStr := `Private-key-format: v1.3 Algorithm: 10 (RSASHA512) Modulus: m4wK7YV26AeROtdiCXmqLG9wPDVoMOW8vjr/EkpscEAdjXp81RvZvrlzCSjYmz9onFRgltmTl3AINnFh+t9tlW0M9C5zejxBoKFXELv8ljPYAdz2oe+pDWPhWsfvVFYg2VCjpViPM38EakyE5mhk4TDOnUd+w4TeU1hyhZTWyYs= PublicExponent: AQAB @@ -255,13 +264,21 @@ Prime2: xA1bF8M0RTIQ6+A11AoVG6GIR/aPGg5sogRkIZ7ID/sF6g9HMVU/CM2TqVEBJLRPp73cv6Ze Exponent1: xzkblyZ96bGYxTVZm2/vHMOXswod4KWIyMoOepK6B/ZPcZoIT6omLCgtypWtwHLfqyCz3MK51Nc0G2EGzg8rFQ== Exponent2: Pu5+mCEb7T5F+kFNZhQadHUklt0JUHbi3hsEvVoHpEGSw3BGDQrtIflDde0/rbWHgDPM4WQY+hscd8UuTXrvLw== Coefficient: UuRoNqe7YHnKmQzE6iDWKTMIWTuoqqrFAmXPmKQnC+Y+BQzOVEHUo9bXdDnoI9hzXP1gf8zENMYwYLeWpuYlFQ== -`), "Kmiek.nl.+010+05240.private") +` + privkey, err := pubkey.(*DNSKEY).ReadPrivateKey(strings.NewReader(privStr), + "Kmiek.nl.+010+05240.private") + if err != nil { + t.Fatal(err) + } if pubkey.(*DNSKEY).PublicKey != "AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL" { t.Log("pubkey is not what we've read") t.Fail() } - // Coefficient looks fishy... 
- t.Logf("%s", pubkey.(*DNSKEY).PrivateKeyString(privkey)) + if pubkey.(*DNSKEY).PrivateKeyString(privkey) != privStr { + t.Log("privkey is not what we've read") + t.Logf("%v", pubkey.(*DNSKEY).PrivateKeyString(privkey)) + t.Fail() + } } func TestTag(t *testing.T) { @@ -283,6 +300,9 @@ func TestTag(t *testing.T) { } func TestKeyRSA(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } key := new(DNSKEY) key.Hdr.Name = "miek.nl." key.Hdr.Rrtype = TypeDNSKEY @@ -368,7 +388,7 @@ Activate: 20110302104537` t.Fail() } switch priv := p.(type) { - case *rsa.PrivateKey: + case *RSAPrivateKey: if 65537 != priv.PublicKey.E { t.Log("exponenent should be 65537") t.Fail() @@ -443,12 +463,19 @@ PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` sig.SignerName = eckey.(*DNSKEY).Hdr.Name sig.Algorithm = eckey.(*DNSKEY).Algorithm - sig.Sign(privkey, []RR{a}) + if sig.Sign(privkey, []RR{a}) != nil { + t.Fatal("failure to sign the record") + } - t.Logf("%s", sig.String()) if e := sig.Verify(eckey.(*DNSKEY), []RR{a}); e != nil { - t.Logf("failure to validate: %s", e.Error()) - t.Fail() + t.Logf("\n%s\n%s\n%s\n\n%s\n\n", + eckey.(*DNSKEY).String(), + a.String(), + sig.String(), + eckey.(*DNSKEY).PrivateKeyString(privkey), + ) + + t.Fatalf("failure to validate: %s", e.Error()) } } @@ -491,6 +518,13 @@ func TestSignVerifyECDSA2(t *testing.T) { err = sig.Verify(key, []RR{srv}) if err != nil { + t.Logf("\n%s\n%s\n%s\n\n%s\n\n", + key.String(), + srv.String(), + sig.String(), + key.PrivateKeyString(privkey), + ) + t.Fatal("Failure to validate:", err) } } diff --git a/Godeps/_workspace/src/github.com/miekg/dns/edns.go b/Godeps/_workspace/src/github.com/miekg/dns/edns.go index e5003e0bde..8b676e6124 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/edns.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/edns.go @@ -287,7 +287,7 @@ func (e *EDNS0_SUBNET) unpack(b []byte) error { case 1: addr := make([]byte, 4) for i := 0; i < 
int(e.SourceNetmask/8); i++ { - if 4+i > len(b) { + if i >= len(addr) || 4+i >= len(b) { return ErrBuf } addr[i] = b[4+i] @@ -296,7 +296,7 @@ func (e *EDNS0_SUBNET) unpack(b []byte) error { case 2: addr := make([]byte, 16) for i := 0; i < int(e.SourceNetmask/8); i++ { - if 4+i > len(b) { + if i >= len(addr) || 4+i >= len(b) { return ErrBuf } addr[i] = b[4+i] diff --git a/Godeps/_workspace/src/github.com/miekg/dns/example_test.go b/Godeps/_workspace/src/github.com/miekg/dns/example_test.go index 216b8b39c3..1578a4d053 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/example_test.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/example_test.go @@ -49,7 +49,7 @@ func ExampleDS(zone string) { } for _, k := range r.Answer { if key, ok := k.(*dns.DNSKEY); ok { - for _, alg := range []int{dns.SHA1, dns.SHA256, dns.SHA384} { + for _, alg := range []uint8{dns.SHA1, dns.SHA256, dns.SHA384} { fmt.Printf("%s; %d\n", key.ToDS(alg).String(), key.Flags) } } diff --git a/Godeps/_workspace/src/github.com/miekg/dns/format.go b/Godeps/_workspace/src/github.com/miekg/dns/format.go new file mode 100644 index 0000000000..1ac1664fe2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/miekg/dns/format.go @@ -0,0 +1,96 @@ +package dns + +import ( + "net" + "reflect" + "strconv" +) + +// NumField returns the number of rdata fields r has. +func NumField(r RR) int { + return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header +} + +// Field returns the rdata field i as a string. Fields are indexed starting from 1. +// RR types that holds slice data, for instance the NSEC type bitmap will return a single +// string where the types are concatenated using a space. +// Accessing non existing fields will cause a panic. 
+func Field(r RR, i int) string { + if i == 0 { + return "" + } + d := reflect.ValueOf(r).Elem().Field(i) + switch k := d.Kind(); k { + case reflect.String: + return d.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(d.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(d.Uint(), 10) + case reflect.Slice: + switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { + case `dns:"a"`: + // TODO(miek): Hmm store this as 16 bytes + if d.Len() < net.IPv6len { + return net.IPv4(byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint())).String() + } + return net.IPv4(byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint())).String() + case `dns:"aaaa"`: + return net.IP{ + byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint()), + byte(d.Index(4).Uint()), + byte(d.Index(5).Uint()), + byte(d.Index(6).Uint()), + byte(d.Index(7).Uint()), + byte(d.Index(8).Uint()), + byte(d.Index(9).Uint()), + byte(d.Index(10).Uint()), + byte(d.Index(11).Uint()), + byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint()), + }.String() + case `dns:"nsec"`: + if d.Len() == 0 { + return "" + } + s := Type(d.Index(0).Uint()).String() + for i := 1; i < d.Len(); i++ { + s += " " + Type(d.Index(i).Uint()).String() + } + return s + case `dns:"wks"`: + if d.Len() == 0 { + return "" + } + s := strconv.Itoa(int(d.Index(0).Uint())) + for i := 0; i < d.Len(); i++ { + s += " " + strconv.Itoa(int(d.Index(i).Uint())) + } + return s + default: + // if it does not have a tag its a string slice + fallthrough + case `dns:"txt"`: + if d.Len() == 0 { + return "" + } + s := d.Index(0).String() + for i := 1; i < d.Len(); i++ { + s += " " + d.Index(i).String() + } + return s + } + } + return 
"" +} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/keygen.go b/Godeps/_workspace/src/github.com/miekg/dns/keygen.go deleted file mode 100644 index bc1a079779..0000000000 --- a/Godeps/_workspace/src/github.com/miekg/dns/keygen.go +++ /dev/null @@ -1,149 +0,0 @@ -package dns - -import ( - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "math/big" - "strconv" -) - -const _FORMAT = "Private-key-format: v1.3\n" - -// Empty interface that is used as a wrapper around all possible -// private key implementations from the crypto package. -type PrivateKey interface{} - -// Generate generates a DNSKEY of the given bit size. -// The public part is put inside the DNSKEY record. -// The Algorithm in the key must be set as this will define -// what kind of DNSKEY will be generated. -// The ECDSA algorithms imply a fixed keysize, in that case -// bits should be set to the size of the algorithm. -func (r *DNSKEY) Generate(bits int) (PrivateKey, error) { - switch r.Algorithm { - case DSA, DSANSEC3SHA1: - if bits != 1024 { - return nil, ErrKeySize - } - case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: - if bits < 512 || bits > 4096 { - return nil, ErrKeySize - } - case RSASHA512: - if bits < 1024 || bits > 4096 { - return nil, ErrKeySize - } - case ECDSAP256SHA256: - if bits != 256 { - return nil, ErrKeySize - } - case ECDSAP384SHA384: - if bits != 384 { - return nil, ErrKeySize - } - } - - switch r.Algorithm { - case DSA, DSANSEC3SHA1: - params := new(dsa.Parameters) - if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { - return nil, err - } - priv := new(dsa.PrivateKey) - priv.PublicKey.Parameters = *params - err := dsa.GenerateKey(priv, rand.Reader) - if err != nil { - return nil, err - } - r.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) - return priv, nil - case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: - priv, err := rsa.GenerateKey(rand.Reader, bits) - if err != nil { 
- return nil, err - } - r.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) - return priv, nil - case ECDSAP256SHA256, ECDSAP384SHA384: - var c elliptic.Curve - switch r.Algorithm { - case ECDSAP256SHA256: - c = elliptic.P256() - case ECDSAP384SHA384: - c = elliptic.P384() - } - priv, err := ecdsa.GenerateKey(c, rand.Reader) - if err != nil { - return nil, err - } - r.setPublicKeyCurve(priv.PublicKey.X, priv.PublicKey.Y) - return priv, nil - default: - return nil, ErrAlg - } - return nil, nil // Dummy return -} - -// PrivateKeyString converts a PrivateKey to a string. This -// string has the same format as the private-key-file of BIND9 (Private-key-format: v1.3). -// It needs some info from the key (hashing, keytag), so its a method of the DNSKEY. -func (r *DNSKEY) PrivateKeyString(p PrivateKey) (s string) { - switch t := p.(type) { - case *rsa.PrivateKey: - algorithm := strconv.Itoa(int(r.Algorithm)) + " (" + AlgorithmToString[r.Algorithm] + ")" - modulus := unpackBase64(t.PublicKey.N.Bytes()) - e := big.NewInt(int64(t.PublicKey.E)) - publicExponent := unpackBase64(e.Bytes()) - privateExponent := unpackBase64(t.D.Bytes()) - prime1 := unpackBase64(t.Primes[0].Bytes()) - prime2 := unpackBase64(t.Primes[1].Bytes()) - // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm - // and from: http://code.google.com/p/go/issues/detail?id=987 - one := big.NewInt(1) - minusone := big.NewInt(-1) - p_1 := big.NewInt(0).Sub(t.Primes[0], one) - q_1 := big.NewInt(0).Sub(t.Primes[1], one) - exp1 := big.NewInt(0).Mod(t.D, p_1) - exp2 := big.NewInt(0).Mod(t.D, q_1) - coeff := big.NewInt(0).Exp(t.Primes[1], minusone, t.Primes[0]) - - exponent1 := unpackBase64(exp1.Bytes()) - exponent2 := unpackBase64(exp2.Bytes()) - coefficient := unpackBase64(coeff.Bytes()) - - s = _FORMAT + - "Algorithm: " + algorithm + "\n" + - "Modules: " + modulus + "\n" + - "PublicExponent: " + publicExponent + "\n" + - "PrivateExponent: " + 
privateExponent + "\n" + - "Prime1: " + prime1 + "\n" + - "Prime2: " + prime2 + "\n" + - "Exponent1: " + exponent1 + "\n" + - "Exponent2: " + exponent2 + "\n" + - "Coefficient: " + coefficient + "\n" - case *ecdsa.PrivateKey: - algorithm := strconv.Itoa(int(r.Algorithm)) + " (" + AlgorithmToString[r.Algorithm] + ")" - private := unpackBase64(t.D.Bytes()) - s = _FORMAT + - "Algorithm: " + algorithm + "\n" + - "PrivateKey: " + private + "\n" - case *dsa.PrivateKey: - algorithm := strconv.Itoa(int(r.Algorithm)) + " (" + AlgorithmToString[r.Algorithm] + ")" - prime := unpackBase64(t.PublicKey.Parameters.P.Bytes()) - subprime := unpackBase64(t.PublicKey.Parameters.Q.Bytes()) - base := unpackBase64(t.PublicKey.Parameters.G.Bytes()) - priv := unpackBase64(t.X.Bytes()) - pub := unpackBase64(t.PublicKey.Y.Bytes()) - s = _FORMAT + - "Algorithm: " + algorithm + "\n" + - "Prime(p): " + prime + "\n" + - "Subprime(q): " + subprime + "\n" + - "Base(g): " + base + "\n" + - "Private_value(x): " + priv + "\n" + - "Public_value(y): " + pub + "\n" - } - return -} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/msg.go b/Godeps/_workspace/src/github.com/miekg/dns/msg.go index 2b12cd4801..aded96532f 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/msg.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/msg.go @@ -12,7 +12,7 @@ import ( "encoding/base32" "encoding/base64" "encoding/hex" - "fmt" + "math/big" "math/rand" "net" "reflect" @@ -91,6 +91,7 @@ var TypeToString = map[uint16]string{ TypeATMA: "ATMA", TypeAXFR: "AXFR", // Meta RR TypeCAA: "CAA", + TypeCDNSKEY: "CDNSKEY", TypeCDS: "CDS", TypeCERT: "CERT", TypeCNAME: "CNAME", @@ -109,6 +110,7 @@ var TypeToString = map[uint16]string{ TypeIPSECKEY: "IPSECKEY", TypeISDN: "ISDN", TypeIXFR: "IXFR", // Meta RR + TypeKEY: "KEY", TypeKX: "KX", TypeL32: "L32", TypeL64: "L64", @@ -139,6 +141,7 @@ var TypeToString = map[uint16]string{ TypeRP: "RP", TypeRRSIG: "RRSIG", TypeRT: "RT", + TypeSIG: "SIG", TypeSOA: "SOA", TypeSPF: "SPF", 
TypeSRV: "SRV", @@ -419,7 +422,15 @@ Loop: s = append(s, '\\', 'r') default: if b < 32 || b >= 127 { // unprintable use \DDD - s = append(s, fmt.Sprintf("\\%03d", b)...) + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } } else { s = append(s, b) } @@ -554,7 +565,15 @@ func unpackTxtString(msg []byte, offset int) (string, int, error) { s = append(s, `\n`...) default: if b < 32 || b > 127 { // unprintable - s = append(s, fmt.Sprintf("\\%03d", b)...) + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } } else { s = append(s, b) } @@ -633,6 +652,12 @@ func packStructValue(val reflect.Value, msg []byte, off int, compression map[str off += len(b) } case `dns:"a"`: + if val.Type().String() == "dns.IPSECKEY" { + // Field(2) is GatewayType, must be 1 + if val.Field(2).Uint() != 1 { + continue + } + } // It must be a slice of 4, even if it is 16, we encode // only the first 4 if off+net.IPv4len > lenmsg { @@ -657,6 +682,12 @@ func packStructValue(val reflect.Value, msg []byte, off int, compression map[str return lenmsg, &Error{err: "overflow packing a"} } case `dns:"aaaa"`: + if val.Type().String() == "dns.IPSECKEY" { + // Field(2) is GatewayType, must be 2 + if val.Field(2).Uint() != 2 { + continue + } + } if fv.Len() == 0 { break } @@ -668,6 +699,7 @@ func packStructValue(val reflect.Value, msg []byte, off int, compression map[str off++ } case `dns:"wks"`: + // TODO(miek): this is wrong should be lenrd if off == lenmsg { break // dyn. 
updates } @@ -794,13 +826,20 @@ func packStructValue(val reflect.Value, msg []byte, off int, compression map[str default: return lenmsg, &Error{"bad tag packing string: " + typefield.Tag.Get("dns")} case `dns:"base64"`: - b64, e := packBase64([]byte(s)) + b64, e := fromBase64([]byte(s)) if e != nil { return lenmsg, e } copy(msg[off:off+len(b64)], b64) off += len(b64) case `dns:"domain-name"`: + if val.Type().String() == "dns.IPSECKEY" { + // Field(2) is GatewayType, 1 and 2 or used for addresses + x := val.Field(2).Uint() + if x == 1 || x == 2 { + continue + } + } if off, err = PackDomainName(s, msg, off, compression, false && compress); err != nil { return lenmsg, err } @@ -815,7 +854,7 @@ func packStructValue(val reflect.Value, msg []byte, off int, compression map[str msg[off-1] = 20 fallthrough case `dns:"base32"`: - b32, e := packBase32([]byte(s)) + b32, e := fromBase32([]byte(s)) if e != nil { return lenmsg, e } @@ -875,9 +914,12 @@ func packStructCompress(any interface{}, msg []byte, off int, compression map[st // Unpack a reflect.StructValue from msg. // Same restrictions as packStructValue. 
func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err error) { - var rdend int + var lenrd int lenmsg := len(msg) for i := 0; i < val.NumField(); i++ { + if lenrd != 0 && lenrd == off { + break + } if off > lenmsg { return lenmsg, &Error{"bad offset unpacking"} } @@ -889,7 +931,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er // therefore it's expected that this interface would be PrivateRdata switch data := fv.Interface().(type) { case PrivateRdata: - n, err := data.Unpack(msg[off:rdend]) + n, err := data.Unpack(msg[off:lenrd]) if err != nil { return lenmsg, err } @@ -905,7 +947,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er // HIP record slice of name (or none) servers := make([]string, 0) var s string - for off < rdend { + for off < lenrd { s, off, err = UnpackDomainName(msg, off) if err != nil { return lenmsg, err @@ -914,17 +956,17 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er } fv.Set(reflect.ValueOf(servers)) case `dns:"txt"`: - if off == lenmsg || rdend == off { + if off == lenmsg || lenrd == off { break } var txt []string - txt, off, err = unpackTxt(msg, off, rdend) + txt, off, err = unpackTxt(msg, off, lenrd) if err != nil { return lenmsg, err } fv.Set(reflect.ValueOf(txt)) case `dns:"opt"`: // edns0 - if off == rdend { + if off == lenrd { // This is an EDNS0 (OPT Record) with no rdata // We can safely return here. break @@ -937,7 +979,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er } code, off = unpackUint16(msg, off) optlen, off1 := unpackUint16(msg, off) - if off1+int(optlen) > rdend { + if off1+int(optlen) > lenrd { return lenmsg, &Error{err: "overflow unpacking opt"} } switch code { @@ -997,24 +1039,36 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er // do nothing? 
off = off1 + int(optlen) } - if off < rdend { + if off < lenrd { goto Option } fv.Set(reflect.ValueOf(edns)) case `dns:"a"`: - if off == lenmsg { + if val.Type().String() == "dns.IPSECKEY" { + // Field(2) is GatewayType, must be 1 + if val.Field(2).Uint() != 1 { + continue + } + } + if off == lenrd { break // dyn. update } - if off+net.IPv4len > rdend { + if off+net.IPv4len > lenrd || off+net.IPv4len > lenmsg { return lenmsg, &Error{err: "overflow unpacking a"} } fv.Set(reflect.ValueOf(net.IPv4(msg[off], msg[off+1], msg[off+2], msg[off+3]))) off += net.IPv4len case `dns:"aaaa"`: - if off == rdend { + if val.Type().String() == "dns.IPSECKEY" { + // Field(2) is GatewayType, must be 2 + if val.Field(2).Uint() != 2 { + continue + } + } + if off == lenrd { break } - if off+net.IPv6len > rdend || off+net.IPv6len > lenmsg { + if off+net.IPv6len > lenrd || off+net.IPv6len > lenmsg { return lenmsg, &Error{err: "overflow unpacking aaaa"} } fv.Set(reflect.ValueOf(net.IP{msg[off], msg[off+1], msg[off+2], msg[off+3], msg[off+4], @@ -1025,7 +1079,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er // Rest of the record is the bitmap serv := make([]uint16, 0) j := 0 - for off < rdend { + for off < lenrd { if off+1 > lenmsg { return lenmsg, &Error{err: "overflow unpacking wks"} } @@ -1060,17 +1114,17 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er } fv.Set(reflect.ValueOf(serv)) case `dns:"nsec"`: // NSEC/NSEC3 - if off == rdend { + if off == lenrd { break } // Rest of the record is the type bitmap - if off+2 > rdend || off+2 > lenmsg { + if off+2 > lenrd || off+2 > lenmsg { return lenmsg, &Error{err: "overflow unpacking nsecx"} } nsec := make([]uint16, 0) length := 0 window := 0 - for off+2 < rdend { + for off+2 < lenrd { window = int(msg[off]) length = int(msg[off+1]) //println("off, windows, length, end", off, window, length, endrr) @@ -1127,7 +1181,7 @@ func unpackStructValue(val reflect.Value, msg []byte, 
off int) (off1 int, err er return lenmsg, err } if val.Type().Field(i).Name == "Hdr" { - rdend = off + int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint()) + lenrd = off + int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint()) } case reflect.Uint8: if off == lenmsg { @@ -1184,29 +1238,36 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er default: return lenmsg, &Error{"bad tag unpacking string: " + val.Type().Field(i).Tag.Get("dns")} case `dns:"hex"`: - hexend := rdend + hexend := lenrd if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) { hexend = off + int(val.FieldByName("HitLength").Uint()) } - if hexend > rdend || hexend > lenmsg { + if hexend > lenrd || hexend > lenmsg { return lenmsg, &Error{err: "overflow unpacking hex"} } s = hex.EncodeToString(msg[off:hexend]) off = hexend case `dns:"base64"`: // Rest of the RR is base64 encoded value - b64end := rdend + b64end := lenrd if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) { b64end = off + int(val.FieldByName("PublicKeyLength").Uint()) } - if b64end > rdend || b64end > lenmsg { + if b64end > lenrd || b64end > lenmsg { return lenmsg, &Error{err: "overflow unpacking base64"} } - s = unpackBase64(msg[off:b64end]) + s = toBase64(msg[off:b64end]) off = b64end case `dns:"cdomain-name"`: fallthrough case `dns:"domain-name"`: + if val.Type().String() == "dns.IPSECKEY" { + // Field(2) is GatewayType, 1 and 2 or used for addresses + x := val.Field(2).Uint() + if x == 1 || x == 2 { + continue + } + } if off == lenmsg { // zero rdata foo, OK for dyn. 
updates break @@ -1228,7 +1289,7 @@ func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err er if off+size > lenmsg { return lenmsg, &Error{err: "overflow unpacking base32"} } - s = unpackBase32(msg[off : off+size]) + s = toBase32(msg[off : off+size]) off += size case `dns:"size-hex"`: // a "size" string, but it must be encoded in hex in the string @@ -1276,58 +1337,53 @@ func dddToByte(s []byte) byte { return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) } -// Helper function for unpacking -func unpackUint16(msg []byte, off int) (v uint16, off1 int) { - v = uint16(msg[off])<<8 | uint16(msg[off+1]) - off1 = off + 2 - return -} - // UnpackStruct unpacks a binary message from offset off to the interface // value given. -func UnpackStruct(any interface{}, msg []byte, off int) (off1 int, err error) { - off, err = unpackStructValue(structValue(any), msg, off) - return off, err +func UnpackStruct(any interface{}, msg []byte, off int) (int, error) { + return unpackStructValue(structValue(any), msg, off) } -func unpackBase32(b []byte) string { - b32 := make([]byte, base32.HexEncoding.EncodedLen(len(b))) - base32.HexEncoding.Encode(b32, b) - return string(b32) +// Helper function for packing and unpacking +func intToBytes(i *big.Int, length int) []byte { + buf := i.Bytes() + if len(buf) < length { + b := make([]byte, length) + copy(b[length-len(buf):], buf) + return b + } + return buf } -func unpackBase64(b []byte) string { - b64 := make([]byte, base64.StdEncoding.EncodedLen(len(b))) - base64.StdEncoding.Encode(b64, b) - return string(b64) +func unpackUint16(msg []byte, off int) (uint16, int) { + return uint16(msg[off])<<8 | uint16(msg[off+1]), off + 2 } -// Helper function for packing func packUint16(i uint16) (byte, byte) { return byte(i >> 8), byte(i) } -func packBase64(s []byte) ([]byte, error) { - b64len := base64.StdEncoding.DecodedLen(len(s)) - buf := make([]byte, b64len) - n, err := base64.StdEncoding.Decode(buf, []byte(s)) - if err != 
nil { - return nil, err - } - buf = buf[:n] - return buf, nil +func toBase32(b []byte) string { + return base32.HexEncoding.EncodeToString(b) } -// Helper function for packing, mostly used in dnssec.go -func packBase32(s []byte) ([]byte, error) { - b32len := base32.HexEncoding.DecodedLen(len(s)) - buf := make([]byte, b32len) - n, err := base32.HexEncoding.Decode(buf, []byte(s)) - if err != nil { - return nil, err - } +func fromBase32(s []byte) (buf []byte, err error) { + buflen := base32.HexEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base32.HexEncoding.Decode(buf, s) buf = buf[:n] - return buf, nil + return +} + +func toBase64(b []byte) string { + return base64.StdEncoding.EncodeToString(b) +} + +func fromBase64(s []byte) (buf []byte, err error) { + buflen := base64.StdEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base64.StdEncoding.Decode(buf, s) + buf = buf[:n] + return } // PackRR packs a resource record rr into msg[off:]. @@ -1856,25 +1912,34 @@ func (dns *Msg) Copy() *Msg { copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy } + rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) + var rri int + if len(dns.Answer) > 0 { - r1.Answer = make([]RR, len(dns.Answer)) + rrbegin := rri for i := 0; i < len(dns.Answer); i++ { - r1.Answer[i] = dns.Answer[i].copy() + rrArr[rri] = dns.Answer[i].copy() + rri++ } + r1.Answer = rrArr[rrbegin:rri:rri] } if len(dns.Ns) > 0 { - r1.Ns = make([]RR, len(dns.Ns)) + rrbegin := rri for i := 0; i < len(dns.Ns); i++ { - r1.Ns[i] = dns.Ns[i].copy() + rrArr[rri] = dns.Ns[i].copy() + rri++ } + r1.Ns = rrArr[rrbegin:rri:rri] } if len(dns.Extra) > 0 { - r1.Extra = make([]RR, len(dns.Extra)) + rrbegin := rri for i := 0; i < len(dns.Extra); i++ { - r1.Extra[i] = dns.Extra[i].copy() + rrArr[rri] = dns.Extra[i].copy() + rri++ } + r1.Extra = rrArr[rrbegin:rri:rri] } return r1 diff --git 
a/Godeps/_workspace/src/github.com/miekg/dns/nsecx.go b/Godeps/_workspace/src/github.com/miekg/dns/nsecx.go index 28bf08d9be..ac48da0f5a 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/nsecx.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/nsecx.go @@ -47,7 +47,7 @@ func HashName(label string, ha uint8, iter uint16, salt string) string { io.WriteString(s, string(nsec3)) nsec3 = s.Sum(nil) } - return unpackBase32(nsec3) + return toBase32(nsec3) } type Denialer interface { diff --git a/Godeps/_workspace/src/github.com/miekg/dns/parse_test.go b/Godeps/_workspace/src/github.com/miekg/dns/parse_test.go index 3c18b38d69..0bc47369c4 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/parse_test.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/parse_test.go @@ -386,8 +386,8 @@ func TestNSEC(t *testing.T) { func TestParseLOC(t *testing.T) { lt := map[string]string{ - "SW1A2AA.find.me.uk. LOC 51 30 12.748 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 30 12.748 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m", - "SW1A2AA.find.me.uk. LOC 51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m", + "SW1A2AA.find.me.uk. LOC 51 30 12.748 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 30 12.748 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", + "SW1A2AA.find.me.uk. LOC 51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", } for i, o := range lt { rr, e := NewRR(i) @@ -652,7 +652,7 @@ moutamassey NS ns01.yahoodomains.jp. 
} func ExampleHIP() { - h := `www.example.com IN HIP ( 2 200100107B1A74DF365639CC39F1D578 + h := `www.example.com IN HIP ( 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p 9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D @@ -661,7 +661,7 @@ b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D fmt.Printf("%s\n", hip.String()) } // Output: - // www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com. + // www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com. } func TestHIP(t *testing.T) { @@ -1225,35 +1225,21 @@ func TestMalformedPackets(t *testing.T) { } } -func TestDynamicUpdateParsing(t *testing.T) { - prefix := "example.com. IN " - for _, typ := range TypeToString { - if typ == "CAA" || typ == "OPT" || typ == "AXFR" || typ == "IXFR" || typ == "ANY" || typ == "TKEY" || - typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" { - continue - } - r, e := NewRR(prefix + typ) - if e != nil { - t.Log("failure to parse: " + prefix + typ) - t.Fail() - } else { - t.Logf("parsed: %s", r.String()) - } - } -} - type algorithm struct { name uint8 bits int } -func TestNewPrivateKeyECDSA(t *testing.T) { +func TestNewPrivateKey(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } algorithms := []algorithm{ algorithm{ECDSAP256SHA256, 256}, algorithm{ECDSAP384SHA384, 384}, algorithm{RSASHA1, 1024}, algorithm{RSASHA256, 2048}, - // algorithm{DSA, 1024}, // TODO: STILL BROKEN! 
+ algorithm{DSA, 1024}, } for _, algo := range algorithms { @@ -1272,12 +1258,15 @@ func TestNewPrivateKeyECDSA(t *testing.T) { newPrivKey, err := key.NewPrivateKey(key.PrivateKeyString(privkey)) if err != nil { + t.Log(key.String()) + t.Log(key.PrivateKeyString(privkey)) + t.Fatal(err.Error()) } switch newPrivKey := newPrivKey.(type) { - case *rsa.PrivateKey: - newPrivKey.Precompute() + case *RSAPrivateKey: + (*rsa.PrivateKey)(newPrivKey).Precompute() } if !reflect.DeepEqual(privkey, newPrivKey) { @@ -1285,3 +1274,136 @@ func TestNewPrivateKeyECDSA(t *testing.T) { } } } + +// special input test +func TestNewRRSpecial(t *testing.T) { + var ( + rr RR + err error + expect string + ) + + rr, err = NewRR("; comment") + expect = "" + if err != nil { + t.Errorf("unexpected err: %s", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("") + expect = "" + if err != nil { + t.Errorf("unexpected err: %s", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("$ORIGIN foo.") + expect = "" + if err != nil { + t.Errorf("unexpected err: %s", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR(" ") + expect = "" + if err != nil { + t.Errorf("unexpected err: %s", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("\n") + expect = "" + if err != nil { + t.Errorf("unexpected err: %s", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("foo. A 1.1.1.1\nbar. A 2.2.2.2") + expect = "foo.\t3600\tIN\tA\t1.1.1.1" + if err != nil { + t.Errorf("unexpected err: %s", err) + } + if rr == nil || rr.String() != expect { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } +} + +func TestPrintfVerbsRdata(t *testing.T) { + x, _ := NewRR("www.miek.nl. 
IN MX 20 mx.miek.nl.") + if Field(x, 1) != "20" { + t.Errorf("should be 20") + } + if Field(x, 2) != "mx.miek.nl." { + t.Errorf("should be mx.miek.nl.") + } + + x, _ = NewRR("www.miek.nl. IN A 127.0.0.1") + if Field(x, 1) != "127.0.0.1" { + t.Errorf("should be 127.0.0.1") + } + + x, _ = NewRR("www.miek.nl. IN AAAA ::1") + if Field(x, 1) != "::1" { + t.Errorf("should be ::1") + } + + x, _ = NewRR("www.miek.nl. IN NSEC a.miek.nl. A NS SOA MX AAAA") + if Field(x, 1) != "a.miek.nl." { + t.Errorf("should be a.miek.nl.") + } + if Field(x, 2) != "A NS SOA MX AAAA" { + t.Errorf("should be A NS SOA MX AAAA") + } + + x, _ = NewRR("www.miek.nl. IN TXT \"first\" \"second\"") + if Field(x, 1) != "first second" { + t.Errorf("should be first second") + } + if Field(x, 0) != "" { + t.Errorf("should be empty") + } +} + +func TestParseIPSECKEY(t *testing.T) { + tests := []string{ + "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.2.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", + + "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.2.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", + + "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 1 2 192.0.2.3 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.2.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 1 2 192.0.2.3 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", + + "38.1.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 3 2 mygateway.example.com. AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.1.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 3 2 mygateway.example.com. 
AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", + + "0.d.4.0.3.0.e.f.f.f.3.f.0.1.2.0 7200 IN IPSECKEY ( 10 2 2 2001:0DB8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "0.d.4.0.3.0.e.f.f.f.3.f.0.1.2.0.\t7200\tIN\tIPSECKEY\t10 2 2 2001:db8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", + } + for i := 0; i < len(tests)-1; i++ { + t1 := tests[i] + e1 := tests[i+1] + r, e := NewRR(t1) + if e != nil { + t.Logf("failed to parse IPSECKEY %s", e) + continue + } + if r.String() != e1 { + t.Logf("these two IPSECKEY records should match") + t.Logf("\n%s\n%s\n", r.String(), e1) + t.Fail() + } + i++ + } +} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/server.go b/Godeps/_workspace/src/github.com/miekg/dns/server.go index cf3353bd76..707305e6ef 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/server.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/server.go @@ -77,8 +77,7 @@ func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { f(w, r) } -// FailedHandler returns a HandlerFunc -// returns SERVFAIL for every request it gets. +// FailedHandler returns a HandlerFunc that returns SERVFAIL for every request it gets. func HandleFailed(w ResponseWriter, r *Msg) { m := new(Msg) m.SetRcode(r, RcodeServerFailure) @@ -170,10 +169,10 @@ func (mux *ServeMux) HandleRemove(pattern string) { // is sought. // If no handler is found a standard SERVFAIL message is returned // If the request message does not have exactly one question in the -// question section a SERVFAIL is returned. +// question section a SERVFAIL is returned, unlesss Unsafe is true. 
func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) { var h Handler - if len(request.Question) != 1 { + if len(request.Question) < 1 { // allow more than one question h = failedHandler() } else { if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil { @@ -221,6 +220,11 @@ type Server struct { IdleTimeout func() time.Duration // Secret(s) for Tsig map[]. TsigSecret map[string]string + // Unsafe instructs the server to disregard any sanity checks and directly hand the message to + // the handler. It will specfically not check if the query has the QR bit not set. + Unsafe bool + // If NotifyStartedFunc is set is is called, once the server has started listening. + NotifyStartedFunc func() // For graceful shutdown. stopUDP chan bool @@ -237,6 +241,7 @@ type Server struct { func (srv *Server) ListenAndServe() error { srv.lock.Lock() if srv.started { + srv.lock.Unlock() return &Error{err: "server already started"} } srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool) @@ -282,14 +287,12 @@ func (srv *Server) ListenAndServe() error { func (srv *Server) ActivateAndServe() error { srv.lock.Lock() if srv.started { + srv.lock.Unlock() return &Error{err: "server already started"} } srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool) srv.started = true srv.lock.Unlock() - if srv.UDPSize == 0 { - srv.UDPSize = MinMsgSize - } if srv.PacketConn != nil { if srv.UDPSize == 0 { srv.UDPSize = MinMsgSize @@ -316,6 +319,7 @@ func (srv *Server) ActivateAndServe() error { func (srv *Server) Shutdown() error { srv.lock.Lock() if !srv.started { + srv.lock.Unlock() return &Error{err: "server not started"} } srv.started = false @@ -371,6 +375,11 @@ func (srv *Server) getReadTimeout() time.Duration { // Each request is handled in a seperate goroutine. 
func (srv *Server) serveTCP(l *net.TCPListener) error { defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + handler := srv.Handler if handler == nil { handler = DefaultServeMux @@ -402,6 +411,10 @@ func (srv *Server) serveTCP(l *net.TCPListener) error { func (srv *Server) serveUDP(l *net.UDPConn) error { defer l.Close() + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + handler := srv.Handler if handler == nil { handler = DefaultServeMux @@ -445,6 +458,9 @@ Redo: w.WriteMsg(x) goto Exit } + if !srv.Unsafe && req.Response { + goto Exit + } w.tsigStatus = nil if w.tsigSecret != nil { diff --git a/Godeps/_workspace/src/github.com/miekg/dns/server_test.go b/Godeps/_workspace/src/github.com/miekg/dns/server_test.go index b743013897..5efb1cf897 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/server_test.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/server_test.go @@ -32,10 +32,37 @@ func RunLocalUDPServer(laddr string) (*Server, string, error) { return nil, "", err } server := &Server{PacketConn: pc} + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + go func() { server.ActivateAndServe() pc.Close() }() + + waitLock.Lock() + return server, pc.LocalAddr().String(), nil +} + +func RunLocalUDPServerUnsafe(laddr string) (*Server, string, error) { + pc, err := net.ListenPacket("udp", laddr) + if err != nil { + return nil, "", err + } + server := &Server{PacketConn: pc, Unsafe: true} + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + + go func() { + server.ActivateAndServe() + pc.Close() + }() + + waitLock.Lock() return server, pc.LocalAddr().String(), nil } @@ -44,11 +71,19 @@ func RunLocalTCPServer(laddr string) (*Server, string, error) { if err != nil { return nil, "", err } + server := &Server{Listener: l} + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + go func() { 
server.ActivateAndServe() l.Close() }() + + waitLock.Lock() return server, l.Addr().String(), nil } @@ -68,7 +103,7 @@ func TestServing(t *testing.T) { m := new(Msg) m.SetQuestion("miek.nl.", TypeTXT) r, _, err := c.Exchange(m, addrstr) - if err != nil { + if err != nil || len(r.Extra) == 0 { t.Log("failed to exchange miek.nl", err) t.Fatal() } @@ -302,14 +337,52 @@ func TestServingLargeResponses(t *testing.T) { } } +func TestServingResponse(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + HandleFunc("miek.nl.", HelloServer) + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("Unable to run test server: %s", err) + } + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + m.Response = false + _, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Log("failed to exchange", err) + t.Fatal() + } + m.Response = true + _, _, err = c.Exchange(m, addrstr) + if err == nil { + t.Log("exchanged response message") + t.Fatal() + } + + s.Shutdown() + s, addrstr, err = RunLocalUDPServerUnsafe("127.0.0.1:0") + if err != nil { + t.Fatalf("Unable to run test server: %s", err) + } + defer s.Shutdown() + + m.Response = true + _, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Log("could exchanged response message in Unsafe mode") + t.Fatal() + } +} + func TestShutdownTCP(t *testing.T) { s, _, err := RunLocalTCPServer("127.0.0.1:0") if err != nil { t.Fatalf("Unable to run test server: %s", err) } - // it normally is too early to shutting down because server - // activates in goroutine. - runtime.Gosched() err = s.Shutdown() if err != nil { t.Errorf("Could not shutdown test TCP server, %s", err) @@ -321,9 +394,6 @@ func TestShutdownUDP(t *testing.T) { if err != nil { t.Fatalf("Unable to run test server: %s", err) } - // it normally is too early to shutting down because server - // activates in goroutine. 
- runtime.Gosched() err = s.Shutdown() if err != nil { t.Errorf("Could not shutdown test UDP server, %s", err) diff --git a/Godeps/_workspace/src/github.com/miekg/dns/sig0.go b/Godeps/_workspace/src/github.com/miekg/dns/sig0.go new file mode 100644 index 0000000000..bef88cf951 --- /dev/null +++ b/Godeps/_workspace/src/github.com/miekg/dns/sig0.go @@ -0,0 +1,236 @@ +// SIG(0) +// +// From RFC 2931: +// +// SIG(0) provides protection for DNS transactions and requests .... +// ... protection for glue records, DNS requests, protection for message headers +// on requests and responses, and protection of the overall integrity of a response. +// +// It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared +// secret approach in TSIG. +// Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and +// RSASHA512. +// +// Signing subsequent messages in multi-message sessions is not implemented. +// +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "math/big" + "strings" + "time" +) + +// Sign signs a dns.Msg. It fills the signature with the appropriate data. +// The SIG record should have the SignerName, KeyTag, Algorithm, Inception +// and Expiration set. +func (rr *SIG) Sign(k PrivateKey, m *Msg) ([]byte, error) { + if k == nil { + return nil, ErrPrivKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return nil, ErrKey + } + rr.Header().Rrtype = TypeSIG + rr.Header().Class = ClassANY + rr.Header().Ttl = 0 + rr.Header().Name = "." 
+ rr.OrigTtl = 0 + rr.TypeCovered = 0 + rr.Labels = 0 + + buf := make([]byte, m.Len()+rr.len()) + mbuf, err := m.PackBuffer(buf) + if err != nil { + return nil, err + } + if &buf[0] != &mbuf[0] { + return nil, ErrBuf + } + off, err := PackRR(rr, buf, len(mbuf), nil, false) + if err != nil { + return nil, err + } + buf = buf[:off:cap(buf)] + var hash crypto.Hash + switch rr.Algorithm { + case DSA, RSASHA1: + hash = crypto.SHA1 + case RSASHA256, ECDSAP256SHA256: + hash = crypto.SHA256 + case ECDSAP384SHA384: + hash = crypto.SHA384 + case RSASHA512: + hash = crypto.SHA512 + default: + return nil, ErrAlg + } + hasher := hash.New() + // Write SIG rdata + hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) + // Write message + hasher.Write(buf[:len(mbuf)]) + hashed := hasher.Sum(nil) + + sig, err := k.Sign(hashed, rr.Algorithm) + if err != nil { + return nil, err + } + rr.Signature = toBase64(sig) + buf = append(buf, sig...) + if len(buf) > int(^uint16(0)) { + return nil, ErrBuf + } + // Adjust sig data length + rdoff := len(mbuf) + 1 + 2 + 2 + 4 + rdlen, _ := unpackUint16(buf, rdoff) + rdlen += uint16(len(sig)) + buf[rdoff], buf[rdoff+1] = packUint16(rdlen) + // Adjust additional count + adc, _ := unpackUint16(buf, 10) + adc += 1 + buf[10], buf[11] = packUint16(adc) + return buf, nil +} + +// Verify validates the message buf using the key k. +// It's assumed that buf is a valid message from which rr was unpacked. 
+func (rr *SIG) Verify(k *KEY, buf []byte) error { + if k == nil { + return ErrKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + var hash crypto.Hash + switch rr.Algorithm { + case DSA, RSASHA1: + hash = crypto.SHA1 + case RSASHA256, ECDSAP256SHA256: + hash = crypto.SHA256 + case ECDSAP384SHA384: + hash = crypto.SHA384 + case RSASHA512: + hash = crypto.SHA512 + default: + return ErrAlg + } + hasher := hash.New() + + buflen := len(buf) + qdc, _ := unpackUint16(buf, 4) + anc, _ := unpackUint16(buf, 6) + auc, _ := unpackUint16(buf, 8) + adc, offset := unpackUint16(buf, 10) + var err error + for i := uint16(0); i < qdc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type and Class + offset += 2 + 2 + } + for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type, Class and TTL + offset += 2 + 2 + 4 + if offset+1 >= buflen { + continue + } + var rdlen uint16 + rdlen, offset = unpackUint16(buf, offset) + offset += int(rdlen) + } + if offset >= buflen { + return &Error{err: "overflowing unpacking signed message"} + } + + // offset should be just prior to SIG + bodyend := offset + // owner name SHOULD be root + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip Type, Class, TTL, RDLen + offset += 2 + 2 + 4 + 2 + sigstart := offset + // Skip Type Covered, Algorithm, Labels, Original TTL + offset += 2 + 1 + 1 + 4 + if offset+4+4 >= buflen { + return &Error{err: "overflow unpacking signed message"} + } + expire := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3]) + offset += 4 + incept := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3]) + offset += 4 + now := uint32(time.Now().Unix()) + if now < 
incept || now > expire { + return ErrTime + } + // Skip key tag + offset += 2 + var signername string + signername, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // If key has come from the DNS name compression might + // have mangled the case of the name + if strings.ToLower(signername) != strings.ToLower(k.Header().Name) { + return &Error{err: "signer name doesn't match key name"} + } + sigend := offset + hasher.Write(buf[sigstart:sigend]) + hasher.Write(buf[:10]) + hasher.Write([]byte{ + byte((adc - 1) << 8), + byte(adc - 1), + }) + hasher.Write(buf[12:bodyend]) + + hashed := hasher.Sum(nil) + sig := buf[sigend:] + switch k.Algorithm { + case DSA: + pk := k.publicKeyDSA() + sig = sig[1:] + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if dsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + case RSASHA1, RSASHA256, RSASHA512: + pk := k.publicKeyRSA() + if pk != nil { + return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) + } + case ECDSAP256SHA256, ECDSAP384SHA384: + pk := k.publicKeyECDSA() + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if ecdsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + } + return ErrKeyAlg +} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go b/Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go new file mode 100644 index 0000000000..6ca76fb848 --- /dev/null +++ b/Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go @@ -0,0 +1,96 @@ +package dns + +import ( + "testing" + "time" +) + +func TestSIG0(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + m := new(Msg) + m.SetQuestion("example.org.", TypeSOA) + for _, alg := range []uint8{DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256, RSASHA512} { + algstr := AlgorithmToString[alg] + keyrr := new(KEY) + 
keyrr.Hdr.Name = algstr + "." + keyrr.Hdr.Rrtype = TypeKEY + keyrr.Hdr.Class = ClassINET + keyrr.Algorithm = alg + keysize := 1024 + switch alg { + case ECDSAP256SHA256: + keysize = 256 + case ECDSAP384SHA384: + keysize = 384 + } + pk, err := keyrr.Generate(keysize) + if err != nil { + t.Logf("Failed to generate key for “%s”: %v", algstr, err) + t.Fail() + continue + } + now := uint32(time.Now().Unix()) + sigrr := new(SIG) + sigrr.Hdr.Name = "." + sigrr.Hdr.Rrtype = TypeSIG + sigrr.Hdr.Class = ClassANY + sigrr.Algorithm = alg + sigrr.Expiration = now + 300 + sigrr.Inception = now - 300 + sigrr.KeyTag = keyrr.KeyTag() + sigrr.SignerName = keyrr.Hdr.Name + mb, err := sigrr.Sign(pk, m) + if err != nil { + t.Logf("Failed to sign message using “%s”: %v", algstr, err) + t.Fail() + continue + } + m := new(Msg) + if err := m.Unpack(mb); err != nil { + t.Logf("Failed to unpack message signed using “%s”: %v", algstr, err) + t.Fail() + continue + } + if len(m.Extra) != 1 { + t.Logf("Missing SIG for message signed using “%s”", algstr) + t.Fail() + continue + } + var sigrrwire *SIG + switch rr := m.Extra[0].(type) { + case *SIG: + sigrrwire = rr + default: + t.Logf("Expected SIG RR, instead: %v", rr) + t.Fail() + continue + } + for _, rr := range []*SIG{sigrr, sigrrwire} { + id := "sigrr" + if rr == sigrrwire { + id = "sigrrwire" + } + if err := rr.Verify(keyrr, mb); err != nil { + t.Logf("Failed to verify “%s” signed SIG(%s): %v", algstr, id, err) + t.Fail() + continue + } + } + mb[13]++ + if err := sigrr.Verify(keyrr, mb); err == nil { + t.Logf("Verify succeeded on an altered message using “%s”", algstr) + t.Fail() + continue + } + sigrr.Expiration = 2 + sigrr.Inception = 1 + mb, _ = sigrr.Sign(pk, m) + if err := sigrr.Verify(keyrr, mb); err == nil { + t.Logf("Verify succeeded on an expired message using “%s”", algstr) + t.Fail() + continue + } + } +} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/tsig.go b/Godeps/_workspace/src/github.com/miekg/dns/tsig.go index 
f31b15be4a..4f79894f1c 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/tsig.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/tsig.go @@ -1,7 +1,7 @@ // TRANSACTION SIGNATURE // // An TSIG or transaction signature adds a HMAC TSIG record to each message sent. -// The supported algorithms include: HmacMD5, HmacSHA1 and HmacSHA256. +// The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512. // // Basic use pattern when querying with a TSIG name "axfr." (note that these key names // must be fully qualified - as they are domain names) and the base64 secret @@ -58,6 +58,7 @@ import ( "crypto/md5" "crypto/sha1" "crypto/sha256" + "crypto/sha512" "encoding/hex" "hash" "io" @@ -71,6 +72,7 @@ const ( HmacMD5 = "hmac-md5.sig-alg.reg.int." HmacSHA1 = "hmac-sha1." HmacSHA256 = "hmac-sha256." + HmacSHA512 = "hmac-sha512." ) type TSIG struct { @@ -159,7 +161,7 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s panic("dns: TSIG not last RR in additional") } // If we barf here, the caller is to blame - rawsecret, err := packBase64([]byte(secret)) + rawsecret, err := fromBase64([]byte(secret)) if err != nil { return nil, "", err } @@ -181,6 +183,8 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s h = hmac.New(sha1.New, []byte(rawsecret)) case HmacSHA256: h = hmac.New(sha256.New, []byte(rawsecret)) + case HmacSHA512: + h = hmac.New(sha512.New, []byte(rawsecret)) default: return nil, "", ErrKeyAlg } @@ -209,7 +213,7 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s // If the signature does not validate err contains the // error, otherwise it is nil. 
func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { - rawsecret, err := packBase64([]byte(secret)) + rawsecret, err := fromBase64([]byte(secret)) if err != nil { return err } @@ -225,7 +229,14 @@ func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { } buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) - ti := uint64(time.Now().Unix()) - tsig.TimeSigned + + // Fudge factor works both ways. A message can arrive before it was signed because + // of clock skew. + now := uint64(time.Now().Unix()) + ti := now - tsig.TimeSigned + if now < tsig.TimeSigned { + ti = tsig.TimeSigned - now + } if uint64(tsig.Fudge) < ti { return ErrTime } @@ -238,6 +249,8 @@ func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { h = hmac.New(sha1.New, rawsecret) case HmacSHA256: h = hmac.New(sha256.New, rawsecret) + case HmacSHA512: + h = hmac.New(sha512.New, rawsecret) default: return ErrKeyAlg } diff --git a/Godeps/_workspace/src/github.com/miekg/dns/types.go b/Godeps/_workspace/src/github.com/miekg/dns/types.go index d5b5be4065..a650850b25 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/types.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/types.go @@ -75,6 +75,7 @@ const ( TypeRKEY uint16 = 57 TypeTALINK uint16 = 58 TypeCDS uint16 = 59 + TypeCDNSKEY uint16 = 60 TypeOPENPGPKEY uint16 = 61 TypeSPF uint16 = 99 TypeUINFO uint16 = 100 @@ -159,10 +160,14 @@ const ( _AD = 1 << 5 // authticated data _CD = 1 << 4 // checking disabled -) + LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. + LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. 
-// RFC 1876, Section 2 -const _LOC_EQUATOR = 1 << 31 + LOC_HOURS = 60 * 1000 + LOC_DEGREES = 60 * LOC_HOURS + + LOC_ALTITUDEBASE = 100000 +) // RFC 4398, Section 2.1 const ( @@ -307,7 +312,7 @@ func (rr *MF) copy() RR { return &MF{*rr.Hdr.copyHeader(), rr.Mf} } func (rr *MF) len() int { return rr.Hdr.len() + len(rr.Mf) + 1 } func (rr *MF) String() string { - return rr.Hdr.String() + " " + sprintName(rr.Mf) + return rr.Hdr.String() + sprintName(rr.Mf) } type MD struct { @@ -320,7 +325,7 @@ func (rr *MD) copy() RR { return &MD{*rr.Hdr.copyHeader(), rr.Md} } func (rr *MD) len() int { return rr.Hdr.len() + len(rr.Md) + 1 } func (rr *MD) String() string { - return rr.Hdr.String() + " " + sprintName(rr.Md) + return rr.Hdr.String() + sprintName(rr.Md) } type MX struct { @@ -527,7 +532,17 @@ func appendTXTStringByte(s []byte, b byte) []byte { return append(s, '\\', b) } if b < ' ' || b > '~' { - return append(s, fmt.Sprintf("\\%03d", b)...) + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + return s + } return append(s, b) } @@ -772,51 +787,77 @@ func (rr *LOC) copy() RR { return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} } +// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent +// format and returns a string in m (two decimals for the cm) +func cmToM(m, e uint8) string { + if e < 2 { + if e == 1 { + m *= 10 + } + + return fmt.Sprintf("0.%02d", m) + } + + s := fmt.Sprintf("%d", m) + for e > 2 { + s += "0" + e -= 1 + } + return s +} + +// String returns a string version of a LOC func (rr *LOC) String() string { s := rr.Hdr.String() - // Copied from ldns - // Latitude - lat := rr.Latitude - north := "N" - if lat > _LOC_EQUATOR { - lat = lat - _LOC_EQUATOR - } else { - north = "S" - lat = _LOC_EQUATOR - lat - } - h := lat / (1000 * 60 * 
60) - lat = lat % (1000 * 60 * 60) - m := lat / (1000 * 60) - lat = lat % (1000 * 60) - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float32(lat) / 1000), north) - // Longitude - lon := rr.Longitude - east := "E" - if lon > _LOC_EQUATOR { - lon = lon - _LOC_EQUATOR - } else { - east = "W" - lon = _LOC_EQUATOR - lon - } - h = lon / (1000 * 60 * 60) - lon = lon % (1000 * 60 * 60) - m = lon / (1000 * 60) - lon = lon % (1000 * 60) - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float32(lon) / 1000), east) - s1 := rr.Altitude / 100.00 - s1 -= 100000 - if rr.Altitude%100 == 0 { - s += fmt.Sprintf("%.2fm ", float32(s1)) + lat := rr.Latitude + ns := "N" + if lat > LOC_EQUATOR { + lat = lat - LOC_EQUATOR } else { - s += fmt.Sprintf("%.0fm ", float32(s1)) + ns = "S" + lat = LOC_EQUATOR - lat } - s += cmToString((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " - s += cmToString((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " - s += cmToString((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" + h := lat / LOC_DEGREES + lat = lat % LOC_DEGREES + m := lat / LOC_HOURS + lat = lat % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns) + + lon := rr.Longitude + ew := "E" + if lon > LOC_PRIMEMERIDIAN { + lon = lon - LOC_PRIMEMERIDIAN + } else { + ew = "W" + lon = LOC_PRIMEMERIDIAN - lon + } + h = lon / LOC_DEGREES + lon = lon % LOC_DEGREES + m = lon / LOC_HOURS + lon = lon % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew) + + var alt float64 = float64(rr.Altitude) / 100 + alt -= LOC_ALTITUDEBASE + if rr.Altitude%100 != 0 { + s += fmt.Sprintf("%.2fm ", alt) + } else { + s += fmt.Sprintf("%.0fm ", alt) + } + + s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " + s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " + s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" + return s } +// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931. 
+type SIG struct { + RRSIG +} + type RRSIG struct { Hdr RR_Header TypeCovered uint16 @@ -888,6 +929,14 @@ func (rr *NSEC) len() int { return l } +type DLV struct { + DS +} + +type CDS struct { + DS +} + type DS struct { Hdr RR_Header KeyTag uint16 @@ -909,48 +958,6 @@ func (rr *DS) String() string { " " + strings.ToUpper(rr.Digest) } -type CDS struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest string `dns:"hex"` -} - -func (rr *CDS) Header() *RR_Header { return &rr.Hdr } -func (rr *CDS) len() int { return rr.Hdr.len() + 4 + len(rr.Digest)/2 } -func (rr *CDS) copy() RR { - return &CDS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} - -func (rr *CDS) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - -type DLV struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest string `dns:"hex"` -} - -func (rr *DLV) Header() *RR_Header { return &rr.Hdr } -func (rr *DLV) len() int { return rr.Hdr.len() + 4 + len(rr.Digest)/2 } -func (rr *DLV) copy() RR { - return &DLV{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} - -func (rr *DLV) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - type KX struct { Hdr RR_Header Preference uint16 @@ -999,7 +1006,7 @@ func (rr *TALINK) len() int { return rr.Hdr.len() + len(rr.PreviousNam func (rr *TALINK) String() string { return rr.Hdr.String() + - " " + sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) + sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) } type SSHFP struct { @@ -1022,30 +1029,67 @@ func (rr *SSHFP) String() string { } type IPSECKEY struct { - Hdr RR_Header - Precedence uint8 + Hdr RR_Header + 
Precedence uint8 + // GatewayType: 1: A record, 2: AAAA record, 3: domainname. + // 0 is use for no type and GatewayName should be "." then. GatewayType uint8 Algorithm uint8 - Gateway string `dns:"ipseckey"` + // Gateway can be an A record, AAAA record or a domain name. + GatewayA net.IP `dns:"a"` + GatewayAAAA net.IP `dns:"aaaa"` + GatewayName string `dns:"domain-name"` PublicKey string `dns:"base64"` } func (rr *IPSECKEY) Header() *RR_Header { return &rr.Hdr } func (rr *IPSECKEY) copy() RR { - return &IPSECKEY{*rr.Hdr.copyHeader(), rr.Precedence, rr.GatewayType, rr.Algorithm, rr.Gateway, rr.PublicKey} + return &IPSECKEY{*rr.Hdr.copyHeader(), rr.Precedence, rr.GatewayType, rr.Algorithm, rr.GatewayA, rr.GatewayAAAA, rr.GatewayName, rr.PublicKey} } func (rr *IPSECKEY) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) + + s := rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) + " " + strconv.Itoa(int(rr.GatewayType)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + rr.Gateway + - " " + rr.PublicKey + " " + strconv.Itoa(int(rr.Algorithm)) + switch rr.GatewayType { + case 0: + fallthrough + case 3: + s += " " + rr.GatewayName + case 1: + s += " " + rr.GatewayA.String() + case 2: + s += " " + rr.GatewayAAAA.String() + default: + s += " ." 
+ } + s += " " + rr.PublicKey + return s } func (rr *IPSECKEY) len() int { - return rr.Hdr.len() + 3 + len(rr.Gateway) + 1 + - base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + l := rr.Hdr.len() + 3 + 1 + switch rr.GatewayType { + default: + fallthrough + case 0: + fallthrough + case 3: + l += len(rr.GatewayName) + case 1: + l += 4 + case 2: + l += 16 + } + return l + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) +} + +type KEY struct { + DNSKEY +} + +type CDNSKEY struct { + DNSKEY } type DNSKEY struct { @@ -1221,11 +1265,23 @@ func (rr *RFC3597) copy() RR { return &RFC3597{*rr.Hdr.copyHeader(), r func (rr *RFC3597) len() int { return rr.Hdr.len() + len(rr.Rdata)/2 + 2 } func (rr *RFC3597) String() string { - s := rr.Hdr.String() + // Let's call it a hack + s := rfc3597Header(rr.Hdr) + s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata return s } +func rfc3597Header(h RR_Header) string { + var s string + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" + s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" + return s +} + type URI struct { Hdr RR_Header Priority uint16 @@ -1280,7 +1336,7 @@ func (rr *TLSA) copy() RR { func (rr *TLSA) String() string { return rr.Hdr.String() + - " " + strconv.Itoa(int(rr.Usage)) + + strconv.Itoa(int(rr.Usage)) + " " + strconv.Itoa(int(rr.Selector)) + " " + strconv.Itoa(int(rr.MatchingType)) + " " + rr.Certificate @@ -1305,7 +1361,7 @@ func (rr *HIP) copy() RR { func (rr *HIP) String() string { s := rr.Hdr.String() + - " " + strconv.Itoa(int(rr.PublicKeyAlgorithm)) + + strconv.Itoa(int(rr.PublicKeyAlgorithm)) + " " + rr.Hit + " " + rr.PublicKey for _, d := range rr.RendezvousServers { @@ -1367,6 +1423,7 @@ func (rr *WKS) String() (s string) { if rr.Address != nil { s += rr.Address.String() } + // TODO(miek): missing protocol here, see /etc/protocols for i := 0; i < len(rr.BitMap); i++ { // should lookup the port s += " " + 
strconv.Itoa(int(rr.BitMap[i])) @@ -1578,23 +1635,6 @@ func saltToString(s string) string { return strings.ToUpper(s) } -func cmToString(mantissa, exponent uint8) string { - switch exponent { - case 0, 1: - if exponent == 1 { - mantissa *= 10 - } - return fmt.Sprintf("%.02f", float32(mantissa)) - default: - s := fmt.Sprintf("%d", mantissa) - for i := uint8(0); i < exponent-2; i++ { - s += "0" - } - return s - } - panic("dns: not reached") -} - func euiToString(eui uint64, bits int) (hex string) { switch bits { case 64: @@ -1628,6 +1668,7 @@ var typeToRR = map[uint16]func() RR{ TypeDHCID: func() RR { return new(DHCID) }, TypeDLV: func() RR { return new(DLV) }, TypeDNAME: func() RR { return new(DNAME) }, + TypeKEY: func() RR { return new(KEY) }, TypeDNSKEY: func() RR { return new(DNSKEY) }, TypeDS: func() RR { return new(DS) }, TypeEUI48: func() RR { return new(EUI48) }, @@ -1637,6 +1678,7 @@ var typeToRR = map[uint16]func() RR{ TypeEID: func() RR { return new(EID) }, TypeHINFO: func() RR { return new(HINFO) }, TypeHIP: func() RR { return new(HIP) }, + TypeIPSECKEY: func() RR { return new(IPSECKEY) }, TypeKX: func() RR { return new(KX) }, TypeL32: func() RR { return new(L32) }, TypeL64: func() RR { return new(L64) }, @@ -1665,6 +1707,7 @@ var typeToRR = map[uint16]func() RR{ TypeRKEY: func() RR { return new(RKEY) }, TypeRP: func() RR { return new(RP) }, TypePX: func() RR { return new(PX) }, + TypeSIG: func() RR { return new(SIG) }, TypeRRSIG: func() RR { return new(RRSIG) }, TypeRT: func() RR { return new(RT) }, TypeSOA: func() RR { return new(SOA) }, diff --git a/Godeps/_workspace/src/github.com/miekg/dns/types_test.go b/Godeps/_workspace/src/github.com/miekg/dns/types_test.go new file mode 100644 index 0000000000..118612946b --- /dev/null +++ b/Godeps/_workspace/src/github.com/miekg/dns/types_test.go @@ -0,0 +1,42 @@ +package dns + +import ( + "testing" +) + +func TestCmToM(t *testing.T) { + s := cmToM(0, 0) + if s != "0.00" { + t.Error("0, 0") + } + + s = cmToM(1, 
0) + if s != "0.01" { + t.Error("1, 0") + } + + s = cmToM(3, 1) + if s != "0.30" { + t.Error("3, 1") + } + + s = cmToM(4, 2) + if s != "4" { + t.Error("4, 2") + } + + s = cmToM(5, 3) + if s != "50" { + t.Error("5, 3") + } + + s = cmToM(7, 5) + if s != "7000" { + t.Error("7, 5") + } + + s = cmToM(9, 9) + if s != "90000000" { + t.Error("9, 9") + } +} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/update.go b/Godeps/_workspace/src/github.com/miekg/dns/update.go index 053e42245f..f9558817db 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/update.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/update.go @@ -106,10 +106,7 @@ func (u *Msg) Insert(rr []RR) { func (u *Msg) RemoveRRset(rr []RR) { u.Ns = make([]RR, len(rr)) for i, r := range rr { - u.Ns[i] = r - u.Ns[i].Header().Class = ClassANY - u.Ns[i].Header().Rdlength = 0 - u.Ns[i].Header().Ttl = 0 + u.Ns[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}} } } diff --git a/Godeps/_workspace/src/github.com/miekg/dns/update_test.go b/Godeps/_workspace/src/github.com/miekg/dns/update_test.go new file mode 100644 index 0000000000..bdcaac4d73 --- /dev/null +++ b/Godeps/_workspace/src/github.com/miekg/dns/update_test.go @@ -0,0 +1,89 @@ +package dns + +import ( + "bytes" + "testing" +) + +func TestDynamicUpdateParsing(t *testing.T) { + prefix := "example.com. 
IN " + for _, typ := range TypeToString { + if typ == "CAA" || typ == "OPT" || typ == "AXFR" || typ == "IXFR" || typ == "ANY" || typ == "TKEY" || + typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" { + continue + } + r, e := NewRR(prefix + typ) + if e != nil { + t.Log("failure to parse: " + prefix + typ) + t.Fail() + } else { + t.Logf("parsed: %s", r.String()) + } + } +} + +func TestDynamicUpdateUnpack(t *testing.T) { + // From https://github.com/miekg/dns/issues/150#issuecomment-62296803 + // It should be an update message for the zone "example.", + // deleting the A RRset "example." and then adding an A record at "example.". + // class ANY, TYPE A + buf := []byte{171, 68, 40, 0, 0, 1, 0, 0, 0, 2, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 0, 0, 6, 0, 1, 192, 12, 0, 1, 0, 255, 0, 0, 0, 0, 0, 0, 192, 12, 0, 1, 0, 1, 0, 0, 0, 0, 0, 4, 127, 0, 0, 1} + msg := new(Msg) + err := msg.Unpack(buf) + if err != nil { + t.Log("failed to unpack: " + err.Error() + "\n" + msg.String()) + t.Fail() + } +} + +func TestDynamicUpdateZeroRdataUnpack(t *testing.T) { + m := new(Msg) + rr := &RR_Header{Name: ".", Rrtype: 0, Class: 1, Ttl: ^uint32(0), Rdlength: 0} + m.Answer = []RR{rr, rr, rr, rr, rr} + m.Ns = m.Answer + for n, s := range TypeToString { + rr.Rrtype = n + bytes, err := m.Pack() + if err != nil { + t.Logf("failed to pack %s: %v", s, err) + t.Fail() + continue + } + if err := new(Msg).Unpack(bytes); err != nil { + t.Logf("failed to unpack %s: %v", s, err) + t.Fail() + } + } +} + +func TestRemoveRRset(t *testing.T) { + // Should add a zero data RR in Class ANY with a TTL of 0 + // for each set mentioned in the RRs provided to it. + rr, err := NewRR(". 
100 IN A 127.0.0.1") + if err != nil { + t.Fatalf("Error constructing RR: %v", err) + } + m := new(Msg) + m.Ns = []RR{&RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY, Ttl: 0, Rdlength: 0}} + expectstr := m.String() + expect, err := m.Pack() + if err != nil { + t.Fatalf("Error packing expected msg: %v", err) + } + + m.Ns = nil + m.RemoveRRset([]RR{rr}) + actual, err := m.Pack() + if err != nil { + t.Fatalf("Error packing actual msg: %v", err) + } + if !bytes.Equal(actual, expect) { + tmp := new(Msg) + if err := tmp.Unpack(actual); err != nil { + t.Fatalf("Error unpacking actual msg: %v", err) + } + t.Logf("Expected msg:\n%s", expectstr) + t.Logf("Actual msg:\n%v", tmp) + t.Fail() + } +} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/xfr.go b/Godeps/_workspace/src/github.com/miekg/dns/xfr.go index 57bfb1676f..e9a500a70f 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/xfr.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/xfr.go @@ -13,9 +13,9 @@ type Envelope struct { // A Transfer defines parameters that are used during a zone transfer. type Transfer struct { *Conn - DialTimeout time.Duration // net.DialTimeout (ns), defaults to 2 * 1e9 - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections (ns), defaults to 2 * 1e9 - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections (ns), defaults to 2 * 1e9 + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be fully qualified tsigTimersOnly bool } @@ -160,22 +160,18 @@ func (t *Transfer) inIxfr(id uint16, c chan *Envelope) { // The server is responsible for sending the correct sequence of RRs through the // channel ch. 
func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { - r := new(Msg) - // Compress? - r.SetReply(q) - r.Authoritative = true - - go func() { - for x := range ch { - // assume it fits TODO(miek): fix - r.Answer = append(r.Answer, x.RR...) - if err := w.WriteMsg(r); err != nil { - return - } + for x := range ch { + r := new(Msg) + // Compress? + r.SetReply(q) + r.Authoritative = true + // assume it fits TODO(miek): fix + r.Answer = append(r.Answer, x.RR...) + if err := w.WriteMsg(r); err != nil { + return err } - w.TsigTimersOnly(true) - r.Answer = nil - }() + } + w.TsigTimersOnly(true) return nil } diff --git a/Godeps/_workspace/src/github.com/miekg/dns/xfr_test.go b/Godeps/_workspace/src/github.com/miekg/dns/xfr_test.go new file mode 100644 index 0000000000..4e3d1b6c02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/miekg/dns/xfr_test.go @@ -0,0 +1,99 @@ +package dns + +import ( + "net" + "testing" + "time" +) + +func getIP(s string) string { + a, e := net.LookupAddr(s) + if e != nil { + return "" + } + return a[0] +} + +// flaky, need to setup local server and test from +// that. +func testClientAXFR(t *testing.T) { + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("miek.nl.") + + server := getIP("linode.atoom.net") + + tr := new(Transfer) + + if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { + t.Log("failed to setup axfr: " + err.Error()) + t.Fatal() + } else { + for ex := range a { + if ex.Error != nil { + t.Logf("error %s\n", ex.Error.Error()) + t.Fail() + break + } + for _, rr := range ex.RR { + t.Logf("%s\n", rr.String()) + } + } + } +} + +// fails. 
+func testClientAXFRMultipleEnvelopes(t *testing.T) { + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("nlnetlabs.nl.") + + server := getIP("open.nlnetlabs.nl.") + + tr := new(Transfer) + if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { + t.Log("Failed to setup axfr" + err.Error() + "for server: " + server) + t.Fail() + return + } else { + for ex := range a { + if ex.Error != nil { + t.Logf("Error %s\n", ex.Error.Error()) + t.Fail() + break + } + } + } +} + +func testClientTsigAXFR(t *testing.T) { + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("example.nl.") + m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix()) + + tr := new(Transfer) + tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + + if a, err := tr.In(m, "176.58.119.54:53"); err != nil { + t.Log("failed to setup axfr: " + err.Error()) + t.Fatal() + } else { + for ex := range a { + if ex.Error != nil { + t.Logf("error %s\n", ex.Error.Error()) + t.Fail() + break + } + for _, rr := range ex.RR { + t.Logf("%s\n", rr.String()) + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/miekg/dns/zscan.go b/Godeps/_workspace/src/github.com/miekg/dns/zscan.go index 41a328e42d..0fc2a29229 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/zscan.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/zscan.go @@ -102,12 +102,13 @@ type Token struct { Comment string // a potential comment positioned after the RR and on the same line } -// NewRR reads the RR contained in the string s. Only the first RR is returned. -// The class defaults to IN and TTL defaults to 3600. The full zone file -// syntax like $TTL, $ORIGIN, etc. is supported. -// All fields of the returned RR are set, except RR.Header().Rdlength which is set to 0. +// NewRR reads the RR contained in the string s. Only the first RR is +// returned. If s contains no RR, return nil with no error. The class +// defaults to IN and TTL defaults to 3600. 
The full zone file syntax +// like $TTL, $ORIGIN, etc. is supported. All fields of the returned +// RR are set, except RR.Header().Rdlength which is set to 0. func NewRR(s string) (RR, error) { - if s[len(s)-1] != '\n' { // We need a closing newline + if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline return ReadRR(strings.NewReader(s+"\n"), "") } return ReadRR(strings.NewReader(s), "") @@ -117,6 +118,10 @@ func NewRR(s string) (RR, error) { // See NewRR for more documentation. func ReadRR(q io.Reader, filename string) (RR, error) { r := <-parseZoneHelper(q, ".", filename, 1) + if r == nil { + return nil, nil + } + if r.Error != nil { return nil, r.Error } @@ -899,9 +904,9 @@ func appendOrigin(name, origin string) string { func locCheckNorth(token string, latitude uint32) (uint32, bool) { switch token { case "n", "N": - return _LOC_EQUATOR + latitude, true + return LOC_EQUATOR + latitude, true case "s", "S": - return _LOC_EQUATOR - latitude, true + return LOC_EQUATOR - latitude, true } return latitude, false } @@ -910,9 +915,9 @@ func locCheckNorth(token string, latitude uint32) (uint32, bool) { func locCheckEast(token string, longitude uint32) (uint32, bool) { switch token { case "e", "E": - return _LOC_EQUATOR + longitude, true + return LOC_EQUATOR + longitude, true case "w", "W": - return _LOC_EQUATOR - longitude, true + return LOC_EQUATOR - longitude, true } return longitude, false } diff --git a/Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go b/Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go index 957b9e15ab..ed796989e5 100644 --- a/Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go +++ b/Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go @@ -1065,6 +1065,14 @@ func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, strin return rr, nil, c1 } +func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setRRSIG(h, c, o, f) + if r != nil { + return &SIG{*r.(*RRSIG)}, e, s + 
} + return nil, e, s +} + func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(RRSIG) rr.Hdr = h @@ -1452,7 +1460,7 @@ func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { rr := new(DNSKEY) rr.Hdr = h @@ -1461,25 +1469,25 @@ func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, l.comment } if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad DNSKEY Flags", l}, "" + return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" } else { rr.Flags = uint16(i) } <-c // _BLANK l = <-c // _STRING if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad DNSKEY Protocol", l}, "" + return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" } else { rr.Protocol = uint8(i) } <-c // _BLANK l = <-c // _STRING if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad DNSKEY Algorithm", l}, "" + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" } else { rr.Algorithm = uint8(i) } - s, e, c1 := endingToString(c, "bad DNSKEY PublicKey", f) + s, e, c1 := endingToString(c, "bad "+typ+" PublicKey", f) if e != nil { return nil, e, c1 } @@ -1487,6 +1495,27 @@ func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } +func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "KEY") + if r != nil { + return &KEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") + return r, e, s +} + +func setCDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, 
f, "CDNSKEY") + if r != nil { + return &CDNSKEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(RKEY) rr.Hdr = h @@ -1522,44 +1551,6 @@ func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(DS) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad DS KeyTag", l}, "" - } else { - rr.KeyTag = uint16(i) - } - <-c // _BLANK - l = <-c - if i, e := strconv.Atoi(l.token); e != nil { - if i, ok := StringToAlgorithm[l.tokenUpper]; !ok { - return nil, &ParseError{f, "bad DS Algorithm", l}, "" - } else { - rr.Algorithm = i - } - } else { - rr.Algorithm = uint8(i) - } - <-c // _BLANK - l = <-c - if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad DS DigestType", l}, "" - } else { - rr.DigestType = uint8(i) - } - s, e, c1 := endingToString(c, "bad DS Digest", f) - if e != nil { - return nil, e, c1 - } - rr.Digest = s - return rr, nil, c1 -} - func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr := new(EID) rr.Hdr = h @@ -1632,15 +1623,15 @@ func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(CDS) +func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DS) rr.Hdr = h l := <-c if l.length == 0 { return rr, nil, l.comment } if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad CDS KeyTag", l}, "" + return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" } else { rr.KeyTag = uint16(i) } @@ -1648,7 +1639,7 @@ func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { l = <-c if i, e 
:= strconv.Atoi(l.token); e != nil { if i, ok := StringToAlgorithm[l.tokenUpper]; !ok { - return nil, &ParseError{f, "bad CDS Algorithm", l}, "" + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" } else { rr.Algorithm = i } @@ -1658,11 +1649,11 @@ func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { <-c // _BLANK l = <-c if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad CDS DigestType", l}, "" + return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" } else { rr.DigestType = uint8(i) } - s, e, c1 := endingToString(c, "bad CDS Digest", f) + s, e, c1 := endingToString(c, "bad "+typ+" Digest", f) if e != nil { return nil, e, c1 } @@ -1670,42 +1661,25 @@ func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } +func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DS") + return r, e, s +} + func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(DLV) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment + r, e, s := setDSs(h, c, o, f, "DLV") + if r != nil { + return &DLV{*r.(*DS)}, e, s } - if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad DLV KeyTag", l}, "" - } else { - rr.KeyTag = uint16(i) + return nil, e, s +} + +func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DLV") + if r != nil { + return &CDS{*r.(*DS)}, e, s } - <-c // _BLANK - l = <-c - if i, e := strconv.Atoi(l.token); e != nil { - if i, ok := StringToAlgorithm[l.tokenUpper]; !ok { - return nil, &ParseError{f, "bad DLV Algorithm", l}, "" - } else { - rr.Algorithm = i - } - } else { - rr.Algorithm = uint8(i) - } - <-c // _BLANK - l = <-c - if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad DLV DigestType", l}, "" - } else { - rr.DigestType = uint8(i) - } - s, e, c1 := 
endingToString(c, "bad DLV Digest", f) - if e != nil { - return nil, e, c1 - } - rr.Digest = s - return rr, nil, c1 + return nil, e, s } func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { @@ -1873,44 +1847,6 @@ func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setIPSECKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(IPSECKEY) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad IPSECKEY Precedence", l}, "" - } else { - rr.Precedence = uint8(i) - } - <-c // _BLANK - l = <-c - if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, "" - } else { - rr.GatewayType = uint8(i) - } - <-c // _BLANK - l = <-c - if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad IPSECKEY Algorithm", l}, "" - } else { - rr.Algorithm = uint8(i) - } - <-c - l = <-c - rr.Gateway = l.token - s, e, c1 := endingToString(c, "bad IPSECKEY PublicKey", f) - if e != nil { - return nil, e, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { // awesome record to parse! 
rr := new(DHCID) @@ -2113,16 +2049,85 @@ func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } +func setIPSECKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(IPSECKEY) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + if i, err := strconv.Atoi(l.token); err != nil { + return nil, &ParseError{f, "bad IPSECKEY Precedence", l}, "" + } else { + rr.Precedence = uint8(i) + } + <-c // _BLANK + l = <-c + if i, err := strconv.Atoi(l.token); err != nil { + return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, "" + } else { + rr.GatewayType = uint8(i) + } + <-c // _BLANK + l = <-c + if i, err := strconv.Atoi(l.token); err != nil { + return nil, &ParseError{f, "bad IPSECKEY Algorithm", l}, "" + } else { + rr.Algorithm = uint8(i) + } + + // Now according to GatewayType we can have different elements here + <-c // _BLANK + l = <-c + switch rr.GatewayType { + case 0: + fallthrough + case 3: + rr.GatewayName = l.token + if l.token == "@" { + rr.GatewayName = o + } + _, ok := IsDomainName(l.token) + if !ok { + return nil, &ParseError{f, "bad IPSECKEY GatewayName", l}, "" + } + if rr.GatewayName[l.length-1] != '.' 
{ + rr.GatewayName = appendOrigin(rr.GatewayName, o) + } + case 1: + rr.GatewayA = net.ParseIP(l.token) + if rr.GatewayA == nil { + return nil, &ParseError{f, "bad IPSECKEY GatewayA", l}, "" + } + case 2: + rr.GatewayAAAA = net.ParseIP(l.token) + if rr.GatewayAAAA == nil { + return nil, &ParseError{f, "bad IPSECKEY GatewayAAAA", l}, "" + } + default: + return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, "" + } + + s, e, c1 := endingToString(c, "bad IPSECKEY PublicKey", f) + if e != nil { + return nil, e, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + var typeToparserFunc = map[uint16]parserFunc{ TypeAAAA: parserFunc{setAAAA, false}, TypeAFSDB: parserFunc{setAFSDB, false}, TypeA: parserFunc{setA, false}, TypeCDS: parserFunc{setCDS, true}, + TypeCDNSKEY: parserFunc{setCDNSKEY, true}, TypeCERT: parserFunc{setCERT, true}, TypeCNAME: parserFunc{setCNAME, false}, TypeDHCID: parserFunc{setDHCID, true}, TypeDLV: parserFunc{setDLV, true}, TypeDNAME: parserFunc{setDNAME, false}, + TypeKEY: parserFunc{setKEY, true}, TypeDNSKEY: parserFunc{setDNSKEY, true}, TypeDS: parserFunc{setDS, true}, TypeEID: parserFunc{setEID, true}, @@ -2158,6 +2163,7 @@ var typeToparserFunc = map[uint16]parserFunc{ TypeOPENPGPKEY: parserFunc{setOPENPGPKEY, true}, TypePTR: parserFunc{setPTR, false}, TypePX: parserFunc{setPX, false}, + TypeSIG: parserFunc{setSIG, true}, TypeRKEY: parserFunc{setRKEY, true}, TypeRP: parserFunc{setRP, false}, TypeRRSIG: parserFunc{setRRSIG, true}, diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/MANIFEST b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/MANIFEST new file mode 100644 index 0000000000..71bfe39aa9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/MANIFEST @@ -0,0 +1 @@ +Imported at 75cd24fc2f2c from https://bitbucket.org/ww/goautoneg. 
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/Makefile b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/Makefile new file mode 100644 index 0000000000..e33ee17303 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/Makefile @@ -0,0 +1,13 @@ +include $(GOROOT)/src/Make.inc + +TARG=bitbucket.org/ww/goautoneg +GOFILES=autoneg.go + +include $(GOROOT)/src/Make.pkg + +format: + gofmt -w *.go + +docs: + gomake clean + godoc ${TARG} > README.txt diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/README.txt b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/README.txt new file mode 100644 index 0000000000..7723656d58 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/autoneg.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/autoneg.go new file mode 100644 index 0000000000..648b38cb65 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. 
+func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/autoneg_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/autoneg_test.go new file mode 100644 index 0000000000..41d328f1d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/goautoneg/autoneg_test.go @@ -0,0 +1,33 @@ +package goautoneg + +import ( + "testing" +) + +var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5" + +func TestParseAccept(t *testing.T) { + alternatives := []string{"text/html", "image/png"} + content_type := Negotiate(chrome, alternatives) + if content_type != "image/png" { + t.Errorf("got %s expected image/png", content_type) + } + + alternatives = []string{"text/html", "text/plain", "text/n3"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/html" { + t.Errorf("got %s expected text/html", content_type) + } + + alternatives = []string{"text/n3", "text/plain"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/plain" { + t.Errorf("got %s expected text/plain", content_type) + } + + alternatives = []string{"text/n3", "application/rdf+xml"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/n3" { + t.Errorf("got %s expected text/n3", content_type) + } +} 
diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/bench_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/bench_test.go new file mode 100644 index 0000000000..0bd0e4e775 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/bench_test.go @@ -0,0 +1,63 @@ +package quantile + +import ( + "testing" +) + +func BenchmarkInsertTargeted(b *testing.B) { + b.ReportAllocs() + + s := NewTargeted(Targets) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) { + s := NewTargeted(TargetsSmallEpsilon) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertBiased(b *testing.B) { + s := NewLowBiased(0.01) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) { + s := NewLowBiased(0.0001) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkQuery(b *testing.B) { + s := NewTargeted(Targets) + for i := float64(0); i < 1e6; i++ { + s.Insert(i) + } + b.ResetTimer() + n := float64(b.N) + for i := float64(0); i < n; i++ { + s.Query(i / n) + } +} + +func BenchmarkQuerySmallEpsilon(b *testing.B) { + s := NewTargeted(TargetsSmallEpsilon) + for i := float64(0); i < 1e6; i++ { + s.Insert(i) + } + b.ResetTimer() + n := float64(b.N) + for i := float64(0); i < n; i++ { + s.Query(i / n) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/example_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/example_test.go new file mode 100644 index 0000000000..43121e2630 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/example_test.go @@ -0,0 +1,112 @@ +// +build 
go1.1 + +package quantile_test + +import ( + "bufio" + "fmt" + "github.com/bmizerany/perks/quantile" + "log" + "os" + "strconv" + "time" +) + +func Example_simple() { + ch := make(chan float64) + go sendFloats(ch) + + // Compute the 50th, 90th, and 99th percentile. + q := quantile.NewTargeted(0.50, 0.90, 0.99) + for v := range ch { + q.Insert(v) + } + + fmt.Println("perc50:", q.Query(0.50)) + fmt.Println("perc90:", q.Query(0.90)) + fmt.Println("perc99:", q.Query(0.99)) + fmt.Println("count:", q.Count()) + // Output: + // perc50: 5 + // perc90: 14 + // perc99: 40 + // count: 2388 +} + +func Example_mergeMultipleStreams() { + // Scenario: + // We have multiple database shards. On each shard, there is a process + // collecting query response times from the database logs and inserting + // them into a Stream (created via NewTargeted(0.90)), much like the + // Simple example. These processes expose a network interface for us to + // ask them to serialize and send us the results of their + // Stream.Samples so we may Merge and Query them. + // + // NOTES: + // * These sample sets are small, allowing us to get them + // across the network much faster than sending the entire list of data + // points. + // + // * For this to work correctly, we must supply the same quantiles + // a priori the process collecting the samples supplied to NewTargeted, + // even if we do not plan to query them all here. + ch := make(chan quantile.Samples) + getDBQuerySamples(ch) + q := quantile.NewTargeted(0.90) + for samples := range ch { + q.Merge(samples) + } + fmt.Println("perc90:", q.Query(0.90)) +} + +func Example_window() { + // Scenario: We want the 90th, 95th, and 99th percentiles for each + // minute. 
+ + ch := make(chan float64) + go sendStreamValues(ch) + + tick := time.NewTicker(1 * time.Minute) + q := quantile.NewTargeted(0.90, 0.95, 0.99) + for { + select { + case t := <-tick.C: + flushToDB(t, q.Samples()) + q.Reset() + case v := <-ch: + q.Insert(v) + } + } +} + +func sendStreamValues(ch chan float64) { + // Use your imagination +} + +func flushToDB(t time.Time, samples quantile.Samples) { + // Use your imagination +} + +// This is a stub for the above example. In reality this would hit the remote +// servers via http or something like it. +func getDBQuerySamples(ch chan quantile.Samples) {} + +func sendFloats(ch chan<- float64) { + f, err := os.Open("exampledata.txt") + if err != nil { + log.Fatal(err) + } + sc := bufio.NewScanner(f) + for sc.Scan() { + b := sc.Bytes() + v, err := strconv.ParseFloat(string(b), 64) + if err != nil { + log.Fatal(err) + } + ch <- v + } + if sc.Err() != nil { + log.Fatal(sc.Err()) + } + close(ch) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/exampledata.txt b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/exampledata.txt new file mode 100644 index 0000000000..1602287d7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 
+3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 
+10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 
+14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 
+11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/stream.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/stream.go new file mode 100644 index 0000000000..587b1fc5ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/stream.go @@ -0,0 +1,292 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. 
+// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. 
+func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targets map[float64]float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for quantile, epsilon := range targets { + if quantile*s.n <= r { + f = (2 * epsilon * r) / quantile + } else { + f = (2 * epsilon * (s.n - r)) / (1 - quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. 
+ l := len(s.b) + if l == 0 { + return 0 + } + i := int(float64(l) * q) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. 
+ s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. 
+ copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/stream_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/stream_test.go new file mode 100644 index 0000000000..707b871508 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/_vendor/perks/quantile/stream_test.go @@ -0,0 +1,185 @@ +package quantile + +import ( + "math" + "math/rand" + "sort" + "testing" +) + +var ( + Targets = map[float64]float64{ + 0.01: 0.001, + 0.10: 0.01, + 0.50: 0.05, + 0.90: 0.01, + 0.99: 0.001, + } + TargetsSmallEpsilon = map[float64]float64{ + 0.01: 0.0001, + 0.10: 0.001, + 0.50: 0.005, + 0.90: 0.001, + 0.99: 0.0001, + } + LowQuantiles = []float64{0.01, 0.1, 0.5} + HighQuantiles = []float64{0.99, 0.9, 0.5} +) + +const RelativeEpsilon = 0.01 + +func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for quantile, epsilon := range Targets { + n := float64(len(a)) + k := int(quantile * n) + lower := int((quantile - epsilon) * n) + if lower < 1 { + lower = 1 + } + upper := int(math.Ceil((quantile + epsilon) * n)) + if upper > len(a) { + upper = len(a) + } + w, min, max := a[k-1], a[lower-1], a[upper-1] + if g := s.Query(quantile); g < min || g > max { + t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g) + } + } +} + +func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for _, qu := range LowQuantiles { + n := float64(len(a)) + k := int(qu * n) + + lowerRank := int((1 - RelativeEpsilon) * qu * n) + upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n)) + w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] + if g := s.Query(qu); g < min || g > max { 
+ t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) + } + } +} + +func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for _, qu := range HighQuantiles { + n := float64(len(a)) + k := int(qu * n) + + lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n) + upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n)) + w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] + if g := s.Query(qu); g < min || g > max { + t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) + } + } +} + +func populateStream(s *Stream) []float64 { + a := make([]float64, 0, 1e5+100) + for i := 0; i < cap(a); i++ { + v := rand.NormFloat64() + // Add 5% asymmetric outliers. + if i%20 == 0 { + v = v*v + 1 + } + s.Insert(v) + a = append(a, v) + } + return a +} + +func TestTargetedQuery(t *testing.T) { + rand.Seed(42) + s := NewTargeted(Targets) + a := populateStream(s) + verifyPercsWithAbsoluteEpsilon(t, a, s) +} + +func TestLowBiasedQuery(t *testing.T) { + rand.Seed(42) + s := NewLowBiased(RelativeEpsilon) + a := populateStream(s) + verifyLowPercsWithRelativeEpsilon(t, a, s) +} + +func TestHighBiasedQuery(t *testing.T) { + rand.Seed(42) + s := NewHighBiased(RelativeEpsilon) + a := populateStream(s) + verifyHighPercsWithRelativeEpsilon(t, a, s) +} + +func TestTargetedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewTargeted(Targets) + s2 := NewTargeted(Targets) + a := populateStream(s1) + a = append(a, populateStream(s2)...) + s1.Merge(s2.Samples()) + verifyPercsWithAbsoluteEpsilon(t, a, s1) +} + +func TestLowBiasedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewLowBiased(RelativeEpsilon) + s2 := NewLowBiased(RelativeEpsilon) + a := populateStream(s1) + a = append(a, populateStream(s2)...) 
+ s1.Merge(s2.Samples()) + verifyLowPercsWithRelativeEpsilon(t, a, s2) +} + +func TestHighBiasedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewHighBiased(RelativeEpsilon) + s2 := NewHighBiased(RelativeEpsilon) + a := populateStream(s1) + a = append(a, populateStream(s2)...) + s1.Merge(s2.Samples()) + verifyHighPercsWithRelativeEpsilon(t, a, s2) +} + +func TestUncompressed(t *testing.T) { + q := NewTargeted(Targets) + for i := 100; i > 0; i-- { + q.Insert(float64(i)) + } + if g := q.Count(); g != 100 { + t.Errorf("want count 100, got %d", g) + } + // Before compression, Query should have 100% accuracy. + for quantile := range Targets { + w := quantile * 100 + if g := q.Query(quantile); g != w { + t.Errorf("want %f, got %f", w, g) + } + } +} + +func TestUncompressedSamples(t *testing.T) { + q := NewTargeted(map[float64]float64{0.99: 0.001}) + for i := 1; i <= 100; i++ { + q.Insert(float64(i)) + } + if g := q.Samples().Len(); g != 100 { + t.Errorf("want count 100, got %d", g) + } +} + +func TestUncompressedOne(t *testing.T) { + q := NewTargeted(map[float64]float64{0.99: 0.01}) + q.Insert(3.14) + if g := q.Query(0.90); g != 3.14 { + t.Error("want PI, got", g) + } +} + +func TestDefaults(t *testing.T) { + if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 { + t.Errorf("want 0, got %f", g) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator.go new file mode 100644 index 0000000000..a353c6378c --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator.go @@ -0,0 +1,74 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package extraction + +import ( + "errors" + "fmt" + "mime" + "net/http" +) + +// ProcessorForRequestHeader interprets a HTTP request header to determine +// what Processor should be used for the given input. If no acceptable +// Processor can be found, an error is returned. +func ProcessorForRequestHeader(header http.Header) (Processor, error) { + if header == nil { + return nil, errors.New("received illegal and nil header") + } + + mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type")) + if err != nil { + return nil, fmt.Errorf("invalid Content-Type header %q: %s", header.Get("Content-Type"), err) + } + switch mediatype { + case "application/vnd.google.protobuf": + if params["proto"] != "io.prometheus.client.MetricFamily" { + return nil, fmt.Errorf("unrecognized protocol message %s", params["proto"]) + } + if params["encoding"] != "delimited" { + return nil, fmt.Errorf("unsupported encoding %s", params["encoding"]) + } + return MetricFamilyProcessor, nil + case "text/plain": + switch params["version"] { + case "0.0.4": + return Processor004, nil + case "": + // Fallback: most recent version. 
+ return Processor004, nil + default: + return nil, fmt.Errorf("unrecognized API version %s", params["version"]) + } + case "application/json": + var prometheusAPIVersion string + + if params["schema"] == "prometheus/telemetry" && params["version"] != "" { + prometheusAPIVersion = params["version"] + } else { + prometheusAPIVersion = header.Get("X-Prometheus-API-Version") + } + + switch prometheusAPIVersion { + case "0.0.2": + return Processor002, nil + case "0.0.1": + return Processor001, nil + default: + return nil, fmt.Errorf("unrecognized API version %s", prometheusAPIVersion) + } + default: + return nil, fmt.Errorf("unsupported media type %q, expected %q", mediatype, "application/json") + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator_test.go new file mode 100644 index 0000000000..4f08248d64 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/discriminator_test.go @@ -0,0 +1,126 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package extraction + +import ( + "errors" + "net/http" + "testing" +) + +func testDiscriminatorHTTPHeader(t testing.TB) { + var scenarios = []struct { + input map[string]string + output Processor + err error + }{ + { + output: nil, + err: errors.New("received illegal and nil header"), + }, + { + input: map[string]string{"Content-Type": "application/json", "X-Prometheus-API-Version": "0.0.0"}, + output: nil, + err: errors.New("unrecognized API version 0.0.0"), + }, + { + input: map[string]string{"Content-Type": "application/json", "X-Prometheus-API-Version": "0.0.1"}, + output: Processor001, + err: nil, + }, + { + input: map[string]string{"Content-Type": `application/json; schema="prometheus/telemetry"; version=0.0.0`}, + output: nil, + err: errors.New("unrecognized API version 0.0.0"), + }, + { + input: map[string]string{"Content-Type": `application/json; schema="prometheus/telemetry"; version=0.0.1`}, + output: Processor001, + err: nil, + }, + { + input: map[string]string{"Content-Type": `application/json; schema="prometheus/telemetry"; version=0.0.2`}, + output: Processor002, + err: nil, + }, + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`}, + output: MetricFamilyProcessor, + err: nil, + }, + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`}, + output: nil, + err: errors.New("unrecognized protocol message illegal"), + }, + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`}, + output: nil, + err: errors.New("unsupported encoding illegal"), + }, + { + input: map[string]string{"Content-Type": `text/plain; version=0.0.4`}, + output: Processor004, + err: nil, + }, + { + input: map[string]string{"Content-Type": `text/plain`}, + output: Processor004, + err: nil, + }, + { + input: map[string]string{"Content-Type": 
`text/plain; version=0.0.3`}, + output: nil, + err: errors.New("unrecognized API version 0.0.3"), + }, + } + + for i, scenario := range scenarios { + var header http.Header + + if len(scenario.input) > 0 { + header = http.Header{} + } + + for key, value := range scenario.input { + header.Add(key, value) + } + + actual, err := ProcessorForRequestHeader(header) + + if scenario.err != err { + if scenario.err != nil && err != nil { + if scenario.err.Error() != err.Error() { + t.Errorf("%d. expected %s, got %s", i, scenario.err, err) + } + } else if scenario.err != nil || err != nil { + t.Errorf("%d. expected %s, got %s", i, scenario.err, err) + } + } + + if scenario.output != actual { + t.Errorf("%d. expected %s, got %s", i, scenario.output, actual) + } + } +} + +func TestDiscriminatorHTTPHeader(t *testing.T) { + testDiscriminatorHTTPHeader(t) +} + +func BenchmarkDiscriminatorHTTPHeader(b *testing.B) { + for i := 0; i < b.N; i++ { + testDiscriminatorHTTPHeader(b) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/extraction.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/extraction.go new file mode 100644 index 0000000000..31cdafad4b --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/extraction.go @@ -0,0 +1,15 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package extraction decodes Prometheus clients' data streams for consumers. +package extraction diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/empty.json b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/empty.json new file mode 100644 index 0000000000..e69de29bb2 diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2-large.json b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2-large.json new file mode 100644 index 0000000000..7168338c8b --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2-large.json @@ -0,0 +1,1032 @@ +[ + { + "baseLabels": { + "__name__": "rpc_calls_total_0", + "job": "batch_job" + }, + "docstring": "Total count of RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": 25 + }, + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "foo" + }, + "value": 25 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds_0" + }, + "docstring": "RPC latency summary.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": { + "0.010000": 15.890724674774395, + "0.050000": 15.890724674774395, + "0.500000": 84.63044031436561, + "0.900000": 160.21100853053224, + "0.990000": 172.49828748957728 + } + }, + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": { + "0.010000": 0.0459814091918713, + "0.050000": 0.0459814091918713, + "0.500000": 0.6120456642749681, + "0.900000": 1.355915069887731, + "0.990000": 1.772733213161236 + } + }, + { + "labels": { + "foo": "bar", + "service": "bar" + }, + "value": { + "0.010000": 
78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_calls_total_1", + "job": "batch_job" + }, + "docstring": "Total count of RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": 25 + }, + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "foo" + }, + "value": 25 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds_1" + }, + "docstring": "RPC latency summary.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": { + "0.010000": 15.890724674774395, + "0.050000": 15.890724674774395, + "0.500000": 84.63044031436561, + "0.900000": 160.21100853053224, + "0.990000": 172.49828748957728 + } + }, + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": { + "0.010000": 0.0459814091918713, + "0.050000": 0.0459814091918713, + "0.500000": 0.6120456642749681, + "0.900000": 1.355915069887731, + "0.990000": 1.772733213161236 + } + }, + { + "labels": { + "foo": "bar", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 
109.89202084295582, + "0.990000": 109.99626121011262 + } + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_calls_total_2", + "job": "batch_job" + }, + "docstring": "Total count of RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": 25 + }, + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "foo" + }, + "value": 25 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds_2" + }, + "docstring": "RPC latency summary.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": { + "0.010000": 15.890724674774395, + "0.050000": 15.890724674774395, + "0.500000": 84.63044031436561, + "0.900000": 160.21100853053224, + "0.990000": 172.49828748957728 + } + }, + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": { + "0.010000": 0.0459814091918713, + "0.050000": 0.0459814091918713, + "0.500000": 0.6120456642749681, + "0.900000": 1.355915069887731, + "0.990000": 1.772733213161236 + } + }, + { + "labels": { + "foo": "bar", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_calls_total_3", + "job": "batch_job" + }, + "docstring": "Total count of RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + 
"foo": "baz", + "service": "bar" + }, + "value": 25 + }, + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "foo" + }, + "value": 25 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds_3" + }, + "docstring": "RPC latency summary.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": { + "0.010000": 15.890724674774395, + "0.050000": 15.890724674774395, + "0.500000": 84.63044031436561, + "0.900000": 160.21100853053224, + "0.990000": 172.49828748957728 + } + }, + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": { + "0.010000": 0.0459814091918713, + "0.050000": 0.0459814091918713, + "0.500000": 0.6120456642749681, + "0.900000": 1.355915069887731, + "0.990000": 1.772733213161236 + } + }, + { + "labels": { + "foo": "bar", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_calls_total_4", + "job": "batch_job" + }, + "docstring": "Total count of RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": 25 + }, + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "foo" + }, + "value": 25 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds_4" + }, + "docstring": "RPC latency summary.", + "metric": { 
+ "type": "histogram", + "value": [ + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": { + "0.010000": 15.890724674774395, + "0.050000": 15.890724674774395, + "0.500000": 84.63044031436561, + "0.900000": 160.21100853053224, + "0.990000": 172.49828748957728 + } + }, + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": { + "0.010000": 0.0459814091918713, + "0.050000": 0.0459814091918713, + "0.500000": 0.6120456642749681, + "0.900000": 1.355915069887731, + "0.990000": 1.772733213161236 + } + }, + { + "labels": { + "foo": "bar", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_calls_total_5", + "job": "batch_job" + }, + "docstring": "Total count of RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": 25 + }, + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "foo" + }, + "value": 25 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds_5" + }, + "docstring": "RPC latency summary.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": { + "0.010000": 15.890724674774395, + "0.050000": 15.890724674774395, + "0.500000": 84.63044031436561, + "0.900000": 160.21100853053224, + "0.990000": 172.49828748957728 + } + }, + { + "labels": { + "foo": "bar", + "service": "zed" + }, + 
"value": { + "0.010000": 0.0459814091918713, + "0.050000": 0.0459814091918713, + "0.500000": 0.6120456642749681, + "0.900000": 1.355915069887731, + "0.990000": 1.772733213161236 + } + }, + { + "labels": { + "foo": "bar", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_calls_total_6", + "job": "batch_job" + }, + "docstring": "Total count of RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": 25 + }, + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "foo" + }, + "value": 25 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds_6" + }, + "docstring": "RPC latency summary.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": { + "0.010000": 15.890724674774395, + "0.050000": 15.890724674774395, + "0.500000": 84.63044031436561, + "0.900000": 160.21100853053224, + "0.990000": 172.49828748957728 + } + }, + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": { + "0.010000": 0.0459814091918713, + "0.050000": 0.0459814091918713, + "0.500000": 0.6120456642749681, + "0.900000": 1.355915069887731, + "0.990000": 1.772733213161236 + } + }, + { + "labels": { + "foo": "bar", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 
97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_calls_total_7", + "job": "batch_job" + }, + "docstring": "Total count of RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": 25 + }, + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "foo" + }, + "value": 25 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds_7" + }, + "docstring": "RPC latency summary.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": { + "0.010000": 15.890724674774395, + "0.050000": 15.890724674774395, + "0.500000": 84.63044031436561, + "0.900000": 160.21100853053224, + "0.990000": 172.49828748957728 + } + }, + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": { + "0.010000": 0.0459814091918713, + "0.050000": 0.0459814091918713, + "0.500000": 0.6120456642749681, + "0.900000": 1.355915069887731, + "0.990000": 1.772733213161236 + } + }, + { + "labels": { + "foo": "bar", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + } + ] + 
} + }, + { + "baseLabels": { + "__name__": "rpc_calls_total_8", + "job": "batch_job" + }, + "docstring": "Total count of RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": 25 + }, + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "foo" + }, + "value": 25 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds_8" + }, + "docstring": "RPC latency summary.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": { + "0.010000": 15.890724674774395, + "0.050000": 15.890724674774395, + "0.500000": 84.63044031436561, + "0.900000": 160.21100853053224, + "0.990000": 172.49828748957728 + } + }, + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": { + "0.010000": 0.0459814091918713, + "0.050000": 0.0459814091918713, + "0.500000": 0.6120456642749681, + "0.900000": 1.355915069887731, + "0.990000": 1.772733213161236 + } + }, + { + "labels": { + "foo": "bar", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_calls_total_9", + "job": "batch_job" + }, + "docstring": "Total count of RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": 25 + }, + { + 
"labels": { + "foo": "bar", + "service": "foo" + }, + "value": 25 + }, + { + "labels": { + "foo": "baz", + "service": "foo" + }, + "value": 25 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds_9" + }, + "docstring": "RPC latency summary.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "foo": "bar", + "service": "foo" + }, + "value": { + "0.010000": 15.890724674774395, + "0.050000": 15.890724674774395, + "0.500000": 84.63044031436561, + "0.900000": 160.21100853053224, + "0.990000": 172.49828748957728 + } + }, + { + "labels": { + "foo": "bar", + "service": "zed" + }, + "value": { + "0.010000": 0.0459814091918713, + "0.050000": 0.0459814091918713, + "0.500000": 0.6120456642749681, + "0.900000": 1.355915069887731, + "0.990000": 1.772733213161236 + } + }, + { + "labels": { + "foo": "bar", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + }, + { + "labels": { + "foo": "baz", + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + } + ] + } + } +] diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2.json b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2.json new file mode 100644 index 0000000000..1ac5be7d7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/fixtures/test0_0_1-0_0_2.json @@ -0,0 +1,79 @@ +[ + { + "baseLabels": { + "__name__": "rpc_calls_total", + "job": "batch_job" + }, + "docstring": "RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "service": "bar" + }, + "value": 25 + 
}, + { + "labels": { + "service": "foo" + }, + "value": 25 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds" + }, + "docstring": "RPC latency.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "service": "foo" + }, + "value": { + "0.010000": 15.890724674774395, + "0.050000": 15.890724674774395, + "0.500000": 84.63044031436561, + "0.900000": 160.21100853053224, + "0.990000": 172.49828748957728 + } + }, + { + "labels": { + "service": "zed" + }, + "value": { + "0.010000": 0.0459814091918713, + "0.050000": 0.0459814091918713, + "0.500000": 0.6120456642749681, + "0.900000": 1.355915069887731, + "0.990000": 1.772733213161236 + } + }, + { + "labels": { + "service": "bar" + }, + "value": { + "0.010000": 78.48563317257356, + "0.050000": 78.48563317257356, + "0.500000": 97.31798360385088, + "0.900000": 109.89202084295582, + "0.990000": 109.99626121011262 + } + } + ] + } + } +] diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor.go new file mode 100644 index 0000000000..5ed5343484 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor.go @@ -0,0 +1,295 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package extraction + +import ( + "fmt" + "io" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/ext" + + "github.com/prometheus/client_golang/model" +) + +type metricFamilyProcessor struct{} + +// MetricFamilyProcessor decodes varint encoded record length-delimited streams +// of io.prometheus.client.MetricFamily. +// +// See http://godoc.org/github.com/matttproud/golang_protobuf_extensions/ext for +// more details. +var MetricFamilyProcessor = &metricFamilyProcessor{} + +func (m *metricFamilyProcessor) ProcessSingle(i io.Reader, out Ingester, o *ProcessOptions) error { + family := &dto.MetricFamily{} + + for { + family.Reset() + + if _, err := ext.ReadDelimited(i, family); err != nil { + if err == io.EOF { + return nil + } + return err + } + if err := extractMetricFamily(out, o, family); err != nil { + return err + } + } +} + +func extractMetricFamily(out Ingester, o *ProcessOptions, family *dto.MetricFamily) error { + switch family.GetType() { + case dto.MetricType_COUNTER: + if err := extractCounter(out, o, family); err != nil { + return err + } + case dto.MetricType_GAUGE: + if err := extractGauge(out, o, family); err != nil { + return err + } + case dto.MetricType_SUMMARY: + if err := extractSummary(out, o, family); err != nil { + return err + } + case dto.MetricType_UNTYPED: + if err := extractUntyped(out, o, family); err != nil { + return err + } + case dto.MetricType_HISTOGRAM: + if err := extractHistogram(out, o, family); err != nil { + return err + } + } + return nil +} + +func extractCounter(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error { + samples := make(model.Samples, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + sample := new(model.Sample) + samples = append(samples, sample) + + if m.TimestampMs != nil { + sample.Timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000) + } else { + sample.Timestamp = o.Timestamp + } + 
sample.Metric = model.Metric{} + metric := sample.Metric + + for _, p := range m.Label { + metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + + metric[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + sample.Value = model.SampleValue(m.Counter.GetValue()) + } + + return out.Ingest(samples) +} + +func extractGauge(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error { + samples := make(model.Samples, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + sample := new(model.Sample) + samples = append(samples, sample) + + if m.TimestampMs != nil { + sample.Timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000) + } else { + sample.Timestamp = o.Timestamp + } + sample.Metric = model.Metric{} + metric := sample.Metric + + for _, p := range m.Label { + metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + + metric[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + sample.Value = model.SampleValue(m.Gauge.GetValue()) + } + + return out.Ingest(samples) +} + +func extractSummary(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error { + samples := make(model.Samples, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + sample := new(model.Sample) + samples = append(samples, sample) + + sample.Timestamp = timestamp + sample.Metric = model.Metric{} + metric := sample.Metric + + for _, p := range m.Label { + metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
+ metric[model.LabelName("quantile")] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + + metric[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + sample.Value = model.SampleValue(q.GetValue()) + } + + if m.Summary.SampleSum != nil { + sum := new(model.Sample) + sum.Timestamp = timestamp + metric := model.Metric{} + for _, p := range m.Label { + metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + sum.Metric = metric + sum.Value = model.SampleValue(m.Summary.GetSampleSum()) + samples = append(samples, sum) + } + + if m.Summary.SampleCount != nil { + count := new(model.Sample) + count.Timestamp = timestamp + metric := model.Metric{} + for _, p := range m.Label { + metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + count.Metric = metric + count.Value = model.SampleValue(m.Summary.GetSampleCount()) + samples = append(samples, count) + } + } + + return out.Ingest(samples) +} + +func extractUntyped(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error { + samples := make(model.Samples, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + sample := new(model.Sample) + samples = append(samples, sample) + + if m.TimestampMs != nil { + sample.Timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000) + } else { + sample.Timestamp = o.Timestamp + } + sample.Metric = model.Metric{} + metric := sample.Metric + + for _, p := range m.Label { + metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + + metric[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + sample.Value = model.SampleValue(m.Untyped.GetValue()) + } + + return out.Ingest(samples) +} + +func extractHistogram(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error { + samples := make(model.Samples, 0, len(f.Metric)) + + for _, m 
:= range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Histogram.Bucket { + sample := new(model.Sample) + samples = append(samples, sample) + + sample.Timestamp = timestamp + sample.Metric = model.Metric{} + metric := sample.Metric + + for _, p := range m.Label { + metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + metric[model.LabelName("le")] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + + metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + sample.Value = model.SampleValue(q.GetCumulativeCount()) + } + // TODO: If +Inf bucket is missing, add it. + + if m.Histogram.SampleSum != nil { + sum := new(model.Sample) + sum.Timestamp = timestamp + metric := model.Metric{} + for _, p := range m.Label { + metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + sum.Metric = metric + sum.Value = model.SampleValue(m.Histogram.GetSampleSum()) + samples = append(samples, sum) + } + + if m.Histogram.SampleCount != nil { + count := new(model.Sample) + count.Timestamp = timestamp + metric := model.Metric{} + for _, p := range m.Label { + metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + count.Metric = metric + count.Value = model.SampleValue(m.Histogram.GetSampleCount()) + samples = append(samples, count) + } + } + + return out.Ingest(samples) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor_test.go new file mode 100644 index 0000000000..9ba0fdbce3 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/metricfamilyprocessor_test.go @@ -0,0 +1,153 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package extraction + +import ( + "sort" + "strings" + "testing" + + "github.com/prometheus/client_golang/model" +) + +var testTime = model.Now() + +type metricFamilyProcessorScenario struct { + in string + expected, actual []model.Samples +} + +func (s *metricFamilyProcessorScenario) Ingest(samples model.Samples) error { + s.actual = append(s.actual, samples) + return nil +} + +func (s *metricFamilyProcessorScenario) test(t *testing.T, set int) { + i := strings.NewReader(s.in) + + o := &ProcessOptions{ + Timestamp: testTime, + } + + err := MetricFamilyProcessor.ProcessSingle(i, s, o) + if err != nil { + t.Fatalf("%d. got error: %s", set, err) + } + + if len(s.expected) != len(s.actual) { + t.Fatalf("%d. expected length %d, got %d", set, len(s.expected), len(s.actual)) + } + + for i, expected := range s.expected { + sort.Sort(s.actual[i]) + sort.Sort(expected) + + if !expected.Equal(s.actual[i]) { + t.Errorf("%d.%d. 
expected %s, got %s", set, i, expected, s.actual[i]) + } + } +} + +func TestMetricFamilyProcessor(t *testing.T) { + scenarios := []metricFamilyProcessorScenario{ + { + in: "", + }, + { + in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", + expected: []model.Samples{ + model.Samples{ + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_count", "some_label_name": "some_label_value"}, + Value: -42, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_count", "another_label_name": "another_label_value"}, + Value: 84, + Timestamp: testTime, + }, + }, + }, + }, + { + in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@", + expected: []model.Samples{ + model.Samples{ + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_count", "some_label_name": "some_label_value", "quantile": "0.99"}, + Value: -42, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_count", "some_label_name": "some_label_value", "quantile": "0.999"}, + Value: -84, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_count", "another_label_name": "another_label_value", "quantile": "0.5"}, + Value: 10, + Timestamp: testTime, + }, + }, + }, + }, + { + in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response 
latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f", + expected: []model.Samples{ + model.Samples{ + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "100"}, + Value: 123, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "120"}, + Value: 412, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "144"}, + Value: 592, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "172.8"}, + Value: 1524, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "+Inf"}, + Value: 2693, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_sum"}, + Value: 1756047.3, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_count"}, + Value: 2693, + Timestamp: testTime, + }, + }, + }, + }, + } + + for i, scenario := range scenarios { + scenario.test(t, i) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor.go new file mode 100644 index 0000000000..50c93c9e68 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor.go @@ -0,0 +1,84 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package extraction + +import ( + "io" + "time" + + "github.com/prometheus/client_golang/model" +) + +// ProcessOptions dictates how the interpreted stream should be rendered for +// consumption. +type ProcessOptions struct { + // Timestamp is added to each value from the stream that has no explicit + // timestamp set. + Timestamp model.Timestamp +} + +// Ingester consumes result streams in whatever way is desired by the user. +type Ingester interface { + Ingest(model.Samples) error +} + +// Processor is responsible for decoding the actual message responses from +// stream into a format that can be consumed with the end result written +// to the results channel. +type Processor interface { + // ProcessSingle treats the input as a single self-contained message body and + // transforms it accordingly. It has no support for streaming. + ProcessSingle(in io.Reader, out Ingester, o *ProcessOptions) error +} + +// Helper function to convert map[string]string into LabelSet. +// +// NOTE: This should be deleted when support for go 1.0.3 is removed; 1.1 is +// smart enough to unmarshal JSON objects into LabelSet directly. +func labelSet(labels map[string]string) model.LabelSet { + labelset := make(model.LabelSet, len(labels)) + + for k, v := range labels { + labelset[model.LabelName(k)] = model.LabelValue(v) + } + + return labelset +} + +// A basic interface only useful in testing contexts for dispensing the time +// in a controlled manner. +type instantProvider interface { + // The current instant. 
+ Now() time.Time +} + +// Clock is a simple means for fluently wrapping around standard Go timekeeping +// mechanisms to enhance testability without compromising code readability. +// +// It is sufficient for use on bare initialization. A provider should be +// set only for test contexts. When not provided, it emits the current +// system time. +type clock struct { + // The underlying means through which time is provided, if supplied. + Provider instantProvider +} + +// Emit the current instant. +func (t *clock) Now() time.Time { + if t.Provider == nil { + return time.Now() + } + + return t.Provider.Now() +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1.go new file mode 100644 index 0000000000..7a728efb0e --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1.go @@ -0,0 +1,127 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package extraction + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + + "github.com/prometheus/client_golang/model" +) + +const ( + baseLabels001 = "baseLabels" + counter001 = "counter" + docstring001 = "docstring" + gauge001 = "gauge" + histogram001 = "histogram" + labels001 = "labels" + metric001 = "metric" + type001 = "type" + value001 = "value" + percentile001 = "percentile" +) + +// Processor001 is responsible for decoding payloads from protocol version +// 0.0.1. +var Processor001 = &processor001{} + +// processor001 is responsible for handling API version 0.0.1. +type processor001 struct{} + +// entity001 represents a the JSON structure that 0.0.1 uses. +type entity001 []struct { + BaseLabels map[string]string `json:"baseLabels"` + Docstring string `json:"docstring"` + Metric struct { + MetricType string `json:"type"` + Value []struct { + Labels map[string]string `json:"labels"` + Value interface{} `json:"value"` + } `json:"value"` + } `json:"metric"` +} + +func (p *processor001) ProcessSingle(in io.Reader, out Ingester, o *ProcessOptions) error { + // TODO(matt): Replace with plain-jane JSON unmarshalling. + buffer, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + entities := entity001{} + if err = json.Unmarshal(buffer, &entities); err != nil { + return err + } + + // TODO(matt): This outer loop is a great basis for parallelization. 
+ pendingSamples := model.Samples{} + for _, entity := range entities { + for _, value := range entity.Metric.Value { + labels := labelSet(entity.BaseLabels).Merge(labelSet(value.Labels)) + + switch entity.Metric.MetricType { + case gauge001, counter001: + sampleValue, ok := value.Value.(float64) + if !ok { + return fmt.Errorf("could not convert value from %s %s to float64", entity, value) + } + + pendingSamples = append(pendingSamples, &model.Sample{ + Metric: model.Metric(labels), + Timestamp: o.Timestamp, + Value: model.SampleValue(sampleValue), + }) + + break + + case histogram001: + sampleValue, ok := value.Value.(map[string]interface{}) + if !ok { + return fmt.Errorf("could not convert value from %q to a map[string]interface{}", value.Value) + } + + for percentile, percentileValue := range sampleValue { + individualValue, ok := percentileValue.(float64) + if !ok { + return fmt.Errorf("could not convert value from %q to a float64", percentileValue) + } + + childMetric := make(map[model.LabelName]model.LabelValue, len(labels)+1) + + for k, v := range labels { + childMetric[k] = v + } + + childMetric[model.LabelName(percentile001)] = model.LabelValue(percentile) + + pendingSamples = append(pendingSamples, &model.Sample{ + Metric: model.Metric(childMetric), + Timestamp: o.Timestamp, + Value: model.SampleValue(individualValue), + }) + } + + break + } + } + } + if len(pendingSamples) > 0 { + return out.Ingest(pendingSamples) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1_test.go new file mode 100644 index 0000000000..3fd909cfc7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_1_test.go @@ -0,0 +1,186 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package extraction + +import ( + "errors" + "os" + "path" + "sort" + "testing" + + "github.com/prometheus/prometheus/utility/test" + + "github.com/prometheus/client_golang/model" +) + +var test001Time = model.Now() + +type testProcessor001ProcessScenario struct { + in string + expected, actual []model.Samples + err error +} + +func (s *testProcessor001ProcessScenario) Ingest(samples model.Samples) error { + s.actual = append(s.actual, samples) + return nil +} + +func (s *testProcessor001ProcessScenario) test(t testing.TB, set int) { + reader, err := os.Open(path.Join("fixtures", s.in)) + if err != nil { + t.Fatalf("%d. couldn't open scenario input file %s: %s", set, s.in, err) + } + + options := &ProcessOptions{ + Timestamp: test001Time, + } + if err := Processor001.ProcessSingle(reader, s, options); !test.ErrorEqual(s.err, err) { + t.Fatalf("%d. expected err of %s, got %s", set, s.err, err) + } + + if len(s.actual) != len(s.expected) { + t.Fatalf("%d. expected output length of %d, got %d", set, len(s.expected), len(s.actual)) + } + + for i, expected := range s.expected { + sort.Sort(s.actual[i]) + sort.Sort(expected) + + if !expected.Equal(s.actual[i]) { + t.Errorf("%d.%d. 
expected %s, got %s", set, i, expected, s.actual[i]) + } + } +} + +func testProcessor001Process(t testing.TB) { + var scenarios = []testProcessor001ProcessScenario{ + { + in: "empty.json", + err: errors.New("unexpected end of JSON input"), + }, + { + in: "test0_0_1-0_0_2.json", + expected: []model.Samples{ + model.Samples{ + &model.Sample{ + Metric: model.Metric{"service": "zed", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, + Value: 25, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"service": "bar", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, + Value: 25, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"service": "foo", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, + Value: 25, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, + Value: 0.0459814091918713, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, + Value: 78.48563317257356, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, + Value: 15.890724674774395, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, + Value: 0.0459814091918713, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, + Value: 78.48563317257356, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, + Value: 15.890724674774395, + Timestamp: test001Time, + }, + 
&model.Sample{ + Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, + Value: 0.6120456642749681, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, + Value: 97.31798360385088, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, + Value: 84.63044031436561, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, + Value: 1.355915069887731, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, + Value: 109.89202084295582, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, + Value: 160.21100853053224, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, + Value: 1.772733213161236, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, + Value: 109.99626121011262, + Timestamp: test001Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, + Value: 172.49828748957728, + Timestamp: test001Time, + }, + }, + }, + }, + } + + for i, scenario := range scenarios { + scenario.test(t, i) + } +} + +func TestProcessor001Process(t *testing.T) { + testProcessor001Process(t) +} + +func BenchmarkProcessor001Process(b *testing.B) { + for i := 0; i < 
b.N; i++ { + testProcessor001Process(b) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2.go new file mode 100644 index 0000000000..24c7e81554 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2.go @@ -0,0 +1,106 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package extraction + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/prometheus/client_golang/model" +) + +// Processor002 is responsible for decoding payloads from protocol version +// 0.0.2. +var Processor002 = &processor002{} + +type histogram002 struct { + Labels map[string]string `json:"labels"` + Values map[string]model.SampleValue `json:"value"` +} + +type counter002 struct { + Labels map[string]string `json:"labels"` + Value model.SampleValue `json:"value"` +} + +type processor002 struct{} + +func (p *processor002) ProcessSingle(in io.Reader, out Ingester, o *ProcessOptions) error { + // Processor for telemetry schema version 0.0.2. 
+ // container for telemetry data + var entities []struct { + BaseLabels map[string]string `json:"baseLabels"` + Docstring string `json:"docstring"` + Metric struct { + Type string `json:"type"` + Values json.RawMessage `json:"value"` + } `json:"metric"` + } + + if err := json.NewDecoder(in).Decode(&entities); err != nil { + return err + } + + pendingSamples := model.Samples{} + for _, entity := range entities { + switch entity.Metric.Type { + case "counter", "gauge": + var values []counter002 + + if err := json.Unmarshal(entity.Metric.Values, &values); err != nil { + return fmt.Errorf("could not extract %s value: %s", entity.Metric.Type, err) + } + + for _, counter := range values { + labels := labelSet(entity.BaseLabels).Merge(labelSet(counter.Labels)) + + pendingSamples = append(pendingSamples, &model.Sample{ + Metric: model.Metric(labels), + Timestamp: o.Timestamp, + Value: counter.Value, + }) + } + + case "histogram": + var values []histogram002 + + if err := json.Unmarshal(entity.Metric.Values, &values); err != nil { + return fmt.Errorf("could not extract %s value: %s", entity.Metric.Type, err) + } + + for _, histogram := range values { + for percentile, value := range histogram.Values { + labels := labelSet(entity.BaseLabels).Merge(labelSet(histogram.Labels)) + labels[model.LabelName("percentile")] = model.LabelValue(percentile) + + pendingSamples = append(pendingSamples, &model.Sample{ + Metric: model.Metric(labels), + Timestamp: o.Timestamp, + Value: value, + }) + } + } + + default: + return fmt.Errorf("unknown metric type %q", entity.Metric.Type) + } + } + + if len(pendingSamples) > 0 { + return out.Ingest(pendingSamples) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2_test.go new file mode 100644 index 0000000000..451c57e5e8 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/processor0_0_2_test.go @@ -0,0 +1,226 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package extraction + +import ( + "bytes" + "errors" + "io/ioutil" + "os" + "path" + "runtime" + "sort" + "testing" + + "github.com/prometheus/prometheus/utility/test" + + "github.com/prometheus/client_golang/model" +) + +var test002Time = model.Now() + +type testProcessor002ProcessScenario struct { + in string + expected, actual []model.Samples + err error +} + +func (s *testProcessor002ProcessScenario) Ingest(samples model.Samples) error { + s.actual = append(s.actual, samples) + return nil +} + +func (s *testProcessor002ProcessScenario) test(t testing.TB, set int) { + reader, err := os.Open(path.Join("fixtures", s.in)) + if err != nil { + t.Fatalf("%d. couldn't open scenario input file %s: %s", set, s.in, err) + } + + options := &ProcessOptions{ + Timestamp: test002Time, + } + if err := Processor002.ProcessSingle(reader, s, options); !test.ErrorEqual(s.err, err) { + t.Fatalf("%d. expected err of %s, got %s", set, s.err, err) + } + + if len(s.actual) != len(s.expected) { + t.Fatalf("%d. expected output length of %d, got %d", set, len(s.expected), len(s.actual)) + } + + for i, expected := range s.expected { + sort.Sort(s.actual[i]) + sort.Sort(expected) + + if !expected.Equal(s.actual[i]) { + t.Fatalf("%d.%d. 
expected %s, got %s", set, i, expected, s.actual[i]) + } + } +} + +func testProcessor002Process(t testing.TB) { + var scenarios = []testProcessor002ProcessScenario{ + { + in: "empty.json", + err: errors.New("EOF"), + }, + { + in: "test0_0_1-0_0_2.json", + expected: []model.Samples{ + model.Samples{ + &model.Sample{ + Metric: model.Metric{"service": "zed", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, + Value: 25, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"service": "bar", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, + Value: 25, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"service": "foo", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"}, + Value: 25, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, + Value: 0.0459814091918713, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, + Value: 78.48563317257356, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, + Value: 15.890724674774395, + Timestamp: test002Time, + }, + &model.Sample{ + + Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, + Value: 0.0459814091918713, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, + Value: 78.48563317257356, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, + Value: 15.890724674774395, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: 
model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, + Value: 0.6120456642749681, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, + Value: 97.31798360385088, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, + Value: 84.63044031436561, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, + Value: 1.355915069887731, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, + Value: 109.89202084295582, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, + Value: 160.21100853053224, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, + Value: 1.772733213161236, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, + Value: 109.99626121011262, + Timestamp: test002Time, + }, + &model.Sample{ + Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, + Value: 172.49828748957728, + Timestamp: test002Time, + }, + }, + }, + }, + } + + for i, scenario := range scenarios { + scenario.test(t, i) + } +} + +func TestProcessor002Process(t *testing.T) { + testProcessor002Process(t) +} + +func BenchmarkProcessor002Process(b *testing.B) { + b.StopTimer() + + pre := 
runtime.MemStats{} + runtime.ReadMemStats(&pre) + + b.StartTimer() + + for i := 0; i < b.N; i++ { + testProcessor002Process(b) + } + + post := runtime.MemStats{} + runtime.ReadMemStats(&post) + + allocated := post.TotalAlloc - pre.TotalAlloc + + b.Logf("Allocated %d at %f per cycle with %d cycles.", allocated, float64(allocated)/float64(b.N), b.N) +} + +func BenchmarkProcessor002ParseOnly(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("fixtures/test0_0_1-0_0_2-large.json") + if err != nil { + b.Fatal(err) + } + ing := fakeIngester{} + b.StartTimer() + + for i := 0; i < b.N; i++ { + if err := Processor002.ProcessSingle(bytes.NewReader(data), ing, &ProcessOptions{}); err != nil { + b.Fatal(err) + } + } +} + +type fakeIngester struct{} + +func (i fakeIngester) Ingest(model.Samples) error { + return nil +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor.go new file mode 100644 index 0000000000..2eca1c63a6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor.go @@ -0,0 +1,40 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package extraction + +import ( + "io" + + "github.com/prometheus/client_golang/text" +) + +type processor004 struct{} + +// Processor004 is responsible for decoding payloads from the text based variety +// of protocol version 0.0.4. +var Processor004 = &processor004{} + +func (t *processor004) ProcessSingle(i io.Reader, out Ingester, o *ProcessOptions) error { + var parser text.Parser + metricFamilies, err := parser.TextToMetricFamilies(i) + if err != nil { + return err + } + for _, metricFamily := range metricFamilies { + if err := extractMetricFamily(out, o, metricFamily); err != nil { + return err + } + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor_test.go new file mode 100644 index 0000000000..ff704a9bc3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/extraction/textprocessor_test.go @@ -0,0 +1,100 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+# TYPE mf2 counter +mf2 3 +mf1{label="value1"} -3.14 123456 +mf1{label="value2"} 42 +mf2 4 +` + out = map[model.LabelValue]model.Samples{ + "mf1": model.Samples{ + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "mf1", "label": "value1"}, + Value: -3.14, + Timestamp: 123456, + }, + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "mf1", "label": "value2"}, + Value: 42, + Timestamp: ts, + }, + }, + "mf2": model.Samples{ + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "mf2"}, + Value: 3, + Timestamp: ts, + }, + &model.Sample{ + Metric: model.Metric{model.MetricNameLabel: "mf2"}, + Value: 4, + Timestamp: ts, + }, + }, + } +) + +type testIngester struct { + results []model.Samples +} + +func (i *testIngester) Ingest(s model.Samples) error { + i.results = append(i.results, s) + return nil +} + +func TestTextProcessor(t *testing.T) { + var ingester testIngester + i := strings.NewReader(in) + o := &ProcessOptions{ + Timestamp: ts, + } + + err := Processor004.ProcessSingle(i, &ingester, o) + if err != nil { + t.Fatal(err) + } + if expected, got := len(out), len(ingester.results); expected != got { + t.Fatalf("Expected length %d, got %d", expected, got) + } + for _, r := range ingester.results { + expected, ok := out[r[0].Metric[model.MetricNameLabel]] + if !ok { + t.Fatalf( + "Unexpected metric name %q", + r[0].Metric[model.MetricNameLabel], + ) + } + sort.Sort(expected) + sort.Sort(r) + if !expected.Equal(r) { + t.Errorf("expected %s, got %s", expected, r) + } + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/fingerprinting.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/fingerprinting.go new file mode 100644 index 0000000000..5b2ffe3bb3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/fingerprinting.go @@ -0,0 +1,110 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may 
not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Less implements sort.Interface. +func (f Fingerprint) Less(o Fingerprint) bool { + return f < o +} + +// Equal implements sort.Interface. +func (f Fingerprint) Equal(o Fingerprint) bool { + return f == o +} + +// LoadFromString transforms a string representation into a Fingerprint. +func (f *Fingerprint) LoadFromString(s string) error { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return err + } + *f = Fingerprint(num) + return nil +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). 
+func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go new file mode 100644 index 0000000000..047e756555 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname.go @@ -0,0 +1,63 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "strings" +) + +const ( + // ExporterLabelPrefix is the label name prefix to prepend if a + // synthetic label is already present in the exported metrics. + ExporterLabelPrefix LabelName = "exporter_" + + // MetricNameLabel is the label name indicating the metric name of a + // timeseries. 
+ MetricNameLabel LabelName = "__name__" + + // ReservedLabelPrefix is a prefix which is not legal in user-supplied + // label names. + ReservedLabelPrefix = "__" + + // JobLabel is the label name indicating the job from which a timeseries + // was scraped. + JobLabel LabelName = "job" +) + +// A LabelName is a key for a LabelSet or Metric. It has a value associated +// therewith. +type LabelName string + +// LabelNames is a sortable LabelName slice. It implements sort.Interface. +type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname_test.go new file mode 100644 index 0000000000..693228d347 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelname_test.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "sort" + "testing" +) + +func testLabelNames(t testing.TB) { + var scenarios = []struct { + in LabelNames + out LabelNames + }{ + { + in: LabelNames{"ZZZ", "zzz"}, + out: LabelNames{"ZZZ", "zzz"}, + }, + { + in: LabelNames{"aaa", "AAA"}, + out: LabelNames{"AAA", "aaa"}, + }, + } + + for i, scenario := range scenarios { + sort.Sort(scenario.in) + + for j, expected := range scenario.out { + if expected != scenario.in[j] { + t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) + } + } + } +} + +func TestLabelNames(t *testing.T) { + testLabelNames(t) +} + +func BenchmarkLabelNames(b *testing.B) { + for i := 0; i < b.N; i++ { + testLabelNames(b) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelset.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelset.go new file mode 100644 index 0000000000..b1b54fba35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelset.go @@ -0,0 +1,64 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. 
All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + labelStrings := make([]string, 0, len(l)) + for label, value := range l { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + + switch len(labelStrings) { + case 0: + return "" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("{%s}", strings.Join(labelStrings, ", ")) + } +} + +// MergeFromMetric merges Metric into this LabelSet. +func (l LabelSet) MergeFromMetric(m Metric) { + for k, v := range m { + l[k] = v + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue.go new file mode 100644 index 0000000000..df2d14cc12 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" +) + +// A LabelValue is an associated value for a LabelName. 
+type LabelValue string + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. +type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return sort.StringsAreSorted([]string{string(l[i]), string(l[j])}) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue_test.go new file mode 100644 index 0000000000..15904edf4f --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/labelvalue_test.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "sort" + "testing" +) + +func testLabelValues(t testing.TB) { + var scenarios = []struct { + in LabelValues + out LabelValues + }{ + { + in: LabelValues{"ZZZ", "zzz"}, + out: LabelValues{"ZZZ", "zzz"}, + }, + { + in: LabelValues{"aaa", "AAA"}, + out: LabelValues{"AAA", "aaa"}, + }, + } + + for i, scenario := range scenarios { + sort.Sort(scenario.in) + + for j, expected := range scenario.out { + if expected != scenario.in[j] { + t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) + } + } + } +} + +func TestLabelValues(t *testing.T) { + testLabelValues(t) +} + +func BenchmarkLabelValues(b *testing.B) { + for i := 0; i < b.N; i++ { + testLabelValues(b) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric.go new file mode 100644 index 0000000000..74f394b92e --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric.go @@ -0,0 +1,164 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/binary" + "encoding/json" + "fmt" + "hash/fnv" + "sort" + "strings" +) + +var separator = []byte{0} + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. 
+type Metric map[LabelName]LabelValue + +// Equal compares the fingerprints of both metrics. +func (m Metric) Equal(o Metric) bool { + return m.Fingerprint().Equal(o.Fingerprint()) +} + +// Before compares the fingerprints of both metrics. +func (m Metric) Before(o Metric) bool { + return m.Fingerprint().Less(o.Fingerprint()) +} + +// String implements Stringer. +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + labelNames := make([]string, 0, len(m)) + maxLength := 0 + + for labelName, labelValue := range m { + labelNames = append(labelNames, string(labelName)) + if len(labelName) > maxLength { + maxLength = len(labelName) + } + if len(labelValue) > maxLength { + maxLength = len(labelValue) + } + } + + sort.Strings(labelNames) + + summer := fnv.New64a() + buf := make([]byte, maxLength) + + for _, labelName := range labelNames { + labelValue := m[LabelName(labelName)] + + copy(buf, labelName) + summer.Write(buf[:len(labelName)]) + + summer.Write(separator) + + copy(buf, labelValue) + summer.Write(buf[:len(labelValue)]) + } + + return Fingerprint(binary.LittleEndian.Uint64(summer.Sum(nil))) +} + +// Clone returns a copy of the Metric. 
+func (m Metric) Clone() Metric { + clone := Metric{} + for k, v := range m { + clone[k] = v + } + return clone +} + +// MergeFromLabelSet merges a label set into this Metric, prefixing a collision +// prefix to the label names merged from the label set where required. +func (m Metric) MergeFromLabelSet(labels LabelSet, collisionPrefix LabelName) { + for k, v := range labels { + if collisionPrefix != "" { + for { + if _, exists := m[k]; !exists { + break + } + k = collisionPrefix + k + } + } + + m[k] = v + } +} + +// COWMetric wraps a Metric to enable copy-on-write access patterns. +type COWMetric struct { + Copied bool + Metric Metric +} + +// Set sets a label name in the wrapped Metric to a given value and copies the +// Metric initially, if it is not already a copy. +func (m COWMetric) Set(ln LabelName, lv LabelValue) { + m.doCOW() + m.Metric[ln] = lv +} + +// Delete deletes a given label name from the wrapped Metric and copies the +// Metric initially, if it is not already a copy. +func (m *COWMetric) Delete(ln LabelName) { + m.doCOW() + delete(m.Metric, ln) +} + +// doCOW copies the underlying Metric if it is not already a copy. +func (m *COWMetric) doCOW() { + if !m.Copied { + m.Metric = m.Metric.Clone() + m.Copied = true + } +} + +// String implements fmt.Stringer. +func (m COWMetric) String() string { + return m.Metric.String() +} + +// MarshalJSON implements json.Marshaler. 
+func (m COWMetric) MarshalJSON() ([]byte, error) { + return json.Marshal(m.Metric) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric_test.go new file mode 100644 index 0000000000..7c31bdfb45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/metric_test.go @@ -0,0 +1,76 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import "testing" + +func testMetric(t testing.TB) { + var scenarios = []struct { + input Metric + fingerprint Fingerprint + }{ + { + input: Metric{}, + fingerprint: 2676020557754725067, + }, + { + input: Metric{ + "first_name": "electro", + "occupation": "robot", + "manufacturer": "westinghouse", + }, + fingerprint: 13260944541294022935, + }, + { + input: Metric{ + "x": "y", + }, + fingerprint: 1470933794305433534, + }, + // The following two demonstrate a bug in fingerprinting. They + // should not have the same fingerprint with a sane + // fingerprinting function. See + // https://github.com/prometheus/client_golang/issues/74 . + { + input: Metric{ + "a": "bb", + "b": "c", + }, + fingerprint: 3734646176939799877, + }, + { + input: Metric{ + "a": "b", + "bb": "c", + }, + fingerprint: 3734646176939799877, + }, + } + + for i, scenario := range scenarios { + if scenario.fingerprint != scenario.input.Fingerprint() { + t.Errorf("%d. 
expected %d, got %d", i, scenario.fingerprint, scenario.input.Fingerprint()) + } + } +} + +func TestMetric(t *testing.T) { + testMetric(t) +} + +func BenchmarkMetric(b *testing.B) { + for i := 0; i < b.N; i++ { + testMetric(b) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/model.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/model.go new file mode 100644 index 0000000000..189c5dcf6c --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/model.go @@ -0,0 +1,15 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains core representation of Prometheus client primitives. +package model diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample.go new file mode 100644 index 0000000000..c13a44d95c --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample.go @@ -0,0 +1,79 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Sample is a sample value with a timestamp and a metric. +type Sample struct { + Metric Metric + Value SampleValue + Timestamp Timestamp +} + +// Equal compares first the metrics, then the timestamp, then the value. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + if !s.Value.Equal(o.Value) { + return false + } + + return true +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. +func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. 
+func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample_test.go new file mode 100644 index 0000000000..3dc4ad2511 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/sample_test.go @@ -0,0 +1,126 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" + "testing" +) + +func TestSamplesSort(t *testing.T) { + input := Samples{ + &Sample{ + // Fingerprint: 81f9c9ed24563f8f. + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 1, + }, + &Sample{ + // Fingerprint: 81f9c9ed24563f8f. + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 2, + }, + &Sample{ + // Fingerprint: 1bf6c9ed24543f8f. + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 1, + }, + &Sample{ + // Fingerprint: 1bf6c9ed24543f8f. + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 2, + }, + &Sample{ + // Fingerprint: 68f4c9ed24533f8f. + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 1, + }, + &Sample{ + // Fingerprint: 68f4c9ed24533f8f. 
+ Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 2, + }, + } + + expected := Samples{ + &Sample{ + // Fingerprint: 1bf6c9ed24543f8f. + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 1, + }, + &Sample{ + // Fingerprint: 1bf6c9ed24543f8f. + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 2, + }, + &Sample{ + // Fingerprint: 68f4c9ed24533f8f. + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 1, + }, + &Sample{ + // Fingerprint: 68f4c9ed24533f8f. + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 2, + }, + &Sample{ + // Fingerprint: 81f9c9ed24563f8f. + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 1, + }, + &Sample{ + // Fingerprint: 81f9c9ed24563f8f. + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 2, + }, + } + + sort.Sort(input) + + for i, actual := range input { + actualFp := actual.Metric.Fingerprint() + expectedFp := expected[i].Metric.Fingerprint() + + if !actualFp.Equal(expectedFp) { + t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String()) + } + + if actual.Timestamp != expected[i].Timestamp { + t.Fatalf("%d. Incorrect timestamp. Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp) + } + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/samplevalue.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/samplevalue.go new file mode 100644 index 0000000000..469c2c0b0e --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/samplevalue.go @@ -0,0 +1,37 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// Equal does a straight v==o. +func (v SampleValue) Equal(o SampleValue) bool { + return v == o +} + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, v)), nil +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature.go new file mode 100644 index 0000000000..4b392fb187 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "bytes" + "hash" + "hash/fnv" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = fnv.New64a().Sum64() + + hashAndBufPool = make(chan *hashAndBuf, 1024) +) + +type hashAndBuf struct { + h hash.Hash64 + b bytes.Buffer +} + +func getHashAndBuf() *hashAndBuf { + select { + case hb := <-hashAndBufPool: + return hb + default: + return &hashAndBuf{h: fnv.New64a()} + } +} + +func putHashAndBuf(hb *hashAndBuf) { + select { + case hashAndBufPool <- hb: + default: + } +} + +// LabelsToSignature returns a unique signature (i.e., fingerprint) for a given +// label set. +func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + var result uint64 + hb := getHashAndBuf() + defer putHashAndBuf(hb) + + for k, v := range labels { + hb.b.WriteString(k) + hb.b.WriteByte(SeparatorByte) + hb.b.WriteString(v) + hb.h.Write(hb.b.Bytes()) + result ^= hb.h.Sum64() + hb.h.Reset() + hb.b.Reset() + } + return result +} + +// LabelValuesToSignature returns a unique signature (i.e., fingerprint) for the +// values of a given label set. 
+func LabelValuesToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + var result uint64 + hb := getHashAndBuf() + defer putHashAndBuf(hb) + + for _, v := range labels { + hb.b.WriteString(v) + hb.h.Write(hb.b.Bytes()) + result ^= hb.h.Sum64() + hb.h.Reset() + hb.b.Reset() + } + return result +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature_test.go new file mode 100644 index 0000000000..ad20fa3ff8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/signature_test.go @@ -0,0 +1,120 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "runtime" + "testing" +) + +func testLabelsToSignature(t testing.TB) { + var scenarios = []struct { + in map[string]string + out uint64 + }{ + { + in: map[string]string{}, + out: 14695981039346656037, + }, + { + in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"}, + out: 12952432476264840823, + }, + } + + for i, scenario := range scenarios { + actual := LabelsToSignature(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestLabelToSignature(t *testing.T) { + testLabelsToSignature(t) +} + +func TestEmptyLabelSignature(t *testing.T) { + input := []map[string]string{nil, {}} + + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + + alloc := ms.Alloc + + for _, labels := range input { + LabelsToSignature(labels) + } + + runtime.ReadMemStats(&ms) + + if got := ms.Alloc; alloc != got { + t.Fatal("expected LabelsToSignature with empty labels not to perform allocations") + } +} + +func BenchmarkLabelToSignature(b *testing.B) { + for i := 0; i < b.N; i++ { + testLabelsToSignature(b) + } +} + +func benchmarkLabelValuesToSignature(b *testing.B, l map[string]string, e uint64) { + for i := 0; i < b.N; i++ { + if a := LabelValuesToSignature(l); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, l, a) + } + } +} + +func BenchmarkLabelValuesToSignatureScalar(b *testing.B) { + benchmarkLabelValuesToSignature(b, nil, 14695981039346656037) +} + +func BenchmarkLabelValuesToSignatureSingle(b *testing.B) { + benchmarkLabelValuesToSignature(b, map[string]string{"first-label": "first-label-value"}, 2653746141194979650) +} + +func BenchmarkLabelValuesToSignatureDouble(b *testing.B) { + benchmarkLabelValuesToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 8893559499616767364) +} + +func BenchmarkLabelValuesToSignatureTriple(b *testing.B) { + benchmarkLabelValuesToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 1685970066862087833) +} + +func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) { + for i := 0; i < b.N; i++ { + if a := LabelsToSignature(l); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, l, a) + } + } +} + +func BenchmarkLabelToSignatureScalar(b *testing.B) { + benchmarkLabelToSignature(b, nil, 14695981039346656037) +} + 
+func BenchmarkLabelToSignatureSingle(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5147259542624943964) +} + +func BenchmarkLabelToSignatureDouble(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528) +} + +func BenchmarkLabelToSignatureTriple(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp.go new file mode 100644 index 0000000000..09bd87710d --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp.go @@ -0,0 +1,115 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "math" + "strconv" + + native_time "time" +) + +// Timestamp is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Timestamp int64 + +const ( + // MinimumTick is the minimum supported time resolution. This has to be + // at least native_time.Second in order for the code below to work. 
+ MinimumTick = native_time.Millisecond + // second is the timestamp duration equivalent to one second. + second = int64(native_time.Second / MinimumTick) + // The number of nanoseconds per minimum tick. + nanosPerTick = int64(MinimumTick / native_time.Nanosecond) + + // Earliest is the earliest timestamp representable. Handy for + // initializing a high watermark. + Earliest = Timestamp(math.MinInt64) + // Latest is the latest timestamp representable. Handy for initializing + // a low watermark. + Latest = Timestamp(math.MaxInt64) +) + +// Equal reports whether two timestamps represent the same instant. +func (t Timestamp) Equal(o Timestamp) bool { + return t == o +} + +// Before reports whether the timestamp t is before o. +func (t Timestamp) Before(o Timestamp) bool { + return t < o +} + +// After reports whether the timestamp t is after o. +func (t Timestamp) After(o Timestamp) bool { + return t > o +} + +// Add returns the Timestamp t + d. +func (t Timestamp) Add(d native_time.Duration) Timestamp { + return t + Timestamp(d/MinimumTick) +} + +// Sub returns the Duration t - o. +func (t Timestamp) Sub(o Timestamp) native_time.Duration { + return native_time.Duration(t-o) * MinimumTick +} + +// Time returns the time.Time representation of t. +func (t Timestamp) Time() native_time.Time { + return native_time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Timestamp) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Timestamp) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// String returns a string representation of the timestamp. 
+func (t Timestamp) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +func (t Timestamp) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// Now returns the current time as a Timestamp. +func Now() Timestamp { + return TimestampFromTime(native_time.Now()) +} + +// TimestampFromTime returns the Timestamp equivalent to the time.Time t. +func TimestampFromTime(t native_time.Time) Timestamp { + return TimestampFromUnixNano(t.UnixNano()) +} + +// TimestampFromUnix returns the Timestamp equivalent to the Unix timestamp t +// provided in seconds. +func TimestampFromUnix(t int64) Timestamp { + return Timestamp(t * second) +} + +// TimestampFromUnixNano returns the Timestamp equivalent to the Unix timestamp +// t provided in nanoseconds. +func TimestampFromUnixNano(t int64) Timestamp { + return Timestamp(t / nanosPerTick) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp_test.go new file mode 100644 index 0000000000..fa028a47de --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/model/timestamp_test.go @@ -0,0 +1,86 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "testing" + native_time "time" +) + +func TestComparators(t *testing.T) { + t1a := TimestampFromUnix(0) + t1b := TimestampFromUnix(0) + t2 := TimestampFromUnix(2*second - 1) + + if !t1a.Equal(t1b) { + t.Fatalf("Expected %s to be equal to %s", t1a, t1b) + } + if t1a.Equal(t2) { + t.Fatalf("Expected %s to not be equal to %s", t1a, t2) + } + + if !t1a.Before(t2) { + t.Fatalf("Expected %s to be before %s", t1a, t2) + } + if t1a.Before(t1b) { + t.Fatalf("Expected %s to not be before %s", t1a, t1b) + } + + if !t2.After(t1a) { + t.Fatalf("Expected %s to be after %s", t2, t1a) + } + if t1b.After(t1a) { + t.Fatalf("Expected %s to not be after %s", t1b, t1a) + } +} + +func TestTimestampConversions(t *testing.T) { + unixSecs := int64(1136239445) + unixNsecs := int64(123456789) + unixNano := unixSecs*1000000000 + unixNsecs + + t1 := native_time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick) + t2 := native_time.Unix(unixSecs, unixNsecs) + + ts := TimestampFromUnixNano(unixNano) + if !ts.Time().Equal(t1) { + t.Fatalf("Expected %s, got %s", t1, ts.Time()) + } + + // Test available precision. 
+ ts = TimestampFromTime(t2) + if !ts.Time().Equal(t1) { + t.Fatalf("Expected %s, got %s", t1, ts.Time()) + } + + if ts.UnixNano() != unixNano-unixNano%nanosPerTick { + t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano()) + } +} + +func TestDuration(t *testing.T) { + duration := native_time.Second + native_time.Minute + native_time.Hour + goTime := native_time.Unix(1136239445, 0) + + ts := TimestampFromTime(goTime) + if !goTime.Add(duration).Equal(ts.Add(duration).Time()) { + t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration)) + } + + earlier := ts.Add(-duration) + delta := ts.Sub(earlier) + if delta != duration { + t.Fatalf("Expected %s to be equal to %s", delta, duration) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/.gitignore b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/.gitignore new file mode 100644 index 0000000000..3460f0346d --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/.gitignore @@ -0,0 +1 @@ +command-line-arguments.test diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/README.md b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/README.md new file mode 100644 index 0000000000..81032bed88 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/README.md @@ -0,0 +1,53 @@ +# Overview +This is the [Prometheus](http://www.prometheus.io) telemetric +instrumentation client [Go](http://golang.org) client library. It +enable authors to define process-space metrics for their servers and +expose them through a web service interface for extraction, +aggregation, and a whole slew of other post processing techniques. 
+ +# Installing + $ go get github.com/prometheus/client_golang/prometheus + +# Example +```go +package main + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + indexed = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "my_company", + Subsystem: "indexer", + Name: "documents_indexed", + Help: "The number of documents indexed.", + }) + size = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "my_company", + Subsystem: "storage", + Name: "documents_total_size_bytes", + Help: "The total size of all documents in the storage.", + }) +) + +func main() { + http.Handle("/metrics", prometheus.Handler()) + + indexed.Inc() + size.Set(5) + + http.ListenAndServe(":8080", nil) +} + +func init() { + prometheus.MustRegister(indexed) + prometheus.MustRegister(size) +} +``` + +# Documentation + +[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang) diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go new file mode 100644 index 0000000000..d43a857e63 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go @@ -0,0 +1,131 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "testing" +) + +func BenchmarkCounterWithLabelValues(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } +} + +func BenchmarkCounterWithMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc() + } +} + +func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + labels := Labels{"two": "zwei", "one": "eins", "three": "drei"} + for i := 0; i < b.N; i++ { + m.With(labels).Inc() + } +} + +func BenchmarkCounterNoLabels(b *testing.B) { + m := NewCounter(CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Inc() + } +} + +func BenchmarkGaugeWithLabelValues(b *testing.B) { + m := NewGaugeVec( + GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Set(3.1415) + } +} + +func BenchmarkGaugeNoLabels(b *testing.B) { + m := NewGauge(GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Set(3.1415) + } +} + +func BenchmarkSummaryWithLabelValues(b *testing.B) { + m := NewSummaryVec( + SummaryOpts{ + Name: 
"benchmark_summary", + Help: "A summary to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkSummaryNoLabels(b *testing.B) { + m := NewSummary(SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/collector.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 0000000000..c04688009f --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet. +// +// The stock metrics provided by this package (like Gauge, Counter, Summary) are +// also Collectors (which only ever collect one metric, namely itself). An +// implementer of Collector may, however, collect multiple metrics in a +// coordinated fashion and/or create metrics on the fly. 
Examples for collectors +// already implemented in this library are the metric vectors (i.e. collection +// of multiple instances of the same Metric but with different label values) +// like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by Prometheus when collecting metrics. The + // implementation sends each collected metric via the provided channel + // and returns once the last metric has been sent. The descriptor of + // each sent metric is one of those returned by Describe. Returned + // metrics that share the same descriptor must differ in their variable + // label values. This method may be called concurrently and must + // therefore be implemented in a concurrency safe way. Blocking occurs + // at the expense of total performance of rendering all registered + // metrics. Ideally, Collector implementations support concurrent + // readers. + Collect(chan<- Metric) +} + +// SelfCollector implements Collector for a single Metric so that that the +// Metric collects itself. Add it as an anonymous field to a struct that +// implements Metric, and call Init with the Metric itself as an argument. 
+type SelfCollector struct { + self Metric +} + +// Init provides the SelfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *SelfCollector) Init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *SelfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *SelfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 0000000000..d715ee0bb5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,175 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "hash/fnv" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. 
+type Counter interface { + Metric + Collector + + // Set is used to set the Counter to an arbitrary value. It is only used + // if you have to transfer a value from an external counter into this + // Prometheus metrics. Do not use it for regular handling of a + // Prometheus counter (as it can be used to break the contract of + // monotonically increasing values). + Set(float64) + // Inc increments the counter by 1. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} + result.Init(result) // Init self-collection. + return result +} + +type counter struct { + value +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + c.value.Add(v) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of http requests, partitioned by response code and +// method). Create instances with NewCounterVec. +// +// CounterVec embeds MetricVec. See there for a full list of methods with +// detailed documentation. +type CounterVec struct { + MetricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. At least one label name must be +// provided. 
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + result := &counter{value: value{ + desc: desc, + valType: CounterValue, + labelPairs: makeLabelPairs(desc, lvs), + }} + result.Init(result) // Init self-collection. + return result + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Counter and not a +// Metric so that no type conversion is required. +func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Counter and not a Metric so that no +// type conversion is required. +func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *CounterVec) WithLabelValues(lvs ...string) Counter { + return m.MetricVec.WithLabelValues(lvs...).(Counter) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *CounterVec) With(labels Labels) Counter { + return m.MetricVec.With(labels).(Counter) +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter_test.go new file mode 100644 index 0000000000..67391a23aa --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/counter_test.go @@ -0,0 +1,58 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestCounterAdd(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }).(*counter) + counter.Inc() + if expected, got := 1., math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + counter.Add(42) + if expected, got := 43., math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + + if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got { + t.Errorf("Expected error %q, got %q.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := `label: label: counter: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func decreaseCounter(c *counter) (err error) { + defer func() { + if e := recover(); e != nil { + err = e.(error) + } + }() + c.Add(-1) + return nil +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/desc.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 0000000000..7e1f9e8537 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,199 @@ +package prometheus + +import ( + "bytes" + "errors" + "fmt" + "hash/fnv" + "regexp" + "sort" + "strings" + + 
"github.com/prometheus/client_golang/model" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +var ( + metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) + labelNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`) +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. 
+ variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occured during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName and help must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Opts documentation for the implications of +// constant labels. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if help == "" { + d.err = errors.New("empty help string") + return d + } + if !metricNameRE.MatchString(fqName) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... 
+ for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + h := fnv.New64a() + var b bytes.Buffer // To copy string contents into, avoiding []byte allocations. + for _, val := range labelValues { + b.Reset() + b.WriteString(val) + b.WriteByte(model.SeparatorByte) + h.Write(b.Bytes()) + } + d.id = h.Sum64() + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. 
+ h.Reset() + b.Reset() + b.WriteString(help) + b.WriteByte(model.SeparatorByte) + h.Write(b.Bytes()) + for _, labelName := range labelNames { + b.Reset() + b.WriteString(labelName) + b.WriteByte(model.SeparatorByte) + h.Write(b.Bytes()) + } + d.dimHash = h.Sum64() + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} + +func checkLabelName(l string) bool { + return labelNameRE.MatchString(l) && + !strings.HasPrefix(l, model.ReservedLabelPrefix) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/doc.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 0000000000..b98c135262 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,108 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus provides embeddable metric primitives for servers and +// standardized exposition of telemetry through a web services interface. +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. +// +// To expose metrics registered with the Prometheus registry, an HTTP server +// needs to know about the Prometheus handler. The usual endpoint is "/metrics". +// +// http.Handle("/metrics", prometheus.Handler()) +// +// As a starting point a very basic usage example: +// +// package main +// +// import ( +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }) +// ) +// +// func init() { +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.Inc() +// +// http.Handle("/metrics", prometheus.Handler()) +// http.ListenAndServe(":8080", nil) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter. +// It also exports some stats about the HTTP usage of the /metrics +// endpoint. (See the Handler function for more detail.) +// +// A more advanced metric type is the Summary. 
+// +// In addition to the fundamental metric types Gauge, Counter, and Summary, a +// very important part of the Prometheus data model is the partitioning of +// samples along dimensions called labels, which results in metric vectors. The +// fundamental types are GaugeVec, CounterVec, and SummaryVec. +// +// Those are all the parts needed for basic usage. Detailed documentation and +// examples are provided below. +// +// Everything else this package offers is essentially for "power users" only. A +// few pointers to "power user features": +// +// All the various ...Opts structs have a ConstLabels field for labels that +// never change their value (which is only useful under special circumstances, +// see documentation of the Opts type). +// +// The Untyped metric behaves like a Gauge, but signals the Prometheus server +// not to assume anything about its type. +// +// Functions to fine-tune how the metric registry works: EnableCollectChecks, +// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook. +// +// For custom metric collection, there are two entry points: Custom Metric +// implementations and custom Collector implementations. A Metric is the +// fundamental unit in the Prometheus data model: a sample at a point in time +// together with its meta-data (like its fully-qualified name and any number of +// pairs of label name and label value) that knows how to marshal itself into a +// data transfer object (aka DTO, implemented as a protocol buffer). A Collector +// gets registered with the Prometheus registry and manages the collection of +// one or more Metrics. Many parts of this package are building blocks for +// Metrics and Collectors. Desc is the metric descriptor, actually used by all +// metrics under the hood, and by Collectors to describe the Metrics to be +// collected, but only to be dealt with by users if they implement their own +// Metrics or Collectors. To create a Desc, the BuildFQName function will come +// in handy. 
Other useful components for Metric and Collector implementation +// include: LabelPairSorter to sort the DTO version of label pairs, +// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at +// collection time, MetricVec to bundle custom Metrics into a metric vector +// Collector, SelfCollector to make a custom Metric collect itself. +// +// A good example for a custom Collector is the ExpVarCollector included in this +// package, which exports variables exported via the "expvar" package as +// Prometheus metrics. +package prometheus diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go new file mode 100644 index 0000000000..6f3e215d47 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go @@ -0,0 +1,130 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +// ClusterManager is an example for a system that might have been built without +// Prometheus in mind. It models a central manager of jobs running in a +// cluster. To turn it into something that collects Prometheus metrics, we +// simply add the two methods required for the Collector interface. 
+// +// An additional challenge is that multiple instances of the ClusterManager are +// run within the same binary, each in charge of a different zone. We need to +// make use of ConstLabels to be able to register each ClusterManager instance +// with Prometheus. +type ClusterManager struct { + Zone string + OOMCount *prometheus.CounterVec + RAMUsage *prometheus.GaugeVec + mtx sync.Mutex // Protects OOMCount and RAMUsage. + // ... many more fields +} + +// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a +// real cluster manager would have to do. Since it may actually be really +// expensive, it must only be called once per collection. This implementation, +// obviously, only returns some made-up data. +func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( + oomCountByHost map[string]int, ramUsageByHost map[string]float64, +) { + // Just example fake data. + oomCountByHost = map[string]int{ + "foo.example.org": 42, + "bar.example.org": 2001, + } + ramUsageByHost = map[string]float64{ + "foo.example.org": 6.023e23, + "bar.example.org": 3.14, + } + return +} + +// Describe faces the interesting challenge that the two metric vectors that are +// used in this example are already Collectors themselves. However, thanks to +// the use of channels, it is really easy to "chain" Collectors. Here we simply +// call the Describe methods of the two metric vectors. +func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { + c.OOMCount.Describe(ch) + c.RAMUsage.Describe(ch) +} + +// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it +// sets the retrieved values in the two metric vectors and then sends all their +// metrics to the channel (again using a chaining technique as in the Describe +// method). Since Collect could be called multiple times concurrently, that part +// is protected by a mutex. 
+func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { + oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() + c.mtx.Lock() + defer c.mtx.Unlock() + for host, oomCount := range oomCountByHost { + c.OOMCount.WithLabelValues(host).Set(float64(oomCount)) + } + for host, ramUsage := range ramUsageByHost { + c.RAMUsage.WithLabelValues(host).Set(ramUsage) + } + c.OOMCount.Collect(ch) + c.RAMUsage.Collect(ch) + // All metrics in OOMCount and RAMUsage are sent to the channel now. We + // can safely reset the two metric vectors now, so that we can start + // fresh in the next Collect cycle. (Imagine a host disappears from the + // cluster. If we did not reset here, its Metric would stay in the + // metric vectors forever.) + c.OOMCount.Reset() + c.RAMUsage.Reset() +} + +// NewClusterManager creates the two metric vectors OOMCount and RAMUsage. Note +// that the zone is set as a ConstLabel. (It's different in each instance of the +// ClusterManager, but constant over the lifetime of an instance.) The reported +// values are partitioned by host, which is therefore a variable label. 
+func NewClusterManager(zone string) *ClusterManager { + return &ClusterManager{ + Zone: zone, + OOMCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "clustermanager", + Name: "oom_count", + Help: "number of OOM crashes", + ConstLabels: prometheus.Labels{"zone": zone}, + }, + []string{"host"}, + ), + RAMUsage: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Subsystem: "clustermanager", + Name: "ram_usage_bytes", + Help: "RAM usage as reported to the cluster manager", + ConstLabels: prometheus.Labels{"zone": zone}, + }, + []string{"host"}, + ), + } +} + +func ExampleCollector_clustermanager() { + workerDB := NewClusterManager("db") + workerCA := NewClusterManager("ca") + prometheus.MustRegister(workerDB) + prometheus.MustRegister(workerCA) + + // Since we are dealing with custom Collector implementations, it might + // be a good idea to enable the collect checks in the registry. + prometheus.EnableCollectChecks(true) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go new file mode 100644 index 0000000000..a84d072504 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go @@ -0,0 +1,87 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus_test + +import ( + "runtime" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + allocDesc = prometheus.NewDesc( + prometheus.BuildFQName("", "memstats", "alloc_bytes"), + "bytes allocated and still in use", + nil, nil, + ) + totalAllocDesc = prometheus.NewDesc( + prometheus.BuildFQName("", "memstats", "total_alloc_bytes"), + "bytes allocated (even if freed)", + nil, nil, + ) + numGCDesc = prometheus.NewDesc( + prometheus.BuildFQName("", "memstats", "num_gc_total"), + "number of GCs run", + nil, nil, + ) +) + +// MemStatsCollector is an example for a custom Collector that solves the +// problem of feeding into multiple metrics at the same time. The +// runtime.ReadMemStats should happen only once, and then the results need to be +// fed into a number of separate Metrics. In this example, only a few of the +// values reported by ReadMemStats are used. For each, there is a Desc provided +// as a var, so the MemStatsCollector itself needs nothing else in the +// struct. Only the methods need to be implemented. +type MemStatsCollector struct{} + +// Describe just sends the three Desc objects for the Metrics we intend to +// collect. +func (_ MemStatsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- allocDesc + ch <- totalAllocDesc + ch <- numGCDesc +} + +// Collect does the trick by calling ReadMemStats once and then constructing +// three different Metrics on the fly. 
+func (_ MemStatsCollector) Collect(ch chan<- prometheus.Metric) { + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + ch <- prometheus.MustNewConstMetric( + allocDesc, + prometheus.GaugeValue, + float64(ms.Alloc), + ) + ch <- prometheus.MustNewConstMetric( + totalAllocDesc, + prometheus.GaugeValue, + float64(ms.TotalAlloc), + ) + ch <- prometheus.MustNewConstMetric( + numGCDesc, + prometheus.CounterValue, + float64(ms.NumGC), + ) + // To avoid new allocations on each collection, you could also keep + // metric objects around and return the same objects each time, just + // with new values set. +} + +func ExampleCollector_memstats() { + prometheus.MustRegister(&MemStatsCollector{}) + // Since we are dealing with custom Collector implementations, it might + // be a good idea to enable the collect checks in the registry. + prometheus.EnableCollectChecks(true) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go new file mode 100644 index 0000000000..608deeb027 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus_test + +import ( + "runtime" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +func NewCallbackMetric(desc *prometheus.Desc, callback func() float64) *CallbackMetric { + result := &CallbackMetric{desc: desc, callback: callback} + result.Init(result) // Initialize the SelfCollector. + return result +} + +// TODO: Come up with a better example. + +// CallbackMetric is an example for a user-defined Metric that exports the +// result of a function call as a metric of type "untyped" without any +// labels. It uses SelfCollector to turn the Metric into a Collector so that it +// can be registered with Prometheus. +// +// Note that this example is pretty much academic as the prometheus package +// already provides an UntypedFunc type. +type CallbackMetric struct { + prometheus.SelfCollector + + desc *prometheus.Desc + callback func() float64 +} + +func (cm *CallbackMetric) Desc() *prometheus.Desc { + return cm.desc +} + +func (cm *CallbackMetric) Write(m *dto.Metric) error { + m.Untyped = &dto.Untyped{Value: proto.Float64(cm.callback())} + return nil +} + +func ExampleSelfCollector() { + m := NewCallbackMetric( + prometheus.NewDesc( + "runtime_goroutines_count", + "Total number of goroutines that currently exist.", + nil, nil, // No labels, these must be nil. 
+ ), + func() float64 { + return float64(runtime.NumGoroutine()) + }, + ) + prometheus.MustRegister(m) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/examples_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/examples_test.go new file mode 100644 index 0000000000..78b1b81f29 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/examples_test.go @@ -0,0 +1,454 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "flag" + "fmt" + "math" + "net/http" + "runtime" + "sort" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleGauge() { + opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed.", + }) + prometheus.MustRegister(opsQueued) + + // 10 operations queued by the goroutine managing incoming requests. + opsQueued.Add(10) + // A worker goroutine has picked up a waiting operation. + opsQueued.Dec() + // And once more... 
+ opsQueued.Dec() +} + +func ExampleGaugeVec() { + binaryVersion := flag.String("binary_version", "debug", "Version of the binary: debug, canary, production.") + flag.Parse() + + opsQueued := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", + ConstLabels: prometheus.Labels{"binary_version": *binaryVersion}, + }, + []string{ + // Which user has requested the operation? + "user", + // Of what type is the operation? + "type", + }, + ) + prometheus.MustRegister(opsQueued) + + // Increase a value using compact (but order-sensitive!) WithLabelValues(). + opsQueued.WithLabelValues("bob", "put").Add(4) + // Increase a value with a map using WithLabels. More verbose, but order + // doesn't matter anymore. + opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc() +} + +func ExampleGaugeFunc() { + if err := prometheus.Register(prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Subsystem: "runtime", + Name: "goroutines_count", + Help: "Number of goroutines that currently exist.", + }, + func() float64 { return float64(runtime.NumGoroutine()) }, + )); err == nil { + fmt.Println("GaugeFunc 'goroutines_count' registered.") + } + // Note that the count of goroutines is a gauge (and not a counter) as + // it can go up and down. + + // Output: + // GaugeFunc 'goroutines_count' registered. +} + +func ExampleCounter() { + pushCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", // Note: No help string... + }) + err := prometheus.Register(pushCounter) // ... so this will return an error. + if err != nil { + fmt.Println("Push counter couldn't be registered, no counting will happen:", err) + return + } + + // Try it once more, this time with a help string. 
+ pushCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", + Help: "Number of pushes to external repository.", + }) + err = prometheus.Register(pushCounter) + if err != nil { + fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err) + return + } + + pushComplete := make(chan struct{}) + // TODO: Start a goroutine that performs repository pushes and reports + // each completion via the channel. + for _ = range pushComplete { + pushCounter.Inc() + } + // Output: + // Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string +} + +func ExampleCounterVec() { + binaryVersion := flag.String("environment", "test", "Execution environment: test, staging, production.") + flag.Parse() + + httpReqs := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "How many HTTP requests processed, partitioned by status code and http method.", + ConstLabels: prometheus.Labels{"env": *binaryVersion}, + }, + []string{"code", "method"}, + ) + prometheus.MustRegister(httpReqs) + + httpReqs.WithLabelValues("404", "POST").Add(42) + + // If you have to access the same set of labels very frequently, it + // might be good to retrieve the metric only once and keep a handle to + // it. But beware of deletion of that metric, see below! + m := httpReqs.WithLabelValues("200", "GET") + for i := 0; i < 1000000; i++ { + m.Inc() + } + // Delete a metric from the vector. If you have previously kept a handle + // to that metric (as above), future updates via that handle will go + // unseen (even if you re-create a metric with the same label set + // later). + httpReqs.DeleteLabelValues("200", "GET") + // Same thing with the more verbose Labels syntax. 
+ httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"}) +} + +func ExampleInstrumentHandler() { + // Handle the "/doc" endpoint with the standard http.FileServer handler. + // By wrapping the handler with InstrumentHandler, request count, + // request and response sizes, and request latency are automatically + // exported to Prometheus, partitioned by HTTP status code and method + // and by the handler name (here "fileserver"). + http.Handle("/doc", prometheus.InstrumentHandler( + "fileserver", http.FileServer(http.Dir("/usr/share/doc")), + )) + // The Prometheus handler still has to be registered to handle the + // "/metrics" endpoint. The handler returned by prometheus.Handler() is + // already instrumented - with "prometheus" as the handler name. In this + // example, we want the handler name to be "metrics", so we instrument + // the uninstrumented Prometheus handler ourselves. + http.Handle("/metrics", prometheus.InstrumentHandler( + "metrics", prometheus.UninstrumentedHandler(), + )) +} + +func ExampleLabelPairSorter() { + labelPairs := []*dto.LabelPair{ + &dto.LabelPair{Name: proto.String("status"), Value: proto.String("404")}, + &dto.LabelPair{Name: proto.String("method"), Value: proto.String("get")}, + } + + sort.Sort(prometheus.LabelPairSorter(labelPairs)) + + fmt.Println(labelPairs) + // Output: + // [name:"method" value:"get" name:"status" value:"404" ] +} + +func ExampleRegister() { + // Imagine you have a worker pool and want to count the tasks completed. + taskCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }) + // This will register fine. + if err := prometheus.Register(taskCounter); err != nil { + fmt.Println(err) + } else { + fmt.Println("taskCounter registered.") + } + // Don't forget to tell the HTTP server about the Prometheus handler. + // (In a real program, you still need to start the http server...) 
+ http.Handle("/metrics", prometheus.Handler()) + + // Now you can start workers and give every one of them a pointer to + // taskCounter and let it increment it whenever it completes a task. + taskCounter.Inc() // This has to happen somewhere in the worker code. + + // But wait, you want to see how individual workers perform. So you need + // a vector of counters, with one element for each worker. + taskCounterVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + + // Registering will fail because we already have a metric of that name. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + + // To fix, first unregister the old taskCounter. + if prometheus.Unregister(taskCounter) { + fmt.Println("taskCounter unregistered.") + } + + // Try registering taskCounterVec again. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Bummer! Still doesn't work. + + // Prometheus will not allow you to ever export metrics with + // inconsistent help strings or label names. After unregistering, the + // unregistered metrics will cease to show up in the /metrics http + // response, but the registry still remembers that those metrics had + // been exported before. For this example, we will now choose a + // different name. (In a real program, you would obviously not export + // the obsolete metric in the first place.) 
+ taskCounterVec = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_by_id", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Finally it worked! + + // The workers have to tell taskCounterVec their id to increment the + // right element in the metric vector. + taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42. + + // Each worker could also keep a reference to their own counter element + // around. Pick the counter at initialization time of the worker. + myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code. + myCounter.Inc() // Somewhere in the code of that worker. + + // Note that something like WithLabelValues("42", "spurious arg") would + // panic (because you have provided too many label values). If you want + // to get an error instead, use GetMetricWithLabelValues(...) instead. + notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg") + if err != nil { + fmt.Println("Worker initialization failed:", err) + } + if notMyCounter == nil { + fmt.Println("notMyCounter is nil.") + } + + // A different (and somewhat tricky) approach is to use + // ConstLabels. ConstLabels are pairs of label names and label values + // that never change. You might ask what those labels are good for (and + // rightfully so - if they never change, they could as well be part of + // the metric name). There are essentially two use-cases: The first is + // if labels are constant throughout the lifetime of a binary execution, + // but they vary over time or between different instances of a running + // binary. 
The second is what we have here: Each worker creates and + // registers an own Counter instance where the only difference is in the + // value of the ConstLabels. Those Counters can all be registered + // because the different ConstLabel values guarantee that each worker + // will increment a different Counter metric. + counterOpts := prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks", + Help: "Total number of tasks completed.", + ConstLabels: prometheus.Labels{"worker_id": "42"}, + } + taskCounterForWorker42 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker42); err != nil { + fmt.Println("taskCounterVForWorker42 not registered:", err) + } else { + fmt.Println("taskCounterForWorker42 registered.") + } + // Obviously, in real code, taskCounterForWorker42 would be a member + // variable of a worker struct, and the "42" would be retrieved with a + // GetId() method or something. The Counter would be created and + // registered in the initialization code of the worker. + + // For the creation of the next Counter, we can recycle + // counterOpts. Just change the ConstLabels. + counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"} + taskCounterForWorker2001 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker2001); err != nil { + fmt.Println("taskCounterVForWorker2001 not registered:", err) + } else { + fmt.Println("taskCounterForWorker2001 registered.") + } + + taskCounterForWorker2001.Inc() + taskCounterForWorker42.Inc() + taskCounterForWorker2001.Inc() + + // Yet another approach would be to turn the workers themselves into + // Collectors and register them. See the Collector example for details. + + // Output: + // taskCounter registered. 
+ // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounter unregistered. + // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounterVec registered. + // Worker initialization failed: inconsistent label cardinality + // notMyCounter is nil. + // taskCounterForWorker42 registered. + // taskCounterForWorker2001 registered. +} + +func ExampleSummary() { + temps := prometheus.NewSummary(prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > +} + +func ExampleSummaryVec() { + temps := prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. 
+ }, + []string{"species"}, + ) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10) + } + + // Just for demonstration, let's check the state of the summary vector + // by (ab)using its Collect method and the Write method of its elements + // (which is usually only used by Prometheus internally - code like the + // following will never appear in your own code). + metricChan := make(chan prometheus.Metric) + go func() { + defer close(metricChan) + temps.Collect(metricChan) + }() + + metricStrings := []string{} + for metric := range metricChan { + dtoMetric := &dto.Metric{} + metric.Write(dtoMetric) + metricStrings = append(metricStrings, proto.MarshalTextString(dtoMetric)) + } + sort.Strings(metricStrings) // For reproducible print order. + fmt.Println(metricStrings) + + // Output: + // [label: < + // name: "species" + // value: "lithobates-catesbeianus" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 31956.100000000017 + // quantile: < + // quantile: 0.5 + // value: 32.4 + // > + // quantile: < + // quantile: 0.9 + // value: 41.4 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // label: < + // name: "species" + // value: "litoria-caerulea" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // ] +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar.go new file mode 100644 index 0000000000..0f7630d53f --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "encoding/json" + "expvar" +) + +// ExpvarCollector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the ExpvarCollector is inherently +// slow. Thus, the ExpvarCollector is probably great for experiments and +// prototying, but you should seriously consider a more direct implementation of +// Prometheus metrics for monitoring production systems. +// +// Use NewExpvarCollector to create new instances. +type ExpvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated ExpvarCollector that still has +// to be registered with the Prometheus registry. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. 
+// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector { + return &ExpvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. +func (e *ExpvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *ExpvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) 
+ case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) + } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar_test.go new file mode 100644 index 0000000000..5d3128faed --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/expvar_test.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "expvar" + "fmt" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleExpvarCollector() { + expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{ + "memstats": prometheus.NewDesc( + "expvar_memstats", + "All numeric memstats as one metric family. Not a good role-model, actually... 
;-)", + []string{"type"}, nil, + ), + "lone-int": prometheus.NewDesc( + "expvar_lone_int", + "Just an expvar int as an example.", + nil, nil, + ), + "http-request-map": prometheus.NewDesc( + "expvar_http_request_total", + "How many http requests processed, partitioned by status code and http method.", + []string{"code", "method"}, nil, + ), + }) + prometheus.MustRegister(expvarCollector) + + // The Prometheus part is done here. But to show that this example is + // doing anything, we have to manually export something via expvar. In + // real-life use-cases, some library would already have exported via + // expvar what we want to re-export as Prometheus metrics. + expvar.NewInt("lone-int").Set(42) + expvarMap := expvar.NewMap("http-request-map") + var ( + expvarMap1, expvarMap2 expvar.Map + expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int + ) + expvarMap1.Init() + expvarMap2.Init() + expvarInt11.Set(3) + expvarInt12.Set(13) + expvarInt21.Set(11) + expvarInt22.Set(212) + expvarMap1.Set("POST", &expvarInt11) + expvarMap1.Set("GET", &expvarInt12) + expvarMap2.Set("POST", &expvarInt21) + expvarMap2.Set("GET", &expvarInt22) + expvarMap.Set("404", &expvarMap1) + expvarMap.Set("200", &expvarMap2) + // Results in the following expvar map: + // "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}} + + // Let's see what the scrape would yield, but exclude the memstats metrics. 
+ metricStrings := []string{} + metric := dto.Metric{} + metricChan := make(chan prometheus.Metric) + go func() { + expvarCollector.Collect(metricChan) + close(metricChan) + }() + for m := range metricChan { + if strings.Index(m.Desc().String(), "expvar_memstats") == -1 { + metric.Reset() + m.Write(&metric) + metricStrings = append(metricStrings, metric.String()) + } + } + sort.Strings(metricStrings) + for _, s := range metricStrings { + fmt.Println(strings.TrimRight(s, " ")) + } + // Output: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // untyped: +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 0000000000..ba8a402caf --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,147 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "hash/fnv" + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. 
+type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. + Inc() + // Dec decrements the Gauge by 1. + Dec() + // Add adds the given value to the Gauge. (The value can be + // negative, resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +func NewGauge(opts GaugeOpts) Gauge { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, 0) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + MetricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newValue(desc, GaugeValue, 0, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Gauge and not a +// Metric so that no type conversion is required. 
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Gauge and not a Metric so that no +// type conversion is required. +func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { + return m.MetricVec.WithLabelValues(lvs...).(Gauge) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *GaugeVec) With(labels Labels) Gauge { + return m.MetricVec.With(labels).(Gauge) +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. 
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge_test.go new file mode 100644 index 0000000000..48cab46367 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/gauge_test.go @@ -0,0 +1,182 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "math/rand" + "sync" + "testing" + "testing/quick" + + dto "github.com/prometheus/client_model/go" +) + +func listenGaugeStream(vals, result chan float64, done chan struct{}) { + var sum float64 +outer: + for { + select { + case <-done: + close(vals) + for v := range vals { + sum += v + } + break outer + case v := <-vals: + sum += v + } + } + result <- sum + close(result) +} + +func TestGaugeConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStream := make(chan float64, mutations*concLevel) + result := make(chan float64) + done := make(chan struct{}) + + go listenGaugeStream(sStream, result, done) + go func() { + end.Wait() + close(done) + }() + + gge := NewGauge(GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sStream <- v + gge.Add(v) + } + end.Done() + }(vals) + } + start.Done() + + if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. 
%f, got %f", expected, got) + return false + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeVecConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + vecLength := int(n%5 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStreams := make([]chan float64, vecLength) + results := make([]chan float64, vecLength) + done := make(chan struct{}) + + for i := 0; i < vecLength; i++ { + sStreams[i] = make(chan float64, mutations*concLevel) + results[i] = make(chan float64) + go listenGaugeStream(sStreams[i], results[i], done) + } + + go func() { + end.Wait() + close(done) + }() + + gge := NewGaugeVec( + GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }, + []string{"label"}, + ) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + pick := make([]int, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + pick[j] = rand.Intn(vecLength) + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sStreams[pick[i]] <- v + gge.WithLabelValues(string('A' + pick[i])).Add(v) + } + end.Done() + }(vals) + } + start.Done() + + for i := range sStreams { + if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. 
%f, got %f", expected, got) + return false + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeFunc(t *testing.T) { + gf := NewGaugeFunc( + GaugeOpts{ + Name: "test_name", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }, + func() float64 { return 3.1415 }, + ) + + if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } + + m := &dto.Metric{} + gf.Write(m) + + if expected, got := `label: label: gauge: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 0000000000..d7b7a20a12 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,31 @@ +package prometheus + +import ( + "runtime" +) + +type goCollector struct { + goroutines Gauge +} + +// NewGoCollector returns a collector which exports metrics about the current +// go process. +func NewGoCollector() *goCollector { + return &goCollector{ + goroutines: NewGauge(GaugeOpts{ + Name: "process_goroutines", + Help: "Number of goroutines that currently exist.", + }), + } +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutines.Desc() +} + +// Collect returns the current state of all metrics of the collector. 
+func (c *goCollector) Collect(ch chan<- Metric) { + c.goroutines.Set(float64(runtime.NumGoroutine())) + ch <- c.goroutines +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go new file mode 100644 index 0000000000..b0582d1b98 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go @@ -0,0 +1,58 @@ +package prometheus + +import ( + "reflect" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func TestGoCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + old = -1 + ) + defer close(closec) + + go func() { + c.Collect(ch) + go func(c <-chan struct{}) { + <-c + }(closec) + <-waitc + c.Collect(ch) + }() + + for { + select { + case metric := <-ch: + switch m := metric.(type) { + // Attention, this also catches Counter... + case Gauge: + pb := &dto.Metric{} + m.Write(pb) + + if old == -1 { + old = int(pb.GetGauge().GetValue()) + close(waitc) + continue + } + + if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 { + // TODO: This is flaky in highly concurrent situations. 
+ t.Errorf("want 1 new goroutine, got %d", diff) + } + + return + default: + t.Errorf("want type Gauge, got %s", reflect.TypeOf(metric)) + } + case <-time.After(1 * time.Second): + t.Fatalf("expected collect timed out") + } + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http.go new file mode 100644 index 0000000000..818c90fb65 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/http.go @@ -0,0 +1,322 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "net/http" + "strconv" + "strings" + "time" +) + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + +// InstrumentHandler wraps the given HTTP handler for instrumentation. 
It +// registers four metric collectors (if not already done) and reports http +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler but provides more +// flexibility (at the cost of a more complex call syntax). As +// InstrumentHandler, this function registers four metric collectors, but it +// uses the provided SummaryOpts to create them. However, the fields "Name" and +// "Help" in the SummaryOpts are ignored. "Name" is replaced by +// "requests_total", "request_duration_microseconds", "request_size_bytes", and +// "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). 
+// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides +// more flexibility (at the cost of a more complex call syntax). See +// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used. +func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." 
+ resSz := NewSummary(opts) + + regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) + regReqDur := MustRegisterOrGet(reqDur).(Summary) + regReqSz := MustRegisterOrGet(reqSz).(Summary) + regResSz := MustRegisterOrGet(resSz).(Summary) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := make(chan int) + urlLen := 0 + if r.URL != nil { + urlLen = len(r.URL.String()) + } + go computeApproximateRequestSize(r, out, urlLen) + handlerFunc(delegate, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + regReqCnt.WithLabelValues(method, code).Inc() + regReqDur.Observe(elapsed) + regResSz.Observe(float64(delegate.written)) + regReqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request, out chan int, s int) { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
// responseWriterDelegator wraps an http.ResponseWriter to record the status
// code and the number of body bytes written, for instrumentation purposes.
// The handler and method fields carry the label values used by the caller.
type responseWriterDelegator struct {
	http.ResponseWriter

	handler, method string
	status          int
	written         int
	wroteHeader     bool
}

// WriteHeader records the status code before delegating to the wrapped
// ResponseWriter.
func (r *responseWriterDelegator) WriteHeader(code int) {
	r.status = code
	r.wroteHeader = true
	r.ResponseWriter.WriteHeader(code)
}

// Write records an implicit 200 on the first write (mirroring net/http
// semantics) and accumulates the number of body bytes written.
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
	if !r.wroteHeader {
		r.WriteHeader(http.StatusOK)
	}
	n, err := r.ResponseWriter.Write(b)
	r.written += n
	return n, err
}

// sanitizeMethod lowercases an HTTP method name for use as a label value.
// The switch over the common methods avoids the allocation that
// strings.ToLower would incur on every request; unknown methods fall back
// to strings.ToLower, so the result is always the lowercase form.
func sanitizeMethod(m string) string {
	switch m {
	case "GET", "get":
		return "get"
	case "PUT", "put":
		return "put"
	case "HEAD", "head":
		return "head"
	case "POST", "post":
		return "post"
	case "DELETE", "delete":
		return "delete"
	case "CONNECT", "connect":
		return "connect"
	case "OPTIONS", "options":
		return "options"
	case "NOTIFY", "notify":
		return "notify"
	default:
		return strings.ToLower(m)
	}
}

// sanitizeCode converts an HTTP status code to its decimal string for use as
// a label value. The switch returns constant strings for well-known codes to
// avoid the allocation of strconv.Itoa on the hot request path; any other
// code falls through to strconv.Itoa, so the result is always the decimal
// representation of s. Codes are grouped by class in ascending order.
// (Fixed: 303 See Other was missing from the 3xx group, and 428/429/431/511
// were appended out of order after the 5xx group.)
func sanitizeCode(s int) string {
	switch s {
	case 100:
		return "100"
	case 101:
		return "101"

	case 200:
		return "200"
	case 201:
		return "201"
	case 202:
		return "202"
	case 203:
		return "203"
	case 204:
		return "204"
	case 205:
		return "205"
	case 206:
		return "206"

	case 300:
		return "300"
	case 301:
		return "301"
	case 302:
		return "302"
	case 303:
		return "303"
	case 304:
		return "304"
	case 305:
		return "305"
	case 307:
		return "307"

	case 400:
		return "400"
	case 401:
		return "401"
	case 402:
		return "402"
	case 403:
		return "403"
	case 404:
		return "404"
	case 405:
		return "405"
	case 406:
		return "406"
	case 407:
		return "407"
	case 408:
		return "408"
	case 409:
		return "409"
	case 410:
		return "410"
	case 411:
		return "411"
	case 412:
		return "412"
	case 413:
		return "413"
	case 414:
		return "414"
	case 415:
		return "415"
	case 416:
		return "416"
	case 417:
		return "417"
	case 418:
		return "418"
	case 428:
		return "428"
	case 429:
		return "429"
	case 431:
		return "431"

	case 500:
		return "500"
	case 501:
		return "501"
	case 502:
		return "502"
	case 503:
		return "503"
	case 504:
		return "504"
	case 505:
		return "505"
	case 511:
		return "511"

	default:
		return strconv.Itoa(s)
	}
}

// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus

import (
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	dto "github.com/prometheus/client_model/go"
)

// respBody is a minimal http.Handler that replies with status 418 (teapot)
// and its own string value as the body; it is the handler under test.
type respBody string

func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusTeapot)
	w.Write([]byte(b))
}

// TestInstrumentHandler wraps respBody in InstrumentHandler, issues one GET,
// and checks that the request counter and duration summary registered by the
// instrumentation observed exactly that request with the expected labels.
func TestInstrumentHandler(t *testing.T) {
	// Restore the package-level clock when the test finishes. (The type
	// assertion is a no-op since n is already declared as nower.)
	defer func(n nower) {
		now = n.(nower)
	}(now)

	// Fake the clock so the observed latency is a deterministic 30s.
	instant := time.Now()
	end := instant.Add(30 * time.Second)
	now = nowSeries(instant, end)
	respBody := respBody("Howdy there!")

	hndlr := InstrumentHandler("test-handler", respBody)

	// Re-create the metrics InstrumentHandler registers so the test can
	// read them back; MustRegisterOrGet returns the existing collectors.
	opts := SummaryOpts{
		Subsystem:   "http",
		ConstLabels: Labels{"handler": "test-handler"},
	}

	reqCnt := MustRegisterOrGet(NewCounterVec(
		CounterOpts{
			Namespace:   opts.Namespace,
			Subsystem:   opts.Subsystem,
			Name:        "requests_total",
			Help:        "Total number of HTTP requests made.",
			ConstLabels: opts.ConstLabels,
		},
		instLabels,
	)).(*CounterVec)

	opts.Name = "request_duration_microseconds"
	opts.Help = "The HTTP request latencies in microseconds."
	reqDur := MustRegisterOrGet(NewSummary(opts)).(Summary)

	opts.Name = "request_size_bytes"
	opts.Help = "The HTTP request sizes in bytes."
	MustRegisterOrGet(NewSummary(opts))

	opts.Name = "response_size_bytes"
	opts.Help = "The HTTP response sizes in bytes."
	MustRegisterOrGet(NewSummary(opts))

	// Clear any counts left over from other tests sharing the registry.
	reqCnt.Reset()

	resp := httptest.NewRecorder()
	req := &http.Request{
		Method: "GET",
	}

	hndlr.ServeHTTP(resp, req)

	if resp.Code != http.StatusTeapot {
		t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code)
	}
	if string(resp.Body.Bytes()) != "Howdy there!" {
		t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes()))
	}

	// The duration summary must carry the handler label and exactly one
	// observation (the single request above).
	out := &dto.Metric{}
	reqDur.Write(out)
	if want, got := "test-handler", out.Label[0].GetValue(); want != got {
		t.Errorf("want label value %q in reqDur, got %q", want, got)
	}
	if want, got := uint64(1), out.Summary.GetSampleCount(); want != got {
		t.Errorf("want sample count %d in reqDur, got %d", want, got)
	}

	// The counter vector must have exactly one child: method "get",
	// status code "418", plus the const handler label.
	out.Reset()
	if want, got := 1, len(reqCnt.children); want != got {
		t.Errorf("want %d children in reqCnt, got %d", want, got)
	}
	cnt, err := reqCnt.GetMetricWithLabelValues("get", "418")
	if err != nil {
		t.Fatal(err)
	}
	cnt.Write(out)
	// Labels are sorted lexicographically: code, handler, method.
	if want, got := "418", out.Label[0].GetValue(); want != got {
		t.Errorf("want label value %q in reqCnt, got %q", want, got)
	}
	if want, got := "test-handler", out.Label[1].GetValue(); want != got {
		t.Errorf("want label value %q in reqCnt, got %q", want, got)
	}
	if want, got := "get", out.Label[2].GetValue(); want != got {
		t.Errorf("want label value %q in reqCnt, got %q", want, got)
	}
	if out.Counter == nil {
		t.Fatal("expected non-nil counter in reqCnt")
	}
	if want, got := 1., out.Counter.GetValue(); want != got {
		t.Errorf("want reqCnt of %f, got %f", want, got)
	}
}
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"strings"

	dto "github.com/prometheus/client_model/go"
)

// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementers of Metric in this package include Gauge, Counter,
// Untyped, and Summary. Users can implement their own Metric types, but that
// should be rarely needed. See the example for SelfCollector, which is also an
// example for a user-implemented Metric.
type Metric interface {
	// Desc returns the descriptor for the Metric. This method idempotently
	// returns the same descriptor throughout the lifetime of the
	// Metric. The returned descriptor is immutable by contract. A Metric
	// unable to describe itself must return an invalid descriptor (created
	// with NewInvalidDesc).
	Desc() *Desc
	// Write encodes the Metric into a "Metric" Protocol Buffer data
	// transmission object.
	//
	// Implementers of custom Metric types must observe concurrency safety
	// as reads of this metric may occur at any time, and any blocking
	// occurs at the expense of total performance of rendering all
	// registered metrics. Ideally Metric implementations should support
	// concurrent readers.
	//
	// The Prometheus client library attempts to minimize memory allocations
	// and will provide a pre-existing reset dto.Metric pointer. Prometheus
	// may recycle the dto.Metric proto message, so Metric implementations
	// should just populate the provided dto.Metric and then should not keep
	// any reference to it.
	//
	// While populating dto.Metric, labels must be sorted lexicographically.
	// (Implementers may find LabelPairSorter useful for that.)
	Write(*dto.Metric) error
}

// Opts bundles the options for creating most Metric types. Each metric
// implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises.)
//
// It is mandatory to set Name and Help to a non-empty string. All other fields
// are optional and can safely be left at their zero value.
type Opts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Metric (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the metric must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this metric. Mandatory!
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// Note that in most cases, labels have a value that varies during the
	// lifetime of a process. Those labels are usually managed with a metric
	// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
	// serve only special purposes. One is for the special case where the
	// value of a label does not change during the lifetime of a process,
	// e.g. if the revision of the running binary is put into a
	// label. Another, more advanced purpose is if more than one Collector
	// needs to collect Metrics with the same fully-qualified name. In that
	// case, those Metrics must differ in the values of their
	// ConstLabels. See the Collector examples.
	//
	// If the value of a label never changes (not even between binaries),
	// that label most likely should not be a label at all (but part of the
	// metric name).
	ConstLabels Labels
}

// BuildFQName joins the given three name components by "_". Empty name
// components are ignored. If the name parameter itself is empty, an empty
// string is returned, no matter what. Metric implementations included in this
// library use this function internally to generate the fully-qualified metric
// name from the name component in their Opts. Users of the library will only
// need this function if they implement their own Metric or instantiate a Desc
// (with NewDesc) directly.
func BuildFQName(namespace, subsystem, name string) string {
	if name == "" {
		return ""
	}
	switch {
	case namespace != "" && subsystem != "":
		return strings.Join([]string{namespace, subsystem, name}, "_")
	case namespace != "":
		return strings.Join([]string{namespace, name}, "_")
	case subsystem != "":
		return strings.Join([]string{subsystem, name}, "_")
	}
	return name
}

// LabelPairSorter implements sort.Interface. It is used to sort a slice of
// dto.LabelPair pointers. This is useful for implementing the Write method of
// custom metrics.
type LabelPairSorter []*dto.LabelPair

func (s LabelPairSorter) Len() int {
	return len(s)
}

func (s LabelPairSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s LabelPairSorter) Less(i, j int) bool {
	return s[i].GetName() < s[j].GetName()
}

// hashSorter sorts a slice of uint64 hash values in ascending order.
type hashSorter []uint64

func (s hashSorter) Len() int {
	return len(s)
}

func (s hashSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s hashSorter) Less(i, j int) bool {
	return s[i] < s[j]
}

// invalidMetric is the Metric implementation returned by NewInvalidMetric;
// its Write method always fails with the stored error.
type invalidMetric struct {
	desc *Desc
	err  error
}

// NewInvalidMetric returns a metric whose Write method always returns the
// provided error.
// It is useful if a Collector finds itself unable to collect
// a metric and wishes to report an error to the registry.
func NewInvalidMetric(desc *Desc, err error) Metric {
	return &invalidMetric{desc, err}
}

// Desc returns the descriptor the invalid metric was created with.
func (m *invalidMetric) Desc() *Desc { return m.desc }

// Write always fails with the stored error; the metric carries no value.
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }

// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import "testing"

// TestBuildFQName exercises BuildFQName over all combinations of empty and
// non-empty name components, including an empty name (always yields "").
func TestBuildFQName(t *testing.T) {
	scenarios := []struct{ namespace, subsystem, name, result string }{
		{"a", "b", "c", "a_b_c"},
		{"", "b", "c", "b_c"},
		{"a", "", "c", "a_c"},
		{"", "", "c", "c"},
		{"a", "b", "", ""},
		{"a", "", "", ""},
		{"", "b", "", ""},
		{" ", "", "", ""},
	}

	for i, s := range scenarios {
		if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got {
			t.Errorf("%d. want %s, got %s", i, want, got)
		}
	}
}

// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

// processCollector exports process-level metrics (CPU time, memory, file
// descriptors, start time) for the process identified by pidFn.
type processCollector struct {
	// pid is never assigned by the visible constructors; pidFn appears to
	// be the source of truth. NOTE(review): confirm whether pid is dead.
	pid       int
	collectFn func(chan<- Metric)
	pidFn     func() (int, error)
	cpuTotal  Counter
	openFDs, maxFDs Gauge
	vsize, rss      Gauge
	startTime       Gauge
}

// NewProcessCollector returns a collector which exports the current state of
// process metrics including cpu, memory and file descriptor usage as well as
// the process start time for the given process id under the given namespace.
func NewProcessCollector(pid int, namespace string) *processCollector {
	return NewProcessCollectorPIDFn(
		func() (int, error) { return pid, nil },
		namespace,
	)
}

// NewProcessCollectorPIDFn returns a collector which exports the current state
// of process metrics including cpu, memory and file descriptor usage as well
// as the process start time under the given namespace. The given pidFn is
// called on each collect and is used to determine the process to export
// metrics for.
+func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) *processCollector { + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewCounter(CounterOpts{ + Namespace: namespace, + Name: "process_cpu_seconds_total", + Help: "Total user and system CPU time spent in seconds.", + }), + openFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_open_fds", + Help: "Number of open file descriptors.", + }), + maxFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_max_fds", + Help: "Maximum number of open file descriptors.", + }), + vsize: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_virtual_memory_bytes", + Help: "Virtual memory size in bytes.", + }), + rss: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_resident_memory_bytes", + Help: "Resident memory size in bytes.", + }), + startTime: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + }), + } + + // Set up process metric collection if supported by the runtime. + if processCollectSupported() { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal.Desc() + ch <- c.openFDs.Desc() + ch <- c.maxFDs.Desc() + ch <- c.vsize.Desc() + ch <- c.rss.Desc() + ch <- c.startTime.Desc() +} + +// Collect returns the current state of all metrics of the collector. 
// Collect returns the current state of all metrics of the collector.
func (c *processCollector) Collect(ch chan<- Metric) {
	c.collectFn(ch)
}

// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Build-constrained to platforms where procfs is expected to be present.
// NOTE(review): the cgo requirement looks unrelated to procfs access —
// confirm why it is part of the constraint.
// +build linux,cgo plan9,cgo solaris,cgo

package prometheus

import "github.com/prometheus/procfs"

// processCollectSupported reports whether procfs is actually usable at
// runtime (the stat file can be read), not just compiled in.
func processCollectSupported() bool {
	if _, err := procfs.NewStat(); err == nil {
		return true
	}
	return false
}

// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
// client allows users to configure the error behavior.
// processCollect reads the current process state from procfs and sends the
// refreshed metrics on ch. Per the TODO above, every error is deliberately
// swallowed: a metric whose source errored is simply omitted from this
// collection rather than reported.
func (c *processCollector) processCollect(ch chan<- Metric) {
	pid, err := c.pidFn()
	if err != nil {
		return // Cannot determine the pid; collect nothing.
	}

	p, err := procfs.NewProc(pid)
	if err != nil {
		return // Proc entry unavailable; collect nothing.
	}

	if stat, err := p.NewStat(); err == nil {
		c.cpuTotal.Set(stat.CPUTime())
		ch <- c.cpuTotal
		c.vsize.Set(float64(stat.VirtualMemory()))
		ch <- c.vsize
		c.rss.Set(float64(stat.ResidentMemory()))
		ch <- c.rss

		if startTime, err := stat.StartTime(); err == nil {
			c.startTime.Set(startTime)
			ch <- c.startTime
		}
	}

	if fds, err := p.FileDescriptorsLen(); err == nil {
		c.openFDs.Set(float64(fds))
		ch <- c.openFDs
	}

	if limits, err := p.NewLimits(); err == nil {
		c.maxFDs.Set(float64(limits.OpenFiles))
		ch <- c.maxFDs
	}
}

// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Fallback for platforms without procfs support (or without cgo); the
// collector stays registered but collects nothing.
// +build !linux,!plan9,!solaris !cgo

package prometheus

// processCollectSupported always reports false on unsupported platforms.
func processCollectSupported() bool {
	return false
}

// processCollect is unreachable here: collectFn is only pointed at it when
// processCollectSupported() returned true, which it never does on this
// build. The panic guards against a future wiring mistake.
func (c *processCollector) processCollect(ch chan<- Metric) {
	panic("unreachable")
}

// NOTE(review): unlike its sibling files, this test file is missing the
// Apache license header — confirm whether it should be added upstream.
package prometheus

import (
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"os"
	"regexp"
	"testing"

	"github.com/prometheus/procfs"
)

// TestProcessCollector registers two process collectors (one unprefixed, one
// namespaced "foobar"), scrapes them over HTTP, and checks that each expected
// metric line appears in the exposition with a plausible value.
func TestProcessCollector(t *testing.T) {
	if _, err := procfs.Self(); err != nil {
		t.Skipf("skipping TestProcessCollector, procfs not available: %s", err)
	}

	registry := newRegistry()
	registry.Register(NewProcessCollector(os.Getpid(), ""))
	registry.Register(NewProcessCollectorPIDFn(
		func() (int, error) { return os.Getpid(), nil }, "foobar"))

	s := httptest.NewServer(InstrumentHandler("prometheus", registry))
	defer s.Close()
	r, err := http.Get(s.URL)
	if err != nil {
		t.Fatal(err)
	}
	defer r.Body.Close()
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		t.Fatal(err)
	}

	for _, re := range []*regexp.Regexp{
		regexp.MustCompile("process_cpu_seconds_total [0-9]"),
		regexp.MustCompile("process_max_fds [0-9]{2,}"),
		regexp.MustCompile("process_open_fds [1-9]"),
		regexp.MustCompile("process_virtual_memory_bytes [1-9]"),
		regexp.MustCompile("process_resident_memory_bytes [1-9]"),
		regexp.MustCompile("process_start_time_seconds [0-9.]{10,}"),
		regexp.MustCompile("foobar_process_cpu_seconds_total [0-9]"),
		regexp.MustCompile("foobar_process_max_fds [0-9]{2,}"),
		regexp.MustCompile("foobar_process_open_fds [1-9]"),
		regexp.MustCompile("foobar_process_virtual_memory_bytes [1-9]"),
		regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"),
		regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"),
	} {
		if !re.Match(body) {
			t.Errorf("want body to match %s\n%s", re, body)
		}
	}
}

// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.

package prometheus

import (
	"bytes"
	"compress/gzip"
	"errors"
	"fmt"
	"hash/fnv"
	"io"
	"net/http"
	"net/url"
	"os"
	"sort"
	"strings"
	"sync"

	dto "github.com/prometheus/client_model/go"

	"github.com/golang/protobuf/proto"

	"github.com/prometheus/client_golang/_vendor/goautoneg"
	"github.com/prometheus/client_golang/model"
	"github.com/prometheus/client_golang/text"
)

var (
	// defRegistry backs the package-level convenience functions below.
	defRegistry = newDefaultRegistry()
	// errAlreadyReg signals a re-registration of an equal Collector;
	// RegisterOrGet treats it as success.
	errAlreadyReg = errors.New("duplicate metrics collector registration attempted")
)

// Constants relevant to the HTTP interface.
const (
	// APIVersion is the version of the format of the exported data. This
	// will match this library's version, which subscribes to the Semantic
	// Versioning scheme.
	APIVersion = "0.0.4"

	// DelimitedTelemetryContentType is the content type set on telemetry
	// data responses in delimited protobuf format.
	DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`
	// TextTelemetryContentType is the content type set on telemetry data
	// responses in text format.
	TextTelemetryContentType = `text/plain; version=` + APIVersion
	// ProtoTextTelemetryContentType is the content type set on telemetry
	// data responses in protobuf text format. (Only used for debugging.)
	ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`
	// ProtoCompactTextTelemetryContentType is the content type set on
	// telemetry data responses in protobuf compact text format. (Only used
	// for debugging.)
	ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`

	// Constants for object pools.
	numBufs           = 4
	numMetricFamilies = 1000
	numMetrics        = 10000

	// Capacity for the channel to collect metrics and descriptors.
	capMetricChan = 1000
	capDescChan   = 10

	// HTTP header names used by the exposition and push code paths.
	contentTypeHeader     = "Content-Type"
	contentLengthHeader   = "Content-Length"
	contentEncodingHeader = "Content-Encoding"

	acceptEncodingHeader = "Accept-Encoding"
	acceptHeader         = "Accept"
)

// Handler returns the HTTP handler for the global Prometheus registry. It is
// already instrumented with InstrumentHandler (using "prometheus" as handler
// name). Usually the handler is used to handle the "/metrics" endpoint.
func Handler() http.Handler {
	return InstrumentHandler("prometheus", defRegistry)
}

// UninstrumentedHandler works in the same way as Handler, but the returned HTTP
// handler is not instrumented. This is useful if no instrumentation is desired
// (for whatever reason) or if the instrumentation has to happen with a
// different handler name (or with a different instrumentation approach
// altogether). See the InstrumentHandler example.
func UninstrumentedHandler() http.Handler {
	return defRegistry
}

// Register registers a new Collector to be included in metrics collection. It
// returns an error if the descriptors provided by the Collector are invalid or
// if they - in combination with descriptors of already registered Collectors -
// do not fulfill the consistency and uniqueness criteria described in the Desc
// documentation.
//
// Do not register the same Collector multiple times concurrently. (Registering
// the same Collector twice would result in an error anyway, but on top of that,
// it is not safe to do so concurrently.)
func Register(m Collector) error {
	_, err := defRegistry.Register(m)
	return err
}

// MustRegister works like Register but panics where Register would have
// returned an error.
func MustRegister(m Collector) {
	err := Register(m)
	if err != nil {
		panic(err)
	}
}

// RegisterOrGet works like Register but does not return an error if a Collector
// is registered that equals a previously registered Collector. (Two Collectors
// are considered equal if their Describe method yields the same set of
// descriptors.) Instead, the previously registered Collector is returned (which
// is helpful if the new and previously registered Collectors are equal but not
// identical, i.e. not pointers to the same object).
//
// As for Register, it is still not safe to call RegisterOrGet with the same
// Collector multiple times concurrently.
func RegisterOrGet(m Collector) (Collector, error) {
	return defRegistry.RegisterOrGet(m)
}

// MustRegisterOrGet works like RegisterOrGet but panics where RegisterOrGet would
// have returned an error.
func MustRegisterOrGet(m Collector) Collector {
	existing, err := RegisterOrGet(m)
	if err != nil {
		panic(err)
	}
	return existing
}

// Unregister unregisters the Collector that equals the Collector passed in as
// an argument. (Two Collectors are considered equal if their Describe method
// yields the same set of descriptors.) The function returns whether a Collector
// was unregistered.
func Unregister(c Collector) bool {
	return defRegistry.Unregister(c)
}

// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
// are collected. The hook function must be set before metrics collection begins
// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
// MetricFamily protobufs returned by the hook function are added to the
// delivered metrics. Each returned MetricFamily must have a unique name (also
// taking into account the MetricFamilies created in the regular way).
//
// This is a way to directly inject MetricFamily protobufs managed and owned by
// the caller. The caller has full responsibility. No sanity checks are
// performed on the returned protobufs (besides the name checks described
// above). The function must be callable at any time and concurrently.
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
	defRegistry.metricFamilyInjectionHook = hook
}

// PanicOnCollectError sets the behavior whether a panic is caused upon an error
// while metrics are collected and served to the http endpoint. By default, an
// internal server error (status code 500) is served with an error message.
func PanicOnCollectError(b bool) {
	defRegistry.panicOnCollectError = b
}

// EnableCollectChecks enables (or disables) additional consistency checks
// during metrics collection. These additional checks are not enabled by default
// because they inflict a performance penalty and the errors they check for can
// only happen if the used Metric and Collector types have internal programming
// errors. It can be helpful to enable these checks while working with custom
// Collectors or Metrics whose correctness is not well established yet.
func EnableCollectChecks(b bool) {
	defRegistry.collectChecksEnabled = b
}

// Push triggers a metric collection and pushes all collected metrics to the
// Pushgateway specified by addr. See the Pushgateway documentation for detailed
// implications of the job and instance parameter. instance can be left
// empty. The Pushgateway will then use the client's IP number instead. Use just
// host:port or ip:port as addr. (Don't add 'http://' or any path.)
//
// Note that all previously pushed metrics with the same job and instance will
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
// to push to the Pushgateway.)
func Push(job, instance, addr string) error {
	return defRegistry.Push(job, instance, addr, "PUT")
}

// PushAdd works like Push, but only previously pushed metrics with the same
// name (and the same job and instance) will be replaced. (It uses HTTP method
// 'POST' to push to the Pushgateway.)
func PushAdd(job, instance, addr string) error {
	return defRegistry.Push(job, instance, addr, "POST")
}

// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
// certain encoding. It returns the number of bytes written and any error
// encountered. Note that ext.WriteDelimited and text.MetricFamilyToText are
// encoders.
type encoder func(io.Writer, *dto.MetricFamily) (int, error)

// registry implements the collector registry and the HTTP exposition/push
// machinery. mtx guards collectorsByID, descIDs, and dimHashesByName; the
// pool channels are free-lists for reusable buffers and proto messages.
type registry struct {
	mtx                       sync.RWMutex
	collectorsByID            map[uint64]Collector // ID is a hash of the descIDs.
	descIDs                   map[uint64]struct{}
	dimHashesByName           map[string]uint64
	bufPool                   chan *bytes.Buffer
	metricFamilyPool          chan *dto.MetricFamily
	metricPool                chan *dto.Metric
	metricFamilyInjectionHook func() []*dto.MetricFamily

	panicOnCollectError, collectChecksEnabled bool
}

// Register validates the descriptors reported by c and, if all consistency
// and uniqueness checks pass, adds c to the registry. It returns the
// previously registered Collector (and errAlreadyReg) if an equal Collector
// is already present.
func (r *registry) Register(c Collector) (Collector, error) {
	// Collect the descriptors asynchronously; the channel is closed once
	// Describe has reported them all.
	descChan := make(chan *Desc, capDescChan)
	go func() {
		c.Describe(descChan)
		close(descChan)
	}()

	newDescIDs := map[uint64]struct{}{}
	newDimHashesByName := map[string]uint64{}
	var collectorID uint64 // Just a sum of all desc IDs.
	var duplicateDescErr error

	r.mtx.Lock()
	defer r.mtx.Unlock()
	// Conduct various tests...
	for desc := range descChan {

		// Is the descriptor valid at all?
		if desc.err != nil {
			return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
		}

		// Is the descID unique?
		// (In other words: Is the fqName + constLabel combination unique?)
		if _, exists := r.descIDs[desc.id]; exists {
			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
		}
		// If it is not a duplicate desc in this collector, add it to
		// the collectorID. (We allow duplicate descs within the same
		// collector, but their existence must be a no-op.)
		if _, exists := newDescIDs[desc.id]; !exists {
			newDescIDs[desc.id] = struct{}{}
			collectorID += desc.id
		}

		// Are all the label names and the help string consistent with
		// previous descriptors of the same name?
		// First check existing descriptors...
		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
			if dimHash != desc.dimHash {
				return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
			}
		} else {
			// ...then check the new descriptors already seen.
			if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
				if dimHash != desc.dimHash {
					return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
				}
			} else {
				newDimHashesByName[desc.fqName] = desc.dimHash
			}
		}
	}
	// Did anything happen at all?
	if len(newDescIDs) == 0 {
		return nil, errors.New("collector has no descriptors")
	}
	if existing, exists := r.collectorsByID[collectorID]; exists {
		return existing, errAlreadyReg
	}
	// If the collectorID is new, but at least one of the descs existed
	// before, we are in trouble.
	if duplicateDescErr != nil {
		return nil, duplicateDescErr
	}

	// Only after all tests have passed, actually register.
	r.collectorsByID[collectorID] = c
	for hash := range newDescIDs {
		r.descIDs[hash] = struct{}{}
	}
	for name, dimHash := range newDimHashesByName {
		r.dimHashesByName[name] = dimHash
	}
	return c, nil
}

// RegisterOrGet wraps Register, treating errAlreadyReg as success and
// returning the previously registered equal Collector in that case.
func (r *registry) RegisterOrGet(m Collector) (Collector, error) {
	existing, err := r.Register(m)
	if err != nil && err != errAlreadyReg {
		return nil, err
	}
	return existing, nil
}

// Unregister removes the Collector whose descriptor set equals c's. It
// returns whether a Collector was removed.
func (r *registry) Unregister(c Collector) bool {
	descChan := make(chan *Desc, capDescChan)
	go func() {
		c.Describe(descChan)
		close(descChan)
	}()

	descIDs := map[uint64]struct{}{}
	var collectorID uint64 // Just a sum of the desc IDs.
	for desc := range descChan {
		if _, exists := descIDs[desc.id]; !exists {
			collectorID += desc.id
			descIDs[desc.id] = struct{}{}
		}
	}

	// NOTE(review): the existence check under RLock and the deletion under
	// Lock are two separate critical sections; a concurrent Unregister of
	// the same Collector could interleave between them. Harmless here
	// (deletes are idempotent), but confirm that is intended.
	r.mtx.RLock()
	if _, exists := r.collectorsByID[collectorID]; !exists {
		r.mtx.RUnlock()
		return false
	}
	r.mtx.RUnlock()

	r.mtx.Lock()
	defer r.mtx.Unlock()

	delete(r.collectorsByID, collectorID)
	for id := range descIDs {
		delete(r.descIDs, id)
	}
	// dimHashesByName is left untouched as those must be consistent
	// throughout the lifetime of a program.
	return true
}

// Push collects all metrics and sends them to the Pushgateway at addr under
// the given job (and optional instance), using the given HTTP method ("PUT"
// replaces, "POST" adds). The Pushgateway acknowledges with status 202.
func (r *registry) Push(job, instance, addr, method string) error {
	u := fmt.Sprintf("http://%s/metrics/jobs/%s", addr, url.QueryEscape(job))
	if instance != "" {
		u += "/instances/" + url.QueryEscape(instance)
	}
	buf := r.getBuf()
	defer r.giveBuf(buf)
	if _, err := r.writePB(buf, text.WriteProtoDelimited); err != nil {
		if r.panicOnCollectError {
			panic(err)
		}
		return err
	}
	req, err := http.NewRequest(method, u, buf)
	if err != nil {
		return err
	}
	req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 202 {
		return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, u)
	}
	return nil
}

// ServeHTTP renders all collected metrics in the encoding negotiated from the
// request's Accept header, optionally compressed per Accept-Encoding. The
// response is buffered first so Content-Length can be set.
func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	enc, contentType := chooseEncoder(req)
	buf := r.getBuf()
	defer r.giveBuf(buf)
	writer, encoding := decorateWriter(req, buf)
	if _, err := r.writePB(writer, enc); err != nil {
		if r.panicOnCollectError {
			panic(err)
		}
		http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError)
		return
	}
	// Flush a compressing writer (e.g. gzip) so buf holds the full payload.
	if closer, ok := writer.(io.Closer); ok {
		closer.Close()
	}
	header := w.Header()
	header.Set(contentTypeHeader, contentType)
	header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
	if encoding != "" {
		header.Set(contentEncodingHeader, encoding)
	}
w.Write(buf.Bytes()) +} + +func (r *registry) writePB(w io.Writer, writeEncoded encoder) (int, error) { + var metricHashes map[uint64]struct{} + if r.collectChecksEnabled { + metricHashes = make(map[uint64]struct{}) + } + metricChan := make(chan Metric, capMetricChan) + wg := sync.WaitGroup{} + + r.mtx.RLock() + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + + // Scatter. + // (Collectors could be complex and slow, so we call them all at once.) + wg.Add(len(r.collectorsByID)) + go func() { + wg.Wait() + close(metricChan) + }() + for _, collector := range r.collectorsByID { + go func(collector Collector) { + defer wg.Done() + collector.Collect(metricChan) + }(collector) + } + r.mtx.RUnlock() + + // Drain metricChan in case of premature return. + defer func() { + for _ = range metricChan { + } + }() + + // Gather. + for metric := range metricChan { + // This could be done concurrently, too, but it required locking + // of metricFamiliesByName (and of metricHashes if checks are + // enabled). Most likely not worth it. + desc := metric.Desc() + metricFamily, ok := metricFamiliesByName[desc.fqName] + if !ok { + metricFamily = r.getMetricFamily() + defer r.giveMetricFamily(metricFamily) + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + metricFamiliesByName[desc.fqName] = metricFamily + } + dtoMetric := r.getMetric() + defer r.giveMetric(dtoMetric) + if err := metric.Write(dtoMetric); err != nil { + // TODO: Consider different means of error reporting so + // that a single erroneous metric could be skipped + // instead of blowing up the whole collection. + return 0, fmt.Errorf("error collecting metric %v: %s", desc, err) + } + switch { + case metricFamily.Type != nil: + // Type already set. We are good. 
+ case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + default: + return 0, fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if r.collectChecksEnabled { + if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil { + return 0, err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + } + + if r.metricFamilyInjectionHook != nil { + for _, mf := range r.metricFamilyInjectionHook() { + if _, exists := metricFamiliesByName[mf.GetName()]; exists { + return 0, fmt.Errorf("metric family with duplicate name injected: %s", mf) + } + metricFamiliesByName[mf.GetName()] = mf + } + } + + // Now that MetricFamilies are all set, sort their Metrics + // lexicographically by their label values. + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + + // Write out MetricFamilies sorted by their name. + names := make([]string, 0, len(metricFamiliesByName)) + for name := range metricFamiliesByName { + names = append(names, name) + } + sort.Strings(names) + + var written int + for _, name := range names { + w, err := writeEncoded(w, metricFamiliesByName[name]) + written += w + if err != nil { + return written, err + } + } + return written, nil +} + +func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error { + + // Type consistency with metric family. 
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %q is not a %s", + dtoMetric, metricFamily.Type, + ) + } + + // Desc consistency with metric family. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %q has help %q but should have %q", + dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %q are inconsistent with descriptor %s", + dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %q are inconsistent with descriptor %s", + dtoMetric, desc, + ) + } + } + + // Is the metric unique (i.e. no other metric with the same name and the same label values)? 
+ h := fnv.New64a() + var buf bytes.Buffer + buf.WriteString(desc.fqName) + buf.WriteByte(model.SeparatorByte) + h.Write(buf.Bytes()) + for _, lp := range dtoMetric.Label { + buf.Reset() + buf.WriteString(lp.GetValue()) + buf.WriteByte(model.SeparatorByte) + h.Write(buf.Bytes()) + } + metricHash := h.Sum64() + if _, exists := metricHashes[metricHash]; exists { + return fmt.Errorf( + "collected metric %q was collected before with the same name and label values", + dtoMetric, + ) + } + metricHashes[metricHash] = struct{}{} + + r.mtx.RLock() // Remaining checks need the read lock. + defer r.mtx.RUnlock() + + // Is the desc registered? + if _, exist := r.descIDs[desc.id]; !exist { + return fmt.Errorf("collected metric %q with unregistered descriptor %s", dtoMetric, desc) + } + + return nil +} + +func (r *registry) getBuf() *bytes.Buffer { + select { + case buf := <-r.bufPool: + return buf + default: + return &bytes.Buffer{} + } +} + +func (r *registry) giveBuf(buf *bytes.Buffer) { + buf.Reset() + select { + case r.bufPool <- buf: + default: + } +} + +func (r *registry) getMetricFamily() *dto.MetricFamily { + select { + case mf := <-r.metricFamilyPool: + return mf + default: + return &dto.MetricFamily{} + } +} + +func (r *registry) giveMetricFamily(mf *dto.MetricFamily) { + mf.Reset() + select { + case r.metricFamilyPool <- mf: + default: + } +} + +func (r *registry) getMetric() *dto.Metric { + select { + case m := <-r.metricPool: + return m + default: + return &dto.Metric{} + } +} + +func (r *registry) giveMetric(m *dto.Metric) { + m.Reset() + select { + case r.metricPool <- m: + default: + } +} + +func newRegistry() *registry { + return ®istry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + bufPool: make(chan *bytes.Buffer, numBufs), + metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies), + metricPool: make(chan *dto.Metric, numMetrics), + } +} + +func newDefaultRegistry() *registry { 
+ r := newRegistry() + r.Register(NewProcessCollector(os.Getpid(), "")) + r.Register(NewGoCollector()) + return r +} + +func chooseEncoder(req *http.Request) (encoder, string) { + accepts := goautoneg.ParseAccept(req.Header.Get(acceptHeader)) + for _, accept := range accepts { + switch { + case accept.Type == "application" && + accept.SubType == "vnd.google.protobuf" && + accept.Params["proto"] == "io.prometheus.client.MetricFamily": + switch accept.Params["encoding"] { + case "delimited": + return text.WriteProtoDelimited, DelimitedTelemetryContentType + case "text": + return text.WriteProtoText, ProtoTextTelemetryContentType + case "compact-text": + return text.WriteProtoCompactText, ProtoCompactTextTelemetryContentType + default: + continue + } + case accept.Type == "text" && + accept.SubType == "plain" && + (accept.Params["version"] == "0.0.4" || accept.Params["version"] == ""): + return text.MetricFamilyToText, TextTelemetryContentType + default: + continue + } + } + return text.MetricFamilyToText, TextTelemetryContentType +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). 
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry_test.go new file mode 100644 index 0000000000..95579e512b --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/registry_test.go @@ -0,0 +1,489 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +package prometheus + +import ( + "bytes" + "encoding/binary" + "net/http" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" +) + +type fakeResponseWriter struct { + header http.Header + body bytes.Buffer +} + +func (r *fakeResponseWriter) Header() http.Header { + return r.header +} + +func (r *fakeResponseWriter) Write(d []byte) (l int, err error) { + return r.body.Write(d) +} + +func (r *fakeResponseWriter) WriteHeader(c int) { +} + +func testHandler(t testing.TB) { + + metricVec := NewCounterVec( + CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + + metricVec.WithLabelValues("val1").Inc() + metricVec.WithLabelValues("val2").Inc() + + varintBuf := make([]byte, binary.MaxVarintLen32) + + externalMetricFamily := []*dto.MetricFamily{ + { + Name: proto.String("externalname"), + Help: proto.String("externaldocstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("externallabelname"), + Value: proto.String("externalval1"), + }, + { + Name: proto.String("externalconstname"), + Value: proto.String("externalconstvalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + }, + } + marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily[0]) + if err != nil { + t.Fatal(err) + } + var externalBuf bytes.Buffer + l := binary.PutUvarint(varintBuf, uint64(len(marshaledExternalMetricFamily))) + _, err = externalBuf.Write(varintBuf[:l]) + if err != nil { + t.Fatal(err) + } + _, err = externalBuf.Write(marshaledExternalMetricFamily) + if err != nil { + t.Fatal(err) + } + externalMetricFamilyAsBytes := externalBuf.Bytes() + externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring +# TYPE externalname counter +externalname{externallabelname="externalval1",externalconstname="externalconstvalue"} 1 +`) + 
externalMetricFamilyAsProtoText := []byte(`name: "externalname" +help: "externaldocstring" +type: COUNTER +metric: < + label: < + name: "externallabelname" + value: "externalval1" + > + label: < + name: "externalconstname" + value: "externalconstvalue" + > + counter: < + value: 1 + > +> + +`) + externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric: label: counter: > +`) + + expectedMetricFamily := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + marshaledExpectedMetricFamily, err := proto.Marshal(expectedMetricFamily) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + l = binary.PutUvarint(varintBuf, uint64(len(marshaledExpectedMetricFamily))) + _, err = buf.Write(varintBuf[:l]) + if err != nil { + t.Fatal(err) + } + _, err = buf.Write(marshaledExpectedMetricFamily) + if err != nil { + t.Fatal(err) + } + expectedMetricFamilyAsBytes := buf.Bytes() + expectedMetricFamilyAsText := []byte(`# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +`) + expectedMetricFamilyAsProtoText := []byte(`name: "name" +help: "docstring" +type: COUNTER +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val1" + > + counter: < + value: 1 + > +> +metric: < + label: < + 
name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val2" + > + counter: < + value: 1 + > +> + +`) + expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > +`) + + type output struct { + headers map[string]string + body []byte + } + + var scenarios = []struct { + headers map[string]string + out output + withCounter bool + withExternalMF bool + }{ + { // 0 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, dings/bums;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 1 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/quark;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 2 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 3 + headers: map[string]string{ + "Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: []byte{}, + }, + }, + { // 4 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: expectedMetricFamilyAsText, + }, + withCounter: true, + }, + { // 5 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: 
map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: expectedMetricFamilyAsBytes, + }, + withCounter: true, + }, + { // 6 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: externalMetricFamilyAsText, + }, + withExternalMF: true, + }, + { // 7 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: externalMetricFamilyAsBytes, + }, + withExternalMF: true, + }, + { // 8 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + withCounter: true, + withExternalMF: true, + }, + { // 9 + headers: map[string]string{ + "Accept": "text/plain", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 10 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: expectedMetricFamilyAsText, + }, + withCounter: true, + }, + { // 11 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, 
text/plain;q=0.5;version=0.0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsText, + expectedMetricFamilyAsText, + }, + []byte{}, + ), + }, + withCounter: true, + withExternalMF: true, + }, + { // 12 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + withCounter: true, + withExternalMF: true, + }, + { // 13 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoText, + expectedMetricFamilyAsProtoText, + }, + []byte{}, + ), + }, + withCounter: true, + withExternalMF: true, + }, + { // 14 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyAsProtoCompactText, + }, + []byte{}, + ), + }, + withCounter: true, + withExternalMF: true, + }, + } + for i, scenario := range scenarios { + registry := 
newRegistry() + registry.collectChecksEnabled = true + + if scenario.withCounter { + registry.Register(metricVec) + } + if scenario.withExternalMF { + registry.metricFamilyInjectionHook = func() []*dto.MetricFamily { + return externalMetricFamily + } + } + writer := &fakeResponseWriter{ + header: http.Header{}, + } + handler := InstrumentHandler("prometheus", registry) + request, _ := http.NewRequest("GET", "/", nil) + for key, value := range scenario.headers { + request.Header.Add(key, value) + } + handler(writer, request) + + for key, value := range scenario.out.headers { + if writer.Header().Get(key) != value { + t.Errorf( + "%d. expected %q for header %q, got %q", + i, value, key, writer.Header().Get(key), + ) + } + } + + if !bytes.Equal(scenario.out.body, writer.body.Bytes()) { + t.Errorf( + "%d. expected %q for body, got %q", + i, scenario.out.body, writer.body.Bytes(), + ) + } + } +} + +func TestHandler(t *testing.T) { + testHandler(t) +} + +func BenchmarkHandler(b *testing.B) { + for i := 0; i < b.N; i++ { + testHandler(b) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 0000000000..5d8b82f595 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,424 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "hash/fnv" + "sort" + "sync" + "time" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/_vendor/perks/quantile" +) + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +// DefObjectives are the default Summary quantile values. +var ( + DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. 
Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Summary. Summaries with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // SummaryVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. Another, more advanced purpose is if more than one + // Collector needs to collect Summaries with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels + + // Objectives defines the quantile rank estimates with their respective + // absolute error. The default value is DefObjectives. + Objectives map[float64]float64 + + // MaxAge defines the duration for which an observation stays relevant + // for the summary. Must be positive. The default value is DefMaxAge. + MaxAge time.Duration + + // AgeBuckets is the number of buckets used to exclude observations that + // are older than MaxAge from the summary. A higher number has a + // resource penalty, so only increase it if the higher resolution is + // really required. The default value is DefAgeBuckets. 
+ AgeBuckets uint32 + + // BufCap defines the default sample stream buffer size. The default + // value of DefBufCap should suffice for most uses. If there is a need + // to increase the value, a multiple of 500 is recommended (because that + // is the internal buffer size of the underlying package + // "github.com/bmizerany/perks/quantile"). + BufCap uint32 + + // Epsilon is the error epsilon for the quantile rank estimate. Must be + // positive. The default is DefEpsilon. + Epsilon float64 +} + +// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge +// method of perk/quantile is actually not working as advertised - and it might +// be unfixable, as the underlying algorithm is apparently not capable of +// merging summaries in the first place. To avoid using Merge, we are currently +// adding observations to _each_ age bucket, i.e. the effort to add a sample is +// essentially multiplied by the number of age buckets. When rotating age +// buckets, we empty the previous head stream. On scrape time, we simply take +// the quantiles from the head stream (no merging required). Result: More effort +// on observation time, less effort on scrape time, which is exactly the +// opposite of what we try to accomplish, but at least the results are correct. +// +// The quite elegant previous contraption to merge the age buckets efficiently +// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) +// can't be used anymore. + +// NewSummary creates a new Summary based on the provided SummaryOpts. 
+func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + if len(opts.Objectives) == 0 { + opts.Objectives = DefObjectives + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.Init(s) // Init self-collection. + return s +} + +type summary struct { + SelfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. 
+ + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + + if len(s.hotBuf) != 0 { + s.swapBufs(time.Now()) + } + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(s.headStream.Query(rank)), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. +func (s *summary) asyncFlush(now time.Time) { + s.mtx.Lock() + s.swapBufs(now) + + // Unblock the original goroutine that was responsible for the mutation + // that triggered the compaction. But hold onto the global non-buffer + // state mutex until the operation finishes. + go func() { + s.flushColdBuf() + s.mtx.Unlock() + }() +} + +// rotateStreams needs mtx AND bufMtx locked. 
+func (s *summary) maybeRotateStreams() { + for !s.hotBufExpTime.Equal(s.headStreamExpTime) { + s.headStream.Reset() + s.headStreamIdx++ + if s.headStreamIdx >= len(s.streams) { + s.headStreamIdx = 0 + } + s.headStream = s.streams[s.headStreamIdx] + s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) + } +} + +// flushColdBuf needs mtx locked. +func (s *summary) flushColdBuf() { + for _, v := range s.coldBuf { + for _, stream := range s.streams { + stream.Insert(v) + } + s.cnt++ + s.sum += v + } + s.coldBuf = s.coldBuf[0:0] + s.maybeRotateStreams() +} + +// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. +func (s *summary) swapBufs(now time.Time) { + if len(s.coldBuf) != 0 { + panic("coldBuf is not empty") + } + s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf + // hotBuf is now empty and gets new expiration set. + for now.After(s.hotBufExpTime) { + s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) + } +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. http request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. +type SummaryVec struct { + MetricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. At least one label name must be +// provided. 
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Summary and not a +// Metric so that no type conversion is required. +func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Summary and not a Metric so that no +// type conversion is required. +func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { + return m.MetricVec.WithLabelValues(lvs...).(Summary) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *SummaryVec) With(labels Labels) Summary { + return m.MetricVec.With(labels).(Summary) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary_test.go new file mode 100644 index 0000000000..3f83796ea6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/summary_test.go @@ -0,0 +1,328 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "math/rand" + "sort" + "sync" + "testing" + "testing/quick" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func benchmarkSummaryObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryObserve1(b *testing.B) { + benchmarkSummaryObserve(1, b) +} + +func BenchmarkSummaryObserve2(b *testing.B) { + benchmarkSummaryObserve(2, b) +} + +func BenchmarkSummaryObserve4(b *testing.B) { + benchmarkSummaryObserve(4, b) +} + +func BenchmarkSummaryObserve8(b *testing.B) { + benchmarkSummaryObserve(8, b) +} + +func benchmarkSummaryWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryWrite1(b *testing.B) { + benchmarkSummaryWrite(1, b) +} + +func BenchmarkSummaryWrite2(b *testing.B) { + benchmarkSummaryWrite(2, b) +} + +func BenchmarkSummaryWrite4(b *testing.B) { + benchmarkSummaryWrite(4, b) +} + +func BenchmarkSummaryWrite8(b *testing.B) { + benchmarkSummaryWrite(8, b) +} + +func TestSummaryConcurrency(t *testing.T) { + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: 
"helpless", + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Summary.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + for i, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[i].Quantile + gotV := *m.Summary.Quantile[i].Value + min, max := getBounds(allVars, wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f, want %f", gotQ, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryVecConcurrency(t *testing.T) { + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummaryVec( + SummaryOpts{ + Name: "test_summary", + Help: "helpless", + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, 
vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sum.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := sum.WithLabelValues(string('A' + i)) + s.Write(m) + if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want) + } + if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want) + } + for j, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[j].Quantile + gotV := *m.Summary.Quantile[j].Value + min, max := getBounds(allVars[i], wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryDecay(t *testing.T) { + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + MaxAge: 100 * time.Millisecond, + Objectives: map[float64]float64{0.1: 0.001}, + AgeBuckets: 10, + }) + + m := &dto.Metric{} + i := 0 + tick := time.NewTicker(time.Millisecond) + for _ = range tick.C { + i++ + sum.Observe(float64(i)) + if i%10 == 0 { + sum.Write(m) + if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); 
math.Abs(got-want) > 20 { + t.Errorf("%d. got %f, want %f", i, got, want) + } + m.Reset() + } + if i >= 1000 { + break + } + } + tick.Stop() +} + +func getBounds(vars []float64, q, ε float64) (min, max float64) { + // TODO: This currently tolerates an error of up to 2*ε. The error must + // be at most ε, but for some reason, it's sometimes slightly + // higher. That's a bug. + n := float64(len(vars)) + lower := int((q - 2*ε) * n) + upper := int(math.Ceil((q + 2*ε) * n)) + min = vars[0] + if lower > 1 { + min = vars[lower-1] + } + max = vars[len(vars)-1] + if upper < len(vars) { + max = vars[upper-1] + } + return +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/untyped.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 0000000000..a5a4e77b14 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,145 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "hash/fnv" + +// Untyped is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// An Untyped metric works the same as a Gauge. The only difference is that to +// no type information is implied. +// +// To create Gauge instances, use NewUntyped. +type Untyped interface { + Metric + Collector + + // Set sets the Untyped metric to an arbitrary value. 
+ Set(float64) + // Inc increments the Untyped metric by 1. + Inc() + // Dec decrements the Untyped metric by 1. + Dec() + // Add adds the given value to the Untyped metric. (The value can be + // negative, resulting in a decrease.) + Add(float64) + // Sub subtracts the given value from the Untyped metric. (The value can + // be negative, resulting in an increase.) + Sub(float64) +} + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// NewUntyped creates a new Untyped metric from the provided UntypedOpts. +func NewUntyped(opts UntypedOpts) Untyped { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, 0) +} + +// UntypedVec is a Collector that bundles a set of Untyped metrics that all +// share the same Desc, but have different values for their variable +// labels. This is used if you want to count the same thing partitioned by +// various dimensions. Create instances with NewUntypedVec. +type UntypedVec struct { + MetricVec +} + +// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &UntypedVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns an Untyped and not a +// Metric so that no type conversion is required. 
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns an Untyped and not a Metric so that no +// type conversion is required. +func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { + return m.MetricVec.WithLabelValues(lvs...).(Untyped) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *UntypedVec) With(labels Labels) Untyped { + return m.MetricVec.With(labels).(Untyped) +} + +// UntypedFunc is an Untyped whose value is determined at collect time by +// calling a provided function. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. 
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/value.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 0000000000..107d43e372 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,230 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "math" + "sort" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +// value is a generic metric for simple values. It implements Metric, Collector, +// Counter, Gauge, and Untyped. Its effective type is determined by +// ValueType. This is a low-level building block used by the library to back the +// implementations of Counter, Gauge, and Untyped. 
+type value struct { + SelfCollector + + desc *Desc + valType ValueType + valBits uint64 // These are the bits of the represented float64 value. + labelPairs []*dto.LabelPair +} + +// newValue returns a newly allocated value with the given Desc, ValueType, +// sample value and label values. It panics if the number of label +// values is different from the number of variable labels in Desc. +func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { + if len(labelValues) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &value{ + desc: desc, + valType: valueType, + valBits: math.Float64bits(val), + labelPairs: makeLabelPairs(desc, labelValues), + } + result.Init(result) + return result +} + +func (v *value) Desc() *Desc { + return v.desc +} + +func (v *value) Set(val float64) { + atomic.StoreUint64(&v.valBits, math.Float64bits(val)) +} + +func (v *value) Inc() { + v.Add(1) +} + +func (v *value) Dec() { + v.Add(-1) +} + +func (v *value) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&v.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { + return + } + } +} + +func (v *value) Sub(val float64) { + v.Add(val * -1) +} + +func (v *value) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) + return populateMetric(v.valType, val, v.labelPairs, out) +} + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. 
+type valueFunc struct { + SelfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.Init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. 
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. 
+ return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + for _, lp := range desc.constLabelPairs { + labelPairs = append(labelPairs, lp) + } + sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 0000000000..aa49deba1b --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,241 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "fmt" + "hash" + "sync" +) + +// MetricVec is a Collector to bundle metrics of the same name that +// differ in their label values. MetricVec is usually not used directly but as a +// building block for implementations of vectors of a given metric +// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already +// provided in this package. +type MetricVec struct { + mtx sync.RWMutex // Protects not only children, but also hash and buf. + children map[uint64]Metric + desc *Desc + + // hash is our own hash instance to avoid repeated allocations. 
+ hash hash.Hash64 + // buf is used to copy string contents into it for hashing, + // again to avoid allocations. + buf bytes.Buffer + + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. The length of the returned slice +// is always one. +func (m *MetricVec) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *MetricVec) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metric := range m.children { + ch <- metric + } +} + +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created. +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. 
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + return m.getOrCreateMetric(h, lvs...), nil +} + +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of keeping +// the Metric are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + lvs := make([]string, len(labels)) + for i, label := range m.desc.variableLabels { + lvs[i] = labels[label] + } + return m.getOrCreateMetric(h, lvs...), nil +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics if an error +// occurs. The method allows neat syntax like: +// httpReqs.WithLabelValues("404", "POST").Inc() +func (m *MetricVec) WithLabelValues(lvs ...string) Metric { + metric, err := m.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return metric +} + +// With works as GetMetricWith, but panics if an error occurs. The method allows +// neat syntax like: +// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() +func (m *MetricVec) With(labels Labels) Metric { + metric, err := m.GetMetricWith(labels) + if err != nil { + panic(err) + } + return metric +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). 
It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual Metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + if _, has := m.children[h]; !has { + return false + } + delete(m.children, h) + return true +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in the Desc of the MetricVec. However, such +// inconsistent Labels can never match an actual Metric, so the method will +// always return false in that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *MetricVec) Delete(labels Labels) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabels(labels) + if err != nil { + return false + } + if _, has := m.children[h]; !has { + return false + } + delete(m.children, h) + return true +} + +// Reset deletes all metrics in this vector. 
+func (m *MetricVec) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.children { + delete(m.children, h) + } +} + +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { + if len(vals) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + m.hash.Reset() + for _, val := range vals { + m.buf.Reset() + m.buf.WriteString(val) + m.hash.Write(m.buf.Bytes()) + } + return m.hash.Sum64(), nil +} + +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { + if len(labels) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + m.hash.Reset() + for _, label := range m.desc.variableLabels { + val, ok := labels[label] + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + m.buf.Reset() + m.buf.WriteString(val) + m.hash.Write(m.buf.Bytes()) + } + return m.hash.Sum64(), nil +} + +func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric { + metric, ok := m.children[hash] + if !ok { + // Copy labelValues. Otherwise, they would be allocated even if we don't go + // down this code path. + copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...) + metric = m.newMetric(copiedLabelValues...) + m.children[hash] = metric + } + return metric +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec_test.go new file mode 100644 index 0000000000..0e9431e656 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus/vec_test.go @@ -0,0 +1,91 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "hash/fnv" + "testing" +) + +func TestDelete(t *testing.T) { + desc := NewDesc("test", "helpless", []string{"l1", "l2"}, nil) + vec := MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) + }, + } + + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestDeleteLabelValues(t *testing.T) { + desc := NewDesc("test", "helpless", []string{"l1", "l2"}, nil) + vec := MetricVec{ + children: 
map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) + }, + } + + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/bench_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/bench_test.go new file mode 100644 index 0000000000..df887e0fc6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/bench_test.go @@ -0,0 +1,168 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package text + +import ( + "bytes" + "compress/gzip" + "io" + "io/ioutil" + "testing" + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/ext" +) + +// Benchmarks to show how much penalty text format parsing actually inflicts. +// +// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4. +// +// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op +// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op +// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op +// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op +// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op +// +// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations. +// Without compression, it needs ~7x longer, but with compression (the more relevant scenario), +// the difference becomes less relevant, only ~4x. +// +// The test data contains 248 samples. +// +// BenchmarkProcessor002ParseOnly in the extraction package is not quite +// comparable to the benchmarks here, but it gives an idea: JSON parsing is even +// slower than text parsing and needs a comparable amount of allocs. + +// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric +// family DTOs. +func BenchmarkParseText(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/text") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape +// into metric family DTOs. 
+func BenchmarkParseTextGzip(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/text.gz") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + in, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + b.Fatal(err) + } + if _, err := parser.TextToMetricFamilies(in); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into +// metric family DTOs. Note that this does not build a map of metric families +// (as the text version does), because it is not required for Prometheus +// ingestion either. (However, it is required for the text-format parsing, as +// the metric family might be sprinkled all over the text, while the +// protobuf-format guarantees bundling at one place.) +func BenchmarkParseProto(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + family := &dto.MetricFamily{} + in := bytes.NewReader(data) + for { + family.Reset() + if _, err := ext.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } + } +} + +// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped +// protobuf format. +func BenchmarkParseProtoGzip(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf.gz") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + family := &dto.MetricFamily{} + in, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + b.Fatal(err) + } + for { + family.Reset() + if _, err := ext.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } + } +} + +// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed +// metric family DTOs into a map. This is not happening during Prometheus +// ingestion. 
It is just here to measure the overhead of that map creation and +// separate it from the overhead of the text format parsing. +func BenchmarkParseProtoMap(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + families := map[string]*dto.MetricFamily{} + in := bytes.NewReader(data) + for { + family := &dto.MetricFamily{} + if _, err := ext.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + families[family.GetName()] = family + } + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create.go new file mode 100644 index 0000000000..64e372c0cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create.go @@ -0,0 +1,298 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package text contains helper functions to parse and create text-based +// exchange formats. The package currently supports (only) version 0.0.4 of the +// exchange format. Should other versions be supported in the future, some +// versioning scheme has to be applied. Possibilities include separate packages +// or separate functions. 
The best way depends on the nature of future changes, +// which is the reason why no versioning scheme has been applied prematurely +// here. +package text + +import ( + "bytes" + "fmt" + "io" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. It returns the number of bytes written +// and any error encountered. This function does not perform checks on the +// content of the metric and label names, i.e. invalid metric or label names +// will result in invalid text format output. +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { + var written int + + // Fail-fast checks. + if len(in.Metric) == 0 { + return written, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return written, fmt.Errorf("MetricFamily has no name: %s", in) + } + if in.Type == nil { + return written, fmt.Errorf("MetricFamily has no type: %s", in) + } + + // Comments, first HELP, then TYPE. + if in.Help != nil { + n, err := fmt.Fprintf( + out, "# HELP %s %s\n", + name, escapeString(*in.Help, false), + ) + written += n + if err != nil { + return written, err + } + } + metricType := in.GetType() + n, err := fmt.Fprintf( + out, "# TYPE %s %s\n", + name, strings.ToLower(metricType.String()), + ) + written += n + if err != nil { + return written, err + } + + // Finally the samples, one line for each. 
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s", metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Counter.GetValue(), + out, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s", metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Gauge.GetValue(), + out, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s", metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Untyped.GetValue(), + out, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s", metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + name, metric, + "quantile", fmt.Sprint(q.GetQuantile()), + q.GetValue(), + out, + ) + written += n + if err != nil { + return written, err + } + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Summary.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Summary.GetSampleCount()), + out, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected summary in metric %s", metric, + ) + } + for _, q := range metric.Histogram.Bucket { + n, err = writeSample( + name+"_bucket", metric, + "le", fmt.Sprint(q.GetUpperBound()), + float64(q.GetCumulativeCount()), + out, + ) + written += n + if err != nil { + return written, err + } + // TODO: Add +inf bucket if it's missing. 
+ } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Histogram.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Histogram.GetSampleCount()), + out, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s", metric, + ) + } + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeSample writes a single sample in text format to out, given the metric +// name, the metric proto message itself, optionally an additional label name +// and value (use empty strings if not required), and the value. The function +// returns the number of bytes written and any error encountered. +func writeSample( + name string, + metric *dto.Metric, + additionalLabelName, additionalLabelValue string, + value float64, + out io.Writer, +) (int, error) { + var written int + n, err := fmt.Fprint(out, name) + written += n + if err != nil { + return written, err + } + n, err = labelPairsToText( + metric.Label, + additionalLabelName, additionalLabelValue, + out, + ) + written += n + if err != nil { + return written, err + } + n, err = fmt.Fprintf(out, " %v", value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + n, err = out.Write([]byte{'\n'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// labelPairsToText converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'out'. An empty slice in combination with an +// empty string 'additionalLabelName' results in nothing being +// written. Otherwise, the label pairs are written, escaped as required by the +// text format, and enclosed in '{...}'. 
The function returns the number of +// bytes written and any error encountered. +func labelPairsToText( + in []*dto.LabelPair, + additionalLabelName, additionalLabelValue string, + out io.Writer, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var written int + separator := '{' + for _, lp := range in { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, lp.GetName(), escapeString(lp.GetValue(), true), + ) + written += n + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, additionalLabelName, + escapeString(additionalLabelValue, true), + ) + written += n + if err != nil { + return written, err + } + } + n, err := out.Write([]byte{'}'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// escapeString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. +func escapeString(v string, includeDoubleQuote bool) string { + result := bytes.NewBuffer(make([]byte, 0, len(v))) + for _, c := range v { + switch { + case c == '\\': + result.WriteString(`\\`) + case includeDoubleQuote && c == '"': + result.WriteString(`\"`) + case c == '\n': + result.WriteString(`\n`) + default: + result.WriteRune(c) + } + } + return result.String() +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create_test.go new file mode 100644 index 0000000000..9326e6e5b5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/create_test.go @@ -0,0 +1,395 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package text + +import ( + "bytes" + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" +) + +func testCreate(t testing.TB) { + var scenarios = []struct { + in *dto.MetricFamily + out string + }{ + // 0: Counter, NaN as value, timestamp given. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("two-line\n doc str\\ing"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(math.NaN()), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(.23), + }, + TimestampMs: proto.Int64(1234567890), + }, + }, + }, + out: `# HELP name two-line\n doc str\\ing +# TYPE name counter +name{labelname="val1",basename="basevalue"} NaN +name{labelname="val2",basename="basevalue"} 0.23 1234567890 +`, + }, + // 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values. 
+ { + in: &dto.MetricFamily{ + Name: proto.String("gauge_name"), + Help: proto.String("gauge\ndoc\nstr\"ing"), + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("val with\nnew line"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("val with \\backslash and \"quotes\""), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(+1)), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("Björn"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("佖佥"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(3.14E42), + }, + }, + }, + }, + out: `# HELP gauge_name gauge\ndoc\nstr"ing +# TYPE gauge_name gauge +gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf +gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42 +`, + }, + // 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label. + { + in: &dto.MetricFamily{ + Name: proto.String("untyped_name"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("value 1"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(-1.23e-45), + }, + }, + }, + }, + out: `# TYPE untyped_name untyped +untyped_name -Inf +untyped_name{name_1="value 1"} -1.23e-45 +`, + }, + // 3: Summary. 
+ { + in: &dto.MetricFamily{ + Name: proto.String("summary_name"), + Help: proto.String("summary docstring"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Summary: &dto.Summary{ + SampleCount: proto.Uint64(42), + SampleSum: proto.Float64(-3.4567), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(-1.23), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(.2342354), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.99), + Value: proto.Float64(0), + }, + }, + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("value 1"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("value 2"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(4711), + SampleSum: proto.Float64(2010.1971), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(1), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(2), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.99), + Value: proto.Float64(3), + }, + }, + }, + }, + }, + }, + out: `# HELP summary_name summary docstring +# TYPE summary_name summary +summary_name{quantile="0.5"} -1.23 +summary_name{quantile="0.9"} 0.2342354 +summary_name{quantile="0.99"} 0 +summary_name_sum -3.4567 +summary_name_count 42 +summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1 +summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2 +summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3 +summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971 +summary_name_count{name_1="value 1",name_2="value 2"} 4711 +`, + }, + // 4: Histogram + { + in: &dto.MetricFamily{ + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ 
+ Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + &dto.Bucket{ + UpperBound: proto.Float64(math.Inf(+1)), + CumulativeCount: proto.Uint64(2693), + }, + }, + }, + }, + }, + }, + out: `# HELP request_duration_microseconds The response latency. +# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + }, + } + + for i, scenario := range scenarios { + out := bytes.NewBuffer(make([]byte, 0, len(scenario.out))) + n, err := MetricFamilyToText(out, scenario.in) + if err != nil { + t.Errorf("%d. error: %s", i, err) + continue + } + if expected, got := len(scenario.out), n; expected != got { + t.Errorf( + "%d. expected %d bytes written, got %d", + i, expected, got, + ) + } + if expected, got := scenario.out, out.String(); expected != got { + t.Errorf( + "%d. expected out=%q, got %q", + i, expected, got, + ) + } + } + +} + +func TestCreate(t *testing.T) { + testCreate(t) +} + +func BenchmarkCreate(b *testing.B) { + for i := 0; i < b.N; i++ { + testCreate(b) + } +} + +func testCreateError(t testing.TB) { + var scenarios = []struct { + in *dto.MetricFamily + err string + }{ + // 0: No metric. 
+ { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{}, + }, + err: "MetricFamily has no metrics", + }, + // 1: No metric name. + { + in: &dto.MetricFamily{ + Help: proto.String("doc string"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + err: "MetricFamily has no name", + }, + // 2: No metric type. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + err: "MetricFamily has no type", + }, + // 3: Wrong type. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + err: "expected counter in metric", + }, + } + + for i, scenario := range scenarios { + var out bytes.Buffer + _, err := MetricFamilyToText(&out, scenario.in) + if err == nil { + t.Errorf("%d. expected error, got nil", i) + continue + } + if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 { + t.Errorf( + "%d. 
expected error starting with %q, got %q", + i, expected, got, + ) + } + } + +} + +func TestCreateError(t *testing.T) { + testCreateError(t) +} + +func BenchmarkCreateError(b *testing.B) { + for i := 0; i < b.N; i++ { + testCreateError(b) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse.go new file mode 100644 index 0000000000..eaff592740 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse.go @@ -0,0 +1,739 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package text + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/client_golang/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. 
+func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// Parser is used to parse the simple and flat text-based exchange format. Its +// nil value is ready to use. +type Parser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. 
Checks for duplicates have to be performed separately, if required. +// +// Summaries are a rather special beast. You would probably not use them in the +// simple text format anyway. This method can deal with summaries if they are +// presented in exactly the way the text.Create function creates them. +// +// This method must not be called concurrently. If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *Parser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + return p.metricFamiliesByName, p.err +} + +func (p *Parser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *Parser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. 
+ } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *Parser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. 
+func (p *Parser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *Parser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *Parser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == "quantile") && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == "le") { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *Parser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. 
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == "quantile" { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == "le" { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *Parser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. 
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if 
p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *Parser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() > 0 { + p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) + return nil + } + return p.startOfLine +} + +// readingHelp represents the state where the last byte read (now in +// p.currentByte) is the first byte of the docstring after 'HELP'. 
+func (p *Parser) readingHelp() stateFn { + if p.currentMF.Help != nil { + p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) + return nil + } + // Rest of line is the docstring. + if p.readTokenUntilNewline(true); p.err != nil { + return nil // Unexpected end of input. + } + p.currentMF.Help = proto.String(p.currentToken.String()) + return p.startOfLine +} + +// readingType represents the state where the last byte read (now in +// p.currentByte) is the first byte of the type hint after 'TYPE'. +func (p *Parser) readingType() stateFn { + if p.currentMF.Type != nil { + p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) + return nil + } + // Rest of line is the type. + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + if !ok { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + p.currentMF.Type = dto.MetricType(metricType).Enum() + return p.startOfLine +} + +// parseError sets p.err to a ParseError at the current line with the given +// message. +func (p *Parser) parseError(msg string) { + p.err = ParseError{ + Line: p.lineCount, + Msg: msg, + } +} + +// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte +// that is neither ' ' nor '\t'. That byte is left in p.currentByte. +func (p *Parser) skipBlankTab() { + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { + return + } + } +} + +// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do +// anything if p.currentByte is neither ' ' nor '\t'. +func (p *Parser) skipBlankTabIfCurrentBlankTab() { + if isBlankOrTab(p.currentByte) { + p.skipBlankTab() + } +} + +// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. 
The +// first byte considered is the byte already read (now in p.currentByte). The +// first whitespace byte encountered is still copied into p.currentByte, but not +// into p.currentToken. +func (p *Parser) readTokenUntilWhitespace() { + p.currentToken.Reset() + for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first +// byte considered is the byte already read (now in p.currentByte). The first +// newline byte encountered is still copied into p.currentByte, but not into +// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are +// recognized: '\\' translates into '\', and '\n' into a line-feed character. All +// other escape sequences are invalid and cause an error. +func (p *Parser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. 
+func (p *Parser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *Parser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. 
+func (p *Parser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *Parser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git 
a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse_test.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse_test.go new file mode 100644 index 0000000000..6b7adfff55 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/parse_test.go @@ -0,0 +1,588 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package text + +import ( + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" +) + +var parser Parser + +func testParse(t testing.TB) { + var scenarios = []struct { + in string + out []*dto.MetricFamily + }{ + // 0: Empty lines as input. + { + in: ` + +`, + out: []*dto.MetricFamily{}, + }, + // 1: Minimal case. + { + in: ` +minimal_metric 1.234 +another_metric -3e3 103948 +# Even that: +no_labels{} 3 +# HELP line for non-existing metric will be ignored. 
+`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("minimal_metric"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(1.234), + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("another_metric"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(-3e3), + }, + TimestampMs: proto.Int64(103948), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("no_labels"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(3), + }, + }, + }, + }, + }, + }, + // 2: Counters & gauges, docstrings, various whitespace, escape sequences. + { + in: ` +# A normal comment. +# +# TYPE name counter +name{labelname="val1",basename="basevalue"} NaN +name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890 +# HELP name two-line\n doc str\\ing + + # HELP name2 doc str"ing 2 + # TYPE name2 gauge +name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321 +name2{ labelname = "val1" , }-Inf +`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("two-line\n doc str\\ing"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(math.NaN()), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("base\"v\\al\nue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(.23), + }, + TimestampMs: proto.Int64(1234567890), + }, + }, + }, + 
&dto.MetricFamily{ + Name: proto.String("name2"), + Help: proto.String("doc str\"ing 2"), + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue2"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(+1)), + }, + TimestampMs: proto.Int64(54321), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + }, + }, + // 3: The evil summary, mixed with other types and funny comments. + { + in: ` +# TYPE my_summary summary +my_summary{n1="val1",quantile="0.5"} 110 +decoy -1 -2 +my_summary{n1="val1",quantile="0.9"} 140 1 +my_summary_count{n1="val1"} 42 +# Latest timestamp wins in case of a summary. +my_summary_sum{n1="val1"} 4711 2 +fake_sum{n1="val1"} 2001 +# TYPE another_summary summary +another_summary_count{n2="val2",n1="val1"} 20 +my_summary_count{n2="val2",n1="val1"} 5 5 +another_summary{n1="val1",n2="val2",quantile=".3"} -1.2 +my_summary_sum{n1="val2"} 08 15 +my_summary{n1="val3", quantile="0.2"} 4711 + my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN +# some +# funny comments +# HELP +# HELP +# HELP my_summary +# HELP my_summary +`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("fake_sum"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(2001), + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("decoy"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(-1), + }, + 
TimestampMs: proto.Int64(-2), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("my_summary"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(42), + SampleSum: proto.Float64(4711), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(110), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(140), + }, + }, + }, + TimestampMs: proto.Int64(2), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n2"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(5), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(-12.34), + Value: proto.Float64(math.NaN()), + }, + }, + }, + TimestampMs: proto.Int64(5), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val2"), + }, + }, + Summary: &dto.Summary{ + SampleSum: proto.Float64(8), + }, + TimestampMs: proto.Int64(15), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val3"), + }, + }, + Summary: &dto.Summary{ + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.2), + Value: proto.Float64(4711), + }, + }, + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("another_summary"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n2"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(20), + Quantile: 
[]*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.3), + Value: proto.Float64(-1.2), + }, + }, + }, + }, + }, + }, + }, + }, + // 4: The histogram. + { + in: ` +# HELP request_duration_microseconds The response latency. +# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + out: []*dto.MetricFamily{ + { + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + &dto.Bucket{ + UpperBound: proto.Float64(math.Inf(+1)), + CumulativeCount: proto.Uint64(2693), + }, + }, + }, + }, + }, + }, + }, + }, + } + + for i, scenario := range scenarios { + out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) + if err != nil { + t.Errorf("%d. error: %s", i, err) + continue + } + if expected, got := len(scenario.out), len(out); expected != got { + t.Errorf( + "%d. expected %d MetricFamilies, got %d", + i, expected, got, + ) + } + for _, expected := range scenario.out { + got, ok := out[expected.GetName()] + if !ok { + t.Errorf( + "%d. 
expected MetricFamily %q, found none", + i, expected.GetName(), + ) + continue + } + if expected.String() != got.String() { + t.Errorf( + "%d. expected MetricFamily %s, got %s", + i, expected, got, + ) + } + } + } +} + +func TestParse(t *testing.T) { + testParse(t) +} + +func BenchmarkParse(b *testing.B) { + for i := 0; i < b.N; i++ { + testParse(b) + } +} + +func testParseError(t testing.TB) { + var scenarios = []struct { + in string + err string + }{ + // 0: No new-line at end of input. + { + in: `bla 3.14`, + err: "EOF", + }, + // 1: Invalid escape sequence in label value. + { + in: `metric{label="\t"} 3.14`, + err: "text format parsing error in line 1: invalid escape sequence", + }, + // 2: Newline in label value. + { + in: ` +metric{label="new +line"} 3.14 +`, + err: `text format parsing error in line 2: label value "new" contains unescaped new-line`, + }, + // 3: + { + in: `metric{@="bla"} 3.14`, + err: "text format parsing error in line 1: invalid label name for metric", + }, + // 4: + { + in: `metric{__name__="bla"} 3.14`, + err: `text format parsing error in line 1: label name "__name__" is reserved`, + }, + // 5: + { + in: `metric{label+="bla"} 3.14`, + err: "text format parsing error in line 1: expected '=' after label name", + }, + // 6: + { + in: `metric{label=bla} 3.14`, + err: "text format parsing error in line 1: expected '\"' at start of label value", + }, + // 7: + { + in: ` +# TYPE metric summary +metric{quantile="bla"} 3.14 +`, + err: "text format parsing error in line 3: expected float as value for 'quantile' label", + }, + // 8: + { + in: `metric{label="bla"+} 3.14`, + err: "text format parsing error in line 1: unexpected end of label value", + }, + // 9: + { + in: `metric{label="bla"} 3.14 2.72 +`, + err: "text format parsing error in line 1: expected integer as timestamp", + }, + // 10: + { + in: `metric{label="bla"} 3.14 2 3 +`, + err: "text format parsing error in line 1: spurious string after timestamp", + }, + // 11: + { + in: 
`metric{label="bla"} blubb +`, + err: "text format parsing error in line 1: expected float as value", + }, + // 12: + { + in: ` +# HELP metric one +# HELP metric two +`, + err: "text format parsing error in line 3: second HELP line for metric name", + }, + // 13: + { + in: ` +# TYPE metric counter +# TYPE metric untyped +`, + err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`, + }, + // 14: + { + in: ` +metric 4.12 +# TYPE metric counter +`, + err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`, + }, + // 15: + { + in: ` +# TYPE metric bla +`, + err: "text format parsing error in line 2: unknown metric type", + }, + // 16: + { + in: ` +# TYPE met-ric +`, + err: "text format parsing error in line 2: invalid metric name in comment", + }, + // 17: + { + in: `@invalidmetric{label="bla"} 3.14 2`, + err: "text format parsing error in line 1: invalid metric name", + }, + // 18: + { + in: `{label="bla"} 3.14 2`, + err: "text format parsing error in line 1: invalid metric name", + }, + // 19: + { + in: ` +# TYPE metric histogram +metric_bucket{le="bla"} 3.14 +`, + err: "text format parsing error in line 3: expected float as value for 'le' label", + }, + } + + for i, scenario := range scenarios { + _, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) + if err == nil { + t.Errorf("%d. expected error, got nil", i) + continue + } + if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 { + t.Errorf( + "%d. 
expected error starting with %q, got %q", + i, expected, got, + ) + } + } + +} + +func TestParseError(t *testing.T) { + testParseError(t) +} + +func BenchmarkParseError(b *testing.B) { + for i := 0; i < b.N; i++ { + testParseError(b) + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/proto.go b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/proto.go new file mode 100644 index 0000000000..058adfc6f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/proto.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package text + +import ( + "fmt" + "io" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/ext" + + dto "github.com/prometheus/client_model/go" +) + +// WriteProtoDelimited writes the MetricFamily to the writer in delimited +// protobuf format and returns the number of bytes written and any error +// encountered. +func WriteProtoDelimited(w io.Writer, p *dto.MetricFamily) (int, error) { + return ext.WriteDelimited(w, p) +} + +// WriteProtoText writes the MetricFamily to the writer in text format and +// returns the number of bytes written and any error encountered. 
+func WriteProtoText(w io.Writer, p *dto.MetricFamily) (int, error) { + return fmt.Fprintf(w, "%s\n", proto.MarshalTextString(p)) +} + +// WriteProtoCompactText writes the MetricFamily to the writer in compact text +// format and returns the number of bytes written and any error encountered. +func WriteProtoCompactText(w io.Writer, p *dto.MetricFamily) (int, error) { + return fmt.Fprintf(w, "%s\n", p) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/protobuf new file mode 100644 index 0000000000000000000000000000000000000000..df48256390c4f08c38e03f14d6cb90c8f5a96137 GIT binary patch literal 8243 zcmeHMO^h5z74BI(_G~#2yW`-+u|v5bcx}8h>yTg(;%u)Rln@dp!G?ewX!my4%rx86 z-RY{1cN`8I;J|?>NC=`JL@WgIATV3bDwzbDnzy0>C{Og^i>e$?oxx7`$?ZnPfvrjn4kB_G(A$Y7DOXWV?w7GQcs~hrL}f+`?;SD zre~vbXm=_7rtX0F+4O{e4c%G_Z+9t!%nhb!8q@e$ojX?|=S%KDLq1-M@Wo$8*do_>e}|zP}gJ57S3=}G@8Z?)f&$5RqmTt zIDYUyElcx;#hj0A;EZ@D$IN7av2UPTt1v@7mNGxKKYEt`hm&HsLUNq;42DDT&<{yx z&T{TYyU_L^4T-(&uEj;8e3yGgA3!@eJf@+&4NBntS-IQwN8PJ!G^e!F4C)v0DkyiT zf^zaL-hw%B<(OyXnCV#<+k!bB_wUzQTXd@0T>M|@6nEo0zj$vP=I(xZ9Ol}W_?Vdt z=#u*s`Y-@eKq&QstngIxroe7df?YuY>Yy{jbVL~$;!}@*5wmDWFwIE9cr&M#apc4Sa@!BF{l&Ji=rxqO3F5a(3AY7Of}T zhtVxs-q2t3deCLgKkBbV!(v!L

&=dr?eqNNA8p8B23&in{EcLc6snK3~Yf<;HG6 z#!2@yI;dsjghnCk+#8Z%nvWD8z2ggN7|UUS)7)N)jaE+g`R^Z}3s;Z-arqH+K6-jsUnpGCf2Tq?^IP(CObFrSQ z!-vN;|2>92vYC7>_uvG%;By&{Qd-;nLkk_P2qe$=ap6O7R4*{%IMNpw$GB{Lw}E>p zN7W&Nv#<|k|tmUwBV{rO40C&9ET(IVtx zNJ)9kL?L~6{6{%vqfF4P+tCNncK%@R2yz0~Oqxsmdfjynpj*_-ogjh&uIRUy&$-SV zXKG^Uwk%5YoAqa1=ML!d<(pXs*C3SBIsN|9s_X0r{wCkwR3zrwH(ThwP0t)9A)s5* z_f~us3_N)&HZpy4;)%jf7+@F`fhT5y>E*)uMOx`J9)_QTZmm`A^KKx#M97)70apE{ zg%)n~6oP@;%jUK>HZ1oqS?YWV13H4hEm%xBGIju-NA&~nF_Xgs-5<{n%k81X>+c_K z(hA5HKl6i#jey`o1nMVF@FCJl%ZtfoExFx%k{%=yT5Cp6B85xct)X@eVnApf++i(Q~>3Gi{cL4=tOXTLjh#C6QV#|G;_BA zn+PG%Lr8`d5=#%k0t50bD>V?`1V~k?1-fviK<&GP?8 zk8f(UszPc96O_ptazQ1IL5T_CBIITY;xvI1OPs~?QHON&!-WX?$bwhAO%p;?BvR-ffS-d|1x*KJ|U^}zH2}+ zU&eSHF4PL#fEM3#FHQheQSurX z5%Gm-1ct9xPf7qeAb7;5AAp&P96S$?5YIZp!aU6~NHJs5Mpc^57pZ|kn+;7~Eyg;+ z^{O=KV$^huhDiI0+edd+yxpWoMOc@sDSh0eK!sO>9?Sa4j8SLvA|_r*lQ&_XqRwh! zD%myWD|zv&kZOLkl&0l5=KS{}eOpN3} zc075yjeh*la=_bXzz+Eyx)Q=J5N^5vz6WwDGUI%+1-(aGPqDWhzlTDTxLpP=-17oy ztu=|q_*kt;6k|0KEF!W-g2h;k#dloixfXOsSOQiwFqspvP1I18{P0bZ=sHW&rJ3O^h7H6`rw;JzIugHzr<;P09nryUy;+ znx9w!%=XGeBm^6XHzCL*?b_~|na=hfRn>UM_SNCb)!OGHAE5aAFdxy4G5 zaskDcBuXSCazQ>siiMIJhrH^p+V1L^*)=dpq-f``yHi#D-mCY%_r3ReKWw08Pl&`} z^of*m;dm({!jB^-^j#Kn>c)}Bt-!vPUAa(&9NAK~)Z*5sDvqv&e1 zqemhypv)28WTTOaG(Z5`=Z> zI3rT=79fnr0p(+a)Z=J(p<#Qw%Sh52pY+pcS0VkXc8`)|e8Rtlt}BE$+FU^9x?^;W z>HLJnkPJPV%JAO3`}7OH{a0iDpLSV;6!80>zi5{=wBNmD`_q?yV3#!$Tp=q5X`$upZgv&l1B`u{J_$P@Nk7wrKJbIcylkgu5< zVC_5Tx-!jhN5p~z*3FCZ7mm_!i835_47%YsBpz)|)|&UCooJgId(_%`+uYm;ThTRW zyZkfutgQgeeG|F0t0(MB4K%5=^8|23%Pe=W#B%xWCe3{U>}=B9Ce3|p;s0}*d+!$? 
z4$|BmKN+OC#`!^-L+9Sz>c9rf`&`fvAAk63Sok5( zPC_FgX=iPYt2K85jr}x{-P*&!3WOx!=J+Kmk^#L2BeuoJRf%rb+DfjELkqj-kD?myY~#;X$-t>v+;< zA|-)o@qw@Bs~Z}OtqThdI#eLi@glI>bAmWtP7|j?{2&FdC?wq~YgQ4W`%a#kJ#`<> z&tiR6!$+h4A@rG1^0nLp>&UIhYuJCNqr)YE^hpw=1Oi)rfe{40_V|pHt+#L|h8ze2 zte%1;z-$aSrEV&yEV)lKZjKb!xE^l*88kRC42zgkCM zC|xLsU5F=#8<8$`+#Yq8lh}{Mi2FTi+;1v-CsS-sV;|54j+D17WC0jv6*g2|&X@sM z)q_)Mv|P!OTJ3{6np($5cmy&~$Mbe+lc=;<*&&&dO!=yU1y01x98+<#rkG!jy{r-NC5xv40zc| ztRJktNGqO`F#IyqhWc2l&u{8z_DXM&b)eRQY4)1Ix&3*UI#mQ#BM8EhADJGwz+iF9 z*Qys7EN-yYL31114~ z;Vkim)B+i|q?A&xnonKSUPoIO#t`Z`oPgcmq zq>x+gI?75KW}w8lLF!32xj`IJ&9lJRE!xsw&iXJ%rUKkp*g0)jcjgDIdTO^z)s8%&Xv=wCAX+v_Biw!1N`RbWF7RHIn-G&;jP z7AFa$m~m*WDvf8T>ltmcd94b@TEmLbuxktn_hWV!-MWDyg|N0+j)o~v@EQuMA8#@0 zEK39G6m9Z|?(^os?i$M{D^0Z=fG0AE8E6`rZ4fDl#!=P7LMqNuc@>wDB!3J0-8#%34SmGgdgM+HL*lj^8LKCT42d)L;E*^o1bwX`O>k>S j6Wp?!PuGEv3nR81kv80=_V4+x466SI@kr%FGavu}%V!DY literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text new file mode 100644 index 0000000000..1b2b302213 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text @@ -0,0 +1,322 @@ +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
+# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="/",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/"} 0 +http_request_duration_microseconds_count{handler="/"} 0 +http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/alerts"} 0 +http_request_duration_microseconds_count{handler="/alerts"} 0 +http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/metrics"} 0 +http_request_duration_microseconds_count{handler="/api/metrics"} 0 +http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/query"} 0 +http_request_duration_microseconds_count{handler="/api/query"} 0 +http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/query_range"} 0 +http_request_duration_microseconds_count{handler="/api/query_range"} 0 +http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0 
+http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/targets"} 0 +http_request_duration_microseconds_count{handler="/api/targets"} 0 +http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/consoles/"} 0 +http_request_duration_microseconds_count{handler="/consoles/"} 0 +http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/graph"} 0 +http_request_duration_microseconds_count{handler="/graph"} 0 +http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/heap"} 0 +http_request_duration_microseconds_count{handler="/heap"} 0 +http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/static/"} 0 +http_request_duration_microseconds_count{handler="/static/"} 0 +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384 +http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001 +http_request_duration_microseconds_count{handler="prometheus"} 119 +# HELP 
http_request_size_bytes The HTTP request sizes in bytes. +# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="/",quantile="0.5"} 0 +http_request_size_bytes{handler="/",quantile="0.9"} 0 +http_request_size_bytes{handler="/",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/"} 0 +http_request_size_bytes_count{handler="/"} 0 +http_request_size_bytes{handler="/alerts",quantile="0.5"} 0 +http_request_size_bytes{handler="/alerts",quantile="0.9"} 0 +http_request_size_bytes{handler="/alerts",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/alerts"} 0 +http_request_size_bytes_count{handler="/alerts"} 0 +http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/metrics"} 0 +http_request_size_bytes_count{handler="/api/metrics"} 0 +http_request_size_bytes{handler="/api/query",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/query",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/query",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/query"} 0 +http_request_size_bytes_count{handler="/api/query"} 0 +http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/query_range"} 0 +http_request_size_bytes_count{handler="/api/query_range"} 0 +http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/targets"} 0 +http_request_size_bytes_count{handler="/api/targets"} 0 +http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0 
+http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0 +http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/consoles/"} 0 +http_request_size_bytes_count{handler="/consoles/"} 0 +http_request_size_bytes{handler="/graph",quantile="0.5"} 0 +http_request_size_bytes{handler="/graph",quantile="0.9"} 0 +http_request_size_bytes{handler="/graph",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/graph"} 0 +http_request_size_bytes_count{handler="/graph"} 0 +http_request_size_bytes{handler="/heap",quantile="0.5"} 0 +http_request_size_bytes{handler="/heap",quantile="0.9"} 0 +http_request_size_bytes{handler="/heap",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/heap"} 0 +http_request_size_bytes_count{handler="/heap"} 0 +http_request_size_bytes{handler="/static/",quantile="0.5"} 0 +http_request_size_bytes{handler="/static/",quantile="0.9"} 0 +http_request_size_bytes{handler="/static/",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/static/"} 0 +http_request_size_bytes_count{handler="/static/"} 0 +http_request_size_bytes{handler="prometheus",quantile="0.5"} 291 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 291 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 291 +http_request_size_bytes_sum{handler="prometheus"} 34488 +http_request_size_bytes_count{handler="prometheus"} 119 +# HELP http_requests_total Total number of HTTP requests made. +# TYPE http_requests_total counter +http_requests_total{code="200",handler="prometheus",method="get"} 119 +# HELP http_response_size_bytes The HTTP response sizes in bytes. 
+# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="/",quantile="0.5"} 0 +http_response_size_bytes{handler="/",quantile="0.9"} 0 +http_response_size_bytes{handler="/",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/"} 0 +http_response_size_bytes_count{handler="/"} 0 +http_response_size_bytes{handler="/alerts",quantile="0.5"} 0 +http_response_size_bytes{handler="/alerts",quantile="0.9"} 0 +http_response_size_bytes{handler="/alerts",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/alerts"} 0 +http_response_size_bytes_count{handler="/alerts"} 0 +http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/metrics"} 0 +http_response_size_bytes_count{handler="/api/metrics"} 0 +http_response_size_bytes{handler="/api/query",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/query",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/query",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/query"} 0 +http_response_size_bytes_count{handler="/api/query"} 0 +http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/query_range"} 0 +http_response_size_bytes_count{handler="/api/query_range"} 0 +http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/targets"} 0 +http_response_size_bytes_count{handler="/api/targets"} 0 +http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0 +http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0 
+http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/consoles/"} 0 +http_response_size_bytes_count{handler="/consoles/"} 0 +http_response_size_bytes{handler="/graph",quantile="0.5"} 0 +http_response_size_bytes{handler="/graph",quantile="0.9"} 0 +http_response_size_bytes{handler="/graph",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/graph"} 0 +http_response_size_bytes_count{handler="/graph"} 0 +http_response_size_bytes{handler="/heap",quantile="0.5"} 0 +http_response_size_bytes{handler="/heap",quantile="0.9"} 0 +http_response_size_bytes{handler="/heap",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/heap"} 0 +http_response_size_bytes_count{handler="/heap"} 0 +http_response_size_bytes{handler="/static/",quantile="0.5"} 0 +http_response_size_bytes{handler="/static/",quantile="0.9"} 0 +http_response_size_bytes{handler="/static/",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/static/"} 0 +http_response_size_bytes_count{handler="/static/"} 0 +http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064 +http_response_size_bytes_sum{handler="prometheus"} 247001 +http_response_size_bytes_count{handler="prometheus"} 119 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 0.55 +# HELP process_goroutines Number of goroutines that currently exist. +# TYPE process_goroutines gauge +process_goroutines 70 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 8192 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 29 +# HELP process_resident_memory_bytes Resident memory size in bytes. 
+# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 5.3870592e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.42236894836e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 5.41478912e+08 +# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures. +# TYPE prometheus_dns_sd_lookup_failures_total counter +prometheus_dns_sd_lookup_failures_total 0 +# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups. +# TYPE prometheus_dns_sd_lookups_total counter +prometheus_dns_sd_lookups_total 7 +# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute. +# TYPE prometheus_evaluator_duration_milliseconds summary +prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0 +prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0 +prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0 +prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1 +prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1 +prometheus_evaluator_duration_milliseconds_sum 12 +prometheus_evaluator_duration_milliseconds_count 23 +# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks. +# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge +prometheus_local_storage_checkpoint_duration_milliseconds 0 +# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type. 
+# TYPE prometheus_local_storage_chunk_ops_total counter +prometheus_local_storage_chunk_ops_total{type="create"} 598 +prometheus_local_storage_chunk_ops_total{type="persist"} 174 +prometheus_local_storage_chunk_ops_total{type="pin"} 920 +prometheus_local_storage_chunk_ops_total{type="transcode"} 415 +prometheus_local_storage_chunk_ops_total{type="unpin"} 920 +# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds. +# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary +prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0 +prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0 +prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0 +prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0 +prometheus_local_storage_indexing_batch_latency_milliseconds_count 1 +# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch). +# TYPE prometheus_local_storage_indexing_batch_sizes summary +prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2 +prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2 +prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2 +prometheus_local_storage_indexing_batch_sizes_sum 2 +prometheus_local_storage_indexing_batch_sizes_count 1 +# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue. +# TYPE prometheus_local_storage_indexing_queue_capacity gauge +prometheus_local_storage_indexing_queue_capacity 16384 +# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed. +# TYPE prometheus_local_storage_indexing_queue_length gauge +prometheus_local_storage_indexing_queue_length 0 +# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested. 
+# TYPE prometheus_local_storage_ingested_samples_total counter +prometheus_local_storage_ingested_samples_total 30473 +# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes. +# TYPE prometheus_local_storage_invalid_preload_requests_total counter +prometheus_local_storage_invalid_preload_requests_total 0 +# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory. +# TYPE prometheus_local_storage_memory_chunkdescs gauge +prometheus_local_storage_memory_chunkdescs 1059 +# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor). +# TYPE prometheus_local_storage_memory_chunks gauge +prometheus_local_storage_memory_chunks 1020 +# HELP prometheus_local_storage_memory_series The current number of series in memory. +# TYPE prometheus_local_storage_memory_series gauge +prometheus_local_storage_memory_series 424 +# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk. +# TYPE prometheus_local_storage_persist_latency_microseconds summary +prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377 +prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539 +prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463 +prometheus_local_storage_persist_latency_microseconds_sum 20424.415 +prometheus_local_storage_persist_latency_microseconds_count 174 +# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue. +# TYPE prometheus_local_storage_persist_queue_capacity gauge +prometheus_local_storage_persist_queue_capacity 1024 +# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue. 
+# TYPE prometheus_local_storage_persist_queue_length gauge +prometheus_local_storage_persist_queue_length 0 +# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type. +# TYPE prometheus_local_storage_series_ops_total counter +prometheus_local_storage_series_ops_total{type="create"} 2 +prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11 +# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications). +# TYPE prometheus_notifications_latency_milliseconds summary +prometheus_notifications_latency_milliseconds{quantile="0.5"} 0 +prometheus_notifications_latency_milliseconds{quantile="0.9"} 0 +prometheus_notifications_latency_milliseconds{quantile="0.99"} 0 +prometheus_notifications_latency_milliseconds_sum 0 +prometheus_notifications_latency_milliseconds_count 0 +# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. +# TYPE prometheus_notifications_queue_capacity gauge +prometheus_notifications_queue_capacity 100 +# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. +# TYPE prometheus_notifications_queue_length gauge +prometheus_notifications_queue_length 0 +# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute. 
+# TYPE prometheus_rule_evaluation_duration_milliseconds summary +prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2 +prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12 +prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115 +prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3 +prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15 +prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115 +# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. +# TYPE prometheus_rule_evaluation_failures_total counter +prometheus_rule_evaluation_failures_total 0 +# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples. +# TYPE prometheus_samples_queue_capacity gauge +prometheus_samples_queue_capacity 4096 +# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name). +# TYPE prometheus_samples_queue_length gauge +prometheus_samples_queue_length 0 +# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. 
+# TYPE prometheus_target_interval_length_seconds summary +prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15 +prometheus_target_interval_length_seconds_sum{interval="15s"} 175 +prometheus_target_interval_length_seconds_count{interval="15s"} 12 +prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0 +prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0 +prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0 +prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1 +prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1 +prometheus_target_interval_length_seconds_sum{interval="1s"} 55 +prometheus_target_interval_length_seconds_count{interval="1s"} 117 diff --git a/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text.gz b/Godeps/_workspace/src/github.com/prometheus/client_golang/text/testdata/text.gz new file mode 100644 index 0000000000000000000000000000000000000000..46de5995ad79083b3e4a47f74936ac9382bc4ee5 GIT binary patch literal 2595 zcmV+;3f%P{iwFoRp~qAJ19WA0bO5zmU6Y$Q7Jctuq3U^=sZBg!8{0he!&D|!^Dvvr zZhE(Bo+ujBHWds6#H77l^WXPMh&CSrT?tPfk~HWXu8yRudj$Oh`R(KHpUH`_A}Hy% znpP}`>oR0Xo(1P5D)Wj)c@|gX@kGgQkB^^;Y(~hn1)FL(N;uct7JQ(8XU|9t=MTODRBN$JDAv-0l$BF+*5e=z};A%O07S&*nGuQO(j z>mWk-hgFMpQ_)zcr=+peP;yx+X@u_Lisa`rWn~iGK-4KN8)YZCY~}3`+G=b}F#to$ z@^TZI6-<^QtC$u@+|Vr$*n~g@4azV((%Xrw;#(rMl5eTtl60x;Ml=Hg7M4ePi_AR5 zWhuIvYk}Y`;R3PEC^}&Hxyn;oEiW7(g{&$T zOGz6lOF2!z9oUJ6#bPDLBo;y{NGx>}OqMblU^}Ra!R=`IYFF}DXsPD0l(QwXRMZT9 z1r}uvMcj$jioMF+IQ!Ll@7;uu%iOdxk_}6~P&?NkP;OYDNzoz`|$Y!B|ny 
zj%reuNgp&L4FZEROl#k@C-4^&Xc%S;&}O?m9I$#}oK6+6oP{ZQ6n|&+`H7Y!f9^S+ zik!okDveGH7f+_;o#NhWl*iERwd35sQ%b}ClgAJDu!kt)tP04Sbi$G~H#MGY%rG!z zqXsxvhKKo&h=>3i+chf&BMA|P4I`hz zdEunP01Z8x1s*LK%C+ePQyMV6TsWA4aWllCMbcU~oj@ZerWZ#K!MiszC|VYsf77Wl z^kaHav$XN*QGgD`PAaTJbBuMY-<^l{GCUqB&znByImP&6F7EmE+SM- zG~zI6k{C1#&h(r^E!3FXf6J?+xhl-@*c;Dt^g~d$@m8C|-tqi3{fDz3h6angWC5RM z?ff8L_@!p%fhI7K+r|)K7Ew}X$qS)H9-Zdqq=L8~tfGOt+OF4|-ND};Ham!RB;=n- z$?7oe_wT>WMVyREhJIkmCf;i6ZubY@h)3QL=l$J;%lU>F*wNp=&v19AS=VH;wR|< z-@2YDyw0vq){SG5#ZZ7{$KWYs(J5$t&FIpf4gZo0v=UiTG$bu<_=`zbn0|aQ&sJnn zW0LvYwv5idG2L=Lkr5YCG%3r8GOT)1XpqC*=1qVZ1-Edq2tU(b%|O@)2gek|0d>K2mV`aZrWlBBgViv1~zF z2&N}Wa_ufWf!jFSy}ss3GN|5?I4DY*=3zW~bFJu^YHI)jHOivo$`BSk}~3WQMl-poEuyZ_IjM|Hr)S#9?F8;~_e#+#M5@o&K<=yvPf zYP;Lm@ii5L1H-$T^%k=nvVz#-r--72keP8%NJs#-j+o%rO!WyAtW?t;b!v#o6xEgz zuy7am5LZwrG5RFbEPaiwm-)fo*N>i}-5iIAp|hVUfG42mFo8#AVHWYNSEfGkrbQuA zmb2tp)(Dl_r~cktB;R`Gr=pqIj>Kn1q(=iDDCdVk?T!_fc~QWY)oY~Omk`k?a-kkI zZ<7u2BOhyuw;&ihUj(MfWU8gn2O}(yYLq9}_$>QqbGb~fCu&~_)A$1DZ8VQXlDCqq6>I+Si%&LP2WB)_$3lw3ADGN z_NDAQ? z{(8`ap~;dXj + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Tobias Schmidt diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/prometheus/procfs/CONTRIBUTING.md new file mode 100644 index 0000000000..5705f0fbea --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). 
+ This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/LICENSE b/Godeps/_workspace/src/github.com/prometheus/procfs/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/NOTICE b/Godeps/_workspace/src/github.com/prometheus/procfs/NOTICE new file mode 100644 index 0000000000..53c5e9aa11 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/README.md b/Godeps/_workspace/src/github.com/prometheus/procfs/README.md new file mode 100644 index 0000000000..0edf496663 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/README.md @@ -0,0 +1,11 @@ +# procfs + +This procfs package provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. 
+ +[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) +[![Circle CI](https://circleci.com/gh/prometheus/procfs.svg?style=svg)](https://circleci.com/gh/prometheus/procfs) + +# Testing + + $ go test diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/doc.go b/Godeps/_workspace/src/github.com/prometheus/procfs/doc.go new file mode 100644 index 0000000000..c6400345f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. 
+// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/client_golang/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.Stat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/cmdline b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/cmdline new file mode 100644 index 0000000000000000000000000000000000000000..d2d8ef88764f6c9f819285f534aa70835a54950c GIT binary patch literal 16 XcmXTR%w;G^EiTbZ&u7p!G++P#E?@+m literal 0 HcmV?d00001 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/0 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/0 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/1 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/1 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/2 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/2 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/3 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/3 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/4 b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/fd/4 new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/limits b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/limits new file mode 100644 index 0000000000..23c6b6898f --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/limits @@ -0,0 +1,17 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 62898 62898 processes +Max open files 2048 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 62898 62898 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/stat b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/stat new file mode 100644 index 0000000000..438aaa9dce --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/26231/stat @@ -0,0 +1 @@ +26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/584/stat b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/584/stat new file mode 100644 index 0000000000..65b9369d13 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/584/stat @@ -0,0 +1,2 @@ +1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 
10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 +#!/bin/cat /proc/self/stat diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/stat b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/stat new file mode 100644 index 0000000000..dabb96f747 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/fixtures/stat @@ -0,0 +1,16 @@ +cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 +cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 +cpu1 47869 23 16474 1110787 591 0 46 0 0 0 +cpu2 46504 36 15916 1112321 441 0 326 0 0 0 +cpu3 47054 102 15683 1113230 533 0 60 0 0 0 +cpu4 28413 25 10776 1140321 217 0 8 0 0 0 +cpu5 29271 101 11586 1136270 672 0 30 0 0 0 +cpu6 29152 36 10276 1139721 319 0 29 0 0 0 +cpu7 29098 268 10164 1139282 555 0 31 0 0 0 +intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 38014093 +btime 1418183276 +processes 26442 +procs_running 2 +procs_blocked 0 +softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fs.go b/Godeps/_workspace/src/github.com/prometheus/procfs/fs.go new file mode 100644 index 0000000000..838474aba4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/fs.go @@ -0,0 +1,36 @@ +package procfs + +import ( + "fmt" + "os" + "path" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. 
+func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +func (fs FS) stat(p string) (os.FileInfo, error) { + return os.Stat(path.Join(string(fs), p)) +} + +func (fs FS) open(p string) (*os.File, error) { + return os.Open(path.Join(string(fs), p)) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/fs_test.go b/Godeps/_workspace/src/github.com/prometheus/procfs/fs_test.go new file mode 100644 index 0000000000..91f1c6c976 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/fs_test.go @@ -0,0 +1,13 @@ +package procfs + +import "testing" + +func TestNewFS(t *testing.T) { + if _, err := NewFS("foobar"); err == nil { + t.Error("want NewFS to fail for non-existing mount point") + } + + if _, err := NewFS("procfs.go"); err == nil { + t.Error("want NewFS to fail if mount point is not a directory") + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc.go new file mode 100644 index 0000000000..21445cf176 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/proc.go @@ -0,0 +1,149 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + "strings" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process. +func Self() (Proc, error) { + return NewProc(os.Getpid()) +} + +// NewProc returns a process for the given pid under /proc. 
+func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently avaible processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + + return fs.AllProcs() +} + +// NewProc returns a process for the given pid. +func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := fs.stat(strconv.Itoa(pid)); err != nil { + return Proc{}, err + } + + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently avaible processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := fs.open("") + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := p.open("cmdline") + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. 
+func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := p.open("fd") + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) open(pa string) (*os.File, error) { + return p.fs.open(path.Join(strconv.Itoa(p.PID), pa)) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits.go new file mode 100644 index 0000000000..9f080b9f62 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits.go @@ -0,0 +1,111 @@ +package procfs + +import ( + "bufio" + "fmt" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. +type ProcLimits struct { + CPUTime int + FileSize int + DataSize int + StackSize int + CoreFileSize int + ResidentSet int + Processes int + OpenFiles int + LockedMemory int + AddressSpace int + FileLocks int + PendingSignals int + MsqqueueSize int + NicePriority int + RealtimePriority int + RealtimeTimeout int +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. 
+func (p Proc) NewLimits() (ProcLimits, error) { + f, err := p.open("limits") + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + for s.Scan() { + fields := limitsDelimiter.Split(s.Text(), limitsFields) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf( + "couldn't parse %s line %s", f.Name(), s.Text()) + } + + switch fields[0] { + case "Max cpu time": + l.CPUTime, err = parseInt(fields[1]) + case "Max file size": + l.FileLocks, err = parseInt(fields[1]) + case "Max data size": + l.DataSize, err = parseInt(fields[1]) + case "Max stack size": + l.StackSize, err = parseInt(fields[1]) + case "Max core file size": + l.CoreFileSize, err = parseInt(fields[1]) + case "Max resident set": + l.ResidentSet, err = parseInt(fields[1]) + case "Max processes": + l.Processes, err = parseInt(fields[1]) + case "Max open files": + l.OpenFiles, err = parseInt(fields[1]) + case "Max locked memory": + l.LockedMemory, err = parseInt(fields[1]) + case "Max address space": + l.AddressSpace, err = parseInt(fields[1]) + case "Max file locks": + l.FileLocks, err = parseInt(fields[1]) + case "Max pending signals": + l.PendingSignals, err = parseInt(fields[1]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseInt(fields[1]) + case "Max nice priority": + l.NicePriority, err = parseInt(fields[1]) + case "Max realtime priority": + l.RealtimePriority, err = parseInt(fields[1]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseInt(fields[1]) + } + + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseInt(s string) (int, error) { + if s == limitsUnlimited { + return -1, nil + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + } + return int(i), nil +} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits_test.go 
b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits_test.go new file mode 100644 index 0000000000..ca7a254da7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_limits_test.go @@ -0,0 +1,36 @@ +package procfs + +import "testing" + +func TestNewLimits(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + + p, err := fs.NewProc(26231) + if err != nil { + t.Fatal(err) + } + + l, err := p.NewLimits() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want int + got int + }{ + {name: "cpu time", want: -1, got: l.CPUTime}, + {name: "open files", want: 2048, got: l.OpenFiles}, + {name: "msgqueue size", want: 819200, got: l.MsqqueueSize}, + {name: "nice priority", want: 0, got: l.NicePriority}, + {name: "address space", want: -1, got: l.AddressSpace}, + } { + if test.want != test.got { + t.Errorf("want %s %d, got %d", test.name, test.want, test.got) + } + } +} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 0000000000..1e027762ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,165 @@ +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" +) + +// #include +import "C" + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. 
+ Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize int + // Resident set size in pages. + RSS int + + fs FS +} + +// NewStat returns the current status information of the process. 
+func (p Proc) NewStat() (ProcStat, error) { + f, err := p.open("stat") + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, fs: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() int { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + stat, err := s.fs.NewStat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / ticks()), nil +} + +// CPUTime returns the total CPU user and system time in seconds. 
+func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / ticks() +} + +func ticks() float64 { + return float64(C.sysconf(C._SC_CLK_TCK)) // most likely 100 +} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat_test.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat_test.go new file mode 100644 index 0000000000..e4d5cacfa4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_stat_test.go @@ -0,0 +1,112 @@ +package procfs + +import "testing" + +func TestProcStat(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + + p, err := fs.NewProc(26231) + if err != nil { + t.Fatal(err) + } + + s, err := p.NewStat() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want int + got int + }{ + {name: "pid", want: 26231, got: s.PID}, + {name: "user time", want: 1677, got: int(s.UTime)}, + {name: "system time", want: 44, got: int(s.STime)}, + {name: "start time", want: 82375, got: int(s.Starttime)}, + {name: "virtual memory size", want: 56274944, got: s.VSize}, + {name: "resident set size", want: 1981, got: s.RSS}, + } { + if test.want != test.got { + t.Errorf("want %s %d, got %d", test.name, test.want, test.got) + } + } +} + +func TestProcStatComm(t *testing.T) { + s1, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + if want, got := "vim", s1.Comm; want != got { + t.Errorf("want comm %s, got %s", want, got) + } + + s2, err := testProcStat(584) + if err != nil { + t.Fatal(err) + } + if want, got := "(a b ) ( c d) ", s2.Comm; want != got { + t.Errorf("want comm %s, got %s", want, got) + } +} + +func TestProcStatVirtualMemory(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + if want, got := 56274944, s.VirtualMemory(); want != got { + t.Errorf("want virtual memory %d, got %d", want, got) + } +} + +func TestProcStatResidentMemory(t *testing.T) { + s, err := testProcStat(26231) + if 
err != nil { + t.Fatal(err) + } + + if want, got := 1981*4096, s.ResidentMemory(); want != got { + t.Errorf("want resident memory %d, got %d", want, got) + } +} + +func TestProcStatStartTime(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + time, err := s.StartTime() + if err != nil { + t.Fatal(err) + } + if want, got := 1418184099.75, time; want != got { + t.Errorf("want start time %f, got %f", want, got) + } +} + +func TestProcStatCPUTime(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + if want, got := 17.21, s.CPUTime(); want != got { + t.Errorf("want cpu time %f, got %f", want, got) + } +} + +func testProcStat(pid int) (ProcStat, error) { + p, err := testProcess(pid) + if err != nil { + return ProcStat{}, err + } + + return p.NewStat() +} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/proc_test.go b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_test.go new file mode 100644 index 0000000000..bd234a14cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/proc_test.go @@ -0,0 +1,123 @@ +package procfs + +import ( + "os" + "reflect" + "sort" + "testing" +) + +func TestSelf(t *testing.T) { + p1, err := NewProc(os.Getpid()) + if err != nil { + t.Fatal(err) + } + p2, err := Self() + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(p1, p2) { + t.Errorf("want process %v to equal %v", p1, p2) + } +} + +func TestAllProcs(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + procs, err := fs.AllProcs() + if err != nil { + t.Fatal(err) + } + sort.Sort(procs) + for i, p := range []*Proc{{PID: 584}, {PID: 26231}} { + if want, got := p.PID, procs[i].PID; want != got { + t.Errorf("want processes %d, got %d", want, got) + } + } +} + +func TestCmdLine(t *testing.T) { + p1, err := testProcess(26231) + if err != nil { + t.Fatal(err) + } + c, err := p1.CmdLine() + if err != nil { + t.Fatal(err) + } + if want := 
[]string{"vim", "test.go", "+10"}; !reflect.DeepEqual(want, c) { + t.Errorf("want cmdline %v, got %v", want, c) + } +} + +func TestFileDescriptors(t *testing.T) { + p1, err := testProcess(26231) + if err != nil { + t.Fatal(err) + } + fds, err := p1.FileDescriptors() + if err != nil { + t.Fatal(err) + } + sort.Sort(byUintptr(fds)) + if want := []uintptr{0, 1, 2, 3, 4}; !reflect.DeepEqual(want, fds) { + t.Errorf("want fds %v, got %v", want, fds) + } + + p2, err := Self() + if err != nil { + t.Fatal(err) + } + + fdsBefore, err := p2.FileDescriptors() + if err != nil { + t.Fatal(err) + } + + s, err := os.Open("fixtures") + if err != nil { + t.Fatal(err) + } + defer s.Close() + + fdsAfter, err := p2.FileDescriptors() + if err != nil { + t.Fatal(err) + } + + if len(fdsBefore)+1 != len(fdsAfter) { + t.Errorf("want fds %v+1 to equal %v", fdsBefore, fdsAfter) + } +} + +func TestFileDescriptorsLen(t *testing.T) { + p1, err := testProcess(26231) + if err != nil { + t.Fatal(err) + } + l, err := p1.FileDescriptorsLen() + if err != nil { + t.Fatal(err) + } + if want, got := 5, l; want != got { + t.Errorf("want fds %d, got %d", want, got) + } +} + +func testProcess(pid int) (Proc, error) { + fs, err := NewFS("fixtures") + if err != nil { + return Proc{}, err + } + + return fs.NewProc(pid) +} + +type byUintptr []uintptr + +func (a byUintptr) Len() int { return len(a) } +func (a byUintptr) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byUintptr) Less(i, j int) bool { return a[i] < a[j] } diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/stat.go b/Godeps/_workspace/src/github.com/prometheus/procfs/stat.go new file mode 100644 index 0000000000..26fefb0fa0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/stat.go @@ -0,0 +1,55 @@ +package procfs + +import ( + "bufio" + "fmt" + "strconv" + "strings" +) + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. 
+ BootTime int64 +} + +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Stat{}, err + } + + return fs.NewStat() +} + +// NewStat returns an information about current kernel/system statistics. +func (fs FS) NewStat() (Stat, error) { + f, err := fs.open("stat") + if err != nil { + return Stat{}, err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + line := s.Text() + if !strings.HasPrefix(line, "btime") { + continue + } + fields := strings.Fields(line) + if len(fields) != 2 { + return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) + } + i, err := strconv.ParseInt(fields[1], 10, 32) + if err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) + } + return Stat{BootTime: i}, nil + } + if err := s.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) + } + + return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) +} diff --git a/Godeps/_workspace/src/github.com/prometheus/procfs/stat_test.go b/Godeps/_workspace/src/github.com/prometheus/procfs/stat_test.go new file mode 100644 index 0000000000..24b5d61f8e --- /dev/null +++ b/Godeps/_workspace/src/github.com/prometheus/procfs/stat_test.go @@ -0,0 +1,19 @@ +package procfs + +import "testing" + +func TestStat(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + + s, err := fs.NewStat() + if err != nil { + t.Fatal(err) + } + + if want, got := int64(1418183276), s.BootTime; want != got { + t.Errorf("want boot time %d, got %d", want, got) + } +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go similarity index 98% rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go rename to 
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go index e76657e5ec..0dd60fd829 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go @@ -4,7 +4,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -// +build go1.3 +// +build !go1.2 package leveldb diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go new file mode 100644 index 0000000000..175e222032 --- /dev/null +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go @@ -0,0 +1,30 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build !go1.2 + +package cache + +import ( + "math/rand" + "testing" +) + +func BenchmarkLRUCache(b *testing.B) { + c := NewCache(NewLRU(10000)) + + b.SetParallelism(10) + b.RunParallel(func(pb *testing.PB) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + for pb.Next() { + key := uint64(r.Intn(1000000)) + c.Get(0, key, func() (int, Value) { + return 1, key + }).Release() + } + }) +} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go index 5575583dce..c2a50156f0 100644 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go +++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go @@ -552,19 +552,3 @@ func TestLRUCache_Close(t *testing.T) { t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled) } } - -func BenchmarkLRUCache(b *testing.B) { - c := NewCache(NewLRU(10000)) - - b.SetParallelism(10) - b.RunParallel(func(pb 
*testing.PB) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for pb.Next() { - key := uint64(r.Intn(1000000)) - c.Get(0, key, func() (int, Value) { - return 1, key - }).Release() - } - }) -} From 464524fa44a21bcfaef969f7bb5e12fdb4d1061d Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 17 Feb 2015 02:05:17 +0100 Subject: [PATCH 5/6] Make config/Makefile use user Go installation. Otherwise it will fail to "go get" the "github.com/golang/protobuf" package because its dir already exists in the vendored packages, but the copy isn't a git repository. --- config/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/Makefile b/config/Makefile index 983c0a05dd..1a71d02b93 100644 --- a/config/Makefile +++ b/config/Makefile @@ -18,5 +18,5 @@ SUFFIXES: include ../Makefile.INCLUDE generated/config.pb.go: config.proto - $(GO) get github.com/golang/protobuf/protoc-gen-go + go get github.com/golang/protobuf/protoc-gen-go $(PROTOC) --proto_path=$(PREFIX)/include:. --go_out=generated/ config.proto From 13048b7468b75b4bdb8a75be54dfd4a24b03131a Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Tue, 17 Feb 2015 02:06:14 +0100 Subject: [PATCH 6/6] Simplify GOPATH/dependency setup. --- Makefile | 36 +++++++++++++++++------------------- Makefile.INCLUDE | 5 ++--- 2 files changed, 19 insertions(+), 22 deletions(-) diff --git a/Makefile b/Makefile index dce56e777f..6778045432 100644 --- a/Makefile +++ b/Makefile @@ -17,16 +17,16 @@ include Makefile.INCLUDE all: binary test -$(GOCC): $(BUILD_PATH)/cache/$(GOPKG) $(FULL_GOPATH) +$(GOCC): $(BUILD_PATH)/cache/$(GOPKG) tar -C $(BUILD_PATH)/root -xzf $< touch $@ -advice: +advice: $(GOCC) $(GO) vet ./... binary: build -build: config dependencies tools web +build: config tools web $(GOPATH) $(GO) build -o prometheus $(BUILDFLAGS) . 
docker: build @@ -49,7 +49,7 @@ tag: $(BUILD_PATH)/cache/$(GOPKG): $(CURL) -o $@ -L $(GOURL)/$(GOPKG) -benchmark: config dependencies tools +benchmark: config dependencies tools web $(GO) test $(GO_TEST_FLAGS) -test.run='NONE' -test.bench='.*' -test.benchmem ./... | tee benchmark.txt clean: @@ -62,17 +62,21 @@ clean: -find . -type f -name '*#' -exec rm '{}' ';' -find . -type f -name '.#*' -exec rm '{}' ';' -config: dependencies +config: $(MAKE) -C config -dependencies: $(GOCC) $(FULL_GOPATH) - cp -a $(CURDIR)/Godeps/_workspace/src/* $(GOPATH)/src - $(GO) get -d +$(SELFLINK): $(GOPATH) + ln -s $(CURDIR) $@ + +$(GOPATH): + cp -a $(CURDIR)/Godeps/_workspace $(GOPATH) + +dependencies: $(GOCC) | $(SELFLINK) documentation: search_index godoc -http=:6060 -index -index_files='search_index' -format: +format: dependencies find . -iname '*.go' | egrep -v "^\./\.build|./generated|\./Godeps|\.(l|y)\.go" | xargs -n1 $(GOFMT) -w -s=true race_condition_binary: build @@ -87,22 +91,16 @@ run: binary search_index: godoc -index -write_index -index_files='search_index' -server: config dependencies - $(MAKE) -C server - -# $(FULL_GOPATH) is responsible for ensuring that the builder has not done anything -# stupid like working on Prometheus outside of ${GOPATH}. -$(FULL_GOPATH): - -[ -d "$(FULL_GOPATH)" ] || { mkdir -vp $(FULL_GOPATH_BASE) ; ln -s "$(PWD)" "$(FULL_GOPATH)" ; } - [ -d "$(FULL_GOPATH)" ] - test: config dependencies tools web $(GO) test $(GO_TEST_FLAGS) ./... 
tools: dependencies $(MAKE) -C tools -web: config dependencies +web: dependencies $(MAKE) -C web +rules: dependencies + $(MAKE) -C rules + .PHONY: advice binary build clean config dependencies documentation format race_condition_binary race_condition_run release run search_index tag tarball test tools diff --git a/Makefile.INCLUDE b/Makefile.INCLUDE index 6ea78da00e..44d00b49b1 100644 --- a/Makefile.INCLUDE +++ b/Makefile.INCLUDE @@ -49,9 +49,8 @@ GOENV = TMPDIR=$(TMPDIR) GOROOT=$(GOROOT) GOPATH=$(GOPATH) GO = $(GOENV) $(GOCC) GOFMT = $(GOROOT)/bin/gofmt -UNAME := $(shell uname) -FULL_GOPATH := $(GOPATH)/src/github.com/prometheus/prometheus -FULL_GOPATH_BASE := $(GOPATH)/src/github.com/prometheus +UNAME := $(shell uname) +SELFLINK = $(GOPATH)/src/github.com/prometheus/prometheus export PREFIX=$(BUILD_PATH)/root