diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 4be088530d..b79bc9a3f9 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,3 +1,5 @@ +[NOTICE]: <> (If your question is around usage and not a bug in Prometheus please use: https://groups.google.com/forum/#!forum/prometheus-users) + **What did you do?** **What did you expect to see?** diff --git a/.travis.yml b/.travis.yml index 78c382486f..41246f6a30 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ sudo: false language: go go: -- 1.7.4 +- 1.8 go_import_path: github.com/prometheus/prometheus diff --git a/Makefile b/Makefile index 971e2be0d0..bf3a7ce9f6 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ # limitations under the License. GO := GO15VENDOREXPERIMENT=1 go -FIRST_GOPATH := $(firstword $(subst :, ,$(GOPATH))) +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) PROMU := $(FIRST_GOPATH)/bin/promu pkgs = $(shell $(GO) list ./... | grep -v /vendor/) diff --git a/README.md b/README.md index f17d9c1b45..d0afa6e4cc 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ Prometheus' main distinguishing features as compared to other monitoring systems ## Architecture overview -![](https://cdn.rawgit.com/prometheus/prometheus/e761f0d/documentation/images/architecture.svg) +![](https://cdn.rawgit.com/prometheus/prometheus/c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg) ## Install diff --git a/circle.yml b/circle.yml index 6d7cbcd6b3..4f9519a0dd 100644 --- a/circle.yml +++ b/circle.yml @@ -2,7 +2,7 @@ machine: environment: DOCKER_IMAGE_NAME: prom/prometheus QUAY_IMAGE_NAME: quay.io/prometheus/prometheus - DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.7-base + DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.8-base REPO_PATH: github.com/prometheus/prometheus pre: - sudo curl -L -o /usr/bin/docker 'https://s3-external-1.amazonaws.com/circle-downloads/docker-1.9.1-circleci' diff --git a/cmd/prometheus/config.go b/cmd/prometheus/config.go index 3fcf917ce2..0edaa1f9d4 100644 --- a/cmd/prometheus/config.go +++ b/cmd/prometheus/config.go @@ -26,6 +26,7 @@ import ( "unicode" "github.com/asaskevich/govalidator" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" @@ -53,8 +54,15 @@ var cfg = struct { alertmanagerURLs stringset prometheusURL string + + // Deprecated storage flags, kept for backwards compatibility. + deprecatedMemoryChunks uint64 + deprecatedMaxChunksToPersist uint64 }{ alertmanagerURLs: stringset{}, + notifier: notifier.Options{ + Registerer: prometheus.DefaultRegisterer, + }, } func init() { diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 8f2365af43..d850b32420 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -20,6 +20,7 @@ import ( _ "net/http/pprof" // Comment this line to disable pprof endpoint. "os" "os/signal" + "runtime/debug" "syscall" "time" @@ -41,6 +42,13 @@ func main() { os.Exit(Main()) } +// defaultGCPercent is the value used to call SetGCPercent if the GOGC +// environment variable is not set or empty. The value here is intended to hit +// the sweet spot between memory utilization and GC effort. It is lower than the +// usual default of 100 as a lot of the heap in Prometheus is used to cache +// memory chunks, which have a lifetime of hours if not days or weeks.
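A standalone sketch of the precedence rule described in the comment above (the constant it uses is declared just below in this hunk). Nothing here is new API: debug.SetGCPercent returns the previous setting, and an operator-supplied GOGC always wins because the call is skipped entirely when the variable is set.

```go
package main

import (
	"fmt"
	"os"
	"runtime/debug"
)

const defaultGCPercent = 40

func main() {
	if os.Getenv("GOGC") == "" {
		// With GOGC unset, the runtime default is 100; lowering it trades
		// more frequent collections for a smaller steady-state heap.
		prev := debug.SetGCPercent(defaultGCPercent)
		fmt.Printf("GC target lowered from %d%% to %d%%\n", prev, defaultGCPercent)
	} else {
		fmt.Printf("GOGC=%s set by operator; leaving runtime default in place\n", os.Getenv("GOGC"))
	}
}
```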
+const defaultGCPercent = 40 + var ( configSuccess = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "prometheus", @@ -70,6 +78,10 @@ func Main() int { return 0 } + if os.Getenv("GOGC") == "" { + debug.SetGCPercent(defaultGCPercent) + } + log.Infoln("Starting prometheus", version.Info()) log.Infoln("Build context", version.BuildContext()) @@ -167,7 +179,6 @@ func Main() int { // defer remoteStorage.Stop() - prometheus.MustRegister(notifier) prometheus.MustRegister(configSuccess) prometheus.MustRegister(configSuccessTime) diff --git a/config/config.go b/config/config.go index 2560653a39..2442f64b24 100644 --- a/config/config.go +++ b/config/config.go @@ -167,6 +167,11 @@ var ( DefaultRemoteWriteConfig = RemoteWriteConfig{ RemoteTimeout: model.Duration(30 * time.Second), } + + // DefaultRemoteReadConfig is the default remote read configuration. + DefaultRemoteReadConfig = RemoteReadConfig{ + RemoteTimeout: model.Duration(1 * time.Minute), + } ) // URL is a custom URL type that allows validation at configuration load time. @@ -205,6 +210,7 @@ type Config struct { ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"` + RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"` // Catches all undefined fields and must be empty after parsing. XXX map[string]interface{} `yaml:",inline"` @@ -1296,15 +1302,16 @@ func (re Regexp) MarshalYAML() (interface{}, error) { return nil, nil } -// RemoteWriteConfig is the configuration for remote storage. +// RemoteWriteConfig is the configuration for writing to remote storage. type RemoteWriteConfig struct { URL *URL `yaml:"url,omitempty"` RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` - BasicAuth *BasicAuth `yaml:"basic_auth,omitempty"` - TLSConfig TLSConfig `yaml:"tls_config,omitempty"` - ProxyURL URL `yaml:"proxy_url,omitempty"` WriteRelabelConfigs []*RelabelConfig `yaml:"write_relabel_configs,omitempty"` + // We cannot do proper Go type embedding below as the parser will then parse + // values arbitrarily into the overflow maps of further-down types. + HTTPClientConfig HTTPClientConfig `yaml:",inline"` + // Catches all undefined fields and must be empty after parsing. XXX map[string]interface{} `yaml:",inline"` } @@ -1321,3 +1328,29 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err } return nil } + +// RemoteReadConfig is the configuration for reading from remote storage. +type RemoteReadConfig struct { + URL *URL `yaml:"url,omitempty"` + RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` + + // We cannot do proper Go type embedding below as the parser will then parse + // values arbitrarily into the overflow maps of further-down types. + HTTPClientConfig HTTPClientConfig `yaml:",inline"` + + // Catches all undefined fields and must be empty after parsing. + XXX map[string]interface{} `yaml:",inline"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
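The UnmarshalYAML implementation that follows applies the same defaulting pattern as the other config types in this file: assign the defaults first, then unmarshal through an alias type so the custom method does not recurse into itself. A minimal, self-contained sketch of the pattern (names are illustrative; gopkg.in/yaml.v2 as vendored by Prometheus at the time):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type Settings struct {
	Timeout string `yaml:"timeout,omitempty"`
}

var defaultSettings = Settings{Timeout: "1m"}

func (s *Settings) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Start from the defaults, then overlay whatever the document sets.
	*s = defaultSettings
	// The "plain" alias has no UnmarshalYAML method, so calling unmarshal
	// on it cannot recurse back into this function.
	type plain Settings
	return unmarshal((*plain)(s))
}

func main() {
	var s Settings
	if err := yaml.Unmarshal([]byte(`{}`), &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Timeout) // prints "1m", the default
}
```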
+func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultRemoteReadConfig + type plain RemoteReadConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + if err := checkOverflow(c.XXX, "remote_read"); err != nil { + return err + } + return nil +} diff --git a/documentation/examples/remote_storage/remote_storage_bridge/README.md b/documentation/examples/remote_storage/remote_storage_bridge/README.md index ad194c7169..4c48a7ddf3 100644 --- a/documentation/examples/remote_storage/remote_storage_bridge/README.md +++ b/documentation/examples/remote_storage/remote_storage_bridge/README.md @@ -1,10 +1,13 @@ # Remote storage bridge -This is a bridge that receives samples in Prometheus's remote storage -format and forwards them to Graphite, InfluxDB, or OpenTSDB. It is meant +This is a bridge that receives samples via Prometheus's remote write +protocol and stores them in Graphite, InfluxDB, or OpenTSDB. It is meant as a replacement for the built-in specific remote storage implementations that have been removed from Prometheus. +For InfluxDB, this bridge also supports reading back data through +Prometheus via Prometheus's remote read protocol. + ## Building ``` @@ -13,10 +16,22 @@ go build ## Running -Example: +Graphite example: ``` -./remote_storage_bridge -graphite-address=localhost:8080 -opentsdb-url=http://localhost:8081/ +./remote_storage_bridge -graphite-address=localhost:8080 +``` + +OpenTSDB example: + +``` +./remote_storage_bridge -opentsdb-url=http://localhost:8081/ +``` + +InfluxDB example: + +``` +./remote_storage_bridge -influxdb-url=http://localhost:8086/ -influxdb.database=prometheus -influxdb.retention-policy=autogen ``` To show all flags: @@ -30,6 +45,11 @@ To show all flags: To configure Prometheus to send samples to this bridge, add the following to your `prometheus.yml`: ```yaml +# Remote write configuration (for Graphite, OpenTSDB, or InfluxDB). remote_write: - url: "http://localhost:9201/receive" + - url: "http://localhost:9201/write" + +# Remote read configuration (for InfluxDB only at the moment). +remote_read: + - url: "http://localhost:9201/read" ``` \ No newline at end of file diff --git a/documentation/examples/remote_storage/remote_storage_bridge/graphite/client.go b/documentation/examples/remote_storage/remote_storage_bridge/graphite/client.go index 8ce28c4a58..45373cf19a 100644 --- a/documentation/examples/remote_storage/remote_storage_bridge/graphite/client.go +++ b/documentation/examples/remote_storage/remote_storage_bridge/graphite/client.go @@ -72,8 +72,8 @@ func pathFromMetric(m model.Metric, prefix string) string { return buffer.String() } -// Store sends a batch of samples to Graphite. -func (c *Client) Store(samples model.Samples) error { +// Write sends a batch of samples to Graphite. 
+func (c *Client) Write(samples model.Samples) error { conn, err := net.DialTimeout(c.transport, c.address, c.timeout) if err != nil { return err diff --git a/documentation/examples/remote_storage/remote_storage_bridge/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_bridge/influxdb/client.go index ae1fa4d631..26b5d56bd4 100644 --- a/documentation/examples/remote_storage/remote_storage_bridge/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_bridge/influxdb/client.go @@ -14,26 +14,30 @@ package influxdb import ( + "encoding/json" + "fmt" "math" + "strings" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/log" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/storage/remote" - influx "github.com/influxdb/influxdb/client" + influx "github.com/influxdata/influxdb/client/v2" ) // Client allows sending batches of Prometheus samples to InfluxDB. type Client struct { - client *influx.Client + client influx.Client database string retentionPolicy string ignoredSamples prometheus.Counter } // NewClient creates a new Client. -func NewClient(conf influx.Config, db string, rp string) *Client { - c, err := influx.NewClient(conf) +func NewClient(conf influx.HTTPConfig, db string, rp string) *Client { + c, err := influx.NewHTTPClient(conf) // Currently influx.NewClient() *should* never return an error. if err != nil { log.Fatal(err) @@ -63,9 +67,9 @@ func tagsFromMetric(m model.Metric) map[string]string { return tags } -// Store sends a batch of samples to InfluxDB via its HTTP API. -func (c *Client) Store(samples model.Samples) error { - points := make([]influx.Point, 0, len(samples)) +// Write sends a batch of samples to InfluxDB via its HTTP API. +func (c *Client) Write(samples model.Samples) error { + points := make([]*influx.Point, 0, len(samples)) for _, s := range samples { v := float64(s.Value) if math.IsNaN(v) || math.IsInf(v, 0) { @@ -73,24 +77,221 @@ func (c *Client) Store(samples model.Samples) error { c.ignoredSamples.Inc() continue } - points = append(points, influx.Point{ - Measurement: string(s.Metric[model.MetricNameLabel]), - Tags: tagsFromMetric(s.Metric), - Time: s.Timestamp.Time(), - Precision: "ms", - Fields: map[string]interface{}{ - "value": v, - }, - }) + p, err := influx.NewPoint( + string(s.Metric[model.MetricNameLabel]), + tagsFromMetric(s.Metric), + map[string]interface{}{"value": v}, + s.Timestamp.Time(), + ) + if err != nil { + return err + } + points = append(points, p) } - bps := influx.BatchPoints{ - Points: points, + bps, err := influx.NewBatchPoints(influx.BatchPointsConfig{ + Precision: "ms", Database: c.database, RetentionPolicy: c.retentionPolicy, + }) + if err != nil { + return err } - _, err := c.client.Write(bps) - return err + bps.AddPoints(points) + return c.client.Write(bps) +} + +func (c *Client) Read(req *remote.ReadRequest) (*remote.ReadResponse, error) { + labelsToSeries := map[string]*remote.TimeSeries{} + for _, q := range req.Queries { + command, err := buildCommand(q) + if err != nil { + return nil, err + } + + query := influx.NewQuery(command, c.database, "ms") + resp, err := c.client.Query(query) + if err != nil { + return nil, err + } + if resp.Err != "" { + return nil, fmt.Errorf(resp.Err) + } + + if err = mergeResult(labelsToSeries, resp.Results); err != nil { + return nil, err + } + } + + resp := remote.ReadResponse{ + Timeseries: make([]*remote.TimeSeries, 0, len(labelsToSeries)), + } + for _, ts := range labelsToSeries { + resp.Timeseries = 
append(resp.Timeseries, ts) + } + return &resp, nil +} + +func buildCommand(q *remote.Query) (string, error) { + matchers := make([]string, 0, len(q.Matchers)) + // If we don't find a metric name matcher, query all metrics + // (InfluxDB measurements) by default. + from := "FROM /.+/" + for _, m := range q.Matchers { + if m.Name == model.MetricNameLabel { + switch m.Type { + case remote.MatchType_EQUAL: + from = fmt.Sprintf("FROM %q", m.Value) + case remote.MatchType_REGEX_MATCH: + from = fmt.Sprintf("FROM /^%s$/", escapeSlashes(m.Value)) + default: + // TODO: Figure out how to support these efficiently. + return "", fmt.Errorf("non-equal or regex-non-equal matchers are not supported on the metric name yet") + } + continue + } + + switch m.Type { + case remote.MatchType_EQUAL: + matchers = append(matchers, fmt.Sprintf("%q = '%s'", m.Name, escapeSingleQuotes(m.Value))) + case remote.MatchType_NOT_EQUAL: + matchers = append(matchers, fmt.Sprintf("%q != '%s'", m.Name, escapeSingleQuotes(m.Value))) + case remote.MatchType_REGEX_MATCH: + matchers = append(matchers, fmt.Sprintf("%q =~ /^%s$/", m.Name, escapeSlashes(m.Value))) + case remote.MatchType_REGEX_NO_MATCH: + matchers = append(matchers, fmt.Sprintf("%q !~ /^%s$/", m.Name, escapeSlashes(m.Value))) + default: + return "", fmt.Errorf("unknown match type %v", m.Type) + } + } + matchers = append(matchers, fmt.Sprintf("time >= %vms", q.StartTimestampMs)) + matchers = append(matchers, fmt.Sprintf("time <= %vms", q.EndTimestampMs)) + + return fmt.Sprintf("SELECT value %s WHERE %v GROUP BY *", from, strings.Join(matchers, " AND ")), nil +} + +func escapeSingleQuotes(str string) string { + return strings.Replace(str, `'`, `\'`, -1) +} + +func escapeSlashes(str string) string { + return strings.Replace(str, `/`, `\/`, -1) +} + +func mergeResult(labelsToSeries map[string]*remote.TimeSeries, results []influx.Result) error { + for _, r := range results { + for _, s := range r.Series { + k := concatLabels(s.Tags) + ts, ok := labelsToSeries[k] + if !ok { + ts = &remote.TimeSeries{ + Labels: tagsToLabelPairs(s.Name, s.Tags), + } + labelsToSeries[k] = ts + } + + samples, err := valuesToSamples(s.Values) + if err != nil { + return err + } + + ts.Samples = mergeSamples(ts.Samples, samples) + } + } + return nil +} + +func concatLabels(labels map[string]string) string { + // 0xff cannot occur in valid UTF-8 sequences, so use it + // as a separator here. + separator := "\xff" + pairs := make([]string, 0, len(labels)) + for k, v := range labels { + pairs = append(pairs, k+separator+v) + } + return strings.Join(pairs, separator) +} + +func tagsToLabelPairs(name string, tags map[string]string) []*remote.LabelPair { + pairs := make([]*remote.LabelPair, 0, len(tags)) + for k, v := range tags { + if v == "" { + // If we select metrics with different sets of label names, + // InfluxDB returns *all* possible tag names on all returned + // series, with empty tag values on series where they don't + // apply. In Prometheus, an empty label value is equivalent + // to a non-existent label, so we just skip empty ones here + // to make the result correct.
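Worked example for buildCommand above (assumed inputs): a remote read for the selector cpu_usage{mode!="idle"} over the range [1000ms, 2000ms] would be translated into

```
SELECT value FROM "cpu_usage" WHERE "mode" != 'idle' AND time >= 1000ms AND time <= 2000ms GROUP BY *
```

GROUP BY * keeps every tag combination as a separate InfluxDB series, which then maps one-to-one onto Prometheus time series when the result is converted back by tagsToLabelPairs below.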
+ continue + } + pairs = append(pairs, &remote.LabelPair{ + Name: k, + Value: v, + }) + } + pairs = append(pairs, &remote.LabelPair{ + Name: model.MetricNameLabel, + Value: name, + }) + return pairs +} + +func valuesToSamples(values [][]interface{}) ([]*remote.Sample, error) { + samples := make([]*remote.Sample, 0, len(values)) + for _, v := range values { + if len(v) != 2 { + return nil, fmt.Errorf("bad sample tuple length, expected [<timestamp>, <value>], got %v", v) + } + + jsonTimestamp, ok := v[0].(json.Number) + if !ok { + return nil, fmt.Errorf("bad timestamp: %v", v[0]) + } + + jsonValue, ok := v[1].(json.Number) + if !ok { + return nil, fmt.Errorf("bad sample value: %v", v[1]) + } + + timestamp, err := jsonTimestamp.Int64() + if err != nil { + return nil, fmt.Errorf("unable to convert sample timestamp to int64: %v", err) + } + + value, err := jsonValue.Float64() + if err != nil { + return nil, fmt.Errorf("unable to convert sample value to float64: %v", err) + } + + samples = append(samples, &remote.Sample{ + TimestampMs: timestamp, + Value: value, + }) + } + return samples, nil +} + +// mergeSamples merges two lists of sample pairs and removes duplicate +// timestamps. It assumes that both lists are sorted by timestamp. +func mergeSamples(a, b []*remote.Sample) []*remote.Sample { + result := make([]*remote.Sample, 0, len(a)+len(b)) + i, j := 0, 0 + for i < len(a) && j < len(b) { + if a[i].TimestampMs < b[j].TimestampMs { + result = append(result, a[i]) + i++ + } else if a[i].TimestampMs > b[j].TimestampMs { + result = append(result, b[j]) + j++ + } else { + result = append(result, a[i]) + i++ + j++ + } + } + result = append(result, a[i:]...) + result = append(result, b[j:]...) + return result } // Name identifies the client as an InfluxDB client. diff --git a/documentation/examples/remote_storage/remote_storage_bridge/influxdb/client_test.go b/documentation/examples/remote_storage/remote_storage_bridge/influxdb/client_test.go index 38a5e1a680..b3567fb75b 100644 --- a/documentation/examples/remote_storage/remote_storage_bridge/influxdb/client_test.go +++ b/documentation/examples/remote_storage/remote_storage_bridge/influxdb/client_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - influx "github.com/influxdb/influxdb/client" + influx "github.com/influxdata/influxdb/client/v2" "github.com/prometheus/common/model" ) @@ -68,8 +68,8 @@ func TestClient(t *testing.T) { }, } - expectedBody := `testmetric,test_label=test_label_value1 value=1.23 123456789123000000 -testmetric,test_label=test_label_value2 value=5.1234 123456789123000000 + expectedBody := `testmetric,test_label=test_label_value1 value=1.23 123456789123 +testmetric,test_label=test_label_value2 value=5.1234 123456789123 ` server := httptest.NewServer(http.HandlerFunc( @@ -97,15 +97,15 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123000000 t.Fatalf("Unable to parse server URL %s: %s", server.URL, err) } - conf := influx.Config{ - URL: *serverURL, + conf := influx.HTTPConfig{ + Addr: serverURL.String(), Username: "testuser", Password: "testpass", Timeout: time.Minute, } c := NewClient(conf, "test_db", "default") - if err := c.Store(samples); err != nil { + if err := c.Write(samples); err != nil { t.Fatalf("Error sending samples: %s", err) } } diff --git a/documentation/examples/remote_storage/remote_storage_bridge/main.go b/documentation/examples/remote_storage/remote_storage_bridge/main.go index d6e7fcb457..7603511bcd 100644 --- a/documentation/examples/remote_storage/remote_storage_bridge/main.go +++
b/documentation/examples/remote_storage/remote_storage_bridge/main.go @@ -16,6 +16,7 @@ package main import ( "flag" + "fmt" "io/ioutil" "net/http" _ "net/http/pprof" @@ -30,7 +31,7 @@ import ( "github.com/prometheus/common/log" "github.com/prometheus/common/model" - influx "github.com/influxdb/influxdb/client" + influx "github.com/influxdata/influxdb/client/v2" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_bridge/graphite" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_bridge/influxdb" @@ -95,8 +96,8 @@ func main() { cfg := parseFlags() http.Handle(cfg.telemetryPath, prometheus.Handler()) - clients := buildClients(cfg) - serve(cfg.listenAddr, clients) + writers, readers := buildClients(cfg) + serve(cfg.listenAddr, writers, readers) } func parseFlags() *config { @@ -119,7 +120,7 @@ func parseFlags() *config { flag.StringVar(&cfg.influxdbURL, "influxdb-url", "", "The URL of the remote InfluxDB server to send samples to. None, if empty.", ) - flag.StringVar(&cfg.influxdbRetentionPolicy, "influxdb.retention-policy", "default", + flag.StringVar(&cfg.influxdbRetentionPolicy, "influxdb.retention-policy", "autogen", "The InfluxDB retention policy to use.", ) flag.StringVar(&cfg.influxdbUsername, "influxdb.username", "", @@ -139,38 +140,50 @@ func parseFlags() *config { return cfg } -func buildClients(cfg *config) []remote.StorageClient { - var clients []remote.StorageClient +type writer interface { + Write(samples model.Samples) error + Name() string +} + +type reader interface { + Read(req *remote.ReadRequest) (*remote.ReadResponse, error) + Name() string +} + +func buildClients(cfg *config) ([]writer, []reader) { + var writers []writer + var readers []reader if cfg.graphiteAddress != "" { c := graphite.NewClient( cfg.graphiteAddress, cfg.graphiteTransport, cfg.remoteTimeout, cfg.graphitePrefix) - clients = append(clients, c) + writers = append(writers, c) } if cfg.opentsdbURL != "" { c := opentsdb.NewClient(cfg.opentsdbURL, cfg.remoteTimeout) - clients = append(clients, c) + writers = append(writers, c) } if cfg.influxdbURL != "" { url, err := url.Parse(cfg.influxdbURL) if err != nil { log.Fatalf("Failed to parse InfluxDB URL %q: %v", cfg.influxdbURL, err) } - conf := influx.Config{ - URL: *url, + conf := influx.HTTPConfig{ + Addr: url.String(), Username: cfg.influxdbUsername, Password: cfg.influxdbPassword, Timeout: cfg.remoteTimeout, } c := influxdb.NewClient(conf, cfg.influxdbDatabase, cfg.influxdbRetentionPolicy) prometheus.MustRegister(c) - clients = append(clients, c) + writers = append(writers, c) + readers = append(readers, c) } - return clients + return writers, readers } -func serve(addr string, clients []remote.StorageClient) error { - http.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) { +func serve(addr string, writers []writer, readers []reader) error { + http.HandleFunc("/write", func(w http.ResponseWriter, r *http.Request) { reqBuf, err := ioutil.ReadAll(snappy.NewReader(r.Body)) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) @@ -187,16 +200,57 @@ func serve(addr string, clients []remote.StorageClient) error { receivedSamples.Add(float64(len(samples))) var wg sync.WaitGroup - for _, c := range clients { + for _, w := range writers { wg.Add(1) - go func(rc remote.StorageClient) { - sendSamples(rc, samples) + go func(rw writer) { + sendSamples(rw, samples) wg.Done() - }(c) + }(w) } wg.Wait() }) + http.HandleFunc("/read", func(w http.ResponseWriter, r 
*http.Request) { + reqBuf, err := ioutil.ReadAll(snappy.NewReader(r.Body)) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + var req remote.ReadRequest + if err := proto.Unmarshal(reqBuf, &req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // TODO: Support reading from more than one reader and merging the results. + if len(readers) != 1 { + http.Error(w, fmt.Sprintf("expected exactly one reader, found %d readers", len(readers)), http.StatusInternalServerError) + return + } + reader := readers[0] + + var resp *remote.ReadResponse + resp, err = reader.Read(&req) + if err != nil { + log.With("query", req).With("storage", reader.Name()).With("err", err).Warnf("Error executing query") + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + data, err := proto.Marshal(resp) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/x-protobuf") + if _, err := snappy.NewWriter(w).Write(data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }) + return http.ListenAndServe(addr, nil) } @@ -219,14 +273,14 @@ func protoToSamples(req *remote.WriteRequest) model.Samples { return samples } -func sendSamples(c remote.StorageClient, samples model.Samples) { +func sendSamples(w writer, samples model.Samples) { begin := time.Now() - err := c.Store(samples) + err := w.Write(samples) duration := time.Since(begin).Seconds() if err != nil { - log.Warnf("Error sending %d samples to remote storage %q: %v", len(samples), c.Name(), err) - failedSamples.WithLabelValues(c.Name()).Add(float64(len(samples))) + log.With("num_samples", len(samples)).With("storage", w.Name()).With("err", err).Warnf("Error sending samples to remote storage") + failedSamples.WithLabelValues(w.Name()).Add(float64(len(samples))) } - sentSamples.WithLabelValues(c.Name()).Add(float64(len(samples))) - sentBatchDuration.WithLabelValues(c.Name()).Observe(duration) + sentSamples.WithLabelValues(w.Name()).Add(float64(len(samples))) + sentBatchDuration.WithLabelValues(w.Name()).Observe(duration) } diff --git a/documentation/examples/remote_storage/remote_storage_bridge/opentsdb/client.go b/documentation/examples/remote_storage/remote_storage_bridge/opentsdb/client.go index 46575313f3..4b6c0e6f4a 100644 --- a/documentation/examples/remote_storage/remote_storage_bridge/opentsdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_bridge/opentsdb/client.go @@ -24,10 +24,9 @@ import ( "time" "github.com/prometheus/common/log" - "github.com/prometheus/common/model" - - "github.com/prometheus/prometheus/util/httputil" + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" ) const ( @@ -37,15 +36,15 @@ const ( // Client allows sending batches of Prometheus samples to OpenTSDB. type Client struct { - url string - httpClient *http.Client + url string + timeout time.Duration } // NewClient creates a new Client. func NewClient(url string, timeout time.Duration) *Client { return &Client{ - url: url, - httpClient: httputil.NewDeadlineClient(timeout, nil), + url: url, + timeout: timeout, } } @@ -70,8 +69,8 @@ func tagsFromMetric(m model.Metric) map[string]TagValue { return tags } -// Store sends a batch of samples to OpenTSDB via its HTTP API. -func (c *Client) Store(samples model.Samples) error { +// Write sends a batch of samples to OpenTSDB via its HTTP API. 
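A hedged client-side sketch of the wire format the new /read handler above expects (endpoint URL and query bounds are illustrative): the request body is a protobuf-encoded remote.ReadRequest compressed with snappy's framed stream format (the handler decodes via snappy.NewReader, so block-format snappy.Encode would not parse), and the response comes back in the same encoding.

```go
package main

import (
	"bytes"
	"io/ioutil"
	"log"
	"net/http"

	"github.com/golang/protobuf/proto"
	"github.com/golang/snappy"

	"github.com/prometheus/prometheus/storage/remote"
)

func main() {
	// An empty matcher list queries all measurements within the time range.
	req := &remote.ReadRequest{
		Queries: []*remote.Query{{
			StartTimestampMs: 0,
			EndTimestampMs:   60000,
		}},
	}
	data, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}

	// Stream-framed snappy, mirroring ioutil.ReadAll(snappy.NewReader(...))
	// on the server side.
	var buf bytes.Buffer
	if _, err := snappy.NewWriter(&buf).Write(data); err != nil {
		log.Fatal(err)
	}

	httpResp, err := http.Post("http://localhost:9201/read", "application/x-protobuf", &buf)
	if err != nil {
		log.Fatal(err)
	}
	defer httpResp.Body.Close()

	raw, err := ioutil.ReadAll(snappy.NewReader(httpResp.Body))
	if err != nil {
		log.Fatal(err)
	}
	var resp remote.ReadResponse
	if err := proto.Unmarshal(raw, &resp); err != nil {
		log.Fatal(err)
	}
	log.Printf("received %d time series", len(resp.Timeseries))
}
```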
+func (c *Client) Write(samples model.Samples) error { reqs := make([]StoreSamplesRequest, 0, len(samples)) for _, s := range samples { v := float64(s.Value) @@ -100,11 +99,10 @@ func (c *Client) Store(samples model.Samples) error { return err } - resp, err := c.httpClient.Post( - u.String(), - contentTypeJSON, - bytes.NewBuffer(buf), - ) + ctx, cancel := context.WithTimeout(context.Background(), c.timeout) + defer cancel() + + resp, err := ctxhttp.Post(ctx, http.DefaultClient, u.String(), contentTypeJSON, bytes.NewBuffer(buf)) if err != nil { return err } diff --git a/documentation/images/architecture.svg b/documentation/images/architecture.svg index 0edb9666d9..df93e13cb2 100644 --- a/documentation/images/architecture.svg +++ b/documentation/images/architecture.svg @@ -1,3 +1,2 @@ - -
[image diff body elided: only the draw.io fallback text of the exported SVG survives extraction, as stray labels such as "Prometheus Server", "Retrieval", "Storage", "PromQL", "Pushgateway", "Short-lived jobs", "Jobs / Exporters", "Service Discovery", "Alertmanager", "Web UI", "Grafana", "API clients", "PagerDuty", "Email", and repeated "[Not supported by viewer]" markers. Comparing the old and new label sets, the re-exported diagram drops the retired PromDash dashboard; the rest is a re-render of the same architecture.]
\ No newline at end of file
diff --git a/documentation/images/architecture.xml b/documentation/images/architecture.xml index 9cd0bb0755..d47ae9d855 100644 --- a/documentation/images/architecture.xml +++ b/documentation/images/architecture.xml @@ -1 +1 @@
[elided: the draw.io source is a single line of compressed, base64-like payload, replaced wholesale to match the regenerated SVG above.]
\ No newline at end of file
diff --git a/notifier/notifier.go b/notifier/notifier.go index 6834382c07..dc1b378b26 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -36,7 +36,7 @@ import ( "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/relabel" - "github.com/prometheus/prometheus/retrieval" + "github.com/prometheus/prometheus/util/httputil" ) const ( @@ -104,18 +104,13 @@ type Notifier struct { queue []*Alert opts *Options + metrics *alertMetrics + more chan struct{} mtx sync.RWMutex ctx context.Context cancel func() - latency *prometheus.SummaryVec - errors *prometheus.CounterVec - sent *prometheus.CounterVec - dropped prometheus.Counter - queueLength prometheus.Gauge - queueCapacity prometheus.Metric - alertmanagers []*alertmanagerSet cancelDiscovery func() } @@ -127,23 +122,21 @@ type Options struct { RelabelConfigs []*config.RelabelConfig // Used for sending HTTP requests to the Alertmanager. Do func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) + + Registerer prometheus.Registerer } -// New constructs a new Notifier. -func New(o *Options) *Notifier { - ctx, cancel := context.WithCancel(context.Background()) - - if o.Do == nil { - o.Do = ctxhttp.Do - } - - return &Notifier{ - queue: make([]*Alert, 0, o.QueueCapacity), - ctx: ctx, - cancel: cancel, - more: make(chan struct{}, 1), - opts: o, +type alertMetrics struct { + latency *prometheus.SummaryVec + errors *prometheus.CounterVec + sent *prometheus.CounterVec + dropped prometheus.Counter + queueLength prometheus.GaugeFunc + queueCapacity prometheus.Gauge +} +func newAlertMetrics(r prometheus.Registerer, queueCap int, queueLen func() float64) *alertMetrics { + m := &alertMetrics{ latency: prometheus.NewSummaryVec(prometheus.SummaryOpts{ Namespace: namespace, Subsystem: subsystem, @@ -174,22 +167,55 @@ func New(o *Options) *Notifier { Name: "dropped_total", Help: "Total number of alerts dropped due to errors when sending to Alertmanager.", }), - queueLength: prometheus.NewGauge(prometheus.GaugeOpts{ + queueLength: prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "queue_length", Help: "The number of alert notifications in the queue.", + }, queueLen), + queueCapacity: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "queue_capacity", + Help: "The capacity of the alert notifications queue.", }), - queueCapacity: prometheus.MustNewConstMetric( - prometheus.NewDesc( - prometheus.BuildFQName(namespace, subsystem, "queue_capacity"), - "The capacity of the alert notifications queue.", - nil, nil, - ), - prometheus.GaugeValue, - float64(o.QueueCapacity), - ), } + + m.queueCapacity.Set(float64(queueCap)) + + if r != nil { + r.MustRegister( + m.latency, + m.errors, + m.sent, + m.dropped, + m.queueLength, + m.queueCapacity, + ) + } + + return m +} + +// New constructs a new Notifier.
+func New(o *Options) *Notifier { + ctx, cancel := context.WithCancel(context.Background()) + + if o.Do == nil { + o.Do = ctxhttp.Do + } + + n := &Notifier{ + queue: make([]*Alert, 0, o.QueueCapacity), + ctx: ctx, + cancel: cancel, + more: make(chan struct{}, 1), + opts: o, + } + + queueLenFunc := func() float64 { return float64(n.queueLen()) } + n.metrics = newAlertMetrics(o.Registerer, o.QueueCapacity, queueLenFunc) + return n } // ApplyConfig updates the status state as the new config requires. @@ -208,6 +234,9 @@ func (n *Notifier) ApplyConfig(conf *config.Config) error { if err != nil { return err } + + ams.metrics = n.metrics + amSets = append(amSets, ams) } @@ -264,7 +293,7 @@ func (n *Notifier) Run() { alerts := n.nextBatch() if !n.sendAll(alerts...) { - n.dropped.Add(float64(len(alerts))) + n.metrics.dropped.Add(float64(len(alerts))) } // If the queue still has items left, kick off the next iteration. if n.queueLen() > 0 { @@ -300,7 +329,7 @@ func (n *Notifier) Send(alerts ...*Alert) { alerts = alerts[d:] log.Warnf("Alert batch larger than queue capacity, dropping %d alerts", d) - n.dropped.Add(float64(d)) + n.metrics.dropped.Add(float64(d)) } // If the queue is full, remove the oldest alerts in favor @@ -309,7 +338,7 @@ func (n *Notifier) Send(alerts ...*Alert) { n.queue = n.queue[d:] log.Warnf("Alert notification queue full, dropping %d alerts", d) - n.dropped.Add(float64(d)) + n.metrics.dropped.Add(float64(d)) } n.queue = append(n.queue, alerts...) @@ -392,12 +421,12 @@ func (n *Notifier) sendAll(alerts ...*Alert) bool { if err := n.sendOne(ctx, ams.client, u, b); err != nil { log.With("alertmanager", u).With("count", len(alerts)).Errorf("Error sending alerts: %s", err) - n.errors.WithLabelValues(u).Inc() + n.metrics.errors.WithLabelValues(u).Inc() } else { atomic.AddUint64(&numSuccess, 1) } - n.latency.WithLabelValues(u).Observe(time.Since(begin).Seconds()) - n.sent.WithLabelValues(u).Add(float64(len(alerts))) + n.metrics.latency.WithLabelValues(u).Observe(time.Since(begin).Seconds()) + n.metrics.sent.WithLabelValues(u).Add(float64(len(alerts))) wg.Done() }(am) @@ -434,30 +463,6 @@ func (n *Notifier) Stop() { n.cancel() } -// Describe implements prometheus.Collector. -func (n *Notifier) Describe(ch chan<- *prometheus.Desc) { - n.latency.Describe(ch) - n.errors.Describe(ch) - n.sent.Describe(ch) - - ch <- n.dropped.Desc() - ch <- n.queueLength.Desc() - ch <- n.queueCapacity.Desc() -} - -// Collect implements prometheus.Collector. -func (n *Notifier) Collect(ch chan<- prometheus.Metric) { - n.queueLength.Set(float64(n.queueLen())) - - n.latency.Collect(ch) - n.errors.Collect(ch) - n.sent.Collect(ch) - - ch <- n.dropped - ch <- n.queueLength - ch <- n.queueCapacity -} - // alertmanager holds Alertmanager endpoint information. type alertmanager interface { url() string @@ -483,12 +488,14 @@ type alertmanagerSet struct { cfg *config.AlertmanagerConfig client *http.Client + metrics *alertMetrics + mtx sync.RWMutex ams []alertmanager } func newAlertmanagerSet(cfg *config.AlertmanagerConfig) (*alertmanagerSet, error) { - client, err := retrieval.NewHTTPClient(cfg.HTTPClientConfig) + client, err := httputil.NewClientFromConfig(cfg.HTTPClientConfig) if err != nil { return nil, err } @@ -527,6 +534,10 @@ func (s *alertmanagerSet) Sync(tgs []*config.TargetGroup) { continue } + // This will initialise the Counters for the AM to 0. 
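The comment above is why the two WithLabelValues calls that follow exist: instantiating a labeled child eagerly makes it appear in /metrics as 0 before the first event, so rate() and absent-series checks behave sanely for a freshly discovered Alertmanager. A small standalone sketch of that pattern, together with the GaugeFunc technique used by newAlertMetrics above (all names are illustrative):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	queue := make(chan struct{}, 100)

	// A GaugeFunc samples the queue length lazily at scrape time; no Set()
	// bookkeeping is needed anywhere in the hot path.
	queueLength := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "example_queue_length",
		Help: "Items currently queued.",
	}, func() float64 { return float64(len(queue)) })

	sent := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_sent_total",
		Help: "Items sent, by destination.",
	}, []string{"destination"})

	// Touching a label combination creates the child at 0 immediately,
	// instead of only after the first successful send.
	sent.WithLabelValues("http://alertmanager.example.org:9093/")

	prometheus.MustRegister(queueLength, sent)
}
```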
+ s.metrics.sent.WithLabelValues(us) + s.metrics.errors.WithLabelValues(us) + seen[us] = struct{}{} s.ams = append(s.ams, am) } diff --git a/pkg/labels/labels.go b/pkg/labels/labels.go index 0c65a4ff44..1e10d5cb3f 100644 --- a/pkg/labels/labels.go +++ b/pkg/labels/labels.go @@ -14,9 +14,10 @@ const sep = '\xff' // Well-known label names used by Prometheus components. const ( - MetricName = "__name__" - AlertName = "alertname" - BucketLabel = "le" + MetricName = "__name__" + AlertName = "alertname" + BucketLabel = "le" + InstanceName = "instance" ) // Label is a key/value pair of strings. diff --git a/retrieval/scrape.go b/retrieval/scrape.go index c0a6ae1df1..d469403789 100644 --- a/retrieval/scrape.go +++ b/retrieval/scrape.go @@ -36,6 +36,7 @@ import ( "github.com/prometheus/prometheus/pkg/textparse" "github.com/prometheus/prometheus/pkg/timestamp" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/httputil" ) const ( @@ -166,7 +167,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) { sp.mtx.Lock() defer sp.mtx.Unlock() - client, err := NewHTTPClient(cfg.HTTPClientConfig) + client, err := httputil.NewClientFromConfig(cfg.HTTPClientConfig) if err != nil { // Any errors that could occur here should be caught during config validation. log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err) diff --git a/retrieval/target_test.go b/retrieval/target_test.go index 100ebc9bb3..73969c01a0 100644 --- a/retrieval/target_test.go +++ b/retrieval/target_test.go @@ -30,6 +30,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/util/httputil" ) const ( @@ -150,7 +151,7 @@ func TestNewHTTPBearerToken(t *testing.T) { cfg := config.HTTPClientConfig{ BearerToken: "1234", } - c, err := NewHTTPClient(cfg) + c, err := httputil.NewClientFromConfig(cfg) if err != nil { t.Fatal(err) } @@ -177,7 +178,7 @@ func TestNewHTTPBearerTokenFile(t *testing.T) { cfg := config.HTTPClientConfig{ BearerTokenFile: "testdata/bearertoken.txt", } - c, err := NewHTTPClient(cfg) + c, err := httputil.NewClientFromConfig(cfg) if err != nil { t.Fatal(err) } @@ -206,7 +207,7 @@ func TestNewHTTPBasicAuth(t *testing.T) { Password: "password123", }, } - c, err := NewHTTPClient(cfg) + c, err := httputil.NewClientFromConfig(cfg) if err != nil { t.Fatal(err) } @@ -234,7 +235,7 @@ func TestNewHTTPCACert(t *testing.T) { CAFile: caCertPath, }, } - c, err := NewHTTPClient(cfg) + c, err := httputil.NewClientFromConfig(cfg) if err != nil { t.Fatal(err) } @@ -268,7 +269,7 @@ func TestNewHTTPClientCert(t *testing.T) { KeyFile: "testdata/client.key", }, } - c, err := NewHTTPClient(cfg) + c, err := httputil.NewClientFromConfig(cfg) if err != nil { t.Fatal(err) } @@ -297,7 +298,7 @@ func TestNewHTTPWithServerName(t *testing.T) { ServerName: "prometheus.rocks", }, } - c, err := NewHTTPClient(cfg) + c, err := httputil.NewClientFromConfig(cfg) if err != nil { t.Fatal(err) } @@ -326,7 +327,7 @@ func TestNewHTTPWithBadServerName(t *testing.T) { ServerName: "badname", }, } - c, err := NewHTTPClient(cfg) + c, err := httputil.NewClientFromConfig(cfg) if err != nil { t.Fatal(err) } @@ -365,7 +366,7 @@ func TestNewClientWithBadTLSConfig(t *testing.T) { KeyFile: "testdata/nonexistent_client.key", }, } - _, err := NewHTTPClient(cfg) + _, err := httputil.NewClientFromConfig(cfg) if err == nil { t.Fatalf("Expected error, got nil.") } diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index 
6fa89c2a46..ede8c79f0a 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -135,7 +135,7 @@ func (tm *TargetManager) reload() { } } -// Targets returns the targets currently being scraped bucketed by their job name. +// Targets returns the targets currently being scraped. func (tm *TargetManager) Targets() []*Target { tm.mtx.RLock() defer tm.mtx.RUnlock() diff --git a/rules/manager.go b/rules/manager.go index b158764b8b..46372feffd 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -74,10 +74,15 @@ var ( Name: "evaluator_iterations_skipped_total", Help: "The total number of rule group evaluations skipped due to throttled metric storage.", }) + iterationsMissed = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "evaluator_iterations_missed_total", + Help: "The total number of rule group evaluations missed due to slow rule group evaluation.", + }) iterationsScheduled = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: "evaluator_iterations_total", - Help: "The total number of scheduled rule group evaluations, whether skipped or executed.", + Help: "The total number of scheduled rule group evaluations, whether executed, missed or skipped.", }) ) @@ -88,7 +93,9 @@ func init() { evalFailures.WithLabelValues(string(ruleTypeRecording)) prometheus.MustRegister(iterationDuration) + prometheus.MustRegister(iterationsScheduled) prometheus.MustRegister(iterationsSkipped) + prometheus.MustRegister(iterationsMissed) prometheus.MustRegister(evalFailures) prometheus.MustRegister(evalDuration) } @@ -154,6 +161,7 @@ func (g *Group) run() { iterationDuration.Observe(time.Since(start).Seconds()) } + lastTriggered := time.Now() iter() tick := time.NewTicker(g.interval) @@ -168,6 +176,12 @@ func (g *Group) run() { case <-g.done: return case <-tick.C: + missed := (time.Since(lastTriggered).Nanoseconds() / g.interval.Nanoseconds()) - 1 + if missed > 0 { + iterationsMissed.Add(float64(missed)) + iterationsScheduled.Add(float64(missed)) + } + lastTriggered = time.Now() iter() } } diff --git a/storage/remote/client.go b/storage/remote/client.go deleted file mode 100644 index e5d97feda1..0000000000 --- a/storage/remote/client.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "bytes" - "fmt" - "net/http" - "time" - - "github.com/golang/protobuf/proto" - "github.com/golang/snappy" - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/util/httputil" -) - -// Client allows sending batches of Prometheus samples to an HTTP endpoint. -type Client struct { - index int // Used to differentiate metrics. - url config.URL - client *http.Client - timeout time.Duration -} - -// NewClient creates a new Client. 
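Worked example for the missed-iteration accounting in the rules/manager.go hunk above (illustrative numbers): with a 15s group interval, if 47s elapse between the last trigger and the next tick, missed = 47/15 - 1 = 3 - 1 = 2 under integer division, so two skipped evaluations are added to both evaluator_iterations_missed_total and evaluator_iterations_total, keeping the scheduled total consistent with what should have run.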
-func NewClient(index int, conf *config.RemoteWriteConfig) (*Client, error) { - tlsConfig, err := httputil.NewTLSConfig(conf.TLSConfig) - if err != nil { - return nil, err - } - - // The only timeout we care about is the configured push timeout. - // It is applied on request. So we leave out any timings here. - var rt http.RoundTripper = &http.Transport{ - Proxy: http.ProxyURL(conf.ProxyURL.URL), - TLSClientConfig: tlsConfig, - } - - if conf.BasicAuth != nil { - rt = httputil.NewBasicAuthRoundTripper(conf.BasicAuth.Username, conf.BasicAuth.Password, rt) - } - - return &Client{ - index: index, - url: *conf.URL, - client: httputil.NewClient(rt), - timeout: time.Duration(conf.RemoteTimeout), - }, nil -} - -// Store sends a batch of samples to the HTTP endpoint. -func (c *Client) Store(samples model.Samples) error { - req := &WriteRequest{ - Timeseries: make([]*TimeSeries, 0, len(samples)), - } - for _, s := range samples { - ts := &TimeSeries{ - Labels: make([]*LabelPair, 0, len(s.Metric)), - } - for k, v := range s.Metric { - ts.Labels = append(ts.Labels, - &LabelPair{ - Name: string(k), - Value: string(v), - }) - } - ts.Samples = []*Sample{ - { - Value: float64(s.Value), - TimestampMs: int64(s.Timestamp), - }, - } - req.Timeseries = append(req.Timeseries, ts) - } - - data, err := proto.Marshal(req) - if err != nil { - return err - } - - buf := bytes.Buffer{} - if _, err := snappy.NewWriter(&buf).Write(data); err != nil { - return err - } - - httpReq, err := http.NewRequest("POST", c.url.String(), &buf) - if err != nil { - return err - } - httpReq.Header.Add("Content-Encoding", "snappy") - - ctx, _ := context.WithTimeout(context.Background(), c.timeout) - httpResp, err := ctxhttp.Do(ctx, c.client, httpReq) - if err != nil { - return err - } - defer httpResp.Body.Close() - if httpResp.StatusCode/100 != 2 { - return fmt.Errorf("server returned HTTP status %s", httpResp.Status) - } - return nil -} - -// Name identifies the client. -func (c Client) Name() string { - return fmt.Sprintf("%d:%s", c.index, c.url) -} diff --git a/storage/remote/ewma.go b/storage/remote/ewma.go deleted file mode 100644 index d974bc3bb1..0000000000 --- a/storage/remote/ewma.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "sync" - "sync/atomic" - "time" -) - -// ewmaRate tracks an exponentially weighted moving average of a per-second rate. -type ewmaRate struct { - newEvents int64 - alpha float64 - interval time.Duration - lastRate float64 - init bool - mutex sync.Mutex -} - -func newEWMARate(alpha float64, interval time.Duration) ewmaRate { - return ewmaRate{ - alpha: alpha, - interval: interval, - } -} - -// rate returns the per-second rate. -func (r *ewmaRate) rate() float64 { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.lastRate -} - -// tick assumes to be called every r.interval. 
-func (r *ewmaRate) tick() { - newEvents := atomic.LoadInt64(&r.newEvents) - atomic.AddInt64(&r.newEvents, -newEvents) - instantRate := float64(newEvents) / r.interval.Seconds() - - r.mutex.Lock() - defer r.mutex.Unlock() - - if r.init { - r.lastRate += r.alpha * (instantRate - r.lastRate) - } else { - r.init = true - r.lastRate = instantRate - } -} - -// inc counts one event. -func (r *ewmaRate) incr(incr int64) { - atomic.AddInt64(&r.newEvents, incr) -} diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go deleted file mode 100644 index 3718561aa9..0000000000 --- a/storage/remote/queue_manager.go +++ /dev/null @@ -1,487 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "math" - "sync" - "time" - - "golang.org/x/time/rate" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/log" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/relabel" -) - -// String constants for instrumentation. -const ( - namespace = "prometheus" - subsystem = "remote_storage" - queue = "queue" - - // With a maximum of 1000 shards, assuming an average of 100ms remote write - // time and 100 samples per batch, we will be able to push 1M samples/s. - defaultMaxShards = 1000 - defaultMaxSamplesPerSend = 100 - - // defaultQueueCapacity is per shard - at 1000 shards, this will buffer - // 100M samples. It is configured to buffer 1000 batches, which at 100ms - // per batch is 1:40mins. - defaultQueueCapacity = defaultMaxSamplesPerSend * 1000 - defaultBatchSendDeadline = 5 * time.Second - - // We track samples in/out and how long pushes take using an Exponentially - // Weighted Moving Average. - ewmaWeight = 0.2 - shardUpdateDuration = 10 * time.Second - - // Allow 30% too many shards before scaling down. 
- shardToleranceFraction = 0.3 - - // Limit to 1 log event every 10s - logRateLimit = 0.1 - logBurst = 10 -) - -var ( - sentSamplesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "sent_samples_total", - Help: "Total number of processed samples sent to remote storage.", - }, - []string{queue}, - ) - failedSamplesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "failed_samples_total", - Help: "Total number of processed samples which failed on send to remote storage.", - }, - []string{queue}, - ) - droppedSamplesTotal = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "dropped_samples_total", - Help: "Total number of samples which were dropped due to the queue being full.", - }, - []string{queue}, - ) - sentBatchDuration = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "sent_batch_duration_seconds", - Help: "Duration of sample batch send calls to the remote storage.", - Buckets: prometheus.DefBuckets, - }, - []string{queue}, - ) - queueLength = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "queue_length", - Help: "The number of processed samples queued to be sent to the remote storage.", - }, - []string{queue}, - ) - queueCapacity = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "queue_capacity", - Help: "The capacity of the queue of samples to be sent to the remote storage.", - }, - []string{queue}, - ) - numShards = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "shards", - Help: "The number of shards used for parallel sending to the remote storage.", - }, - []string{queue}, - ) -) - -func init() { - prometheus.MustRegister(sentSamplesTotal) - prometheus.MustRegister(failedSamplesTotal) - prometheus.MustRegister(droppedSamplesTotal) - prometheus.MustRegister(sentBatchDuration) - prometheus.MustRegister(queueLength) - prometheus.MustRegister(queueCapacity) - prometheus.MustRegister(numShards) -} - -// StorageClient defines an interface for sending a batch of samples to an -// external timeseries database. -type StorageClient interface { - // Store stores the given samples in the remote storage. - Store(model.Samples) error - // Name identifies the remote storage implementation. - Name() string -} - -// QueueManagerConfig configures a storage queue. -type QueueManagerConfig struct { - QueueCapacity int // Number of samples to buffer per shard before we start dropping them. - MaxShards int // Max number of shards, i.e. amount of concurrency. - MaxSamplesPerSend int // Maximum number of samples per send. - BatchSendDeadline time.Duration // Maximum time sample will wait in buffer. - ExternalLabels model.LabelSet - RelabelConfigs []*config.RelabelConfig - Client StorageClient -} - -// QueueManager manages a queue of samples to be sent to the Storage -// indicated by the provided StorageClient. -type QueueManager struct { - cfg QueueManagerConfig - queueName string - logLimiter *rate.Limiter - - shardsMtx sync.Mutex - shards *shards - numShards int - reshardChan chan int - quit chan struct{} - wg sync.WaitGroup - - samplesIn, samplesOut, samplesOutDuration ewmaRate - integralAccumulator float64 -} - -// NewQueueManager builds a new QueueManager. 
-func NewQueueManager(cfg QueueManagerConfig) *QueueManager { - if cfg.QueueCapacity == 0 { - cfg.QueueCapacity = defaultQueueCapacity - } - if cfg.MaxShards == 0 { - cfg.MaxShards = defaultMaxShards - } - if cfg.MaxSamplesPerSend == 0 { - cfg.MaxSamplesPerSend = defaultMaxSamplesPerSend - } - if cfg.BatchSendDeadline == 0 { - cfg.BatchSendDeadline = defaultBatchSendDeadline - } - - t := &QueueManager{ - cfg: cfg, - queueName: cfg.Client.Name(), - logLimiter: rate.NewLimiter(logRateLimit, logBurst), - numShards: 1, - reshardChan: make(chan int), - quit: make(chan struct{}), - - samplesIn: newEWMARate(ewmaWeight, shardUpdateDuration), - samplesOut: newEWMARate(ewmaWeight, shardUpdateDuration), - samplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration), - } - t.shards = t.newShards(t.numShards) - numShards.WithLabelValues(t.queueName).Set(float64(t.numShards)) - queueCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.QueueCapacity)) - - return t -} - -// Append queues a sample to be sent to the remote storage. It drops the -// sample on the floor if the queue is full. -// Always returns nil. -func (t *QueueManager) Append(s *model.Sample) error { - var snew model.Sample - snew = *s - snew.Metric = s.Metric.Clone() - - for ln, lv := range t.cfg.ExternalLabels { - if _, ok := s.Metric[ln]; !ok { - snew.Metric[ln] = lv - } - } - - snew.Metric = model.Metric( - relabel.Process(model.LabelSet(snew.Metric), t.cfg.RelabelConfigs...)) - - if snew.Metric == nil { - return nil - } - - t.shardsMtx.Lock() - enqueued := t.shards.enqueue(&snew) - t.shardsMtx.Unlock() - - if enqueued { - queueLength.WithLabelValues(t.queueName).Inc() - } else { - droppedSamplesTotal.WithLabelValues(t.queueName).Inc() - if t.logLimiter.Allow() { - log.Warn("Remote storage queue full, discarding sample. Multiple subsequent messages of this kind may be suppressed.") - } - } - return nil -} - -// NeedsThrottling implements storage.SampleAppender. It will always return -// false as a remote storage drops samples on the floor if backlogging instead -// of asking for throttling. -func (*QueueManager) NeedsThrottling() bool { - return false -} - -// Start the queue manager sending samples to the remote storage. -// Does not block. -func (t *QueueManager) Start() { - t.wg.Add(2) - go t.updateShardsLoop() - go t.reshardLoop() - - t.shardsMtx.Lock() - defer t.shardsMtx.Unlock() - t.shards.start() -} - -// Stop stops sending samples to the remote storage and waits for pending -// sends to complete. -func (t *QueueManager) Stop() { - log.Infof("Stopping remote storage...") - close(t.quit) - t.wg.Wait() - - t.shardsMtx.Lock() - defer t.shardsMtx.Unlock() - t.shards.stop() - log.Info("Remote storage stopped.") -} - -func (t *QueueManager) updateShardsLoop() { - defer t.wg.Done() - - ticker := time.Tick(shardUpdateDuration) - for { - select { - case <-ticker: - t.calculateDesiredShards() - case <-t.quit: - return - } - } -} - -func (t *QueueManager) calculateDesiredShards() { - t.samplesIn.tick() - t.samplesOut.tick() - t.samplesOutDuration.tick() - - // We use the number of incoming samples as a prediction of how much work we - // will need to do next iteration. We add to this any pending samples - // (received - send) so we can catch up with any backlog. We use the average - // outgoing batch latency to work out how many shards we need. 
- var (
- samplesIn = t.samplesIn.rate()
- samplesOut = t.samplesOut.rate()
- samplesPending = samplesIn - samplesOut
- samplesOutDuration = t.samplesOutDuration.rate()
- )
-
- // We use an integral accumulator, like in a PID, to help dampen oscillation.
- t.integralAccumulator = t.integralAccumulator + (samplesPending * 0.1)
-
- if samplesOut <= 0 {
- return
- }
-
- var (
- timePerSample = samplesOutDuration / samplesOut
- desiredShards = (timePerSample * (samplesIn + samplesPending + t.integralAccumulator)) / float64(time.Second)
- )
- log.Debugf("QueueManager.calculateDesiredShards samplesIn=%f, samplesOut=%f, samplesPending=%f, desiredShards=%f",
- samplesIn, samplesOut, samplesPending, desiredShards)
-
- // Changes in the number of shards must be greater than shardToleranceFraction.
- var (
- lowerBound = float64(t.numShards) * (1. - shardToleranceFraction)
- upperBound = float64(t.numShards) * (1. + shardToleranceFraction)
- )
- log.Debugf("QueueManager.updateShardsLoop %f <= %f <= %f", lowerBound, desiredShards, upperBound)
- if lowerBound <= desiredShards && desiredShards <= upperBound {
- return
- }
-
- numShards := int(math.Ceil(desiredShards))
- if numShards > t.cfg.MaxShards {
- numShards = t.cfg.MaxShards
- }
- if numShards == t.numShards {
- return
- }
-
- // Resharding can take some time, and we want this loop
- // to stay close to shardUpdateDuration.
- select {
- case t.reshardChan <- numShards:
- log.Infof("Remote storage resharding from %d to %d shards.", t.numShards, numShards)
- t.numShards = numShards
- default:
- log.Infof("Currently resharding, skipping.")
- }
-}
-
-func (t *QueueManager) reshardLoop() {
- defer t.wg.Done()
-
- for {
- select {
- case numShards := <-t.reshardChan:
- t.reshard(numShards)
- case <-t.quit:
- return
- }
- }
-}
-
-func (t *QueueManager) reshard(n int) {
- numShards.WithLabelValues(t.queueName).Set(float64(n))
-
- t.shardsMtx.Lock()
- newShards := t.newShards(n)
- oldShards := t.shards
- t.shards = newShards
- t.shardsMtx.Unlock()
-
- oldShards.stop()
-
- // We start the newShards after we have stopped (and therefore completely
- // flushed) the oldShards, to guarantee we only ever deliver samples in
- // order.
- newShards.start()
-}
-
-type shards struct {
- qm *QueueManager
- queues []chan *model.Sample
- done chan struct{}
- wg sync.WaitGroup
-}
-
-func (t *QueueManager) newShards(numShards int) *shards {
- queues := make([]chan *model.Sample, numShards)
- for i := 0; i < numShards; i++ {
- queues[i] = make(chan *model.Sample, t.cfg.QueueCapacity)
- }
- s := &shards{
- qm: t,
- queues: queues,
- done: make(chan struct{}),
- }
- s.wg.Add(numShards)
- return s
-}
-
-func (s *shards) len() int {
- return len(s.queues)
-}
-
-func (s *shards) start() {
- for i := 0; i < len(s.queues); i++ {
- go s.runShard(i)
- }
-}
-
-func (s *shards) stop() {
- for _, shard := range s.queues {
- close(shard)
- }
- s.wg.Wait()
-}
-
-func (s *shards) enqueue(sample *model.Sample) bool {
- s.qm.samplesIn.incr(1)
-
- fp := sample.Metric.FastFingerprint()
- shard := uint64(fp) % uint64(len(s.queues))
-
- select {
- case s.queues[shard] <- sample:
- return true
- default:
- return false
- }
-}
-
-func (s *shards) runShard(i int) {
- defer s.wg.Done()
- queue := s.queues[i]
-
- // Send batches of at most MaxSamplesPerSend samples to the remote storage.
- // If we have fewer samples than that, flush them out after a deadline
- // anyways.
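The resharding arithmetic in calculateDesiredShards above is easy to replay by hand. A sketch with hypothetical rates, leaving out the integral term:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	var (
		samplesIn      = 10000.0 // samples/s arriving
		samplesOut     = 8000.0  // samples/s actually sent
		samplesPending = samplesIn - samplesOut
		// 8000 samples/s at roughly 1ms of send time each, in ns per second.
		samplesOutDuration = 8000.0 * float64(time.Millisecond)
	)
	timePerSample := samplesOutDuration / samplesOut // ns spent per sample
	desiredShards := timePerSample * (samplesIn + samplesPending) / float64(time.Second)
	fmt.Println(int(math.Ceil(desiredShards))) // 12
}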
- pendingSamples := model.Samples{} - - for { - select { - case sample, ok := <-queue: - if !ok { - if len(pendingSamples) > 0 { - log.Debugf("Flushing %d samples to remote storage...", len(pendingSamples)) - s.sendSamples(pendingSamples) - log.Debugf("Done flushing.") - } - return - } - - queueLength.WithLabelValues(s.qm.queueName).Dec() - pendingSamples = append(pendingSamples, sample) - - for len(pendingSamples) >= s.qm.cfg.MaxSamplesPerSend { - s.sendSamples(pendingSamples[:s.qm.cfg.MaxSamplesPerSend]) - pendingSamples = pendingSamples[s.qm.cfg.MaxSamplesPerSend:] - } - case <-time.After(s.qm.cfg.BatchSendDeadline): - if len(pendingSamples) > 0 { - s.sendSamples(pendingSamples) - pendingSamples = pendingSamples[:0] - } - } - } -} - -func (s *shards) sendSamples(samples model.Samples) { - // Samples are sent to the remote storage on a best-effort basis. If a - // sample isn't sent correctly the first time, it's simply dropped on the - // floor. - begin := time.Now() - err := s.qm.cfg.Client.Store(samples) - duration := time.Since(begin) - - if err != nil { - log.Warnf("error sending %d samples to remote storage: %s", len(samples), err) - failedSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples))) - } else { - sentSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples))) - } - sentBatchDuration.WithLabelValues(s.qm.queueName).Observe(duration.Seconds()) - - s.qm.samplesOut.incr(int64(len(samples))) - s.qm.samplesOutDuration.incr(int64(duration)) -} diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go deleted file mode 100644 index bfe4b69e10..0000000000 --- a/storage/remote/queue_manager_test.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "fmt" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/prometheus/common/model" -) - -type TestStorageClient struct { - receivedSamples map[string]model.Samples - expectedSamples map[string]model.Samples - wg sync.WaitGroup - mtx sync.Mutex -} - -func NewTestStorageClient() *TestStorageClient { - return &TestStorageClient{ - receivedSamples: map[string]model.Samples{}, - expectedSamples: map[string]model.Samples{}, - } -} - -func (c *TestStorageClient) expectSamples(ss model.Samples) { - c.mtx.Lock() - defer c.mtx.Unlock() - - for _, s := range ss { - ts := s.Metric.String() - c.expectedSamples[ts] = append(c.expectedSamples[ts], s) - } - c.wg.Add(len(ss)) -} - -func (c *TestStorageClient) waitForExpectedSamples(t *testing.T) { - c.wg.Wait() - - c.mtx.Lock() - defer c.mtx.Unlock() - for ts, expectedSamples := range c.expectedSamples { - for i, expected := range expectedSamples { - if !expected.Equal(c.receivedSamples[ts][i]) { - t.Fatalf("%d. 
Expected %v, got %v", i, expected, c.receivedSamples[ts][i]) - } - } - } -} - -func (c *TestStorageClient) Store(ss model.Samples) error { - c.mtx.Lock() - defer c.mtx.Unlock() - - for _, s := range ss { - ts := s.Metric.String() - c.receivedSamples[ts] = append(c.receivedSamples[ts], s) - } - c.wg.Add(-len(ss)) - return nil -} - -func (c *TestStorageClient) Name() string { - return "teststorageclient" -} - -func TestSampleDelivery(t *testing.T) { - // Let's create an even number of send batches so we don't run into the - // batch timeout case. - n := defaultQueueCapacity * 2 - - samples := make(model.Samples, 0, n) - for i := 0; i < n; i++ { - name := model.LabelValue(fmt.Sprintf("test_metric_%d", i)) - samples = append(samples, &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: name, - }, - Value: model.SampleValue(i), - }) - } - - c := NewTestStorageClient() - c.expectSamples(samples[:len(samples)/2]) - - m := NewQueueManager(QueueManagerConfig{ - Client: c, - MaxShards: 1, - }) - - // These should be received by the client. - for _, s := range samples[:len(samples)/2] { - m.Append(s) - } - // These will be dropped because the queue is full. - for _, s := range samples[len(samples)/2:] { - m.Append(s) - } - m.Start() - defer m.Stop() - - c.waitForExpectedSamples(t) -} - -func TestSampleDeliveryOrder(t *testing.T) { - ts := 10 - n := defaultMaxSamplesPerSend * ts - - samples := make(model.Samples, 0, n) - for i := 0; i < n; i++ { - name := model.LabelValue(fmt.Sprintf("test_metric_%d", i%ts)) - samples = append(samples, &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: name, - }, - Value: model.SampleValue(i), - Timestamp: model.Time(i), - }) - } - - c := NewTestStorageClient() - c.expectSamples(samples) - m := NewQueueManager(QueueManagerConfig{ - Client: c, - // Ensure we don't drop samples in this test. - QueueCapacity: n, - }) - - // These should be received by the client. - for _, s := range samples { - m.Append(s) - } - m.Start() - defer m.Stop() - - c.waitForExpectedSamples(t) -} - -// TestBlockingStorageClient is a queue_manager StorageClient which will block -// on any calls to Store(), until the `block` channel is closed, at which point -// the `numCalls` property will contain a count of how many times Store() was -// called. -type TestBlockingStorageClient struct { - numCalls uint64 - block chan bool -} - -func NewTestBlockedStorageClient() *TestBlockingStorageClient { - return &TestBlockingStorageClient{ - block: make(chan bool), - numCalls: 0, - } -} - -func (c *TestBlockingStorageClient) Store(s model.Samples) error { - atomic.AddUint64(&c.numCalls, 1) - <-c.block - return nil -} - -func (c *TestBlockingStorageClient) NumCalls() uint64 { - return atomic.LoadUint64(&c.numCalls) -} - -func (c *TestBlockingStorageClient) unlock() { - close(c.block) -} - -func (c *TestBlockingStorageClient) Name() string { - return "testblockingstorageclient" -} - -func (t *QueueManager) queueLen() int { - t.shardsMtx.Lock() - defer t.shardsMtx.Unlock() - queueLength := 0 - for _, shard := range t.shards.queues { - queueLength += len(shard) - } - return queueLength -} - -func TestSpawnNotMoreThanMaxConcurrentSendsGoroutines(t *testing.T) { - // Our goal is to fully empty the queue: - // `MaxSamplesPerSend*Shards` samples should be consumed by the - // per-shard goroutines, and then another `MaxSamplesPerSend` - // should be left on the queue. 
- n := defaultMaxSamplesPerSend*1 + defaultMaxSamplesPerSend - - samples := make(model.Samples, 0, n) - for i := 0; i < n; i++ { - name := model.LabelValue(fmt.Sprintf("test_metric_%d", i)) - samples = append(samples, &model.Sample{ - Metric: model.Metric{ - model.MetricNameLabel: name, - }, - Value: model.SampleValue(i), - }) - } - - c := NewTestBlockedStorageClient() - m := NewQueueManager(QueueManagerConfig{ - Client: c, - QueueCapacity: n, - MaxShards: 1, - }) - - m.Start() - - defer func() { - c.unlock() - m.Stop() - }() - - for _, s := range samples { - m.Append(s) - } - - // Wait until the runShard() loops drain the queue. If things went right, it - // should then immediately block in sendSamples(), but, in case of error, - // it would spawn too many goroutines, and thus we'd see more calls to - // client.Store() - // - // The timed wait is maybe non-ideal, but, in order to verify that we're - // not spawning too many concurrent goroutines, we have to wait on the - // Run() loop to consume a specific number of elements from the - // queue... and it doesn't signal that in any obvious way, except by - // draining the queue. We cap the waiting at 1 second -- that should give - // plenty of time, and keeps the failure fairly quick if we're not draining - // the queue properly. - for i := 0; i < 100 && m.queueLen() > 0; i++ { - time.Sleep(10 * time.Millisecond) - } - - if m.queueLen() != defaultMaxSamplesPerSend { - t.Fatalf("Failed to drain QueueManager queue, %d elements left", - m.queueLen(), - ) - } - - numCalls := c.NumCalls() - if numCalls != uint64(1) { - t.Errorf("Saw %d concurrent sends, expected 1", numCalls) - } -} diff --git a/storage/remote/remote.go b/storage/remote/remote.go deleted file mode 100644 index a53f866b3d..0000000000 --- a/storage/remote/remote.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "sync" - - "github.com/prometheus/common/model" - - "github.com/prometheus/prometheus/config" -) - -// Storage allows queueing samples for remote writes. -type Storage struct { - mtx sync.RWMutex - queues []*QueueManager -} - -// ApplyConfig updates the state as the new config requires. -func (s *Storage) ApplyConfig(conf *config.Config) error { - s.mtx.Lock() - defer s.mtx.Unlock() - - newQueues := []*QueueManager{} - // TODO: we should only stop & recreate queues which have changes, - // as this can be quite disruptive. - for i, rwConf := range conf.RemoteWriteConfigs { - c, err := NewClient(i, rwConf) - if err != nil { - return err - } - newQueues = append(newQueues, NewQueueManager(QueueManagerConfig{ - Client: c, - ExternalLabels: conf.GlobalConfig.ExternalLabels, - RelabelConfigs: rwConf.WriteRelabelConfigs, - })) - } - - for _, q := range s.queues { - q.Stop() - } - - s.queues = newQueues - for _, q := range s.queues { - q.Start() - } - return nil -} - -// Stop the background processing of the storage queues. 
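Before this commit, the deleted Storage type above was driven roughly as follows. The config path is a placeholder and the usable zero-value Storage is an assumption of this sketch:

package main

import (
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/storage/remote"
)

func main() {
	conf, err := config.LoadFile("prometheus.yml") // hypothetical path
	if err != nil {
		panic(err)
	}

	var s remote.Storage
	// ApplyConfig builds and starts one queue per remote_write section.
	if err := s.ApplyConfig(conf); err != nil {
		panic(err)
	}
	defer s.Stop()

	// Append fans the sample out to every configured queue.
	s.Append(&model.Sample{
		Metric: model.Metric{model.MetricNameLabel: "up"},
		Value:  1,
	})
}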
-func (s *Storage) Stop() { - for _, q := range s.queues { - q.Stop() - } -} - -// Append implements storage.SampleAppender. Always returns nil. -func (s *Storage) Append(smpl *model.Sample) error { - s.mtx.RLock() - defer s.mtx.RUnlock() - - for _, q := range s.queues { - q.Append(smpl) - } - return nil -} - -// NeedsThrottling implements storage.SampleAppender. It will always return -// false as a remote storage drops samples on the floor if backlogging instead -// of asking for throttling. -func (s *Storage) NeedsThrottling() bool { - return false -} diff --git a/storage/remote/remote.pb.go b/storage/remote/remote.pb.go deleted file mode 100644 index a5a5356bf7..0000000000 --- a/storage/remote/remote.pb.go +++ /dev/null @@ -1,120 +0,0 @@ -// Code generated by protoc-gen-go. -// source: remote.proto -// DO NOT EDIT! - -/* -Package remote is a generated protocol buffer package. - -It is generated from these files: - remote.proto - -It has these top-level messages: - Sample - LabelPair - TimeSeries - WriteRequest -*/ -package remote - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Sample struct { - Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` -} - -func (m *Sample) Reset() { *m = Sample{} } -func (m *Sample) String() string { return proto.CompactTextString(m) } -func (*Sample) ProtoMessage() {} -func (*Sample) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type LabelPair struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -type TimeSeries struct { - Labels []*LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels,omitempty"` - // Sorted by time, oldest sample first. 
- Samples []*Sample `protobuf:"bytes,2,rep,name=samples" json:"samples,omitempty"` -} - -func (m *TimeSeries) Reset() { *m = TimeSeries{} } -func (m *TimeSeries) String() string { return proto.CompactTextString(m) } -func (*TimeSeries) ProtoMessage() {} -func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *TimeSeries) GetLabels() []*LabelPair { - if m != nil { - return m.Labels - } - return nil -} - -func (m *TimeSeries) GetSamples() []*Sample { - if m != nil { - return m.Samples - } - return nil -} - -type WriteRequest struct { - Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"` -} - -func (m *WriteRequest) Reset() { *m = WriteRequest{} } -func (m *WriteRequest) String() string { return proto.CompactTextString(m) } -func (*WriteRequest) ProtoMessage() {} -func (*WriteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *WriteRequest) GetTimeseries() []*TimeSeries { - if m != nil { - return m.Timeseries - } - return nil -} - -func init() { - proto.RegisterType((*Sample)(nil), "remote.Sample") - proto.RegisterType((*LabelPair)(nil), "remote.LabelPair") - proto.RegisterType((*TimeSeries)(nil), "remote.TimeSeries") - proto.RegisterType((*WriteRequest)(nil), "remote.WriteRequest") -} - -func init() { proto.RegisterFile("remote.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 216 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x90, 0x3f, 0x4f, 0x80, 0x30, - 0x10, 0xc5, 0x03, 0x68, 0x0d, 0x07, 0x31, 0xf1, 0xe2, 0xc0, 0xa8, 0x9d, 0x70, 0x61, 0xc0, 0xf8, - 0x01, 0x74, 0xd6, 0xc4, 0x14, 0x13, 0x47, 0x53, 0x92, 0x1b, 0x9a, 0xb4, 0x82, 0x6d, 0xf1, 0xf3, - 0x5b, 0x5a, 0xfe, 0xb8, 0xf5, 0xdd, 0xbd, 0x7b, 0xf7, 0xeb, 0x41, 0x6d, 0xc9, 0x4c, 0x9e, 0xba, - 0xd9, 0x4e, 0x7e, 0x42, 0x96, 0x14, 0x7f, 0x06, 0x36, 0x48, 0x33, 0x6b, 0xc2, 0x5b, 0xb8, 0xfc, - 0x95, 0x7a, 0xa1, 0x26, 0xbb, 0xcb, 0xda, 0x4c, 0x24, 0x81, 0xf7, 0x50, 0x7b, 0x65, 0xc8, 0xf9, - 0x60, 0xfa, 0x32, 0xae, 0xc9, 0x43, 0xb3, 0x10, 0xd5, 0x51, 0x7b, 0x73, 0xfc, 0x09, 0xca, 0x57, - 0x39, 0x92, 0x7e, 0x97, 0xca, 0x22, 0xc2, 0xc5, 0xb7, 0x34, 0x29, 0xa4, 0x14, 0xf1, 0x7d, 0x26, - 0xe7, 0xb1, 0x98, 0x04, 0x97, 0x00, 0x1f, 0x21, 0x65, 0x20, 0xab, 0xc8, 0xe1, 0x03, 0x30, 0xbd, - 0x86, 0xb8, 0x30, 0x59, 0xb4, 0x55, 0x7f, 0xd3, 0x6d, 0xb8, 0x47, 0xb4, 0xd8, 0x0c, 0xd8, 0xc2, - 0x95, 0x8b, 0xc8, 0x2b, 0xcd, 0xea, 0xbd, 0xde, 0xbd, 0xe9, 0x27, 0x62, 0x6f, 0xf3, 0x17, 0xa8, - 0x3f, 0xad, 0xf2, 0x24, 0xe8, 0x67, 0x09, 0xb8, 0xd8, 0x03, 0x44, 0xf0, 0xb8, 0x72, 0x5b, 0x84, - 0xfb, 0xf0, 0x09, 0x23, 0xfe, 0xb9, 0x46, 0x16, 0xef, 0xf5, 0xf8, 0x17, 0x00, 0x00, 0xff, 0xff, - 0x73, 0xb4, 0xd1, 0xb6, 0x3f, 0x01, 0x00, 0x00, -} diff --git a/storage/remote/remote.proto b/storage/remote/remote.proto deleted file mode 100644 index 88017c5168..0000000000 --- a/storage/remote/remote.proto +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2016 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package remote; - -message Sample { - double value = 1; - int64 timestamp_ms = 2; -} - -message LabelPair { - string name = 1; - string value = 2; -} - -message TimeSeries { - repeated LabelPair labels = 1; - // Sorted by time, oldest sample first. - repeated Sample samples = 2; -} - -message WriteRequest { - repeated TimeSeries timeseries = 1; -} diff --git a/util/httputil/client.go b/util/httputil/client.go index 42b1615bd7..4123814328 100644 --- a/util/httputil/client.go +++ b/util/httputil/client.go @@ -21,6 +21,7 @@ import ( "net" "net/http" "net/url" + "strings" "time" "github.com/prometheus/prometheus/config" @@ -31,10 +32,42 @@ func NewClient(rt http.RoundTripper) *http.Client { return &http.Client{Transport: rt} } -// NewDeadlineClient returns a new http.Client which will time out long running -// requests. -func NewDeadlineClient(timeout time.Duration, proxyURL *url.URL) *http.Client { - return NewClient(NewDeadlineRoundTripper(timeout, proxyURL)) +// NewClientFromConfig returns a new HTTP client configured for the +// given config.HTTPClientConfig. +func NewClientFromConfig(cfg config.HTTPClientConfig) (*http.Client, error) { + tlsConfig, err := NewTLSConfig(cfg.TLSConfig) + if err != nil { + return nil, err + } + // The only timeout we care about is the configured scrape timeout. + // It is applied on request. So we leave out any timings here. + var rt http.RoundTripper = &http.Transport{ + Proxy: http.ProxyURL(cfg.ProxyURL.URL), + DisableKeepAlives: true, + TLSClientConfig: tlsConfig, + } + + // If a bearer token is provided, create a round tripper that will set the + // Authorization header correctly on each request. + bearerToken := cfg.BearerToken + if len(bearerToken) == 0 && len(cfg.BearerTokenFile) > 0 { + b, err := ioutil.ReadFile(cfg.BearerTokenFile) + if err != nil { + return nil, fmt.Errorf("unable to read bearer token file %s: %s", cfg.BearerTokenFile, err) + } + bearerToken = strings.TrimSpace(string(b)) + } + + if len(bearerToken) > 0 { + rt = NewBearerAuthRoundTripper(bearerToken, rt) + } + + if cfg.BasicAuth != nil { + rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, rt) + } + + // Return a new client with the configured round tripper. + return NewClient(rt), nil } // NewDeadlineRoundTripper returns a new http.RoundTripper which will time out @@ -119,6 +152,7 @@ func cloneRequest(r *http.Request) *http.Request { return r2 } +// NewTLSConfig creates a new tls.Config from the given config.TLSConfig. 
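NewClientFromConfig above layers one round tripper per concern (TLS transport, then bearer token, then basic auth). A caller-side sketch, with the token file path and target URL as placeholders:

package main

import (
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/util/httputil"
)

func main() {
	cfg := config.HTTPClientConfig{
		BearerTokenFile: "/etc/prometheus/token", // hypothetical path
	}
	client, err := httputil.NewClientFromConfig(cfg)
	if err != nil {
		panic(err)
	}
	resp, err := client.Get("https://example.com/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}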
func NewTLSConfig(cfg config.TLSConfig) (*tls.Config, error) { tlsConfig := &tls.Config{InsecureSkipVerify: cfg.InsecureSkipVerify} diff --git a/vendor/github.com/dgryski/go-bits/clz.go b/vendor/github.com/dgryski/go-bits/clz.go new file mode 100644 index 0000000000..32c79425c2 --- /dev/null +++ b/vendor/github.com/dgryski/go-bits/clz.go @@ -0,0 +1,37 @@ +// +build !amd64 appengine + +package bits + +// Clz counts leading zeroes +func Clz(x uint64) uint64 { + var n uint64 + + n = 1 + + if (x >> 32) == 0 { + n = n + 32 + x = x << 32 + } + if (x >> (32 + 16)) == 0 { + n = n + 16 + x = x << 16 + } + + if (x >> (32 + 16 + 8)) == 0 { + n = n + 8 + x = x << 8 + } + + if (x >> (32 + 16 + 8 + 4)) == 0 { + n = n + 4 + x = x << 4 + } + + if (x >> (32 + 16 + 8 + 4 + 2)) == 0 { + n = n + 2 + x = x << 2 + } + + n = n - (x >> 63) + return uint64(n) +} diff --git a/vendor/github.com/dgryski/go-bits/ctz.go b/vendor/github.com/dgryski/go-bits/ctz.go new file mode 100644 index 0000000000..c97d7b393e --- /dev/null +++ b/vendor/github.com/dgryski/go-bits/ctz.go @@ -0,0 +1,39 @@ +// +build !amd64 appengine + +package bits + +// Ctz counts trailing zeroes +func Ctz(x uint64) uint64 { + + if x == 0 { + return 64 + } + + var n uint64 + + if (x & 0x00000000FFFFFFFF) == 0 { + n = n + 32 + x = x >> 32 + } + if (x & 0x000000000000FFFF) == 0 { + n = n + 16 + x = x >> 16 + } + if (x & 0x00000000000000FF) == 0 { + n = n + 8 + x = x >> 8 + } + if (x & 0x000000000000000F) == 0 { + n = n + 4 + x = x >> 4 + } + if (x & 0x0000000000000003) == 0 { + n = n + 2 + x = x >> 2 + } + if (x & 0x0000000000000001) == 0 { + n = n + 1 + } + + return n +} diff --git a/vendor/github.com/dgryski/go-bits/popcnt.go b/vendor/github.com/dgryski/go-bits/popcnt.go new file mode 100644 index 0000000000..e2eded2fea --- /dev/null +++ b/vendor/github.com/dgryski/go-bits/popcnt.go @@ -0,0 +1,15 @@ +// +build !amd64 appengine popcntgo + +package bits + +// Popcnt counts the number of bits set +func Popcnt(x uint64) uint64 { + // bit population count, see + // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 0x0101010101010101 + return x >> 56 +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go new file mode 100644 index 0000000000..6a72e8d15d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go @@ -0,0 +1,609 @@ +// Package client (v2) is the current official Go client for InfluxDB. +package client // import "github.com/influxdata/influxdb/client/v2" + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/models" +) + +// HTTPConfig is the config data needed to create an HTTP Client. +type HTTPConfig struct { + // Addr should be of the form "http://host:port" + // or "http://[ipv6-host%zone]:port". + Addr string + + // Username is the influxdb username, optional. + Username string + + // Password is the influxdb password, optional. + Password string + + // UserAgent is the http User Agent, defaults to "InfluxDBClient". + UserAgent string + + // Timeout for influxdb writes, defaults to no timeout. + Timeout time.Duration + + // InsecureSkipVerify gets passed to the http client, if true, it will + // skip https certificate verification. Defaults to false. 
+ InsecureSkipVerify bool + + // TLSConfig allows the user to set their own TLS config for the HTTP + // Client. If set, this option overrides InsecureSkipVerify. + TLSConfig *tls.Config +} + +// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct. +type BatchPointsConfig struct { + // Precision is the write precision of the points, defaults to "ns". + Precision string + + // Database is the database to write points to. + Database string + + // RetentionPolicy is the retention policy of the points. + RetentionPolicy string + + // Write consistency is the number of servers required to confirm write. + WriteConsistency string +} + +// Client is a client interface for writing & querying the database. +type Client interface { + // Ping checks that status of cluster, and will always return 0 time and no + // error for UDP clients. + Ping(timeout time.Duration) (time.Duration, string, error) + + // Write takes a BatchPoints object and writes all Points to InfluxDB. + Write(bp BatchPoints) error + + // Query makes an InfluxDB Query on the database. This will fail if using + // the UDP client. + Query(q Query) (*Response, error) + + // Close releases any resources a Client may be using. + Close() error +} + +// NewHTTPClient returns a new Client from the provided config. +// Client is safe for concurrent use by multiple goroutines. +func NewHTTPClient(conf HTTPConfig) (Client, error) { + if conf.UserAgent == "" { + conf.UserAgent = "InfluxDBClient" + } + + u, err := url.Parse(conf.Addr) + if err != nil { + return nil, err + } else if u.Scheme != "http" && u.Scheme != "https" { + m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+ + " must start with http:// or https://", u.Scheme) + return nil, errors.New(m) + } + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: conf.InsecureSkipVerify, + }, + } + if conf.TLSConfig != nil { + tr.TLSClientConfig = conf.TLSConfig + } + return &client{ + url: *u, + username: conf.Username, + password: conf.Password, + useragent: conf.UserAgent, + httpClient: &http.Client{ + Timeout: conf.Timeout, + Transport: tr, + }, + transport: tr, + }, nil +} + +// Ping will check to see if the server is up with an optional timeout on waiting for leader. +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. +func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) { + now := time.Now() + u := c.url + u.Path = "ping" + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + if timeout > 0 { + params := req.URL.Query() + params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds())) + req.URL.RawQuery = params.Encode() + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return 0, "", err + } + + if resp.StatusCode != http.StatusNoContent { + var err = fmt.Errorf(string(body)) + return 0, "", err + } + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Close releases the client's resources. +func (c *client) Close() error { + c.transport.CloseIdleConnections() + return nil +} + +// client is safe for concurrent use as the fields are all read-only +// once the client is instantiated. 
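A short exercise of the vendored constructor and Ping above; address and credentials are placeholders:

package main

import (
	"fmt"
	"time"

	client "github.com/influxdata/influxdb/client/v2"
)

func main() {
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr:     "http://localhost:8086", // placeholder
		Username: "prom",
		Password: "secret",
	})
	if err != nil {
		panic(err)
	}
	defer c.Close()

	rtt, version, err := c.Ping(5 * time.Second)
	if err != nil {
		panic(err)
	}
	fmt.Printf("InfluxDB %s answered in %v\n", version, rtt)
}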
+type client struct { + // N.B - if url.UserInfo is accessed in future modifications to the + // methods on client, you will need to syncronise access to url. + url url.URL + username string + password string + useragent string + httpClient *http.Client + transport *http.Transport +} + +// BatchPoints is an interface into a batched grouping of points to write into +// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate +// batch for each goroutine. +type BatchPoints interface { + // AddPoint adds the given point to the Batch of points. + AddPoint(p *Point) + // AddPoints adds the given points to the Batch of points. + AddPoints(ps []*Point) + // Points lists the points in the Batch. + Points() []*Point + + // Precision returns the currently set precision of this Batch. + Precision() string + // SetPrecision sets the precision of this batch. + SetPrecision(s string) error + + // Database returns the currently set database of this Batch. + Database() string + // SetDatabase sets the database of this Batch. + SetDatabase(s string) + + // WriteConsistency returns the currently set write consistency of this Batch. + WriteConsistency() string + // SetWriteConsistency sets the write consistency of this Batch. + SetWriteConsistency(s string) + + // RetentionPolicy returns the currently set retention policy of this Batch. + RetentionPolicy() string + // SetRetentionPolicy sets the retention policy of this Batch. + SetRetentionPolicy(s string) +} + +// NewBatchPoints returns a BatchPoints interface based on the given config. +func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) { + if conf.Precision == "" { + conf.Precision = "ns" + } + if _, err := time.ParseDuration("1" + conf.Precision); err != nil { + return nil, err + } + bp := &batchpoints{ + database: conf.Database, + precision: conf.Precision, + retentionPolicy: conf.RetentionPolicy, + writeConsistency: conf.WriteConsistency, + } + return bp, nil +} + +type batchpoints struct { + points []*Point + database string + precision string + retentionPolicy string + writeConsistency string +} + +func (bp *batchpoints) AddPoint(p *Point) { + bp.points = append(bp.points, p) +} + +func (bp *batchpoints) AddPoints(ps []*Point) { + bp.points = append(bp.points, ps...) +} + +func (bp *batchpoints) Points() []*Point { + return bp.points +} + +func (bp *batchpoints) Precision() string { + return bp.precision +} + +func (bp *batchpoints) Database() string { + return bp.database +} + +func (bp *batchpoints) WriteConsistency() string { + return bp.writeConsistency +} + +func (bp *batchpoints) RetentionPolicy() string { + return bp.retentionPolicy +} + +func (bp *batchpoints) SetPrecision(p string) error { + if _, err := time.ParseDuration("1" + p); err != nil { + return err + } + bp.precision = p + return nil +} + +func (bp *batchpoints) SetDatabase(db string) { + bp.database = db +} + +func (bp *batchpoints) SetWriteConsistency(wc string) { + bp.writeConsistency = wc +} + +func (bp *batchpoints) SetRetentionPolicy(rp string) { + bp.retentionPolicy = rp +} + +// Point represents a single data point. +type Point struct { + pt models.Point +} + +// NewPoint returns a point with the given timestamp. If a timestamp is not +// given, then data is sent to the database without a timestamp, in which case +// the server will assign local time upon reception. NOTE: it is recommended to +// send data with a timestamp. 
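NewPoint (defined next) combines with the batch API above as in this sketch; the database, measurement, and values are illustrative, and the client is assumed to be built as in the Ping example:

package main

import (
	"time"

	client "github.com/influxdata/influxdb/client/v2"
)

// writeOne batches a single point and writes it in one request.
func writeOne(c client.Client) error {
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  "prometheus", // placeholder database
		Precision: "ms",
	})
	if err != nil {
		return err
	}
	pt, err := client.NewPoint(
		"cpu_usage",
		map[string]string{"host": "a"},
		map[string]interface{}{"value": 0.64},
		time.Now(),
	)
	if err != nil {
		return err
	}
	bp.AddPoint(pt)
	return c.Write(bp)
}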
+func NewPoint( + name string, + tags map[string]string, + fields map[string]interface{}, + t ...time.Time, +) (*Point, error) { + var T time.Time + if len(t) > 0 { + T = t[0] + } + + pt, err := models.NewPoint(name, models.NewTags(tags), fields, T) + if err != nil { + return nil, err + } + return &Point{ + pt: pt, + }, nil +} + +// String returns a line-protocol string of the Point. +func (p *Point) String() string { + return p.pt.String() +} + +// PrecisionString returns a line-protocol string of the Point, +// with the timestamp formatted for the given precision. +func (p *Point) PrecisionString(precison string) string { + return p.pt.PrecisionString(precison) +} + +// Name returns the measurement name of the point. +func (p *Point) Name() string { + return p.pt.Name() +} + +// Tags returns the tags associated with the point. +func (p *Point) Tags() map[string]string { + return p.pt.Tags().Map() +} + +// Time return the timestamp for the point. +func (p *Point) Time() time.Time { + return p.pt.Time() +} + +// UnixNano returns timestamp of the point in nanoseconds since Unix epoch. +func (p *Point) UnixNano() int64 { + return p.pt.UnixNano() +} + +// Fields returns the fields for the point. +func (p *Point) Fields() (map[string]interface{}, error) { + return p.pt.Fields() +} + +// NewPointFrom returns a point from the provided models.Point. +func NewPointFrom(pt models.Point) *Point { + return &Point{pt: pt} +} + +func (c *client) Write(bp BatchPoints) error { + var b bytes.Buffer + + for _, p := range bp.Points() { + if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil { + return err + } + + if err := b.WriteByte('\n'); err != nil { + return err + } + } + + u := c.url + u.Path = "write" + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("db", bp.Database()) + params.Set("rp", bp.RetentionPolicy()) + params.Set("precision", bp.Precision()) + params.Set("consistency", bp.WriteConsistency()) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + return err + } + + return nil +} + +// Query defines a query to send to the server. +type Query struct { + Command string + Database string + Precision string + Chunked bool + ChunkSize int + Parameters map[string]interface{} +} + +// NewQuery returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +func NewQuery(command, database, precision string) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: make(map[string]interface{}), + } +} + +// NewQueryWithParameters returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +// parameters is a map of the parameter names used in the command to their values. 
+func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: parameters, + } +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err string `json:"error,omitempty"` +} + +// Error returns the first error from any statement. +// It returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != "" { + return fmt.Errorf(r.Err) + } + for _, result := range r.Results { + if result.Err != "" { + return fmt.Errorf(result.Err) + } + } + return nil +} + +// Message represents a user message. +type Message struct { + Level string + Text string +} + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Messages []*Message + Err string `json:"error,omitempty"` +} + +// Query sends a command to the server and returns the Response. +func (c *client) Query(q Query) (*Response, error) { + u := c.url + u.Path = "query" + + jsonParameters, err := json.Marshal(q.Parameters) + + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("q", q.Command) + params.Set("db", q.Database) + params.Set("params", string(jsonParameters)) + if q.Chunked { + params.Set("chunked", "true") + if q.ChunkSize > 0 { + params.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + } + + if q.Precision != "" { + params.Set("epoch", q.Precision) + } + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + // If we got an error while decoding the response, send that back. + return nil, err + } + + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != "" { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + decErr := dec.Decode(&response) + + // ignore this error if we got an invalid status code + if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { + decErr = nil + } + // If we got a valid decode error, send that back + if decErr != nil { + return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr) + } + } + // If we don't have an error in our json response, and didn't get statusOK + // then send back an error + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", + resp.StatusCode) + } + return &response, nil +} + +// duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.Reader + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. 
+type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: r, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, nil + } + // A decoding error happened. This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. + io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + + r.buf.Reset() + return &response, nil +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/udp.go b/vendor/github.com/influxdata/influxdb/client/v2/udp.go new file mode 100644 index 0000000000..779a28b33f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/udp.go @@ -0,0 +1,112 @@ +package client + +import ( + "fmt" + "io" + "net" + "time" +) + +const ( + // UDPPayloadSize is a reasonable default payload size for UDP packets that + // could be travelling over the internet. + UDPPayloadSize = 512 +) + +// UDPConfig is the config data needed to create a UDP Client. +type UDPConfig struct { + // Addr should be of the form "host:port" + // or "[ipv6-host%zone]:port". + Addr string + + // PayloadSize is the maximum size of a UDP client message, optional + // Tune this based on your network. Defaults to UDPPayloadSize. + PayloadSize int +} + +// NewUDPClient returns a client interface for writing to an InfluxDB UDP +// service from the given config. +func NewUDPClient(conf UDPConfig) (Client, error) { + var udpAddr *net.UDPAddr + udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) + if err != nil { + return nil, err + } + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + + payloadSize := conf.PayloadSize + if payloadSize == 0 { + payloadSize = UDPPayloadSize + } + + return &udpclient{ + conn: conn, + payloadSize: payloadSize, + }, nil +} + +// Close releases the udpclient's resources. 
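The UDP variant below trades delivery guarantees for simplicity; its Write splits batches so that no datagram exceeds PayloadSize. A construction sketch with placeholder values:

package main

import client "github.com/influxdata/influxdb/client/v2"

func main() {
	c, err := client.NewUDPClient(client.UDPConfig{
		Addr:        "localhost:8089", // placeholder
		PayloadSize: 512,              // the UDPPayloadSize default
	})
	if err != nil {
		panic(err)
	}
	defer c.Close()
}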
+func (uc *udpclient) Close() error { + return uc.conn.Close() +} + +type udpclient struct { + conn io.WriteCloser + payloadSize int +} + +func (uc *udpclient) Write(bp BatchPoints) error { + var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed + var d, _ = time.ParseDuration("1" + bp.Precision()) + + var delayedError error + + var checkBuffer = func(n int) { + if len(b) > 0 && len(b)+n > uc.payloadSize { + if _, err := uc.conn.Write(b); err != nil { + delayedError = err + } + b = b[:0] + } + } + + for _, p := range bp.Points() { + p.pt.Round(d) + pointSize := p.pt.StringSize() + 1 // include newline in size + //point := p.pt.RoundedString(d) + "\n" + + checkBuffer(pointSize) + + if p.Time().IsZero() || pointSize <= uc.payloadSize { + b = p.pt.AppendString(b) + b = append(b, '\n') + continue + } + + points := p.pt.Split(uc.payloadSize - 1) // account for newline character + for _, sp := range points { + checkBuffer(sp.StringSize() + 1) + b = sp.AppendString(b) + b = append(b, '\n') + } + } + + if len(b) > 0 { + if _, err := uc.conn.Write(b); err != nil { + return err + } + } + return delayedError +} + +func (uc *udpclient) Query(q Query) (*Response, error) { + return nil, fmt.Errorf("Querying via UDP is not supported") +} + +func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { + return 0, "", nil +} diff --git a/vendor/github.com/influxdata/influxdb/models/consistency.go b/vendor/github.com/influxdata/influxdb/models/consistency.go new file mode 100644 index 0000000000..2a3269bca1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/consistency.go @@ -0,0 +1,48 @@ +package models + +import ( + "errors" + "strings" +) + +// ConsistencyLevel represent a required replication criteria before a write can +// be returned as successful. +// +// The consistency level is handled in open-source InfluxDB but only applicable to clusters. +type ConsistencyLevel int + +const ( + // ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet. + ConsistencyLevelAny ConsistencyLevel = iota + + // ConsistencyLevelOne requires at least one data node acknowledged a write. + ConsistencyLevelOne + + // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write. + ConsistencyLevelQuorum + + // ConsistencyLevelAll requires all data nodes to acknowledge a write. + ConsistencyLevelAll +) + +var ( + // ErrInvalidConsistencyLevel is returned when parsing the string version + // of a consistency level. + ErrInvalidConsistencyLevel = errors.New("invalid consistency level") +) + +// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const. 
+func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { + switch strings.ToLower(level) { + case "any": + return ConsistencyLevelAny, nil + case "one": + return ConsistencyLevelOne, nil + case "quorum": + return ConsistencyLevelQuorum, nil + case "all": + return ConsistencyLevelAll, nil + default: + return 0, ErrInvalidConsistencyLevel + } +} diff --git a/vendor/github.com/influxdata/influxdb/models/inline_fnv.go b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go new file mode 100644 index 0000000000..eec1ae8b01 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go @@ -0,0 +1,32 @@ +package models // import "github.com/influxdata/influxdb/models" + +// from stdlib hash/fnv/fnv.go +const ( + prime64 = 1099511628211 + offset64 = 14695981039346656037 +) + +// InlineFNV64a is an alloc-free port of the standard library's fnv64a. +// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function. +type InlineFNV64a uint64 + +// NewInlineFNV64a returns a new instance of InlineFNV64a. +func NewInlineFNV64a() InlineFNV64a { + return offset64 +} + +// Write adds data to the running hash. +func (s *InlineFNV64a) Write(data []byte) (int, error) { + hash := uint64(*s) + for _, c := range data { + hash ^= uint64(c) + hash *= prime64 + } + *s = InlineFNV64a(hash) + return len(data), nil +} + +// Sum64 returns the uint64 of the current resulting hash. +func (s *InlineFNV64a) Sum64() uint64 { + return uint64(*s) +} diff --git a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go new file mode 100644 index 0000000000..dcc8ae402e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go @@ -0,0 +1,38 @@ +package models // import "github.com/influxdata/influxdb/models" + +import ( + "reflect" + "strconv" + "unsafe" +) + +// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt. +func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) { + s := unsafeBytesToString(b) + return strconv.ParseInt(s, base, bitSize) +} + +// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat. +func parseFloatBytes(b []byte, bitSize int) (float64, error) { + s := unsafeBytesToString(b) + return strconv.ParseFloat(s, bitSize) +} + +// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool. +func parseBoolBytes(b []byte) (bool, error) { + return strconv.ParseBool(unsafeBytesToString(b)) +} + +// unsafeBytesToString converts a []byte to a string without a heap allocation. +// +// It is unsafe, and is intended to prepare input to short-lived functions +// that require strings. +func unsafeBytesToString(in []byte) string { + src := *(*reflect.SliceHeader)(unsafe.Pointer(&in)) + dst := reflect.StringHeader{ + Data: src.Data, + Len: src.Len, + } + s := *(*string)(unsafe.Pointer(&dst)) + return s +} diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go new file mode 100644 index 0000000000..c8a39632bc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/points.go @@ -0,0 +1,2035 @@ +// Package models implements basic objects used throughout the TICK stack. 
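InlineFNV64a above is documented as an alloc-free port of the standard library's fnv64a, which is easy to spot-check against hash/fnv:

package main

import (
	"fmt"
	"hash/fnv"

	"github.com/influxdata/influxdb/models"
)

func main() {
	data := []byte("cpu,host=a")

	h := models.NewInlineFNV64a()
	h.Write(data)

	std := fnv.New64a()
	std.Write(data)

	fmt.Println(h.Sum64() == std.Sum64()) // true
}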
+package models // import "github.com/influxdata/influxdb/models"
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/influxdb/pkg/escape"
+)
+
+var (
+ measurementEscapeCodes = map[byte][]byte{
+ ',': []byte(`\,`),
+ ' ': []byte(`\ `),
+ }
+
+ tagEscapeCodes = map[byte][]byte{
+ ',': []byte(`\,`),
+ ' ': []byte(`\ `),
+ '=': []byte(`\=`),
+ }
+
+ // ErrPointMustHaveAField is returned when operating on a point that does not have any fields.
+ ErrPointMustHaveAField = errors.New("point without fields is unsupported")
+
+ // ErrInvalidNumber is returned when a number is expected but not provided.
+ ErrInvalidNumber = errors.New("invalid number")
+
+ // ErrInvalidPoint is returned when a point cannot be parsed correctly.
+ ErrInvalidPoint = errors.New("point is invalid")
+)
+
+const (
+ // MaxKeyLength is the largest allowed size of the combined measurement and tag keys.
+ MaxKeyLength = 65535
+)
+
+// Point defines the values that will be written to the database.
+type Point interface {
+ // Name returns the measurement name for the point.
+ Name() string
+
+ // SetName updates the measurement name for the point.
+ SetName(string)
+
+ // Tags returns the tag set for the point.
+ Tags() Tags
+
+ // AddTag adds or replaces a tag value for a point.
+ AddTag(key, value string)
+
+ // SetTags replaces the tags for the point.
+ SetTags(tags Tags)
+
+ // Fields returns the fields for the point.
+ Fields() (Fields, error)
+
+ // Time returns the timestamp for the point.
+ Time() time.Time
+
+ // SetTime updates the timestamp for the point.
+ SetTime(t time.Time)
+
+ // UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
+ UnixNano() int64
+
+ // HashID returns a non-cryptographic checksum of the point's key.
+ HashID() uint64
+
+ // Key returns the key (measurement joined with tags) of the point.
+ Key() []byte
+
+ // String returns a string representation of the point. If there is a
+ // timestamp associated with the point then it will be specified with the default
+ // precision of nanoseconds.
+ String() string
+
+ // MarshalBinary returns a binary representation of the point.
+ MarshalBinary() ([]byte, error)
+
+ // PrecisionString returns a string representation of the point. If there
+ // is a timestamp associated with the point then it will be specified in the
+ // given unit.
+ PrecisionString(precision string) string
+
+ // RoundedString returns a string representation of the point. If there
+ // is a timestamp associated with the point, then it will be rounded to the
+ // given duration.
+ RoundedString(d time.Duration) string
+
+ // Split will attempt to return multiple points with the same timestamp whose
+ // string representations are no longer than size. Points with a single field or
+ // a point without a timestamp may exceed the requested size.
+ Split(size int) []Point
+
+ // Round will round the timestamp of the point to the given duration.
+ Round(d time.Duration)
+
+ // StringSize returns the length of the string that would be returned by String().
+ StringSize() int
+
+ // AppendString appends the result of String() to the provided buffer and returns
+ // the result, potentially reducing string allocations.
+ AppendString(buf []byte) []byte
+
+ // FieldIterator returns a FieldIterator that can be used to traverse the
+ // fields of a point without constructing the in-memory map.
+ FieldIterator() FieldIterator
+}
+
+// FieldType represents the type of a field.
+type FieldType int
+
+const (
+ // Integer indicates the field's type is integer.
+ Integer FieldType = iota
+
+ // Float indicates the field's type is float.
+ Float
+
+ // Boolean indicates the field's type is boolean.
+ Boolean
+
+ // String indicates the field's type is string.
+ String
+
+ // Empty is used to indicate that there is no field.
+ Empty
+)
+
+// FieldIterator provides a low-allocation interface to iterate through a point's fields.
+type FieldIterator interface {
+ // Next indicates whether there are any fields remaining.
+ Next() bool
+
+ // FieldKey returns the key of the current field.
+ FieldKey() []byte
+
+ // Type returns the FieldType of the current field.
+ Type() FieldType
+
+ // StringValue returns the string value of the current field.
+ StringValue() string
+
+ // IntegerValue returns the integer value of the current field.
+ IntegerValue() (int64, error)
+
+ // BooleanValue returns the boolean value of the current field.
+ BooleanValue() (bool, error)
+
+ // FloatValue returns the float value of the current field.
+ FloatValue() (float64, error)
+
+ // Delete deletes the current field.
+ Delete()
+
+ // Reset resets the iterator to its initial state.
+ Reset()
+}
+
+// Points represents a sortable list of points by timestamp.
+type Points []Point
+
+// Len implements sort.Interface.
+func (a Points) Len() int { return len(a) }
+
+// Less implements sort.Interface.
+func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }
+
+// Swap implements sort.Interface.
+func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// point is the default implementation of Point.
+type point struct {
+ time time.Time
+
+ // text encoding of measurement and tags
+ // key must always be stored sorted by tags, if the original line was not sorted,
+ // we need to resort it
+ key []byte
+
+ // text encoding of field data
+ fields []byte
+
+ // text encoding of timestamp
+ ts []byte
+
+ // cached version of parsed fields from data
+ cachedFields map[string]interface{}
+
+ // cached version of parsed name from key
+ cachedName string
+
+ // cached version of parsed tags
+ cachedTags Tags
+
+ it fieldIterator
+}
+
+const (
+ // the number of characters for the largest possible int64 (9223372036854775807)
+ maxInt64Digits = 19
+
+ // the number of characters for the smallest possible int64 (-9223372036854775808)
+ minInt64Digits = 20
+
+ // the number of characters required for the largest float64 before a range check
+ // would occur during parsing
+ maxFloat64Digits = 25
+
+ // the number of characters required for the smallest float64 before a range check
+ // would occur during parsing
+ minFloat64Digits = 27
+)
+
+// ParsePoints returns a slice of Points from a text representation of a point
+// with each point separated by newlines. If any points fail to parse, a non-nil error
+// will be returned in addition to the points that parsed successfully.
+func ParsePoints(buf []byte) ([]Point, error) {
+ return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
+}
+
+// ParsePointsString is identical to ParsePoints but accepts a string.
+func ParsePointsString(buf string) ([]Point, error) {
+ return ParsePoints([]byte(buf))
+}
+
+// ParseKey returns the measurement name and tags from a point.
+//
+// NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
+func ParseKey(buf []byte) (string, Tags, error) {
+ // Ignore the error because scanMeasurement returns "missing fields" which we ignore
+ // when just parsing a key
+ state, i, _ := scanMeasurement(buf, 0)
+
+ var tags Tags
+ if state == tagKeyState {
+ tags = parseTags(buf)
+ // scanMeasurement returns the location of the comma if there are tags, strip that off
+ return string(buf[:i-1]), tags, nil
+ }
+ return string(buf[:i]), tags, nil
+}
+
+// ParsePointsWithPrecision is similar to ParsePoints, but allows the
+// caller to provide a precision for time.
+//
+// NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
+func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
+ points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1)
+ var (
+ pos int
+ block []byte
+ failed []string
+ )
+ for pos < len(buf) {
+ pos, block = scanLine(buf, pos)
+ pos++
+
+ if len(block) == 0 {
+ continue
+ }
+
+ // lines which start with '#' are comments
+ start := skipWhitespace(block, 0)
+
+ // If line is all whitespace, just skip it
+ if start >= len(block) {
+ continue
+ }
+
+ if block[start] == '#' {
+ continue
+ }
+
+ // strip the newline if one is present
+ if block[len(block)-1] == '\n' {
+ block = block[:len(block)-1]
+ }
+
+ pt, err := parsePoint(block[start:], defaultTime, precision)
+ if err != nil {
+ failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err))
+ } else {
+ points = append(points, pt)
+ }
+
+ }
+ if len(failed) > 0 {
+ return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
+ }
+ return points, nil
+
+}
+
+func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
+ // scan the first block which is measurement[,tag1=value1,tag2=value2...]
+ pos, key, err := scanKey(buf, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // measurement name is required
+ if len(key) == 0 {
+ return nil, fmt.Errorf("missing measurement")
+ }
+
+ if len(key) > MaxKeyLength {
+ return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
+ }
+
+ // scan the second block, which is field1=value1[,field2=value2,...]
+ pos, fields, err := scanFields(buf, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ // at least one field is required
+ if len(fields) == 0 {
+ return nil, fmt.Errorf("missing fields")
+ }
+
+ // scan the last block which is an optional integer timestamp
+ pos, ts, err := scanTime(buf, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ pt := &point{
+ key: key,
+ fields: fields,
+ ts: ts,
+ }
+
+ if len(ts) == 0 {
+ pt.time = defaultTime
+ pt.SetPrecision(precision)
+ } else {
+ ts, err := parseIntBytes(ts, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ pt.time, err = SafeCalcTime(ts, precision)
+ if err != nil {
+ return nil, err
+ }
+
+ // Determine if there are illegal non-whitespace characters after the
+ // timestamp block.
+ for pos < len(buf) {
+ if buf[pos] != ' ' {
+ return nil, ErrInvalidPoint
+ }
+ pos++
+ }
+ }
+ return pt, nil
+}
+
+// GetPrecisionMultiplier will return a multiplier for the precision specified.
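+//
+// For example (editor's illustration): callers such as SafeCalcTime multiply a
+// raw timestamp by this value to convert it to nanoseconds.
+//
+//	ns := ts * GetPrecisionMultiplier("ms") // "ms" yields int64(time.Millisecond) == 1e6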
+func GetPrecisionMultiplier(precision string) int64 {
+ d := time.Nanosecond
+ switch precision {
+ case "u":
+ d = time.Microsecond
+ case "ms":
+ d = time.Millisecond
+ case "s":
+ d = time.Second
+ case "m":
+ d = time.Minute
+ case "h":
+ d = time.Hour
+ }
+ return int64(d)
+}
+
+// scanKey scans buf starting at i for the measurement and tag portion of the point.
+// It returns the ending position and the byte slice of key within buf. If there
+// are tags, they will be sorted if they are not already.
+func scanKey(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+
+ i = start
+
+ // Determines whether the tags are sorted; assume they are
+ sorted := true
+
+ // indices holds the indexes within buf of the start of each tag. For example,
+ // a buf of 'cpu,host=a,region=b,zone=c' would have an indices slice of [4,11,20]
+ // which indicates that the first tag starts at buf[4], the second at buf[11], and
+ // the last at buf[20]
+ indices := make([]int, 100)
+
+ // tracks how many commas we've seen so we know how many values in indices are in use.
+ // Since indices is an arbitrarily large slice,
+ // we need to know how many of its values are actually in use.
+ commas := 0
+
+ // First scan the Point's measurement.
+ state, i, err := scanMeasurement(buf, i)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+
+ // Optionally scan tags if needed.
+ if state == tagKeyState {
+ i, commas, indices, err = scanTags(buf, i, indices)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+ }
+
+ // Now that we know where the key region is within buf, and the location of the tags, we
+ // need to determine if duplicate tags exist and if the tags are sorted. This iterates
+ // over the list comparing each tag in the sequence with each other.
+ for j := 0; j < commas-1; j++ {
+ // get the left and right tags
+ _, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
+ _, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=')
+
+ // If left is greater than right, the tags are not sorted. We do not have to
+ // continue because the short path no longer works.
+ // If the tags are equal, then there are duplicate tags, and we should abort.
+ // If the tags are not sorted, this pass may not find duplicate tags and we
+ // need to do a more exhaustive search later.
+ if cmp := bytes.Compare(left, right); cmp > 0 {
+ sorted = false
+ break
+ } else if cmp == 0 {
+ return i, buf[start:i], fmt.Errorf("duplicate tags")
+ }
+ }
+
+ // If the tags are not sorted, then sort them. This sort is inline and
+ // uses the tag indices we created earlier. The actual buffer is not sorted; the
+ // indices are, using the buffer for value comparison. After the indices are sorted,
+ // the buffer is reconstructed from the sorted indices.
+ if !sorted && commas > 0 {
+ // Get the measurement name for later
+ measurement := buf[start : indices[0]-1]
+
+ // Sort the indices
+ indices := indices[:commas]
+ insertionSort(0, commas, buf, indices)
+
+ // Create a new key using the measurement and sorted indices
+ b := make([]byte, len(buf[start:i]))
+ pos := copy(b, measurement)
+ for _, i := range indices {
+ b[pos] = ','
+ pos++
+ _, v := scanToSpaceOr(buf, i, ',')
+ pos += copy(b[pos:], v)
+ }
+
+ // Check again for duplicate tags now that the tags are sorted.
+ for j := 0; j < commas-1; j++ {
+ // get the left and right tags
+ _, left := scanTo(buf[indices[j]:], 0, '=')
+ _, right := scanTo(buf[indices[j+1]:], 0, '=')
+
+ // If the tags are equal, then there are duplicate tags, and we should abort.
+ // If the tags are not sorted, this pass may not find duplicate tags and we
+ // need to do a more exhaustive search later.
+ if bytes.Equal(left, right) {
+ return i, b, fmt.Errorf("duplicate tags")
+ }
+ }
+
+ return i, b, nil
+ }
+
+ return i, buf[start:i], nil
+}
+
+// The following constants allow us to specify which state to move to
+// next, when scanning sections of a Point.
+const (
+ tagKeyState = iota
+ tagValueState
+ fieldsState
+)
+
+// scanMeasurement examines the measurement part of a Point, returning
+// the next state to move to, and the current location in the buffer.
+func scanMeasurement(buf []byte, i int) (int, int, error) {
+ // Check first byte of measurement, anything except a comma is fine.
+ // It can't be a space, since whitespace is stripped prior to this
+ // function call.
+ if i >= len(buf) || buf[i] == ',' {
+ return -1, i, fmt.Errorf("missing measurement")
+ }
+
+ for {
+ i++
+ if i >= len(buf) {
+ // cpu
+ return -1, i, fmt.Errorf("missing fields")
+ }
+
+ if buf[i-1] == '\\' {
+ // Skip character (it's escaped).
+ continue
+ }
+
+ // Unescaped comma; move onto scanning the tags.
+ if buf[i] == ',' {
+ return tagKeyState, i + 1, nil
+ }
+
+ // Unescaped space; move onto scanning the fields.
+ if buf[i] == ' ' {
+ // cpu value=1.0
+ return fieldsState, i, nil
+ }
+ }
+}
+
+// scanTags examines all the tags in a Point, keeping track of and
+// returning the updated indices slice, number of commas and location
+// in buf where to start examining the Point fields.
+func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) {
+ var (
+ err error
+ commas int
+ state = tagKeyState
+ )
+
+ for {
+ switch state {
+ case tagKeyState:
+ // Grow our indices slice if we have too many tags.
+ if commas >= len(indices) {
+ newIndices := make([]int, cap(indices)*2)
+ copy(newIndices, indices)
+ indices = newIndices
+ }
+ indices[commas] = i
+ commas++
+
+ i, err = scanTagsKey(buf, i)
+ state = tagValueState // tag value always follows a tag key
+ case tagValueState:
+ state, i, err = scanTagsValue(buf, i)
+ case fieldsState:
+ indices[commas] = i + 1
+ return i, commas, indices, nil
+ }
+
+ if err != nil {
+ return i, commas, indices, err
+ }
+ }
+}
+
+// scanTagsKey scans each character in a tag key.
+func scanTagsKey(buf []byte, i int) (int, error) {
+ // First character of the key.
+ if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
+ // cpu,{'', ' ', ',', '='}
+ return i, fmt.Errorf("missing tag key")
+ }
+
+ // Examine each character in the tag key until we hit an unescaped
+ // equals (the tag value), or we hit an error (i.e., unescaped
+ // space or comma).
+ for {
+ i++
+
+ // Either we reached the end of the buffer or we hit an
+ // unescaped comma or space.
+ if i >= len(buf) ||
+ ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
+ // cpu,tag{'', ' ', ','}
+ return i, fmt.Errorf("missing tag value")
+ }
+
+ if buf[i] == '=' && buf[i-1] != '\\' {
+ // cpu,tag=
+ return i + 1, nil
+ }
+ }
+}
+
+// scanTagsValue scans each character in a tag value.
+func scanTagsValue(buf []byte, i int) (int, int, error) {
+ // Tag value cannot be empty.
+ if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
+ // cpu,tag={',', ' '}
+ return -1, i, fmt.Errorf("missing tag value")
+ }
+
+ // Examine each character in the tag value until we hit an unescaped
+ // comma (move onto next tag key), an unescaped space (move onto
+ // fields), or we error out.
+ for {
+ i++
+ if i >= len(buf) {
+ // cpu,tag=value
+ return -1, i, fmt.Errorf("missing fields")
+ }
+
+ // An unescaped equals sign is an invalid tag value.
+ if buf[i] == '=' && buf[i-1] != '\\' {
+ // cpu,tag={'=', 'fo=o'}
+ return -1, i, fmt.Errorf("invalid tag format")
+ }
+
+ if buf[i] == ',' && buf[i-1] != '\\' {
+ // cpu,tag=foo,
+ return tagKeyState, i + 1, nil
+ }
+
+ // cpu,tag=foo value=1.0
+ // cpu, tag=foo\= value=1.0
+ if buf[i] == ' ' && buf[i-1] != '\\' {
+ return fieldsState, i, nil
+ }
+ }
+}
+
+func insertionSort(l, r int, buf []byte, indices []int) {
+ for i := l + 1; i < r; i++ {
+ for j := i; j > l && less(buf, indices, j, j-1); j-- {
+ indices[j], indices[j-1] = indices[j-1], indices[j]
+ }
+ }
+}
+
+func less(buf []byte, indices []int, i, j int) bool {
+ // This grabs the tag names for i & j, it ignores the values
+ _, a := scanTo(buf, indices[i], '=')
+ _, b := scanTo(buf, indices[j], '=')
+ return bytes.Compare(a, b) < 0
+}
+
+// scanFields scans buf, starting at i for the fields section of a point. It returns
+// the ending position and the byte slice of the fields within buf.
+func scanFields(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+ i = start
+ quoted := false
+
+ // tracks how many '=' we've seen
+ equals := 0
+
+ // tracks how many commas we've seen
+ commas := 0
+
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // escaped characters?
+ if buf[i] == '\\' && i+1 < len(buf) {
+ i += 2
+ continue
+ }
+
+ // If the value is quoted, scan until we get to the end quote.
+ // Quotes are only significant in the field value; they are not
+ // significant in the field key.
+ if buf[i] == '"' && equals > commas {
+ quoted = !quoted
+ i++
+ continue
+ }
+
+ // If we see an =, ensure that there is at least one char before and after it
+ if buf[i] == '=' && !quoted {
+ equals++
+
+ // check for "... =123" but allow "a\ =123"
+ if buf[i-1] == ' ' && buf[i-2] != '\\' {
+ return i, buf[start:i], fmt.Errorf("missing field key")
+ }
+
+ // check for "...a=123,=456" but allow "a=123,a\,=456"
+ if buf[i-1] == ',' && buf[i-2] != '\\' {
+ return i, buf[start:i], fmt.Errorf("missing field key")
+ }
+
+ // check for "... value="
+ if i+1 >= len(buf) {
+ return i, buf[start:i], fmt.Errorf("missing field value")
+ }
+
+ // check for "... value=,value2=..."
+ if buf[i+1] == ',' || buf[i+1] == ' ' {
+ return i, buf[start:i], fmt.Errorf("missing field value")
+ }
+
+ if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
+ var err error
+ i, err = scanNumber(buf, i+1)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+ continue
+ }
+ // If next byte is not a double-quote, the value must be a boolean
+ if buf[i+1] != '"' {
+ var err error
+ i, _, err = scanBoolean(buf, i+1)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+ continue
+ }
+ }
+
+ if buf[i] == ',' && !quoted {
+ commas++
+ }
+
+ // reached end of block?
+ if buf[i] == ' ' && !quoted {
+ break
+ }
+ i++
+ }
+
+ if quoted {
+ return i, buf[start:i], fmt.Errorf("unbalanced quotes")
+ }
+
+ // check that all field sections had keys and values (e.g. prevent "a=1,b")
+ if equals == 0 || commas != equals-1 {
+ return i, buf[start:i], fmt.Errorf("invalid field format")
+ }
+
+ return i, buf[start:i], nil
+}
+
+// scanTime scans buf, starting at i for the time section of a point. It
+// returns the ending position and the byte slice of the timestamp within buf
+// and an error if the timestamp is not in the correct numeric format.
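+//
+// For example (editor's illustration): given `cpu value=1.0 1480350305000000000`,
+// the returned slice covers the trailing "1480350305000000000" block.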
+func scanTime(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+ i = start
+
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // Reached end of block or trailing whitespace?
+ if buf[i] == '\n' || buf[i] == ' ' {
+ break
+ }
+
+ // Handle negative timestamps
+ if i == start && buf[i] == '-' {
+ i++
+ continue
+ }
+
+ // Timestamps should be integers, make sure they are so we don't need
+ // to actually parse the timestamp until needed.
+ if buf[i] < '0' || buf[i] > '9' {
+ return i, buf[start:i], fmt.Errorf("bad timestamp")
+ }
+ i++
+ }
+ return i, buf[start:i], nil
+}
+
+func isNumeric(b byte) bool {
+ return (b >= '0' && b <= '9') || b == '.'
+}
+
+// scanNumber returns the end position within buf, starting at i, after
+// scanning over buf for an integer or float. It returns an
+// error if an invalid number is scanned.
+func scanNumber(buf []byte, i int) (int, error) {
+ start := i
+ var isInt bool
+
+ // Is negative number?
+ if i < len(buf) && buf[i] == '-' {
+ i++
+ // There must be more characters now, as just '-' is illegal.
+ if i == len(buf) {
+ return i, ErrInvalidNumber
+ }
+ }
+
+ // tracks whether we've seen a decimal point
+ decimal := false
+
+ // indicates the number is float in scientific notation
+ scientific := false
+
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ',' || buf[i] == ' ' {
+ break
+ }
+
+ if buf[i] == 'i' && i > start && !isInt {
+ isInt = true
+ i++
+ continue
+ }
+
+ if buf[i] == '.' {
+ // Can't have more than 1 decimal (e.g. 1.1.1 should fail)
+ if decimal {
+ return i, ErrInvalidNumber
+ }
+ decimal = true
+ }
+
+ // `e` is valid for floats but not as the first char
+ if i > start && (buf[i] == 'e' || buf[i] == 'E') {
+ scientific = true
+ i++
+ continue
+ }
+
+ // + and - are only valid at this point if they follow an e (scientific notation)
+ if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
+ i++
+ continue
+ }
+
+ // NaN is an unsupported value
+ if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
+ return i, ErrInvalidNumber
+ }
+
+ if !isNumeric(buf[i]) {
+ return i, ErrInvalidNumber
+ }
+ i++
+ }
+
+ if isInt && (decimal || scientific) {
+ return i, ErrInvalidNumber
+ }
+
+ numericDigits := i - start
+ if isInt {
+ numericDigits--
+ }
+ if decimal {
+ numericDigits--
+ }
+ if buf[start] == '-' {
+ numericDigits--
+ }
+
+ if numericDigits == 0 {
+ return i, ErrInvalidNumber
+ }
+
+ // It's more common that numbers will be within min/max range for their type but we need to prevent
+ // out of range numbers from being parsed successfully. This uses some simple heuristics to decide
+ // if we should parse the number to the actual type. It does not do it all the time because it incurs
+ // extra allocations and we end up converting the type again when writing points to disk.
+ if isInt {
+ // Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
+ if buf[i-1] != 'i' {
+ return i, ErrInvalidNumber
+ }
+ // Parse the int to check bounds; the number of digits could be larger than the max range.
+ // We subtract 1 from the index to remove the `i` from our tests
+ if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
+ if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
+ return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
+ }
+ }
+ } else {
+ // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
+ if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
+ if _, err := parseFloatBytes(buf[start:i], 10); err != nil {
+ return i, fmt.Errorf("invalid float")
+ }
+ }
+ }
+
+ return i, nil
+}
+
+// scanBoolean returns the end position within buf, starting at i, after
+// scanning over buf for a boolean. Valid values for a boolean are
+// t, T, true, TRUE, f, F, false, FALSE. It returns an error if an invalid boolean
+// is scanned.
+func scanBoolean(buf []byte, i int) (int, []byte, error) {
+ start := i
+
+ if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ i++
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ',' || buf[i] == ' ' {
+ break
+ }
+ i++
+ }
+
+ // Single char bool (t, T, f, F) is ok
+ if i-start == 1 {
+ return i, buf[start:i], nil
+ }
+
+ // length must be 4 for true or TRUE
+ if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ // length must be 5 for false or FALSE
+ if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ // Otherwise
+ valid := false
+ switch buf[start] {
+ case 't':
+ valid = bytes.Equal(buf[start:i], []byte("true"))
+ case 'f':
+ valid = bytes.Equal(buf[start:i], []byte("false"))
+ case 'T':
+ valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
+ case 'F':
+ valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
+ }
+
+ if !valid {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ return i, buf[start:i], nil
+
+}
+
+// skipWhitespace returns the end position within buf, starting at i, after
+// scanning over spaces in tags.
+func skipWhitespace(buf []byte, i int) int {
+ for i < len(buf) {
+ if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
+ break
+ }
+ i++
+ }
+ return i
+}
+
+// scanLine returns the end position in buf and the next line found within
+// buf.
+func scanLine(buf []byte, i int) (int, []byte) {
+ start := i
+ quoted := false
+ fields := false
+
+ // tracks how many '=' and commas we've seen
+ // this duplicates some of the functionality in scanFields
+ equals := 0
+ commas := 0
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // skip past escaped characters
+ if buf[i] == '\\' {
+ i += 2
+ continue
+ }
+
+ if buf[i] == ' ' {
+ fields = true
+ }
+
+ // If we see a double quote, make sure it is not escaped
+ if fields {
+ if !quoted && buf[i] == '=' {
+ i++
+ equals++
+ continue
+ } else if !quoted && buf[i] == ',' {
+ i++
+ commas++
+ continue
+ } else if buf[i] == '"' && equals > commas {
+ i++
+ quoted = !quoted
+ continue
+ }
+ }
+
+ if buf[i] == '\n' && !quoted {
+ break
+ }
+
+ i++
+ }
+
+ return i, buf[start:i]
+}
+
+// scanTo returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with stop byte, where stop byte
+// has not been escaped.
+//
+// If there are leading spaces, they are skipped.
+func scanTo(buf []byte, i int, stop byte) (int, []byte) {
+ start := i
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // Reached unescaped stop value?
+ if buf[i] == stop && (i == 0 || buf[i-1] != '\\') {
+ break
+ }
+ i++
+ }
+
+ return i, buf[start:i]
+}
+
+// scanToSpaceOr returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with the stop byte or a space. If there
+// are leading spaces, they are skipped.
+func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
+ start := i
+ if buf[i] == stop || buf[i] == ' ' {
+ return i, buf[start:i]
+ }
+
+ for {
+ i++
+ if buf[i-1] == '\\' {
+ continue
+ }
+
+ // reached the end of buf?
+ if i >= len(buf) {
+ return i, buf[start:i]
+ }
+
+ // reached end of block?
+ if buf[i] == stop || buf[i] == ' ' {
+ return i, buf[start:i]
+ }
+ }
+}
+
+func scanTagValue(buf []byte, i int) (int, []byte) {
+ start := i
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ',' && buf[i-1] != '\\' {
+ break
+ }
+ i++
+ }
+ if i > len(buf) {
+ return i, nil
+ }
+ return i, buf[start:i]
+}
+
+func scanFieldValue(buf []byte, i int) (int, []byte) {
+ start := i
+ quoted := false
+ for i < len(buf) {
+ // The only escape characters for a field value are double-quote and backslash
+ if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') {
+ i += 2
+ continue
+ }
+
+ // Quoted value? (e.g. string)
+ if buf[i] == '"' {
+ i++
+ quoted = !quoted
+ continue
+ }
+
+ if buf[i] == ',' && !quoted {
+ break
+ }
+ i++
+ }
+ return i, buf[start:i]
+}
+
+func escapeMeasurement(in []byte) []byte {
+ for b, esc := range measurementEscapeCodes {
+ in = bytes.Replace(in, []byte{b}, esc, -1)
+ }
+ return in
+}
+
+func unescapeMeasurement(in []byte) []byte {
+ for b, esc := range measurementEscapeCodes {
+ in = bytes.Replace(in, esc, []byte{b}, -1)
+ }
+ return in
+}
+
+func escapeTag(in []byte) []byte {
+ for b, esc := range tagEscapeCodes {
+ if bytes.IndexByte(in, b) != -1 {
+ in = bytes.Replace(in, []byte{b}, esc, -1)
+ }
+ }
+ return in
+}
+
+func unescapeTag(in []byte) []byte {
+ if bytes.IndexByte(in, '\\') == -1 {
+ return in
+ }
+
+ for b, esc := range tagEscapeCodes {
+ if bytes.IndexByte(in, b) != -1 {
+ in = bytes.Replace(in, esc, []byte{b}, -1)
+ }
+ }
+ return in
+}
+
+// escapeStringFieldReplacer replaces double quotes and backslashes
+// with the same character preceded by a backslash.
+// As of Go 1.7 this benchmarked better in allocations and CPU time
+// compared to iterating through a string byte-by-byte and appending to a new byte slice,
+// calling strings.Replace twice, and better than (*Regex).ReplaceAllString.
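+//
+// For example (editor's illustration of the escaping behavior):
+//
+//	EscapeStringField(`say "hi"`) // returns `say \"hi\"`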
+var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
+
+// EscapeStringField returns a copy of in with any double quotes or
+// backslashes escaped.
+func EscapeStringField(in string) string {
+ return escapeStringFieldReplacer.Replace(in)
+}
+
+// unescapeStringField returns a copy of in with any escaped double-quotes
+// or backslashes unescaped.
+func unescapeStringField(in string) string {
+ if strings.IndexByte(in, '\\') == -1 {
+ return in
+ }
+
+ var out []byte
+ i := 0
+ for {
+ if i >= len(in) {
+ break
+ }
+ // unescape backslashes
+ if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
+ out = append(out, '\\')
+ i += 2
+ continue
+ }
+ // unescape double-quotes
+ if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
+ out = append(out, '"')
+ i += 2
+ continue
+ }
+ out = append(out, in[i])
+ i++
+
+ }
+ return string(out)
+}
+
+// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
+// an unsupported field value (NaN) or out of range time is passed, this function returns an error.
+func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) {
+ key, err := pointKey(name, tags, fields, t)
+ if err != nil {
+ return nil, err
+ }
+
+ return &point{
+ key: key,
+ time: t,
+ fields: fields.MarshalBinary(),
+ }, nil
+}
+
+// pointKey checks some basic requirements for valid points, and returns the
+// key, along with a possible error.
+func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) {
+ if len(fields) == 0 {
+ return nil, ErrPointMustHaveAField
+ }
+
+ if !t.IsZero() {
+ if err := CheckTime(t); err != nil {
+ return nil, err
+ }
+ }
+
+ for key, value := range fields {
+ switch value := value.(type) {
+ case float64:
+ // Ensure the caller validates and handles invalid field values
+ if math.IsNaN(value) {
+ return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+ }
+ case float32:
+ // Ensure the caller validates and handles invalid field values
+ if math.IsNaN(float64(value)) {
+ return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+ }
+ }
+ if len(key) == 0 {
+ return nil, fmt.Errorf("all fields must have non-empty names")
+ }
+ }
+
+ key := MakeKey([]byte(measurement), tags)
+ if len(key) > MaxKeyLength {
+ return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
+ }
+
+ return key, nil
+}
+
+// NewPointFromBytes returns a new Point from a marshalled Point.
+func NewPointFromBytes(b []byte) (Point, error) {
+ p := &point{}
+ if err := p.UnmarshalBinary(b); err != nil {
+ return nil, err
+ }
+ fields, err := p.Fields()
+ if err != nil {
+ return nil, err
+ }
+ if len(fields) == 0 {
+ return nil, ErrPointMustHaveAField
+ }
+ return p, nil
+}
+
+// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
+// an unsupported field value (NaN) is passed, this function panics.
+func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
+ pt, err := NewPoint(name, tags, fields, time)
+ if err != nil {
+ panic(err.Error())
+ }
+ return pt
+}
+
+// Key returns the key (measurement joined with tags) of the point.
+func (p *point) Key() []byte {
+ return p.key
+}
+
+func (p *point) name() []byte {
+ _, name := scanTo(p.key, 0, ',')
+ return name
+}
+
+// Name returns the measurement name for the point.
+func (p *point) Name() string {
+ if p.cachedName != "" {
+ return p.cachedName
+ }
+ p.cachedName = string(escape.Unescape(p.name()))
+ return p.cachedName
+}
+
+// SetName updates the measurement name for the point.
+func (p *point) SetName(name string) {
+ p.cachedName = ""
+ p.key = MakeKey([]byte(name), p.Tags())
+}
+
+// Time returns the timestamp for the point.
+func (p *point) Time() time.Time {
+ return p.time
+}
+
+// SetTime updates the timestamp for the point.
+func (p *point) SetTime(t time.Time) {
+ p.time = t
+}
+
+// Round will round the timestamp of the point to the given duration.
+func (p *point) Round(d time.Duration) {
+ p.time = p.time.Round(d)
+}
+
+// Tags returns the tag set for the point.
+func (p *point) Tags() Tags {
+ if p.cachedTags != nil {
+ return p.cachedTags
+ }
+ p.cachedTags = parseTags(p.key)
+ return p.cachedTags
+}
+
+func parseTags(buf []byte) Tags {
+ if len(buf) == 0 {
+ return nil
+ }
+
+ pos, name := scanTo(buf, 0, ',')
+
+ // it's an empty key, so there are no tags
+ if len(name) == 0 {
+ return nil
+ }
+
+ tags := make(Tags, 0, bytes.Count(buf, []byte(",")))
+ hasEscape := bytes.IndexByte(buf, '\\') != -1
+
+ i := pos + 1
+ var key, value []byte
+ for {
+ if i >= len(buf) {
+ break
+ }
+ i, key = scanTo(buf, i, '=')
+ i, value = scanTagValue(buf, i+1)
+
+ if len(value) == 0 {
+ continue
+ }
+
+ if hasEscape {
+ tags = append(tags, Tag{Key: unescapeTag(key), Value: unescapeTag(value)})
+ } else {
+ tags = append(tags, Tag{Key: key, Value: value})
+ }
+
+ i++
+ }
+
+ return tags
+}
+
+// MakeKey creates a key for a set of tags.
+func MakeKey(name []byte, tags Tags) []byte {
+ // unescape the name and then re-escape it to avoid double escaping.
+ // The key should always be stored in escaped form.
+ return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)
+}
+
+// SetTags replaces the tags for the point.
+func (p *point) SetTags(tags Tags) {
+ p.key = MakeKey([]byte(p.Name()), tags)
+ p.cachedTags = tags
+}
+
+// AddTag adds or replaces a tag value for a point.
+func (p *point) AddTag(key, value string) {
+ tags := p.Tags()
+ tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)})
+ sort.Sort(tags)
+ p.cachedTags = tags
+ p.key = MakeKey([]byte(p.Name()), tags)
+}
+
+// Fields returns the fields for the point.
+func (p *point) Fields() (Fields, error) {
+ if p.cachedFields != nil {
+ return p.cachedFields, nil
+ }
+ cf, err := p.unmarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+ p.cachedFields = cf
+ return p.cachedFields, nil
+}
+
+// SetPrecision will truncate the time of the point to the specified precision.
+func (p *point) SetPrecision(precision string) {
+ switch precision {
+ case "n":
+ case "u":
+ p.SetTime(p.Time().Truncate(time.Microsecond))
+ case "ms":
+ p.SetTime(p.Time().Truncate(time.Millisecond))
+ case "s":
+ p.SetTime(p.Time().Truncate(time.Second))
+ case "m":
+ p.SetTime(p.Time().Truncate(time.Minute))
+ case "h":
+ p.SetTime(p.Time().Truncate(time.Hour))
+ }
+}
+
+// String returns the string representation of the point.
+func (p *point) String() string {
+ if p.Time().IsZero() {
+ return string(p.Key()) + " " + string(p.fields)
+ }
+ return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10)
+}
+
+// AppendString appends the string representation of the point to buf.
+func (p *point) AppendString(buf []byte) []byte {
+ buf = append(buf, p.key...)
+ buf = append(buf, ' ')
+ buf = append(buf, p.fields...)
+ + if !p.time.IsZero() { + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, p.UnixNano(), 10) + } + + return buf +} + +// StringSize returns the length of the string that would be returned by String(). +func (p *point) StringSize() int { + size := len(p.key) + len(p.fields) + 1 + + if !p.time.IsZero() { + digits := 1 // even "0" has one digit + t := p.UnixNano() + if t < 0 { + // account for negative sign, then negate + digits++ + t = -t + } + for t > 9 { // already accounted for one digit + digits++ + t /= 10 + } + size += digits + 1 // digits and a space + } + + return size +} + +// MarshalBinary returns a binary representation of the point. +func (p *point) MarshalBinary() ([]byte, error) { + if len(p.fields) == 0 { + return nil, ErrPointMustHaveAField + } + + tb, err := p.time.MarshalBinary() + if err != nil { + return nil, err + } + + b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb)) + i := 0 + + binary.BigEndian.PutUint32(b[i:], uint32(len(p.key))) + i += 4 + + i += copy(b[i:], p.key) + + binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields))) + i += 4 + + i += copy(b[i:], p.fields) + + copy(b[i:], tb) + return b, nil +} + +// UnmarshalBinary decodes a binary representation of the point into a point struct. +func (p *point) UnmarshalBinary(b []byte) error { + var n int + + // Read key length. + if len(b) < 4 { + return io.ErrShortBuffer + } + n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] + + // Read key. + if len(b) < n { + return io.ErrShortBuffer + } + p.key, b = b[:n], b[n:] + + // Read fields length. + if len(b) < 4 { + return io.ErrShortBuffer + } + n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] + + // Read fields. + if len(b) < n { + return io.ErrShortBuffer + } + p.fields, b = b[:n], b[n:] + + // Read timestamp. + if err := p.time.UnmarshalBinary(b); err != nil { + return err + } + return nil +} + +// PrecisionString returns a string representation of the point. If there +// is a timestamp associated with the point then it will be specified in the +// given unit. +func (p *point) PrecisionString(precision string) string { + if p.Time().IsZero() { + return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) + } + return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), + p.UnixNano()/GetPrecisionMultiplier(precision)) +} + +// RoundedString returns a string representation of the point. If there +// is a timestamp associated with the point, then it will be rounded to the +// given duration. 
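+//
+// For example (editor's illustration): with d = time.Second, a point at
+// 1480350305123456789 ns renders its timestamp as 1480350305000000000.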
+func (p *point) RoundedString(d time.Duration) string { + if p.Time().IsZero() { + return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) + } + return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), + p.time.Round(d).UnixNano()) +} + +func (p *point) unmarshalBinary() (Fields, error) { + iter := p.FieldIterator() + fields := make(Fields, 8) + for iter.Next() { + if len(iter.FieldKey()) == 0 { + continue + } + switch iter.Type() { + case Float: + v, err := iter.FloatValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case Integer: + v, err := iter.IntegerValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case String: + fields[string(iter.FieldKey())] = iter.StringValue() + case Boolean: + v, err := iter.BooleanValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + } + } + return fields, nil +} + +// HashID returns a non-cryptographic checksum of the point's key. +func (p *point) HashID() uint64 { + h := NewInlineFNV64a() + h.Write(p.key) + sum := h.Sum64() + return sum +} + +// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. +func (p *point) UnixNano() int64 { + return p.Time().UnixNano() +} + +// Split will attempt to return multiple points with the same timestamp whose +// string representations are no longer than size. Points with a single field or +// a point without a timestamp may exceed the requested size. +func (p *point) Split(size int) []Point { + if p.time.IsZero() || len(p.String()) <= size { + return []Point{p} + } + + // key string, timestamp string, spaces + size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2 + + var points []Point + var start, cur int + + for cur < len(p.fields) { + end, _ := scanTo(p.fields, cur, '=') + end, _ = scanFieldValue(p.fields, end+1) + + if cur > start && end-start > size { + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start : cur-1], + }) + start = cur + } + + cur = end + 1 + } + + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start:], + }) + + return points +} + +// Tag represents a single key/value tag pair. +type Tag struct { + Key []byte + Value []byte +} + +// Clone returns a shallow copy of Tag. +// +// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. +// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision. +func (t Tag) Clone() Tag { + other := Tag{ + Key: make([]byte, len(t.Key)), + Value: make([]byte, len(t.Value)), + } + + copy(other.Key, t.Key) + copy(other.Value, t.Value) + + return other +} + +// Tags represents a sorted list of tags. +type Tags []Tag + +// NewTags returns a new Tags from a map. +func NewTags(m map[string]string) Tags { + if len(m) == 0 { + return nil + } + a := make(Tags, 0, len(m)) + for k, v := range m { + a = append(a, Tag{Key: []byte(k), Value: []byte(v)}) + } + sort.Sort(a) + return a +} + +// Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements +// +// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. 
+// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision.
+func (a Tags) Clone() Tags {
+ if len(a) == 0 {
+ return nil
+ }
+
+ others := make(Tags, len(a))
+ for i := range a {
+ others[i] = a[i].Clone()
+ }
+
+ return others
+}
+
+// Len implements sort.Interface.
+func (a Tags) Len() int { return len(a) }
+
+// Less implements sort.Interface.
+func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 }
+
+// Swap implements sort.Interface.
+func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// Get returns the value for a key.
+func (a Tags) Get(key []byte) []byte {
+ // OPTIMIZE: Use sort.Search if tagset is large.
+
+ for _, t := range a {
+ if bytes.Equal(t.Key, key) {
+ return t.Value
+ }
+ }
+ return nil
+}
+
+// GetString returns the string value for a string key.
+func (a Tags) GetString(key string) string {
+ return string(a.Get([]byte(key)))
+}
+
+// Set sets the value for a key.
+func (a *Tags) Set(key, value []byte) {
+ for _, t := range *a {
+ if bytes.Equal(t.Key, key) {
+ t.Value = value
+ return
+ }
+ }
+ *a = append(*a, Tag{Key: key, Value: value})
+ sort.Sort(*a)
+}
+
+// SetString sets the string value for a string key.
+func (a *Tags) SetString(key, value string) {
+ a.Set([]byte(key), []byte(value))
+}
+
+// Delete removes a tag by key.
+func (a *Tags) Delete(key []byte) {
+ for i, t := range *a {
+ if bytes.Equal(t.Key, key) {
+ copy((*a)[i:], (*a)[i+1:])
+ (*a)[len(*a)-1] = Tag{}
+ *a = (*a)[:len(*a)-1]
+ return
+ }
+ }
+}
+
+// Map returns a map representation of the tags.
+func (a Tags) Map() map[string]string {
+ m := make(map[string]string, len(a))
+ for _, t := range a {
+ m[string(t.Key)] = string(t.Value)
+ }
+ return m
+}
+
+// Merge merges the tags, combining the two sets. If both define a tag with the
+// same key, the merged value overwrites the old value.
+// A new map is returned.
+func (a Tags) Merge(other map[string]string) Tags {
+ merged := make(map[string]string, len(a)+len(other))
+ for _, t := range a {
+ merged[string(t.Key)] = string(t.Value)
+ }
+ for k, v := range other {
+ merged[k] = v
+ }
+ return NewTags(merged)
+}
+
+// HashKey hashes all of a tag's keys.
+func (a Tags) HashKey() []byte {
+ // Empty maps marshal to empty bytes.
+ if len(a) == 0 {
+ return nil
+ }
+
+ escaped := make(Tags, 0, len(a))
+ for _, t := range a {
+ ek := escapeTag(t.Key)
+ ev := escapeTag(t.Value)
+
+ if len(ev) > 0 {
+ escaped = append(escaped, Tag{Key: ek, Value: ev})
+ }
+ }
+
+ // Extract keys and determine final size.
+ sz := len(escaped) + (len(escaped) * 2) // separators
+ keys := make([][]byte, len(escaped)+1)
+ for i, t := range escaped {
+ keys[i] = t.Key
+ sz += len(t.Key) + len(t.Value)
+ }
+ keys = keys[:len(escaped)]
+ sort.Sort(byteSlices(keys))
+
+ // Generate marshaled bytes.
+ b := make([]byte, sz)
+ buf := b
+ idx := 0
+ for i, k := range keys {
+ buf[idx] = ','
+ idx++
+ copy(buf[idx:idx+len(k)], k)
+ idx += len(k)
+ buf[idx] = '='
+ idx++
+ v := escaped[i].Value
+ copy(buf[idx:idx+len(v)], v)
+ idx += len(v)
+ }
+ return b[:idx]
+}
+
+// Fields represents a mapping between a Point's field names and their
+// values.
+type Fields map[string]interface{}
+
+// FieldIterator returns a FieldIterator that can be used to traverse the
+// fields of a point without constructing the in-memory map.
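+//
+// Typical usage (editor's sketch, not part of the upstream source):
+//
+//	it := pt.FieldIterator()
+//	for it.Next() {
+//		switch it.Type() {
+//		case Float:
+//			v, err := it.FloatValue()
+//			// use v, handle err
+//		case Integer:
+//			v, err := it.IntegerValue()
+//			// use v, handle err
+//		}
+//	}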
+func (p *point) FieldIterator() FieldIterator {
+ p.Reset()
+ return p
+}
+
+type fieldIterator struct {
+ start, end int
+ key, keybuf []byte
+ valueBuf []byte
+ fieldType FieldType
+}
+
+// Next indicates whether there are any fields remaining.
+func (p *point) Next() bool {
+ p.it.start = p.it.end
+ if p.it.start >= len(p.fields) {
+ return false
+ }
+
+ p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=')
+ if escape.IsEscaped(p.it.key) {
+ p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key)
+ p.it.key = p.it.keybuf
+ }
+
+ p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1)
+ p.it.end++
+
+ if len(p.it.valueBuf) == 0 {
+ p.it.fieldType = Empty
+ return true
+ }
+
+ c := p.it.valueBuf[0]
+
+ if c == '"' {
+ p.it.fieldType = String
+ return true
+ }
+
+ if strings.IndexByte(`0123456789-.nNiI`, c) >= 0 {
+ if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' {
+ p.it.fieldType = Integer
+ p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
+ } else {
+ p.it.fieldType = Float
+ }
+ return true
+ }
+
+ // to keep the same behavior that currently exists, default to boolean
+ p.it.fieldType = Boolean
+ return true
+}
+
+// FieldKey returns the key of the current field.
+func (p *point) FieldKey() []byte {
+ return p.it.key
+}
+
+// Type returns the FieldType of the current field.
+func (p *point) Type() FieldType {
+ return p.it.fieldType
+}
+
+// StringValue returns the string value of the current field.
+func (p *point) StringValue() string {
+ return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1]))
+}
+
+// IntegerValue returns the integer value of the current field.
+func (p *point) IntegerValue() (int64, error) {
+ n, err := parseIntBytes(p.it.valueBuf, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err)
+ }
+ return n, nil
+}
+
+// BooleanValue returns the boolean value of the current field.
+func (p *point) BooleanValue() (bool, error) {
+ b, err := parseBoolBytes(p.it.valueBuf)
+ if err != nil {
+ return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err)
+ }
+ return b, nil
+}
+
+// FloatValue returns the float value of the current field.
+func (p *point) FloatValue() (float64, error) {
+ f, err := parseFloatBytes(p.it.valueBuf, 64)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err)
+ }
+ return f, nil
+}
+
+// Delete deletes the current field.
+func (p *point) Delete() {
+ switch {
+ case p.it.end == p.it.start:
+ case p.it.end >= len(p.fields):
+ p.fields = p.fields[:p.it.start]
+ case p.it.start == 0:
+ p.fields = p.fields[p.it.end:]
+ default:
+ p.fields = append(p.fields[:p.it.start], p.fields[p.it.end:]...)
+ }
+
+ p.it.end = p.it.start
+ p.it.key = nil
+ p.it.valueBuf = nil
+ p.it.fieldType = Empty
+}
+
+// Reset resets the iterator to its initial state.
+func (p *point) Reset() {
+ p.it.fieldType = Empty
+ p.it.key = nil
+ p.it.valueBuf = nil
+ p.it.start = 0
+ p.it.end = 0
+}
+
+// MarshalBinary encodes all the fields to their proper type and returns the binary
+// representation.
+// NOTE: uint64 is specifically not supported due to potential overflow when we decode
+// again later to an int64.
+// NOTE2: uint is accepted even though it may be 64 bits and carry the same risk; see
+// the TODO in appendField.
+func (p Fields) MarshalBinary() []byte {
+ var b []byte
+ keys := make([]string, 0, len(p))
+
+ for k := range p {
+ keys = append(keys, k)
+ }
+
+ // Not really necessary, can probably be removed.
+ sort.Strings(keys)
+
+ for i, k := range keys {
+ if i > 0 {
+ b = append(b, ',')
+ }
+ b = appendField(b, k, p[k])
+ }
+
+ return b
+}
+
+func appendField(b []byte, k string, v interface{}) []byte {
+ b = append(b, []byte(escape.String(k))...)
+ b = append(b, '=')
+
+ // check popular types first
+ switch v := v.(type) {
+ case float64:
+ b = strconv.AppendFloat(b, v, 'f', -1, 64)
+ case int64:
+ b = strconv.AppendInt(b, v, 10)
+ b = append(b, 'i')
+ case string:
+ b = append(b, '"')
+ b = append(b, []byte(EscapeStringField(v))...)
+ b = append(b, '"')
+ case bool:
+ b = strconv.AppendBool(b, v)
+ case int32:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case int16:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case int8:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case int:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case uint32:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case uint16:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case uint8:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ // TODO: 'uint' should be considered just as "dangerous" as a uint64,
+ // perhaps the value should be checked and capped at MaxInt64? We could
+ // then include uint64 as an accepted value
+ case uint:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case float32:
+ b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
+ case []byte:
+ b = append(b, v...)
+ case nil:
+ // skip
+ default:
+ // Can't determine the type, so convert to string
+ b = append(b, '"')
+ b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...)
+ b = append(b, '"')
+
+ }
+
+ return b
+}
+
+type byteSlices [][]byte
+
+func (a byteSlices) Len() int { return len(a) }
+func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }
+func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/vendor/github.com/influxdata/influxdb/models/rows.go b/vendor/github.com/influxdata/influxdb/models/rows.go
new file mode 100644
index 0000000000..c087a4882d
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/rows.go
@@ -0,0 +1,62 @@
+package models
+
+import (
+ "sort"
+)
+
+// Row represents a single row returned from the execution of a statement.
+type Row struct {
+ Name string `json:"name,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
+ Columns []string `json:"columns,omitempty"`
+ Values [][]interface{} `json:"values,omitempty"`
+ Partial bool `json:"partial,omitempty"`
+}
+
+// SameSeries returns true if r contains values for the same series as o.
+func (r *Row) SameSeries(o *Row) bool {
+ return r.tagsHash() == o.tagsHash() && r.Name == o.Name
+}
+
+// tagsHash returns a hash of tag key/value pairs.
+func (r *Row) tagsHash() uint64 {
+ h := NewInlineFNV64a()
+ keys := r.tagsKeys()
+ for _, k := range keys {
+ h.Write([]byte(k))
+ h.Write([]byte(r.Tags[k]))
+ }
+ return h.Sum64()
+}
+
+// tagsKeys returns a sorted list of tag keys.
+func (r *Row) tagsKeys() []string {
+ a := make([]string, 0, len(r.Tags))
+ for k := range r.Tags {
+ a = append(a, k)
+ }
+ sort.Strings(a)
+ return a
+}
+
+// Rows represents a collection of rows. Rows implements sort.Interface.
+type Rows []*Row
+
+// Len implements sort.Interface.
+func (p Rows) Len() int { return len(p) }
+
+// Less implements sort.Interface.
+func (p Rows) Less(i, j int) bool {
+ // Sort by name first.
+ if p[i].Name != p[j].Name {
+ return p[i].Name < p[j].Name
+ }
+
+ // Sort by tag set hash. Tags don't have a meaningful sort order so we
+ // just compute a hash and sort by that instead. This allows the tests
+ // to receive rows in a predictable order every time.
+ return p[i].tagsHash() < p[j].tagsHash()
+}
+
+// Swap implements sort.Interface.
+func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/influxdata/influxdb/models/statistic.go b/vendor/github.com/influxdata/influxdb/models/statistic.go
new file mode 100644
index 0000000000..553e9d09fb
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/statistic.go
@@ -0,0 +1,42 @@
+package models
+
+// Statistic is the representation of a statistic used by the monitoring service.
+type Statistic struct {
+ Name string `json:"name"`
+ Tags map[string]string `json:"tags"`
+ Values map[string]interface{} `json:"values"`
+}
+
+// NewStatistic returns an initialized Statistic.
+func NewStatistic(name string) Statistic {
+ return Statistic{
+ Name: name,
+ Tags: make(map[string]string),
+ Values: make(map[string]interface{}),
+ }
+}
+
+// StatisticTags is a map that can be merged with others without causing
+// mutations to either map.
+type StatisticTags map[string]string
+
+// Merge creates a new map containing the merged contents of tags and t.
+// If both tags and the receiver map contain the same key, the value in tags
+// is used in the resulting map.
+//
+// Merge always returns a usable map.
+func (t StatisticTags) Merge(tags map[string]string) map[string]string {
+ // Add everything in tags to the result.
+ out := make(map[string]string, len(tags))
+ for k, v := range tags {
+ out[k] = v
+ }
+
+ // Only add values from t that don't appear in tags.
+ for k, v := range t {
+ if _, ok := tags[k]; !ok {
+ out[k] = v
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/influxdata/influxdb/models/time.go b/vendor/github.com/influxdata/influxdb/models/time.go
new file mode 100644
index 0000000000..e98f2cb336
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/time.go
@@ -0,0 +1,74 @@
+package models
+
+// Helper time methods since parsing time can easily overflow and we only support a
+// specific time range.
+
+import (
+ "fmt"
+ "math"
+ "time"
+)
+
+const (
+ // MinNanoTime is the minimum time that can be represented.
+ //
+ // 1677-09-21 00:12:43.145224194 +0000 UTC
+ //
+ // The two lowest minimum integers are used as sentinel values. The
+ // minimum value needs to be used as a value lower than any other value for
+ // comparisons and another separate value is needed to act as a sentinel
+ // default value that is unusable by the user, but usable internally.
+ // Because these two values need to be used for a special purpose, we do
+ // not allow users to write points at these two times.
+ MinNanoTime = int64(math.MinInt64) + 2
+
+ // MaxNanoTime is the maximum time that can be represented.
+ //
+ // 2262-04-11 23:47:16.854775806 +0000 UTC
+ //
+ // The highest time represented by a nanosecond needs to be used for an
+ // exclusive range in the shard group, so the maximum time needs to be one
+ // less than the possible maximum number of nanoseconds representable by an
+ // int64 so that we don't lose a point at that one time.
+ MaxNanoTime = int64(math.MaxInt64) - 1 +) + +var ( + minNanoTime = time.Unix(0, MinNanoTime).UTC() + maxNanoTime = time.Unix(0, MaxNanoTime).UTC() + + // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch. + ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime) +) + +// SafeCalcTime safely calculates the time given. Will return error if the time is outside the +// supported range. +func SafeCalcTime(timestamp int64, precision string) (time.Time, error) { + mult := GetPrecisionMultiplier(precision) + if t, ok := safeSignedMult(timestamp, mult); ok { + tme := time.Unix(0, t).UTC() + return tme, CheckTime(tme) + } + + return time.Time{}, ErrTimeOutOfRange +} + +// CheckTime checks that a time is within the safe range. +func CheckTime(t time.Time) error { + if t.Before(minNanoTime) || t.After(maxNanoTime) { + return ErrTimeOutOfRange + } + return nil +} + +// Perform the multiplication and check to make sure it didn't overflow. +func safeSignedMult(a, b int64) (int64, bool) { + if a == 0 || b == 0 || a == 1 || b == 1 { + return a * b, true + } + if a == MinNanoTime || b == MaxNanoTime { + return 0, false + } + c := a * b + return c, c/b == a +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go new file mode 100644 index 0000000000..ac7ed5ab38 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go @@ -0,0 +1,111 @@ +// Package escape contains utilities for escaping parts of InfluxQL +// and InfluxDB line protocol. +package escape // import "github.com/influxdata/influxdb/pkg/escape" + +import ( + "bytes" + "strings" +) + +// Codes is a map of bytes to be escaped. +var Codes = map[byte][]byte{ + ',': []byte(`\,`), + '"': []byte(`\"`), + ' ': []byte(`\ `), + '=': []byte(`\=`), +} + +// Bytes escapes characters on the input slice, as defined by Codes. +func Bytes(in []byte) []byte { + for b, esc := range Codes { + in = bytes.Replace(in, []byte{b}, esc, -1) + } + return in +} + +const escapeChars = `," =` + +// IsEscaped returns whether b has any escaped characters, +// i.e. whether b seems to have been processed by Bytes. +func IsEscaped(b []byte) bool { + for len(b) > 0 { + i := bytes.IndexByte(b, '\\') + if i < 0 { + return false + } + + if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 { + return true + } + b = b[i+1:] + } + return false +} + +// AppendUnescaped appends the unescaped version of src to dst +// and returns the resulting slice. +func AppendUnescaped(dst, src []byte) []byte { + var pos int + for len(src) > 0 { + next := bytes.IndexByte(src[pos:], '\\') + if next < 0 || pos+next+1 >= len(src) { + return append(dst, src...) + } + + if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 { + if pos+next > 0 { + dst = append(dst, src[:pos+next]...) + } + src = src[pos+next+1:] + pos = 0 + } else { + pos += next + 1 + } + } + + return dst +} + +// Unescape returns a new slice containing the unescaped version of in. 
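+//
+// For example (editor's illustration):
+//
+//	Unescape([]byte(`cpu\ load\=high`)) // returns []byte("cpu load=high")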
+func Unescape(in []byte) []byte { + if len(in) == 0 { + return nil + } + + if bytes.IndexByte(in, '\\') == -1 { + return in + } + + i := 0 + inLen := len(in) + var out []byte + + for { + if i >= inLen { + break + } + if in[i] == '\\' && i+1 < inLen { + switch in[i+1] { + case ',': + out = append(out, ',') + i += 2 + continue + case '"': + out = append(out, '"') + i += 2 + continue + case ' ': + out = append(out, ' ') + i += 2 + continue + case '=': + out = append(out, '=') + i += 2 + continue + } + } + out = append(out, in[i]) + i += 1 + } + return out +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go new file mode 100644 index 0000000000..db98033b0d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go @@ -0,0 +1,21 @@ +package escape + +import "strings" + +var ( + escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`) + unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`) +) + +// UnescapeString returns unescaped version of in. +func UnescapeString(in string) string { + if strings.IndexByte(in, '\\') == -1 { + return in + } + return unescaper.Replace(in) +} + +// String returns the escaped version of in. +func String(in string) string { + return escaper.Replace(in) +} diff --git a/vendor/github.com/influxdb/influxdb/client/influxdb.go b/vendor/github.com/influxdb/influxdb/client/influxdb.go deleted file mode 100644 index 235beb9648..0000000000 --- a/vendor/github.com/influxdb/influxdb/client/influxdb.go +++ /dev/null @@ -1,180 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/influxdb/influxdb/tsdb" -) - -const ( - // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance - DefaultTimeout = 0 -) - -// Config is used to specify what server to connect to. -// URL: The URL of the server connecting to. -// Username/Password are optional. They will be passed via basic auth if provided. -// UserAgent: If not provided, will default "InfluxDBClient", -// Timeout: If not provided, will default to 0 (no timeout) -type Config struct { - URL url.URL - Username string - Password string - UserAgent string - Timeout time.Duration - Precision string -} - -// NewConfig will create a config to be used in connecting to the client -func NewConfig() Config { - return Config{ - Timeout: DefaultTimeout, - } -} - -// Client is used to make calls to the server. -type Client struct { - url url.URL - username string - password string - httpClient *http.Client - userAgent string - precision string -} - -const ( - ConsistencyOne = "one" - ConsistencyAll = "all" - ConsistencyQuorum = "quorum" - ConsistencyAny = "any" -) - -// NewClient will instantiate and return a connected client to issue commands to the server. -func NewClient(c Config) (*Client, error) { - client := Client{ - url: c.URL, - username: c.Username, - password: c.Password, - httpClient: &http.Client{Timeout: c.Timeout}, - userAgent: c.UserAgent, - precision: c.Precision, - } - if client.userAgent == "" { - client.userAgent = "InfluxDBClient" - } - return &client, nil -} - -// Write takes BatchPoints and allows for writing of multiple points with defaults -// If successful, error is nil and Response is nil -// If an error occurs, Response may contain additional information if populated. 
-func (c *Client) Write(bp BatchPoints) (*Response, error) { - u := c.url - u.Path = "write" - - var b bytes.Buffer - for _, p := range bp.Points { - if p.Raw != "" { - if _, err := b.WriteString(p.Raw); err != nil { - return nil, err - } - } else { - for k, v := range bp.Tags { - if p.Tags == nil { - p.Tags = make(map[string]string, len(bp.Tags)) - } - p.Tags[k] = v - } - - if _, err := b.WriteString(p.MarshalString()); err != nil { - return nil, err - } - } - - if err := b.WriteByte('\n'); err != nil { - return nil, err - } - } - - req, err := http.NewRequest("POST", u.String(), &b) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "") - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - params := req.URL.Query() - params.Set("db", bp.Database) - params.Set("rp", bp.RetentionPolicy) - params.Set("precision", bp.Precision) - params.Set("consistency", bp.WriteConsistency) - req.URL.RawQuery = params.Encode() - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { - var err = fmt.Errorf(string(body)) - response.Err = err - return &response, err - } - - return nil, nil -} - -// Structs - -// Response represents a list of statement results. -type Response struct { - Err error -} - -// Point defines the fields that will be written to the database -// Measurement, Time, and Fields are required -// Precision can be specified if the time is in epoch format (integer). -// Valid values for Precision are n, u, ms, s, m, and h -type Point struct { - Measurement string - Tags map[string]string - Time time.Time - Fields map[string]interface{} - Precision string - Raw string -} - -func (p *Point) MarshalString() string { - return tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time).String() -} - -// BatchPoints is used to send batched data in a single write. -// Database and Points are required -// If no retention policy is specified, it will use the databases default retention policy. -// If tags are specified, they will be "merged" with all points. If a point already has that tag, it is ignored. -// If time is specified, it will be applied to any point with an empty time. -// Precision can be specified if the time is in epoch format (integer). 
-// Valid values for Precision are n, u, ms, s, m, and h -type BatchPoints struct { - Points []Point `json:"points,omitempty"` - Database string `json:"database,omitempty"` - RetentionPolicy string `json:"retentionPolicy,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - Time time.Time `json:"time,omitempty"` - Precision string `json:"precision,omitempty"` - WriteConsistency string `json:"-"` -} diff --git a/vendor/github.com/influxdb/influxdb/tsdb/points.go b/vendor/github.com/influxdb/influxdb/tsdb/points.go deleted file mode 100644 index dd8dbb6449..0000000000 --- a/vendor/github.com/influxdb/influxdb/tsdb/points.go +++ /dev/null @@ -1,1392 +0,0 @@ -package tsdb - -import ( - "bytes" - "fmt" - "hash/fnv" - "regexp" - "sort" - "strconv" - "strings" - "time" -) - -// Point defines the values that will be written to the database -type Point interface { - Name() string - SetName(string) - - Tags() Tags - AddTag(key, value string) - SetTags(tags Tags) - - Fields() Fields - AddField(name string, value interface{}) - - Time() time.Time - SetTime(t time.Time) - UnixNano() int64 - - HashID() uint64 - Key() []byte - - Data() []byte - SetData(buf []byte) - - String() string -} - -// Points represents a sortable list of points by timestamp. -type Points []Point - -func (a Points) Len() int { return len(a) } -func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) } -func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// point is the default implementation of Point. -type point struct { - time time.Time - - // text encoding of measurement and tags - // key must always be stored sorted by tags, if the original line was not sorted, - // we need to resort it - key []byte - - // text encoding of field data - fields []byte - - // text encoding of timestamp - ts []byte - - // binary encoded field data - data []byte - - // cached version of parsed fields from data - cachedFields map[string]interface{} - - // cached version of parsed name from key - cachedName string -} - -const ( - // the number of characters for the largest possible int64 (9223372036854775807) - maxInt64Digits = 19 - - // the number of characters for the smallest possible int64 (-9223372036854775808) - minInt64Digits = 20 - - // the number of characters required for the largest float64 before a range check - // would occur during parsing - maxFloat64Digits = 25 - - // the number of characters required for smallest float64 before a range check occur - // would occur during parsing - minFloat64Digits = 27 -) - -var ( - // Compile the regex that detects unquoted double quote sequences - quoteReplacer = regexp.MustCompile(`([^\\])"`) - - escapeCodes = map[byte][]byte{ - ',': []byte(`\,`), - '"': []byte(`\"`), - ' ': []byte(`\ `), - '=': []byte(`\=`), - } - - escapeCodesStr = map[string]string{} - - measurementEscapeCodes = map[byte][]byte{ - ',': []byte(`\,`), - ' ': []byte(`\ `), - } - - tagEscapeCodes = map[byte][]byte{ - ',': []byte(`\,`), - ' ': []byte(`\ `), - '=': []byte(`\=`), - } -) - -func init() { - for k, v := range escapeCodes { - escapeCodesStr[string(k)] = string(v) - } -} - -func ParsePointsString(buf string) ([]Point, error) { - return ParsePoints([]byte(buf)) -} - -// ParsePoints returns a slice of Points from a text representation of a point -// with each point separated by newlines. 
-func ParsePoints(buf []byte) ([]Point, error) {
-	return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
-}
-
-func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
-	points := []Point{}
-	var (
-		pos   int
-		block []byte
-	)
-	for {
-		pos, block = scanLine(buf, pos)
-		pos += 1
-
-		if len(block) == 0 {
-			break
-		}
-
-		// lines which start with '#' are comments
-		start := skipWhitespace(block, 0)
-
-		// If line is all whitespace, just skip it
-		if start >= len(block) {
-			continue
-		}
-
-		if block[start] == '#' {
-			continue
-		}
-
-		// strip the newline if one is present
-		if block[len(block)-1] == '\n' {
-			block = block[:len(block)-1]
-		}
-
-		pt, err := parsePoint(block[start:len(block)], defaultTime, precision)
-		if err != nil {
-			return nil, fmt.Errorf("unable to parse '%s': %v", string(block[start:len(block)]), err)
-		}
-		points = append(points, pt)
-
-		if pos >= len(buf) {
-			break
-		}
-
-	}
-	return points, nil
-
-}
-
-func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
-	// scan the first block which is measurement[,tag1=value1,tag2=value2...]
-	pos, key, err := scanKey(buf, 0)
-	if err != nil {
-		return nil, err
-	}
-
-	// measurement name is required
-	if len(key) == 0 {
-		return nil, fmt.Errorf("missing measurement")
-	}
-
-	// scan the second block, which is field1=value1[,field2=value2,...]
-	pos, fields, err := scanFields(buf, pos)
-	if err != nil {
-		return nil, err
-	}
-
-	// at least one field is required
-	if len(fields) == 0 {
-		return nil, fmt.Errorf("missing fields")
-	}
-
-	// scan the last block which is an optional integer timestamp
-	pos, ts, err := scanTime(buf, pos)
-
-	if err != nil {
-		return nil, err
-	}
-
-	pt := &point{
-		key:    key,
-		fields: fields,
-		ts:     ts,
-	}
-
-	if len(ts) == 0 {
-		pt.time = defaultTime
-		pt.SetPrecision(precision)
-	} else {
-		ts, err := strconv.ParseInt(string(ts), 10, 64)
-		if err != nil {
-			return nil, err
-		}
-		pt.time = time.Unix(0, ts*pt.GetPrecisionMultiplier(precision))
-	}
-	return pt, nil
-}
-
-// scanKey scans buf starting at i for the measurement and tag portion of the point.
-// It returns the ending position and the byte slice of key within buf. If there
-// are tags, they will be sorted if they are not already.
-func scanKey(buf []byte, i int) (int, []byte, error) {
-	start := skipWhitespace(buf, i)
-
-	i = start
-
-	// Determines whether the tags are sorted; assume they are
-	sorted := true
-
-	// indices holds the indexes within buf of the start of each tag. For example,
-	// a buf of 'cpu,host=a,region=b,zone=c' would have an indices slice of [4,11,20]
-	// which indicates that the first tag starts at buf[4], the second at buf[11], and
-	// the last at buf[20]
-	indices := make([]int, 100)
-
-	// tracks how many commas we've seen so we know how many values are indices.
-	// Since indices is an arbitrarily large slice,
-	// we need to know how many values in the buffer are in use.
-	commas := 0
-
-	// tracks whether we've seen an '='
-	equals := 0
-
-	// loop over each byte in buf
-	for {
-		// reached the end of buf?
-		if i >= len(buf) {
-			if equals == 0 && commas > 0 {
-				return i, buf[start:i], fmt.Errorf("missing tag value")
-			}
-
-			break
-		}
-
-		// equals is special in the tags section. It must be escaped if part of a tag name or value.
-		// It does not need to be escaped if part of the measurement. 
-		if buf[i] == '=' && commas > 0 {
-			if i-1 < 0 || i-2 < 0 {
-				return i, buf[start:i], fmt.Errorf("missing tag name")
-			}
-
-			// Check for "cpu,=value" but allow "cpu,a\,=value"
-			if buf[i-1] == ',' && buf[i-2] != '\\' {
-				return i, buf[start:i], fmt.Errorf("missing tag name")
-			}
-
-			// Check for "cpu,\ =value"
-			if buf[i-1] == ' ' && buf[i-2] != '\\' {
-				return i, buf[start:i], fmt.Errorf("missing tag name")
-			}
-
-			i += 1
-			equals += 1
-
-			// Check for "cpu,a=1,b= value=1"
-			if i < len(buf) && buf[i] == ' ' {
-				return i, buf[start:i], fmt.Errorf("missing tag value")
-			}
-			continue
-		}
-
-		// escaped character
-		if buf[i] == '\\' {
-			i += 2
-			continue
-		}
-
-		// At a tag separator (comma), track its location
-		if buf[i] == ',' {
-			if equals == 0 && commas > 0 {
-				return i, buf[start:i], fmt.Errorf("missing tag value")
-			}
-			i += 1
-
-			// grow our indices slice if we have too many tags
-			if commas >= len(indices) {
-				newIndics := make([]int, cap(indices)*2)
-				copy(newIndics, indices)
-				indices = newIndics
-			}
-			indices[commas] = i
-			commas += 1
-
-			// Check for "cpu, value=1"
-			if i < len(buf) && buf[i] == ' ' {
-				return i, buf[start:i], fmt.Errorf("missing tag key")
-			}
-			continue
-		}
-
-		// reached end of the block? (next block would be fields)
-		if buf[i] == ' ' {
-			// check for "cpu,tag value=1"
-			if equals == 0 && commas > 0 {
-				return i, buf[start:i], fmt.Errorf("missing tag value")
-			}
-			if equals > 0 && commas-1 != equals-1 {
-				return i, buf[start:i], fmt.Errorf("missing tag value")
-			}
-
-			// grow our indices slice if we have too many tags
-			if commas >= len(indices) {
-				newIndics := make([]int, cap(indices)*2)
-				copy(newIndics, indices)
-				indices = newIndics
-			}
-
-			indices[commas] = i + 1
-			break
-		}
-
-		i += 1
-	}
-
-	// check that all tag sections had keys and values (e.g. prevent "a=1,b")
-	// We're using commas-1 because there should always be a comma after the measurement
-	if equals > 0 && commas-1 != equals-1 {
-		return i, buf[start:i], fmt.Errorf("invalid tag format")
-	}
-
-	// This check makes sure we actually received fields from the user. #3379
-	// This will catch invalid syntax such as: `cpu,host=serverA,region=us-west`
-	if i >= len(buf) {
-		return i, buf[start:i], fmt.Errorf("missing fields")
-	}
-
-	// Now that we know where the key region is within buf, and the locations of tags, we
-	// need to determine if duplicate tags exist and if the tags are sorted. This iterates
-	// over 1/2 of the list comparing each end with the other, walking towards the center from
-	// both sides.
-	for j := 0; j < commas/2; j++ {
-		// get the left and right tags
-		_, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
-		_, right := scanTo(buf[indices[commas-j-1]:indices[commas-j]-1], 0, '=')
-
-		// If the tags are equal, then there are duplicate tags, and we should abort
-		if bytes.Equal(left, right) {
-			return i, buf[start:i], fmt.Errorf("duplicate tags")
-		}
-
-		// If left is greater than right, the tags are not sorted. We must continue
-		// since there could be duplicate tags still.
-		if bytes.Compare(left, right) > 0 {
-			sorted = false
-		}
-	}
-
-	// If the tags are not sorted, then sort them. This sort is inline and
-	// uses the tag indices we created earlier. The actual buffer is not sorted, the
-	// indices are using the buffer for value comparison. After the indices are sorted,
-	// the buffer is reconstructed from the sorted indices. 
- if !sorted && commas > 0 { - // Get the measurement name for later - measurement := buf[start : indices[0]-1] - - // Sort the indices - indices := indices[:commas] - insertionSort(0, commas, buf, indices) - - // Create a new key using the measurement and sorted indices - b := make([]byte, len(buf[start:i])) - pos := copy(b, measurement) - for _, i := range indices { - b[pos] = ',' - pos += 1 - _, v := scanToSpaceOr(buf, i, ',') - pos += copy(b[pos:], v) - } - - return i, b, nil - } - - return i, buf[start:i], nil -} - -func insertionSort(l, r int, buf []byte, indices []int) { - for i := l + 1; i < r; i++ { - for j := i; j > l && less(buf, indices, j, j-1); j-- { - indices[j], indices[j-1] = indices[j-1], indices[j] - } - } -} - -func less(buf []byte, indices []int, i, j int) bool { - // This grabs the tag names for i & j, it ignores the values - _, a := scanTo(buf, indices[i], '=') - _, b := scanTo(buf, indices[j], '=') - return bytes.Compare(a, b) < 0 -} - -func isFieldEscapeChar(b byte) bool { - for c := range escapeCodes { - if c == b { - return true - } - } - return false -} - -// scanFields scans buf, starting at i for the fields section of a point. It returns -// the ending position and the byte slice of the fields within buf -func scanFields(buf []byte, i int) (int, []byte, error) { - start := skipWhitespace(buf, i) - i = start - quoted := false - - // tracks how many '=' we've seen - equals := 0 - - // tracks how many commas we've seen - commas := 0 - - for { - // reached the end of buf? - if i >= len(buf) { - break - } - - // escaped characters? - if buf[i] == '\\' && i+1 < len(buf) { - - // Is this an escape char within a string field? Only " and \ are allowed. - if quoted && (buf[i+1] == '"' || buf[i+1] == '\\') { - i += 2 - continue - // Non-string field escaped chars - } else if !quoted && isFieldEscapeChar(buf[i+1]) { - i += 2 - continue - } - } - - // If the value is quoted, scan until we get to the end quote - if buf[i] == '"' { - quoted = !quoted - i += 1 - continue - } - - // If we see an =, ensure that there is at least on char before and after it - if buf[i] == '=' && !quoted { - equals += 1 - - // check for "... =123" but allow "a\ =123" - if buf[i-1] == ' ' && buf[i-2] != '\\' { - return i, buf[start:i], fmt.Errorf("missing field name") - } - - // check for "...a=123,=456" but allow "a=123,a\,=456" - if buf[i-1] == ',' && buf[i-2] != '\\' { - return i, buf[start:i], fmt.Errorf("missing field name") - } - - // check for "... value=" - if i+1 >= len(buf) { - return i, buf[start:i], fmt.Errorf("missing field value") - } - - // check for "... value=,value2=..." - if buf[i+1] == ',' || buf[i+1] == ' ' { - return i, buf[start:i], fmt.Errorf("missing field value") - } - - if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' { - var err error - i, err = scanNumber(buf, i+1) - if err != nil { - return i, buf[start:i], err - } - continue - } - // If next byte is not a double-quote, the value must be a boolean - if buf[i+1] != '"' { - var err error - i, _, err = scanBoolean(buf, i+1) - if err != nil { - return i, buf[start:i], err - } - continue - } - } - - if buf[i] == ',' && !quoted { - commas += 1 - } - - // reached end of block? - if buf[i] == ' ' && !quoted { - break - } - i += 1 - } - - if quoted { - return i, buf[start:i], fmt.Errorf("unbalanced quotes") - } - - // check that all field sections had key and values (e.g. 
prevent "a=1,b" - if equals == 0 || commas != equals-1 { - return i, buf[start:i], fmt.Errorf("invalid field format") - } - - return i, buf[start:i], nil -} - -// scanTime scans buf, starting at i for the time section of a point. It returns -// the ending position and the byte slice of the fields within buf and error if the -// timestamp is not in the correct numeric format -func scanTime(buf []byte, i int) (int, []byte, error) { - start := skipWhitespace(buf, i) - i = start - for { - // reached the end of buf? - if i >= len(buf) { - break - } - - // Timestamps should integers, make sure they are so we don't need to actually - // parse the timestamp until needed - if buf[i] < '0' || buf[i] > '9' { - return i, buf[start:i], fmt.Errorf("bad timestamp") - } - - // reached end of block? - if buf[i] == '\n' { - break - } - i += 1 - } - return i, buf[start:i], nil -} - -func isNumeric(b byte) bool { - return (b >= '0' && b <= '9') || b == '.' -} - -// scanNumber returns the end position within buf, start at i after -// scanning over buf for an integer, or float. It returns an -// error if a invalid number is scanned. -func scanNumber(buf []byte, i int) (int, error) { - start := i - var isInt bool - - // Is negative number? - if i < len(buf) && buf[i] == '-' { - i += 1 - } - - // how many decimal points we've see - decimals := 0 - - // indicates the number is float in scientific notation - scientific := false - - for { - if i >= len(buf) { - break - } - - if buf[i] == ',' || buf[i] == ' ' { - break - } - - if buf[i] == 'i' && i > start && !isInt { - isInt = true - i += 1 - continue - } - - if buf[i] == '.' { - decimals += 1 - } - - // Can't have more than 1 decimal (e.g. 1.1.1 should fail) - if decimals > 1 { - return i, fmt.Errorf("invalid number") - } - - // `e` is valid for floats but not as the first char - if i > start && (buf[i] == 'e') { - scientific = true - i += 1 - continue - } - - // + and - are only valid at this point if they follow an e (scientific notation) - if (buf[i] == '+' || buf[i] == '-') && buf[i-1] == 'e' { - i += 1 - continue - } - - // NaN is a valid float - if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') { - if (buf[i+1] == 'a' || buf[i+1] == 'A') && (buf[i+2] == 'N' || buf[i+2] == 'n') { - i += 3 - continue - } - return i, fmt.Errorf("invalid number") - } - if !isNumeric(buf[i]) { - return i, fmt.Errorf("invalid number") - } - i += 1 - } - if isInt && (decimals > 0 || scientific) { - return i, fmt.Errorf("invalid number") - } - - // It's more common that numbers will be within min/max range for their type but we need to prevent - // out or range numbers from being parsed successfully. This uses some simple heuristics to decide - // if we should parse the number to the actual type. It does not do it all the time because it incurs - // extra allocations and we end up converting the type again when writing points to disk. - if isInt { - // Make sure the last char is an 'i' for integers (e.g. 
9i10 is not valid)
-		if buf[i-1] != 'i' {
-			return i, fmt.Errorf("invalid number")
-		}
-		// Parse the int to check bounds if the number of digits could be larger than the max range
-		// We subtract 1 from the index to remove the `i` from our tests
-		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
-			if _, err := strconv.ParseInt(string(buf[start:i-1]), 10, 64); err != nil {
-				return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
-			}
-		}
-	} else {
-		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
-		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
-			if _, err := strconv.ParseFloat(string(buf[start:i]), 10); err != nil {
-				return i, fmt.Errorf("invalid float")
-			}
-		}
-	}
-
-	return i, nil
-}
-
-// scanBoolean returns the end position within buf, starting at i after
-// scanning over buf for a boolean. Valid values for a boolean are
-// t, T, true, TRUE, f, F, false, FALSE. It returns an error if an invalid boolean
-// is scanned.
-func scanBoolean(buf []byte, i int) (int, []byte, error) {
-	start := i
-
-	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
-		return i, buf[start:i], fmt.Errorf("invalid boolean")
-	}
-
-	i += 1
-	for {
-		if i >= len(buf) {
-			break
-		}
-
-		if buf[i] == ',' || buf[i] == ' ' {
-			break
-		}
-		i += 1
-	}
-
-	// Single char bool (t, T, f, F) is ok
-	if i-start == 1 {
-		return i, buf[start:i], nil
-	}
-
-	// length must be 4 for true or TRUE
-	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
-		return i, buf[start:i], fmt.Errorf("invalid boolean")
-	}
-
-	// length must be 5 for false or FALSE
-	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
-		return i, buf[start:i], fmt.Errorf("invalid boolean")
-	}
-
-	// Otherwise
-	valid := false
-	switch buf[start] {
-	case 't':
-		valid = bytes.Equal(buf[start:i], []byte("true"))
-	case 'f':
-		valid = bytes.Equal(buf[start:i], []byte("false"))
-	case 'T':
-		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
-	case 'F':
-		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
-	}
-
-	if !valid {
-		return i, buf[start:i], fmt.Errorf("invalid boolean")
-	}
-
-	return i, buf[start:i], nil
-
-}
-
-// skipWhitespace returns the end position within buf, starting at i after
-// scanning over spaces in tags
-func skipWhitespace(buf []byte, i int) int {
-	for {
-		if i >= len(buf) {
-			return i
-		}
-
-		if buf[i] == ' ' || buf[i] == '\t' {
-			i += 1
-			continue
-		}
-		break
-	}
-	return i
-}
-
-// scanLine returns the end position in buf and the next line found within
-// buf.
-func scanLine(buf []byte, i int) (int, []byte) {
-	start := i
-	quoted := false
-	fields := false
-	for {
-		// reached the end of buf?
-		if i >= len(buf) {
-			break
-		}
-
-		if buf[i] == ' ' {
-			fields = true
-		}
-
-		// If we see a double quote, make sure it is not escaped
-		if fields && buf[i] == '"' && (i-1 > 0 && buf[i-1] != '\\') {
-			i += 1
-			quoted = !quoted
-			continue
-		}
-
-		if buf[i] == '\n' && !quoted {
-			break
-		}
-
-		i += 1
-	}
-
-	return i, buf[start:i]
-}
-
-// scanTo returns the end position in buf and the next consecutive block
-// of bytes, starting from i and ending with stop byte. If there are leading
-// spaces or escaped chars, they are skipped. 
-func scanTo(buf []byte, i int, stop byte) (int, []byte) {
-	start := i
-	for {
-		// reached the end of buf?
-		if i >= len(buf) {
-			break
-		}
-
-		if buf[i] == '\\' {
-			i += 2
-			continue
-		}
-
-		// reached end of block?
-		if buf[i] == stop {
-			break
-		}
-		i += 1
-	}
-
-	return i, buf[start:i]
-}
-
-// scanToSpaceOr returns the end position in buf and the next consecutive block
-// of bytes, starting from i and ending with the stop byte or a space. If there
-// are leading spaces, they are skipped.
-func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
-	start := i
-	for {
-		// reached the end of buf?
-		if i >= len(buf) {
-			break
-		}
-
-		if buf[i] == '\\' {
-			i += 2
-			continue
-		}
-		// reached end of block?
-		if buf[i] == stop || buf[i] == ' ' {
-			break
-		}
-		i += 1
-	}
-
-	return i, buf[start:i]
-}
-
-func scanTagValue(buf []byte, i int) (int, []byte) {
-	start := i
-	for {
-		if i >= len(buf) {
-			break
-		}
-
-		if buf[i] == '\\' {
-			i += 2
-			continue
-		}
-
-		if buf[i] == ',' {
-			break
-		}
-		i += 1
-	}
-	return i, buf[start:i]
-}
-
-func scanFieldValue(buf []byte, i int) (int, []byte) {
-	start := i
-	quoted := false
-	for {
-		if i >= len(buf) {
-			break
-		}
-
-		// Only escape char for a field value is a double-quote
-		if buf[i] == '\\' && i+1 < len(buf) && buf[i+1] == '"' {
-			i += 2
-			continue
-		}
-
-		// Quoted value? (e.g. string)
-		if buf[i] == '"' {
-			i += 1
-			quoted = !quoted
-			continue
-		}
-
-		if buf[i] == ',' && !quoted {
-			break
-		}
-		i += 1
-	}
-	return i, buf[start:i]
-}
-
-func escapeMeasurement(in []byte) []byte {
-	for b, esc := range measurementEscapeCodes {
-		in = bytes.Replace(in, []byte{b}, esc, -1)
-	}
-	return in
-}
-
-func unescapeMeasurement(in []byte) []byte {
-	for b, esc := range measurementEscapeCodes {
-		in = bytes.Replace(in, esc, []byte{b}, -1)
-	}
-	return in
-}
-
-func escapeTag(in []byte) []byte {
-	for b, esc := range tagEscapeCodes {
-		in = bytes.Replace(in, []byte{b}, esc, -1)
-	}
-	return in
-}
-
-func unescapeTag(in []byte) []byte {
-	for b, esc := range tagEscapeCodes {
-		in = bytes.Replace(in, esc, []byte{b}, -1)
-	}
-	return in
-}
-
-func escape(in []byte) []byte {
-	for b, esc := range escapeCodes {
-		in = bytes.Replace(in, []byte{b}, esc, -1)
-	}
-	return in
-}
-
-func escapeString(in string) string {
-	for b, esc := range escapeCodesStr {
-		in = strings.Replace(in, b, esc, -1)
-	}
-	return in
-}
-
-func unescape(in []byte) []byte {
-	i := 0
-	inLen := len(in)
-	var out []byte
-
-	for {
-		if i >= inLen {
-			break
-		}
-		if in[i] == '\\' && i+1 < inLen {
-			switch in[i+1] {
-			case ',':
-				out = append(out, ',')
-				i += 2
-				continue
-			case '"':
-				out = append(out, '"')
-				i += 2
-				continue
-			case ' ':
-				out = append(out, ' ')
-				i += 2
-				continue
-			case '=':
-				out = append(out, '=')
-				i += 2
-				continue
-			}
-		}
-		out = append(out, in[i])
-		i += 1
-	}
-	return out
-}
-
-func unescapeString(in string) string {
-	for b, esc := range escapeCodesStr {
-		in = strings.Replace(in, esc, b, -1)
-	}
-	return in
-}
-
-// escapeStringField returns a copy of in with any double quotes or
-// backslashes replaced with escaped values
-func escapeStringField(in string) string {
-	var out []byte
-	i := 0
-	for {
-		if i >= len(in) {
-			break
-		}
-		// escape backslashes
-		if in[i] == '\\' {
-			out = append(out, '\\')
-			out = append(out, '\\')
-			i += 1
-			continue
-		}
-		// escape double-quotes
-		if in[i] == '"' {
-			out = append(out, '\\')
-			out = append(out, '"')
-			i += 1
-			continue
-		}
-		out = append(out, in[i])
-		i += 1
-
-	}
-	return string(out)
-}
-
-// unescapeStringField returns a copy of in with 
any escaped double-quotes
-// or backslashes unescaped
-func unescapeStringField(in string) string {
-	var out []byte
-	i := 0
-	for {
-		if i >= len(in) {
-			break
-		}
-		// unescape backslashes
-		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
-			out = append(out, '\\')
-			i += 2
-			continue
-		}
-		// unescape double-quotes
-		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
-			out = append(out, '"')
-			i += 2
-			continue
-		}
-		out = append(out, in[i])
-		i += 1
-
-	}
-	return string(out)
-}
-
-// NewPoint returns a new point with the given measurement name, tags, fields and timestamp
-func NewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
-	return &point{
-		key:    MakeKey([]byte(name), tags),
-		time:   time,
-		fields: fields.MarshalBinary(),
-	}
-}
-
-func (p *point) Data() []byte {
-	return p.data
-}
-
-func (p *point) SetData(b []byte) {
-	p.data = b
-}
-
-func (p *point) Key() []byte {
-	return p.key
-}
-
-func (p *point) name() []byte {
-	_, name := scanTo(p.key, 0, ',')
-	return name
-}
-
-// Name returns the measurement name for the point
-func (p *point) Name() string {
-	if p.cachedName != "" {
-		return p.cachedName
-	}
-	p.cachedName = string(unescape(p.name()))
-	return p.cachedName
-}
-
-// SetName updates the measurement name for the point
-func (p *point) SetName(name string) {
-	p.cachedName = ""
-	p.key = MakeKey([]byte(name), p.Tags())
-}
-
-// Time returns the timestamp for the point
-func (p *point) Time() time.Time {
-	return p.time
-}
-
-// SetTime updates the timestamp for the point
-func (p *point) SetTime(t time.Time) {
-	p.time = t
-}
-
-// Tags returns the tag set for the point
-func (p *point) Tags() Tags {
-	tags := map[string]string{}
-
-	if len(p.key) != 0 {
-		pos, name := scanTo(p.key, 0, ',')
-
-		// it's an empty key, so there are no tags
-		if len(name) == 0 {
-			return tags
-		}
-
-		i := pos + 1
-		var key, value []byte
-		for {
-			if i >= len(p.key) {
-				break
-			}
-			i, key = scanTo(p.key, i, '=')
-			i, value = scanTagValue(p.key, i+1)
-
-			tags[string(unescapeTag(key))] = string(unescapeTag(value))
-
-			i += 1
-		}
-	}
-	return tags
-}
-
-func MakeKey(name []byte, tags Tags) []byte {
-	// unescape the name and then re-escape it to avoid double escaping.
-	// The key should always be stored in escaped form.
-	return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...) 
-} - -// SetTags replaces the tags for the point -func (p *point) SetTags(tags Tags) { - p.key = MakeKey([]byte(p.Name()), tags) -} - -// AddTag adds or replaces a tag value for a point -func (p *point) AddTag(key, value string) { - tags := p.Tags() - tags[key] = value - p.key = MakeKey([]byte(p.Name()), tags) -} - -// Fields returns the fields for the point -func (p *point) Fields() Fields { - if p.cachedFields != nil { - return p.cachedFields - } - p.cachedFields = p.unmarshalBinary() - return p.cachedFields -} - -// AddField adds or replaces a field value for a point -func (p *point) AddField(name string, value interface{}) { - fields := p.Fields() - fields[name] = value - p.fields = fields.MarshalBinary() - p.cachedFields = nil -} - -// SetPrecision will round a time to the specified precision -func (p *point) SetPrecision(precision string) { - switch precision { - case "n": - case "u": - p.SetTime(p.Time().Truncate(time.Microsecond)) - case "ms": - p.SetTime(p.Time().Truncate(time.Millisecond)) - case "s": - p.SetTime(p.Time().Truncate(time.Second)) - case "m": - p.SetTime(p.Time().Truncate(time.Minute)) - case "h": - p.SetTime(p.Time().Truncate(time.Hour)) - } -} - -// GetPrecisionMultiplier will return a multiplier for the precision specified -func (p *point) GetPrecisionMultiplier(precision string) int64 { - d := time.Nanosecond - switch precision { - case "u": - d = time.Microsecond - case "ms": - d = time.Millisecond - case "s": - d = time.Second - case "m": - d = time.Minute - case "h": - d = time.Hour - } - return int64(d) -} - -func (p *point) String() string { - if p.Time().IsZero() { - return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) - } - return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), p.UnixNano()) -} - -func (p *point) unmarshalBinary() Fields { - return newFieldsFromBinary(p.fields) -} - -func (p *point) HashID() uint64 { - h := fnv.New64a() - h.Write(p.key) - sum := h.Sum64() - return sum -} - -func (p *point) UnixNano() int64 { - return p.Time().UnixNano() -} - -type Tags map[string]string - -func (t Tags) HashKey() []byte { - // Empty maps marshal to empty bytes. - if len(t) == 0 { - return nil - } - - escaped := Tags{} - for k, v := range t { - ek := escapeTag([]byte(k)) - ev := escapeTag([]byte(v)) - escaped[string(ek)] = string(ev) - } - - // Extract keys and determine final size. - sz := len(escaped) + (len(escaped) * 2) // separators - keys := make([]string, len(escaped)+1) - i := 0 - for k, v := range escaped { - keys[i] = k - i += 1 - sz += len(k) + len(v) - } - keys = keys[:i] - sort.Strings(keys) - // Generate marshaled bytes. - b := make([]byte, sz) - buf := b - idx := 0 - for _, k := range keys { - buf[idx] = ',' - idx += 1 - copy(buf[idx:idx+len(k)], k) - idx += len(k) - buf[idx] = '=' - idx += 1 - v := escaped[k] - copy(buf[idx:idx+len(v)], v) - idx += len(v) - } - return b[:idx] -} - -type Fields map[string]interface{} - -func parseNumber(val []byte) (interface{}, error) { - if val[len(val)-1] == 'i' { - val = val[:len(val)-1] - return strconv.ParseInt(string(val), 10, 64) - } - for i := 0; i < len(val); i++ { - // If there is a decimal or an N (NaN), I (Inf), parse as float - if val[i] == '.' 
|| val[i] == 'N' || val[i] == 'n' || val[i] == 'I' || val[i] == 'i' || val[i] == 'e' { - return strconv.ParseFloat(string(val), 64) - } - if val[i] < '0' && val[i] > '9' { - return string(val), nil - } - } - return strconv.ParseFloat(string(val), 64) -} - -func newFieldsFromBinary(buf []byte) Fields { - fields := Fields{} - var ( - i int - name, valueBuf []byte - value interface{} - err error - ) - for { - if i >= len(buf) { - break - } - - i, name = scanTo(buf, i, '=') - if len(name) == 0 { - continue - } - name = unescape(name) - - i, valueBuf = scanFieldValue(buf, i+1) - if len(valueBuf) == 0 { - fields[string(name)] = nil - continue - } - - // If the first char is a double-quote, then unmarshal as string - if valueBuf[0] == '"' { - value = unescapeStringField(string(valueBuf[1 : len(valueBuf)-1])) - // Check for numeric characters and special NaN or Inf - } else if (valueBuf[0] >= '0' && valueBuf[0] <= '9') || valueBuf[0] == '-' || valueBuf[0] == '+' || valueBuf[0] == '.' || - valueBuf[0] == 'N' || valueBuf[0] == 'n' || // NaN - valueBuf[0] == 'I' || valueBuf[0] == 'i' { // Inf - - value, err = parseNumber(valueBuf) - if err != nil { - panic(fmt.Sprintf("unable to parse number value '%v': %v", string(valueBuf), err)) - } - - // Otherwise parse it as bool - } else { - value, err = strconv.ParseBool(string(valueBuf)) - if err != nil { - panic(fmt.Sprintf("unable to parse bool value '%v': %v\n", string(valueBuf), err)) - } - } - fields[string(name)] = value - i += 1 - } - return fields -} - -// MarshalBinary encodes all the fields to their proper type and returns the binary -// represenation -// NOTE: uint64 is specifically not supported due to potential overflow when we decode -// again later to an int64 -func (p Fields) MarshalBinary() []byte { - b := []byte{} - keys := make([]string, len(p)) - i := 0 - for k, _ := range p { - keys[i] = k - i += 1 - } - sort.Strings(keys) - - for _, k := range keys { - v := p[k] - b = append(b, []byte(escapeString(k))...) - b = append(b, '=') - switch t := v.(type) { - case int: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int8: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int16: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int32: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int64: - b = append(b, []byte(strconv.FormatInt(t, 10))...) - b = append(b, 'i') - case uint: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint8: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint16: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint32: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case float32: - val := []byte(strconv.FormatFloat(float64(t), 'f', -1, 32)) - b = append(b, val...) - case float64: - val := []byte(strconv.FormatFloat(t, 'f', -1, 64)) - b = append(b, val...) - case bool: - b = append(b, []byte(strconv.FormatBool(t))...) - case []byte: - b = append(b, t...) - case string: - b = append(b, '"') - b = append(b, []byte(escapeStringField(t))...) - b = append(b, '"') - case nil: - // skip - default: - // Can't determine the type, so convert to string - b = append(b, '"') - b = append(b, []byte(escapeStringField(fmt.Sprintf("%v", v)))...) 
- b = append(b, '"') - - } - b = append(b, ',') - } - if len(b) > 0 { - return b[0 : len(b)-1] - } - return b -} - -type indexedSlice struct { - indices []int - b []byte -} - -func (s *indexedSlice) Less(i, j int) bool { - _, a := scanTo(s.b, s.indices[i], '=') - _, b := scanTo(s.b, s.indices[j], '=') - return bytes.Compare(a, b) < 0 -} - -func (s *indexedSlice) Swap(i, j int) { - s.indices[i], s.indices[j] = s.indices[j], s.indices[i] -} - -func (s *indexedSlice) Len() int { - return len(s.indices) -} diff --git a/vendor/vendor.json b/vendor/vendor.json index c286e24600..a390de54bd 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -490,6 +490,21 @@ "revision": "1d4fa605f6ff3ed628d7ae5eda7c0e56803e72a5", "revisionTime": "2016-10-07T00:41:22Z" }, + { + "path": "github.com/influxdata/influxdb/client/v2", + "revision": "15e594fc09f112cb696c084a20beaca25538a5fa", + "revisionTime": "2017-03-31T16:09:02-05:00" + }, + { + "path": "github.com/influxdata/influxdb/models", + "revision": "15e594fc09f112cb696c084a20beaca25538a5fa", + "revisionTime": "2017-03-31T16:09:02-05:00" + }, + { + "path": "github.com/influxdata/influxdb/pkg/escape", + "revision": "15e594fc09f112cb696c084a20beaca25538a5fa", + "revisionTime": "2017-03-31T16:09:02-05:00" + }, { "checksumSHA1": "0ZrwvB6KoGPj2PoDNSEJwxQ6Mog=", "path": "github.com/jmespath/go-jmespath", diff --git a/web/federate.go b/web/federate.go index 29221fdcce..658dfc8668 100644 --- a/web/federate.go +++ b/web/federate.go @@ -113,6 +113,16 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) { sort.Sort(byName(vec)) + externalLabels := h.externalLabels.Clone() + if _, ok := externalLabels[model.InstanceLabel]; !ok { + externalLabels[model.InstanceLabel] = "" + } + externalLabelNames := make(model.LabelNames, 0, len(externalLabels)) + for ln := range externalLabels { + externalLabelNames = append(externalLabelNames, ln) + } + sort.Sort(externalLabelNames) + var ( lastMetricName string protMetricFam *dto.MetricFamily diff --git a/web/federate_test.go b/web/federate_test.go index 7537a129c9..0b0204309f 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -27,10 +27,11 @@ import ( ) var scenarios = map[string]struct { - params string - accept string - code int - body string + params string + accept string + externalLabels model.LabelSet + code int + body string }{ "empty": { params: "", @@ -58,72 +59,100 @@ var scenarios = map[string]struct { params: "match[]=test_metric1", code: 200, body: `# TYPE test_metric1 untyped -test_metric1{foo="bar"} 10000 6000000 -test_metric1{foo="boo"} 1 6000000 +test_metric1{foo="bar",instance="i"} 10000 6000000 +test_metric1{foo="boo",instance="i"} 1 6000000 `, }, "test_metric2": { params: "match[]=test_metric2", code: 200, body: `# TYPE test_metric2 untyped -test_metric2{foo="boo"} 1 6000000 +test_metric2{foo="boo",instance="i"} 1 6000000 `, }, "test_metric_without_labels": { params: "match[]=test_metric_without_labels", code: 200, body: `# TYPE test_metric_without_labels untyped -test_metric_without_labels 1001 6000000 +test_metric_without_labels{instance=""} 1001 6000000 `, }, "{foo='boo'}": { params: "match[]={foo='boo'}", code: 200, body: `# TYPE test_metric1 untyped -test_metric1{foo="boo"} 1 6000000 +test_metric1{foo="boo",instance="i"} 1 6000000 # TYPE test_metric2 untyped -test_metric2{foo="boo"} 1 6000000 +test_metric2{foo="boo",instance="i"} 1 6000000 `, }, "two matchers": { params: "match[]=test_metric1&match[]=test_metric2", code: 200, body: `# TYPE test_metric1 untyped 
-test_metric1{foo="bar"} 10000 6000000 -test_metric1{foo="boo"} 1 6000000 +test_metric1{foo="bar",instance="i"} 10000 6000000 +test_metric1{foo="boo",instance="i"} 1 6000000 # TYPE test_metric2 untyped -test_metric2{foo="boo"} 1 6000000 +test_metric2{foo="boo",instance="i"} 1 6000000 `, }, "everything": { params: "match[]={__name__=~'.%2b'}", // '%2b' is an URL-encoded '+'. code: 200, body: `# TYPE test_metric1 untyped -test_metric1{foo="bar"} 10000 6000000 -test_metric1{foo="boo"} 1 6000000 +test_metric1{foo="bar",instance="i"} 10000 6000000 +test_metric1{foo="boo",instance="i"} 1 6000000 # TYPE test_metric2 untyped -test_metric2{foo="boo"} 1 6000000 +test_metric2{foo="boo",instance="i"} 1 6000000 # TYPE test_metric_without_labels untyped -test_metric_without_labels 1001 6000000 +test_metric_without_labels{instance=""} 1001 6000000 `, }, "empty label value matches everything that doesn't have that label": { params: "match[]={foo='',__name__=~'.%2b'}", code: 200, body: `# TYPE test_metric_without_labels untyped -test_metric_without_labels 1001 6000000 +test_metric_without_labels{instance=""} 1001 6000000 `, }, "empty label value for a label that doesn't exist at all, matches everything": { params: "match[]={bar='',__name__=~'.%2b'}", code: 200, body: `# TYPE test_metric1 untyped -test_metric1{foo="bar"} 10000 6000000 -test_metric1{foo="boo"} 1 6000000 +test_metric1{foo="bar",instance="i"} 10000 6000000 +test_metric1{foo="boo",instance="i"} 1 6000000 # TYPE test_metric2 untyped -test_metric2{foo="boo"} 1 6000000 +test_metric2{foo="boo",instance="i"} 1 6000000 # TYPE test_metric_without_labels untyped -test_metric_without_labels 1001 6000000 +test_metric_without_labels{instance=""} 1001 6000000 +`, + }, + "external labels are added if not already present": { + params: "match[]={__name__=~'.%2b'}", // '%2b' is an URL-encoded '+'. + externalLabels: model.LabelSet{"zone": "ie", "foo": "baz"}, + code: 200, + body: `# TYPE test_metric1 untyped +test_metric1{foo="bar",instance="i",zone="ie"} 10000 6000000 +test_metric1{foo="boo",instance="i",zone="ie"} 1 6000000 +# TYPE test_metric2 untyped +test_metric2{foo="boo",instance="i",zone="ie"} 1 6000000 +# TYPE test_metric_without_labels untyped +test_metric_without_labels{foo="baz",instance="",zone="ie"} 1001 6000000 +`, + }, + "instance is an external label": { + // This makes no sense as a configuration, but we should + // know what it does anyway. + params: "match[]={__name__=~'.%2b'}", // '%2b' is an URL-encoded '+'. 
+ externalLabels: model.LabelSet{"instance": "baz"}, + code: 200, + body: `# TYPE test_metric1 untyped +test_metric1{foo="bar",instance="i"} 10000 6000000 +test_metric1{foo="boo",instance="i"} 1 6000000 +# TYPE test_metric2 untyped +test_metric2{foo="boo",instance="i"} 1 6000000 +# TYPE test_metric_without_labels untyped +test_metric_without_labels{instance="baz"} 1001 6000000 `, }, } @@ -131,9 +160,9 @@ test_metric_without_labels 1001 6000000 func TestFederation(t *testing.T) { suite, err := promql.NewTest(t, ` load 1m - test_metric1{foo="bar"} 0+100x100 - test_metric1{foo="boo"} 1+0x100 - test_metric2{foo="boo"} 1+0x100 + test_metric1{foo="bar",instance="i"} 0+100x100 + test_metric1{foo="boo",instance="i"} 1+0x100 + test_metric2{foo="boo",instance="i"} 1+0x100 test_metric_without_labels 1+10x100 `) if err != nil { @@ -152,6 +181,7 @@ func TestFederation(t *testing.T) { } for name, scenario := range scenarios { + h.externalLabels = scenario.externalLabels req, err := http.ReadRequest(bufio.NewReader(strings.NewReader( "GET http://example.org/federate?" + scenario.params + " HTTP/1.0\r\n\r\n", ))) @@ -174,7 +204,7 @@ func TestFederation(t *testing.T) { t.Errorf("Scenario %q: got code %d, want %d", name, got, want) } if got, want := normalizeBody(res.Body), scenario.body; got != want { - t.Errorf("Scenario %q: got body %q, want %q", name, got, want) + t.Errorf("Scenario %q: got body %s, want %s", name, got, want) } } } diff --git a/web/ui/bindata.go b/web/ui/bindata.go index 697e005dbe..58e0002bc3 100644 --- a/web/ui/bindata.go +++ b/web/ui/bindata.go @@ -121,7 +121,7 @@ func webUiTemplates_baseHtml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web/ui/templates/_base.html", size: 2858, mode: os.FileMode(420), modTime: time.Unix(1489763294, 0)} + info := bindataFileInfo{name: "web/ui/templates/_base.html", size: 2858, mode: os.FileMode(420), modTime: time.Unix(1490348605, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -141,7 +141,7 @@ func webUiTemplatesAlertsHtml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web/ui/templates/alerts.html", size: 1837, mode: os.FileMode(420), modTime: time.Unix(1489763294, 0)} + info := bindataFileInfo{name: "web/ui/templates/alerts.html", size: 1837, mode: os.FileMode(420), modTime: time.Unix(1490348605, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -201,7 +201,7 @@ func webUiTemplatesGraphHtml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web/ui/templates/graph.html", size: 1941, mode: os.FileMode(420), modTime: time.Unix(1489763294, 0)} + info := bindataFileInfo{name: "web/ui/templates/graph.html", size: 1941, mode: os.FileMode(420), modTime: time.Unix(1490348605, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -261,7 +261,7 @@ func webUiTemplatesTargetsHtml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web/ui/templates/targets.html", size: 2275, mode: os.FileMode(420), modTime: time.Unix(1490015258, 0)} + info := bindataFileInfo{name: "web/ui/templates/targets.html", size: 2275, mode: os.FileMode(420), modTime: time.Unix(1491298465, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -406,7 +406,7 @@ func webUiStaticJsAlertsJs() (*asset, error) { return a, nil } -var _webUiStaticJsGraphJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x3c\xfd\x77\xdb\x36\x92\xbf\xfb\xaf\x98\xb0\x79\x11\x55\xcb\x94\x9d\xee\xf6\x76\x65\xcb\xbd\x34\x1f\x9b\xec\xe6\x6b\x1d\xb7\xdb\x3e\xc7\xeb\x07\x91\x90\x88\x98\x22\xb9\x00\x68\x5b\x4d\xf4\xbf\xdf\xc3\x00\x20\x01\x92\xb2\xd4\xf6\x6e\xdf\xdd\xbb\xfc\x20\x87\xf8\x18\x0c\x06\x83\xc1\x7c\x01\x37\x84\xc3\x7b\x5e\x2c\xa9\x4c\x69\x25\x60\xea\x7e\x7c\xf9\x02\x9f\xd7\xc7\x7b\xaa\xc9\x82\x93\x32\x3d\xa7\xcb\x32\x23\x92\x1e\xef\x61\xd9\x87\xe7\x4f\xdf\xbd\x7d\x06\x53\x38\x3a\x3c\x3c\x3c\xde\xdb\x6b\x7a\x46\x7f\x51\xcd\x61\x0a\xf3\x2a\x8f\x25\x2b\xf2\x90\x66\x74\x49\x73\x39\x82\xa2\x54\xdf\x62\x04\x29\xc9\x93\x8c\x3e\x4d\x49\xbe\xa0\xf6\xeb\x8c\x2e\x8b\x1b\x3a\x84\xcf\x7b\x00\x32\x65\x22\xa2\x19\x4c\xc1\xf4\x3d\xb6\x85\x88\xcb\xcb\xf3\x37\xaf\x61\x0a\x79\x95\x65\x75\x85\x81\x0d\x53\x3b\x4a\x5d\xe3\x0e\x06\x53\x6f\xec\x56\x1b\x8d\x82\x8b\xba\x46\x07\x3c\x14\x43\xd5\x63\xa8\xba\xae\xeb\xfe\x9c\xc5\xd7\x22\x25\xb7\x76\xee\x1e\x6a\x09\x91\x04\xa6\x70\x71\x79\xbc\x67\x8b\x58\xce\x24\x23\x19\xfb\x85\x86\xc3\xe3\xbd\x75\x0f\x01\x23\xc9\x96\xf4\x05\x89\x65\xc1\xd5\xa4\x14\x1a\xc1\x2a\x98\xc0\xb7\x87\xf0\xb5\xfe\x79\xfc\x07\xf8\x1a\xbe\xf9\xf6\x8f\x23\x55\x75\xdb\xad\xfa\x0f\xac\x48\x5a\x15\x58\x98\x36\x85\xf8\xbd\xc4\x6f\xfc\xaf\x08\x26\x70\xd4\x8f\x91\x90\xb4\xfc\x91\x64\x15\x55\x08\x5d\xa8\xc6\x47\x22\x18\x41\x70\x74\xa8\xff\x2c\xd5\xef\x1f\xf1\xf7\x48\xff\xf9\xe6\x50\x7f\xa5\xea\xf7\x31\xfe\x7e\x8b\xbf\x47\xfa\xe3\x28\xc1\x8a\x24\xc0\xa1\x8f\x6e\xf1\x0b\x7f\xff\x80\xbf\x7f\xc2\xdf\xa3\x15\x96\xaf\x82\xbd\xcb\x3e\xb4\xf2\x6a\x89\xff\x51\x58\xf5\xb1\x62\x54\xf2\x42\x16\x72\x55\x52\x87\xec\xdd\x45\x56\x5c\x2d\x68\x36\x87\x29\x2e\x91\x5a\x3d\xf5\x19\xb1\xc4\xdb\x18\xed\x41\xf7\xf7\x71\x55\xc7\x63\xf8\x40\x25\x24\x74\x4e\xaa\x4c\x5a\x1e\x8c\x2c\x10\xfb\x8d\xc0\x0c\xd8\xe3\x76\x25\x57\x2c\x79\xc5\xf2\xb2\x92\xb6\x55\x5f\xd5\x97\x2f\x48\x51\xd5\x9d\xcd\x21\xf4\xda\x49\x32\x83\xe9\x74\x0a\x55\x9e\xd0\x39\xcb\x69\x62\x19\xb8\xdb\x0a\x8e\x90\x85\x0d\xf2\xcf\x38\xb9\xd5\x1b\x1d\xe2\x22\x97\xbc\xc8\x04\x90\x3c\xc1\x0f\xc2\x72\xca\x61\xce\x8b\x25\xbc\xc4\x7d\x30\x23\x5c\x80\x34\x02\x21\xda\x33\xc4\x6b\x76\xa0\x1e\x72\x50\x12\x99\xbe\xe7\x74\xce\xee\x06\x13\x78\xff\xe4\xfc\xe5\xd5\xfb\xb3\xe7\x2f\x5e\xfd\x34\xd2\xd5\xb3\x8a\x65\xc9\x8f\x94\x0b\x56\xe4\x83\x09\x7c\xff\xc3\xab\xd7\xcf\xae\x7e\x7c\x7e\xf6\xe1\xd5\xbb\xb7\x76\x73\x7d\xfa\x7b\x45\xf9\x2a\xa2\x77\x92\xe6\x49\x58\xcb\x0f\x77\x36\xc3\x9a\x8e\xae\x6c\x78\x18\xbe\xa9\x84\x24\x71\x4a\x23\x4e\xf3\x84\xf2\xd0\x93\x62\xb5\x2c\x1a\x36\xdd\x69\x16\x91\xb2\x54\xe3\xf8\xd0\x86\x76\x81\xff\x42\x25\x70\x3a\xa7\x9c\xe6\x31\x15\x20\x0b\x20\x59\x06\x32\xa5\xc0\x72\x49\x39\x15\x92\xe5\x0b\x2b\xb1\x04\xb0\x1c\xeb\x1a\xa2\x6a\x3a\x92\x3c\xd1\xe0\x66\x2c\x4f\x80\xde\xd0\x5c\x1a\xf1\xc2\x91\x5f\x6a\x89\xfb\x0f\xae\xd0\xe1\x96\x15\x68\x16\xcd\x59\x9e\x84\xc1\x57\x58\x7b\x75\xab\xab\x03\xd8\xb7\x0c\xd5\x4c\xe5\x5f\x8a\x6a\x2f\x0a\xbe\x84\xa9\x07\xcb\x40\xd0\xf5\x57\xf3\x82\x2f\x03\x3d\x3b\x3d\xc2\x5d\xc9\xfb\x3b\x48\x7a\x27\x09\xa7\xe4\x22\x27\x4b\x3a\x55\xed\x2e\x03\x87\x70\x77\x25\x8f\xae\xe9\xaa\xe4\x54\x88\xb0\x11\xfb\x96\xf7\xc6\x63\x78\xae\x08\x04\xb7\x44\x00\x36\xa2\x09\xdc\x32\x99\x16\x95\x44\x12\x89\x94\xcd\x25\x5c\xd3\x55\x84\xed\x15\x57\xd3\xe8\x36\x65\x71\x0a\xd3\x29\x1c\x7d\x03\x8f\x1e\xc1\x03\x1a\x61\xb3\xbf\xd1\x95\x85\xdb\x9e\x6c\x24\xaa\xd9\x92\xc9\x10\x31\x53\xff\x68\x54\x72\x24\xf0\x33\xbd\x2d\x6d\x0d\x32\x3d\xe2\xf5\xa4\x92\xc5\x01\xa7\x42\x49\x04\x85\x89\x9a\x28\xa8\x99\x42\x91\x03\x6e\x37\x8d\x12\xf2\xf7\x7c\x2
e\xa8\x34\xe2\x21\xd2\x5f\x2f\x29\x5b\xa4\x12\x0e\x74\x59\x9c\x31\x9a\x9b\xb2\xe3\xba\x9f\x06\x7f\x6e\x48\xe8\x1f\x8c\xcd\x54\x00\x1e\xaa\xef\x28\x16\x22\x1c\xa4\x08\x62\x30\x82\x01\xa9\x64\x31\x68\x97\xd2\x2c\x12\x31\x2f\xb2\xcc\x0c\xbf\x6f\x70\xb3\xd3\xd3\x7f\x1e\xea\x83\x2a\x2a\xf2\x70\x70\x4d\x57\x55\xa9\x27\x34\x18\x79\x92\xaf\x85\x9e\x39\xdc\x60\xad\x0f\xb8\xd6\x22\xc7\x78\x6a\xea\xfd\xe1\x9e\xa3\x0e\x13\xa1\xa4\x7a\xe5\xca\xb0\x66\x7d\x34\x33\x21\x16\x9a\x93\x1c\xb1\xe6\x32\x94\xda\xb8\xd7\x34\xf9\x5e\xe6\x9b\x60\xd8\x26\x57\x33\x99\x77\x3b\xee\x30\xb2\x69\xe9\x8e\xca\x72\x41\xb9\x7c\x43\x25\x67\xf1\x26\x08\x82\x66\x34\x36\x20\x74\xfb\xab\x25\x76\x70\x01\x71\x3a\xe7\x54\xa4\xaf\x14\xcf\xdf\x90\x6c\x17\x58\xa6\xcb\xa5\xbb\x1d\xe3\x22\x17\x45\x46\xcf\x51\x58\xf7\xed\x62\xd3\x20\x68\x49\x40\xd5\x01\x36\x74\xd1\xa2\xa3\x16\x46\xee\x70\x92\xcc\x44\x7f\x2f\x72\xa1\x34\x98\x03\x59\x2c\x16\x19\x9d\x0e\x24\x99\x0d\xdc\xe9\xaa\x8e\x11\xfd\x57\xe7\x20\x1a\xaa\x9f\x30\x10\x69\x71\xdb\x6e\x5d\xe4\xba\x3c\x8f\x66\xd8\x34\x70\x78\xb2\x16\x1b\x6a\xef\x48\xc2\x17\xb8\xe7\x1e\x86\x34\xd2\x1f\x86\xc9\x7b\x0e\x34\x5d\x1f\x95\x84\xd3\x5c\x86\xc3\x88\xe5\x09\xbd\x0b\xdd\xf6\x2e\xcf\xda\x0a\x25\x6d\x1e\x86\xc1\x57\x4a\x90\x1a\x08\x44\x4a\x1e\x06\x84\x33\x72\x60\x0f\xc3\x60\x38\x8c\x52\x22\x9e\x66\x44\x88\x30\xe0\x34\x2b\x48\x12\x0c\x5b\x92\x48\xcb\x1f\x3c\xb2\x1a\x51\xa3\x77\x91\x16\xf9\x67\x54\x56\x3c\x07\xa5\x45\x0a\x98\x17\x71\x25\x60\x46\xe2\x6b\x75\x94\xa0\xf0\x65\xb9\x90\x94\x24\x50\xcc\x41\xc3\x52\x27\x4a\xd4\xc7\xa0\xd1\x0c\x97\xe6\x9a\xae\x92\xe2\x36\x57\xfa\x11\x47\xd8\xbd\x94\x6c\x36\x30\x8e\xe9\x91\x04\x8b\x6f\x48\x16\xfa\x5f\x43\xd3\x46\x43\xdd\x20\x49\xd7\xc3\xe6\xec\xe0\xbc\xd8\x70\x78\xe8\xba\x60\x18\xa5\x2c\x31\x54\x6f\x98\xf5\x89\x16\x89\x9b\x79\x55\x09\xa5\x36\x87\xdb\x1d\x55\x43\xf0\xba\x38\xad\x57\x4f\xee\x98\xd8\xd8\x7a\x75\x45\xee\x98\x70\x9a\x67\x74\x41\xf3\x64\x03\x3a\xba\xd2\x15\x36\x25\xcb\x73\xba\x69\xd2\xa6\xd6\x3d\x26\x6f\x48\xf6\x41\x12\xb9\x61\x97\x61\xfd\x95\x50\x0d\xbc\x43\x39\x4f\x9e\x11\x49\xfb\xfb\x38\x02\x8d\xe6\x49\x57\x90\x9a\xce\xca\x02\xa1\xca\x9e\x28\x59\x7c\x4d\x79\xa8\xb9\x22\x2b\x62\x92\xd1\x09\x0c\x68\x3e\xd0\x2a\x99\x52\x08\x88\x9c\xc0\xe0\xe7\x9f\x7f\xfe\xf9\xe0\xcd\x9b\x83\x67\xcf\xe0\xe5\xcb\xc9\x72\x69\xea\x65\x51\x64\x33\xc2\xdf\x67\x24\x46\x1d\x67\x02\x83\x59\x21\x65\x61\xeb\x05\x4b\xe8\xf7\xab\x0f\x2c\xa1\x13\x90\xbc\xa2\xa6\x34\x2d\x6e\xcf\x8b\x84\xac\xbe\xaf\xa4\x2c\xf2\x76\xd5\xd3\x8c\x12\xde\x2d\x2c\x84\x03\x44\x9f\x43\x1d\x6d\xb7\x9e\xb3\xcf\xe8\xcd\xa4\x49\x38\x50\xff\x3d\x67\x4b\xfa\x1e\xa7\x3e\x18\x22\x2d\x36\x81\xd1\x1a\x71\x0b\x8e\x12\x56\x49\x69\xce\xbe\xa0\x75\x7a\xf6\xec\x7b\xf7\xd4\x6c\x1d\x05\xf6\x00\xed\x82\xa8\x4a\x85\xd7\x99\x6e\x6e\x81\xd4\x1b\x5f\x7c\xa8\x0f\xb6\x8e\x69\x6a\x76\xa8\x7b\xfe\xe9\x1d\x8c\x86\xc0\xe0\x68\x60\x2c\x55\x6b\xe2\xc8\x55\x46\x11\x9c\x3e\x5e\x3b\xf0\x54\x23\x16\x17\xf5\xd1\xdb\x1c\xc6\x9a\xe9\x06\xd1\x22\x5b\x95\xa9\x6a\x32\x70\x44\xa8\x8f\x68\xd8\x11\x8d\x0d\x14\x92\x24\x46\x8c\xce\x64\x7e\x50\x72\xb6\x24\x7c\x15\xd4\x4a\x9b\x02\xec\xb4\xa9\x07\x3b\x88\x53\x1a\x5f\xb7\xda\x71\xb4\xc8\x3b\x4d\xab\x1c\x1b\xd3\xc4\x36\x5f\x03\xcd\x04\xdd\x88\x92\x07\xe6\xd7\x61\xd5\x19\xea\x7e\xcc\xbc\x49\xac\xad\x99\xe3\x2d\x4a\xe8\xac\xbc\x83\x63\x9c\xb1\xf8\x3a\xec\x2c\x57\x1f\xed\x95\xbe\xdc\x88\xbc\xbf\x7e\x78\xf7\xb6\x59\x8d\xf1\x18\x5e\xcd\x1d\xc3\x44\xe9\xe4\x66\x94\x11\x16\x17\x9c\x2d\x58\x4e\x32\x10\x94\x33\x2a\x00\xbd\x17\x8b\x42\xc2\xb2\x92\x44\xd2\xa4\x81\x13\x0a\x25\x40\x92\x21\x1a\x8a\xb7\x14\x72\x4a\x13\x75\x94\x
71\xaa\x34\x13\xc9\xab\x58\x02\x93\xda\x70\xf4\x20\x2b\x8c\x10\x6e\xe4\xae\x87\x71\x93\x68\x2d\x81\x93\x5c\x28\x71\xf4\x4c\x6d\xe2\xd6\x5c\x1a\xe2\x41\x97\xed\x3b\xb4\xf8\x0e\x06\x87\x03\x98\xa8\x9d\x60\xcf\xbd\x36\xb5\x6b\x40\x7a\x17\xa2\x61\x1f\xd6\x0a\x70\xc7\xa8\xb2\x76\x46\x67\x2d\x5a\x6a\x9b\xc3\x2f\x56\x61\x70\xc6\xb2\xba\xda\xfd\xad\x7a\x54\x0a\xb3\xe1\xe7\x24\x13\xb4\xa5\xa4\x9b\x43\xa7\x3e\x69\xbb\xa8\xeb\x73\x63\x86\x92\xd8\xaa\xb1\xf1\x15\xea\xe1\x97\xc1\xb0\x87\xc9\xac\xea\x11\x73\x4a\x04\x3d\x33\x9a\x93\x3b\xe8\x7d\xc0\x13\xba\x03\xf0\x84\xf6\x00\xdf\x15\x75\x9a\x27\xbb\x20\xfe\x3c\x4f\x7e\x25\xda\x5b\x00\x5b\xa4\x1d\xc0\xbd\x7a\x5a\x8f\xc4\x6f\x29\x5f\xda\x0e\x50\x75\x01\xa7\xa5\x3a\x5b\x83\x11\x7c\x56\x96\xe8\xa4\x07\x1e\x8a\xf6\x11\x2c\x0b\x75\xc8\x06\x33\x3a\x2f\x38\x0d\xd6\x1d\x8d\xce\x2a\x7a\x6a\x9f\x72\x8a\x5f\x2c\x5f\x34\x1c\xad\x0d\x53\x25\xa2\xf4\x31\xd0\xa3\x5c\x58\xcb\x44\x35\x32\x4a\x45\xdd\x63\x93\x34\x32\x87\x1e\xba\x49\xef\x61\x57\x4b\xa9\xb2\x28\xab\x8c\x48\xfa\x0a\x67\x48\x66\x19\xd5\xb3\x14\x86\x79\x6b\xe1\xe6\xe8\xa5\xee\x48\x9d\xdd\xb1\xee\xf7\x5c\x36\x1e\xc0\x8d\x23\xee\xe4\x10\x7c\x18\x91\x4f\xe4\x2e\xb4\xb2\x54\x0d\x52\x24\x13\x08\xfe\xf2\xfc\x3c\x18\x99\xc2\x8a\x67\x9e\xb7\x0b\xf6\x21\x18\x93\x92\x8d\x6f\x8e\xc6\x19\x99\xd1\x6c\x7c\x75\xa5\x28\x7b\x75\x35\xbe\x41\x67\x6a\xdd\x53\x09\xc0\xf3\x55\xa9\xd6\xf5\x93\x28\xf2\xba\x5c\x54\x71\x4c\x85\x98\x34\x08\xaa\xea\x11\x3a\x2b\x94\x42\x59\x09\xd7\x8d\xa0\x68\xa6\xea\x95\x54\x94\x95\x80\x07\xd3\x29\x04\x06\x44\xe0\x36\xb4\x34\x4c\x8b\xdb\xe7\x4a\x43\x0f\x03\xfc\x03\x4a\x06\xb1\x7c\x01\xe4\x86\xb0\x4c\x51\x08\xb4\x89\x2b\x1e\x34\x47\x5c\xb3\xb0\x4d\xc9\xba\xfe\x9f\xa2\xdc\xb2\x26\x2b\x22\xa3\xe6\xd6\x34\x9d\x17\x1c\x42\x54\x34\xd0\x67\x0b\x0c\x4e\x6c\x87\x28\xa3\xf9\x42\xa6\xc7\xc0\xf6\xf7\x7b\xb0\x75\xf7\xc2\xc5\xe1\x65\xad\xc3\x91\x24\x09\x73\x7a\x0b\xef\xf0\x3b\x34\xc0\x2e\xd8\xe5\x08\x9a\xff\x0f\x87\x2e\xb6\x7b\x1e\xe0\x79\xf5\xcb\x2f\xab\x33\x2a\xaa\x4c\xd6\x1e\x4c\xfd\x0f\x05\xc5\x04\x5d\xfa\x23\x6f\xfa\xaa\x6d\xb7\x7c\x49\xca\x09\x7c\x5e\x6f\x1c\x08\x59\x59\xf1\x22\x49\x29\x49\x42\x6f\x86\x45\xc5\x63\x3a\xb1\x18\xbb\x50\x99\xa4\x4b\x31\x81\x80\x64\x59\xe0\x8f\x26\xe3\x94\x72\x87\x37\x54\x4b\x9f\x70\xf6\xd0\xbf\xa5\x90\x92\x1b\x6a\x30\xc7\x45\x88\x2b\xae\x8c\x65\x3d\xc7\x11\x88\x6b\x56\x7a\x1d\xeb\x0d\xe8\x90\x47\x4b\x4e\xe4\x2b\xf4\x7a\xe1\x67\x7b\xc4\x2e\x55\x4d\x37\xb7\xd3\xf1\xb6\x2e\x4b\x52\xaa\xc5\x58\x6f\x6d\xc8\xed\xc2\x61\x61\x34\x67\x99\xa4\x3c\x6c\x46\x8a\x8c\x64\x0d\xc7\x30\x5e\x8c\x60\x30\x18\xd6\x7c\x31\xea\x60\x0e\x50\x72\x65\x17\x9d\x08\xc9\x8b\x7c\x71\x3a\x18\x75\x1b\x14\x42\x59\x3f\x27\x63\xdb\xa4\xd5\x62\x3d\xdc\x11\xe5\x68\x5e\xf0\xe7\x24\x4e\x1b\x51\xca\xbb\xa4\xec\xa7\xcc\x05\x8f\xac\x46\x75\x09\x53\xe0\xed\x11\xdb\x38\x38\x8c\x08\x8d\x5c\x56\xec\x02\x2c\xef\x1d\xc1\xed\xbf\x1e\xed\x79\x9c\xca\x65\x87\xeb\x44\x1b\x73\x2c\x8c\x54\xdb\x66\x7a\x64\x34\xeb\x4e\xd0\x8a\x82\xde\x69\xce\x2e\x23\x11\x17\x9c\xc2\x41\x7f\x3d\x31\xf5\xed\xf9\xdb\x09\xa2\x1d\x74\x08\xdf\x01\x89\xb4\xc9\xfb\xb4\x58\x96\x84\xd3\x70\x36\x84\x09\xb0\x16\x91\x5a\x44\x73\xa8\x24\x36\x93\x23\x65\x8b\x34\x63\x8b\xd4\xa3\x09\xf4\x6e\x45\x03\xf0\x61\x38\x38\x49\xd8\xcd\xe9\xc0\xba\xef\xdb\xb3\x52\x7d\x2f\x23\x21\xb9\x12\xc5\xfb\x8a\xd5\xb0\xf9\xd0\xc7\xa1\x0f\xed\xf1\x18\xce\x53\x26\x50\x1d\xc7\x28\x45\x8a\x61\x0d\x20\x73\x49\x39\x10\x29\x49\x9c\x2a\xa0\xe8\xef\xb6\x72\x08\xca\xac\x5a\xb0\x7c\x04\x44\x00\x93\x2e\xac\x42\xa6\x94\xdf\x32\x41\x61\xc6\x29\xb9\x16\xad\x7e\x76\xb6\x24\x63\x72\x15\xf5\x88\x3a\xcf\xe5\xe4\x20\
x8d\x5e\xa1\x49\xd7\xfe\x84\xdf\x75\x30\xad\xad\xbb\x60\x8b\x1e\xb0\xa0\xf2\x5d\x1d\xaf\xda\x7e\xf0\xb7\xe2\x5b\x8d\x39\xad\x0b\xd1\xdf\x6d\xa3\xa2\x00\x81\xe3\xd7\x36\xd2\x3a\xa8\x9d\x0c\xb6\x40\x48\x5a\xb6\x4b\xd0\x66\x09\xf6\x00\x2e\x37\x2b\xc0\xba\xcb\x30\xa2\x9e\xd4\x40\x5f\xe7\xc8\x06\x9f\x5c\x5b\x5e\xe9\x1a\x4d\x20\x3d\x52\x9f\x8e\xe3\x33\x62\xf9\x13\xce\xc9\x2a\x54\xe5\x23\x6f\x3a\x43\x38\x9d\xc2\x61\xb3\x2c\x18\x96\x31\x50\x50\x73\x31\x47\x35\x9c\xba\xad\xc0\xd2\x09\xd5\xc7\x4b\x67\x64\xec\x53\xaf\x93\xe7\x1d\xad\x3b\xd9\x18\x54\x4b\xe9\x73\x5b\x68\x5f\x6f\xdb\xfd\xab\xb5\x53\xdc\x5a\x75\xfc\x7f\x9b\x2a\x48\xb8\xa0\xcf\x2a\x4e\x70\xb3\x3a\x5c\x80\xab\x77\x4e\xef\x64\xc3\x0e\x58\x74\xf6\x1c\xa6\xa0\x94\x8c\x33\xba\x78\x7e\x57\x86\xc1\x3f\xc3\x8b\xc3\x83\x3f\x5f\xee\x0f\xc3\x8b\xd5\x6d\x92\x2e\xc5\xe5\xfe\xf0\xa1\xe6\x45\x54\x81\xf0\x6c\x56\x6c\x51\x43\x8c\xb0\x2c\x34\xe0\x6a\xaf\xd6\x03\xd3\x54\xc7\x63\x50\xad\x42\xda\xa8\x3a\x53\x65\x89\xfd\x60\x0a\xdf\xb4\x5c\x3f\xdf\x1e\x5a\xbf\x95\x1a\x15\xc9\x0c\x53\xc0\xe9\xbd\xca\xa5\x05\x70\x71\x74\x59\x63\x56\xe5\x4c\x1d\x96\xb6\xe6\xf1\xa5\x43\x3e\xdd\xff\xeb\x6e\xc8\xdb\x49\x48\xb8\x50\x00\x2e\xb7\x52\xd8\xb3\x1a\x77\xde\x67\x48\x9c\x0f\x34\x2e\xf2\xa4\xf6\xdd\x7a\x6b\x15\xb6\x02\x4d\x8e\xc3\xba\x4f\xb1\xbc\x27\x8f\xa1\x4f\xd9\x54\x34\xf7\x50\x38\xe9\x43\xe1\x1e\xa0\xa8\x68\xfa\xae\xa6\x16\xae\x5b\x3a\x1f\x3b\x1b\x6e\x83\xf5\x03\xf7\xf8\x07\x1a\x4d\xdc\xd5\xd0\xd7\xbb\x58\x47\x9e\x25\xfe\xef\x5f\xb0\xed\x2b\x05\x07\x70\xa4\x56\xf5\x54\xaf\xee\xc1\xc1\xc6\x55\x3b\xfd\xff\xb3\x6a\x0b\x2a\x9f\xd7\x51\x82\xed\x4b\x86\x02\xc7\x8b\x2d\x7c\xf9\x02\x5e\x81\x8f\x35\xb7\x41\xab\x25\x86\xd5\xac\xac\x71\xfd\xce\xbb\xb8\xdc\x77\x3b\x93\xf9\x87\x5f\x37\x19\x55\x94\xe8\xc6\xda\xab\x56\x77\x77\x22\x4d\xa2\x29\x54\x6d\x87\x8e\xb4\x4b\x30\xa5\x6d\x0b\x62\xa2\x17\x27\x04\x75\x6f\xea\xd0\x2e\x64\x31\x08\xed\x28\x49\x9f\xe7\x3d\x31\x80\x0d\x64\xc9\xe9\xad\x41\xd9\x2c\x9d\x25\x90\x4b\x64\xb3\x0d\x4d\x5b\x34\xa3\x77\xde\xbf\x30\x86\xc7\x23\x18\x08\xbd\xe3\x06\xbd\xf4\x36\x80\x9d\x3a\x9f\xf5\x77\x14\x48\xff\xd3\xf3\x16\xd5\x4c\x72\x12\xcb\xff\x55\x93\x77\x5a\xef\x9e\xae\x16\x67\x94\x70\xad\x36\x0f\x5b\xbb\xbd\x23\x8f\x1a\x49\xb3\xde\x6b\xbb\x90\x95\xf6\x1d\xf6\x04\x2f\x23\xba\x2c\xe5\x2a\x1c\x3a\x01\x25\xc2\xa5\xe2\x6b\xa3\x1c\x69\xea\x2a\x7a\xab\xc2\x70\xf8\xdf\x71\x4a\x98\x34\x9a\x22\xab\x8c\xae\xb6\x59\x33\xb6\xe9\x1d\x56\xb9\xbe\x0c\x86\x26\x1c\xf6\xe5\x0b\xbc\x21\x32\x8d\x96\xe4\x2e\xc4\xff\xcc\xb3\xa2\xe0\xfe\xa9\x31\x86\xc7\x7f\x3c\x1c\x8e\xe0\xa8\x1e\xb6\x89\xbf\x76\xe4\x0b\x8c\x6d\xf6\xab\x23\xf5\x11\xa9\x9f\x52\xee\xf9\x29\x6d\x61\x44\x66\xca\x18\x1e\xba\xfa\x5a\xc5\x33\x3b\x96\xf1\xd2\xd9\xcf\x92\x70\xb2\x6c\xf2\xe9\x02\x84\x12\x4c\xda\xca\xb1\x0d\x22\x6d\x4c\x06\xac\xb5\x73\x0d\x30\xc2\x15\x53\x8a\xb9\x99\xda\x81\xb7\x36\xc7\x6e\x53\x1d\x0e\x37\x0d\x8f\x7d\x20\xb4\x54\x9a\x6d\xbd\x2a\xba\xb6\xe2\x99\x3a\xc8\xfb\xdd\x9f\x3a\xed\x0c\x07\x0b\x8c\xc3\x5a\xcf\xd8\x65\xef\x1e\xdf\xa6\x9b\xbc\x81\x9b\xe4\x8c\x8a\xb2\xc8\x05\xed\x36\x3e\xd6\xb4\xf0\xe2\x7d\x06\x63\xa9\x79\xb4\xe1\x57\xbb\x7c\xbb\xe1\xfd\x9b\x31\x7e\xaa\x03\x42\xdb\x71\xf6\x4d\xbe\x9f\x52\x65\x08\x6d\xf0\x38\xb7\xf8\x5f\x27\xac\xe8\xca\x60\xe8\x79\xa2\x2b\x9e\x6d\xf3\x2f\xab\xf2\x89\xa1\xd2\xbf\xdb\xe7\x8c\xbd\xd0\x15\xb0\xa3\x6f\xd9\x40\x0d\x6b\xaf\xb2\x4f\xca\x6d\x5e\x86\xbb\x94\x8f\x14\xd3\x96\x6d\xf4\x55\x99\x32\xae\x02\xdc\xa2\x2d\xa4\x51\x10\x70\xcf\xc3\xa6\xfa\xdc\xa5\x3c\xe2\x66\x59\x31\xa6\xf9\xa0\x2f\xf5\xd6\xfe\xa3\x5c\x2d\x68\xbb\x8f\x9e\xbc\xe7\x5a\xf2\x63\xd5\xed\xce\x9a\xc4\xca
\x98\xf4\x3a\x6d\x75\xeb\xd3\x3b\x1a\x57\x98\xa1\x6a\x1c\xda\x01\xec\x2b\xb0\xc3\x2e\x95\x6b\xea\xc5\xc5\xb2\xcc\xa8\xa4\x3b\x13\x70\xba\x81\x80\xf7\xc7\x0a\x92\xc6\x08\xef\x3b\x41\xe0\xa0\xd9\xb4\xc7\x2d\x0f\x53\x7d\x2c\xa5\x72\x99\x85\xc1\xeb\x82\x24\xa0\x36\xba\x9e\x5e\x0d\x78\x1f\x82\xa5\x80\x93\x19\x87\xf1\x29\x9c\xd5\x32\x4b\xb7\x72\x4e\x96\x7d\x08\x84\x1b\xe5\xe8\x8d\xaa\x36\x53\xd8\xc1\xd1\x54\xaf\x83\x2b\x31\x96\x62\xb1\x45\x73\x54\x3d\x22\xc5\xd8\xd8\xb6\x55\x6e\xcf\xe6\x2d\x43\x37\xaa\xc0\x6f\x1d\x7b\x30\x68\x0f\x6d\x69\xb0\x65\x68\x2f\x9d\x65\x07\xe5\xc5\x3d\xbe\xd4\xf2\x15\x95\x7c\xf5\xcc\xb2\xd0\x2d\xcb\x93\xe2\x56\x4f\xe7\x5c\x57\xb6\x5b\xd6\xf2\x93\xb5\x92\x2e\xfb\x34\x8c\x56\x4e\x4e\xa3\x66\xa0\xae\x64\x21\xf8\xbe\x98\x3a\x7d\xd1\x0e\x09\x53\x8b\x97\xd0\x7c\xaa\xb0\xea\x8f\x87\xf6\x58\x7b\xbd\x39\x3f\x6a\x0e\xa3\x66\x06\x5f\x9b\x4b\x36\xdb\xa9\xad\x33\xdc\x5f\x93\x19\xcd\xbc\x83\x09\xc3\x8d\xa2\x21\x39\x7e\x7f\x40\x97\xb2\x30\x17\x52\x1c\x0b\x1c\x6b\x81\xe5\xe0\x76\xd3\x44\xd1\x55\x4a\x3a\xda\xd8\xa5\xb3\xbf\x5d\xa8\x51\x59\x89\x34\x0c\x6c\xe4\x44\xed\x2e\xdd\x77\x1f\x82\x3a\x58\x62\x44\x8f\x88\x49\x49\x5f\x9e\xbf\x79\x6d\xf0\xbc\xc0\x3f\x75\x90\x6e\xed\xdb\x99\x99\x9d\x5d\x70\x92\xb0\x1b\x88\x33\x22\xc4\xf4\x63\xa0\x8b\x3f\x06\xcd\x50\x16\x93\x4f\x05\xcb\xc3\xe0\x64\xc6\x4f\x83\xa1\x1e\x3e\x61\x37\xa7\xc1\x56\x62\x6a\x9f\xf2\x79\x71\x2e\xde\x6a\xcf\xe9\x46\x72\x4a\xdb\xc2\xd4\x44\x96\x38\x4a\xd5\x1c\x0c\x70\xd4\xcf\xc1\xf1\x7d\xc4\xdf\x4a\xfd\xed\xe4\xef\xa1\x7f\x4d\xf2\xe9\xc7\xa0\xa6\x8b\xa5\xaf\x2a\xff\x18\xd4\x42\x0e\x25\xb1\xfa\x31\xb3\xd9\x9f\xf6\x91\x71\xa4\x69\xb8\x0e\x1c\xd3\x59\x77\xd8\xcd\xcd\xfa\xa3\x71\x4a\xd6\xb4\x44\x2f\x63\x43\x4a\xbd\x63\xb1\xe9\x8b\xac\x20\xd2\xd4\xdb\x4d\xc9\xc4\x5b\xf2\x56\x95\x0d\x9d\x3b\x05\xc1\xfe\xab\x7c\x1e\x8c\x20\x38\x30\x7f\xf1\x1b\x6e\x59\x96\xc1\x8c\x6a\x60\x89\xda\x4e\x05\xbc\x25\x6f\x61\xb6\x72\xe1\x0f\x23\x38\x4f\xa9\x05\x15\x93\x7c\x20\x55\x27\x4c\x83\xa0\xc9\x08\x44\x81\x79\x88\x20\x53\xba\x04\x22\x60\x41\x4a\x01\x61\x5e\x65\xd9\x30\x72\xbd\x22\xf6\xa2\xd7\xda\x73\xa0\x6e\x25\x8a\x97\xdf\xd4\xd6\x25\xef\xb5\x6e\x4b\x92\x51\x29\xad\xb1\x75\x66\xee\x9d\x45\x4f\x8b\xac\xe0\xd1\x7b\x5d\xd9\x58\x7e\xa8\x25\xe9\x48\xa3\xd2\xfc\x90\x87\x96\x44\x72\x76\x17\xf8\x22\xaa\xd1\x16\x4c\x0c\x9c\x09\xc8\x0b\x09\xc5\x1c\x74\x7b\x0c\xf9\x3c\x80\xf7\x99\x32\xc7\x81\xe2\x7d\x0e\x02\x71\xc1\x39\x8d\x25\x66\x2f\x53\x21\x58\x91\x47\x81\x9f\xf7\xa1\xf9\x7c\xdd\xb8\x6a\x88\x4d\x09\xe0\x75\xb0\xab\x91\x9b\x52\xb4\x43\x17\xc7\xf5\x97\xe6\xe2\x26\x76\x21\x85\xd9\xab\x68\xe4\xe0\xd2\xd4\x9b\xc2\x04\x3d\x02\x11\x93\x8c\xf0\xe0\xd8\x15\x55\xc2\x09\x29\xb7\x74\x2d\x1b\x2b\x69\x44\x13\x52\xc7\x17\x09\xcd\xc0\x4d\x3e\x41\x0d\xb8\xae\x73\x93\xd4\x0c\x29\xdc\x51\x26\xf8\x3b\xf2\xba\x4f\xcc\x5f\x5f\x2f\x97\x42\x47\x4e\x84\x4f\x29\x67\x03\xb9\x2a\x96\xa7\x75\xdd\x4d\xb4\x37\xff\xe2\xf0\xd2\x0d\x61\xaf\x26\xce\xd9\x88\x3b\x53\x43\xbb\x38\xba\x6c\xc2\x8b\x75\xcc\x7d\x3d\x6c\xb4\xc1\x4c\xe9\xd2\x86\x03\x23\xfc\x0c\x75\x8f\x75\x93\x88\x56\xb3\xe4\x07\xcc\x1f\x8c\x7e\xa1\xbc\x78\xc1\xb2\x2c\x54\xd3\x69\xb9\xde\xc8\x8e\x8a\x44\xe7\x4a\xe9\xbd\xce\xcd\x3a\x9b\xd0\x3a\x8d\xad\x11\xec\x9f\xe7\x98\x8f\x8f\x17\x3e\x49\xbe\x02\xc9\x49\x4c\x85\xe2\x77\x92\x03\xbd\x63\xfa\x32\x17\xca\x83\xc8\xcf\x0f\x6f\x3c\x20\xce\x70\x4d\x72\x79\x9c\xb2\x2c\xe1\x34\x0f\x87\x3d\xf1\xb1\xa6\x6d\x2b\x4b\x0a\x2b\x30\x5d\xdd\xab\x58\xb7\xf3\xde\x4d\xdc\xd8\x9c\x7f\x81\x4e\x78\x3f\xb5\xc1\xe1\xe3\x76\xe2\x7b\xab\xb9\xc9\x78\xef\xb6\x6f\xd0\xef\x5c\x81\xdb\xd6\x08\x87\x6a\xdc\x41\x34\x4f\x8c\x3
3\x68\xa3\xbf\x44\x51\xfe\x69\x91\xdf\x50\x2e\x41\x16\xf0\xc3\xdb\x57\x3f\xa1\xce\x2e\x24\x59\x96\xf6\x0a\x9c\x63\x13\xec\xee\x93\xfb\xf2\x05\xbe\xf9\xd6\x8c\x70\x94\xda\xdb\x98\x51\x8f\xa7\xca\xa2\x79\x50\x0f\x54\x4f\x13\x39\xa7\x93\x96\x21\x9c\x93\xe7\x3d\x49\x30\x10\x6d\x32\x64\x6f\x99\x4c\x81\xe5\x37\x4c\xb0\x59\x46\x21\x50\xa2\x28\xd0\x3b\x4f\x00\xd1\x57\xdc\xe2\x22\x9f\xb3\x45\xc5\x69\x02\x77\x07\x6a\x11\x60\x56\x54\x79\x42\x10\x00\xcd\x45\xc5\xa9\xb0\xe0\x65\x4a\xa4\xe6\x3c\x01\x84\x53\x48\x98\x28\x33\xb2\x32\x97\xe6\x80\xc0\x9c\xdd\x35\x70\x90\x0a\xde\xcd\x91\x9c\x94\x25\x06\xf8\x0b\x1c\xba\x0e\x97\xd7\xf0\xd5\xc4\x6d\x37\x6c\xd2\xe4\xe2\x22\x43\x23\x09\x2e\x0e\x2f\xa3\x3b\x38\x6d\xa8\xe6\x44\x47\x34\x8d\xaa\x1c\x6f\xe4\x85\x9f\xef\x26\x4d\xab\x11\x98\x5c\xa9\xb5\x97\xa7\xeb\xc0\x15\xde\xde\x3c\x80\x23\x35\xce\x89\x5d\x91\xce\x28\xa8\xd1\xa8\x21\x4c\x83\xde\x01\x9a\x2b\x36\x6f\x8b\x5b\x88\x39\x25\x52\x5f\xe8\x53\x87\xa4\xbf\x89\x3b\x57\xb5\xdd\x63\x54\xa7\xfe\x6a\x0c\x4c\xdc\x7a\xe2\x30\x7f\x2d\x48\xf5\x55\xbc\x49\xe3\x50\x74\x36\x36\x1a\x8b\xfa\x66\x5e\x38\x1c\x29\x96\x37\x12\xf4\x96\x25\x32\xbd\xa7\xcf\x3f\x54\x3d\x9a\xbb\x7f\x3a\x1c\xc1\xe3\xba\x9f\x56\xef\x29\x9f\xf4\x64\x7a\x7f\x67\xd2\x06\x02\x98\x40\x90\xb1\x9c\x5a\xf7\x0f\x9a\x11\x65\x91\x11\x63\xe7\xaa\x3a\xc2\x8d\xcf\x47\x33\xee\xa4\xe1\x77\x5d\xbc\x64\xaa\x25\xa9\x64\x11\x8c\xfc\x54\xd1\x3b\x23\x4f\xba\xc4\x8a\x50\x66\xa1\xa1\xfe\x59\x53\x7a\xd2\x47\x67\x07\xd6\x6a\x0b\xac\x9f\x0d\xfd\x37\x02\xd3\xc8\x16\x9c\xd1\x5c\xd6\xd3\xa3\x73\x9b\x47\x21\x59\x7c\xfd\xc2\xdc\x64\xa9\xe1\xbf\x60\x77\x52\xed\xb1\xe8\x6d\xb5\x9c\x51\x1e\xe9\xab\x2e\x7f\x7b\xf3\xfd\xf9\xa8\x67\xb1\x11\x45\xb3\xd8\x6e\xbe\xaa\x87\x86\xbd\x58\xdc\xcc\x2c\x2d\x6e\x28\x7f\x46\x25\x61\x59\xff\xfc\x5e\x36\x0d\x76\x9b\xa4\x46\xd3\x4f\xb5\xd2\x8b\x37\x82\xbb\x11\x38\x69\x77\x4e\x20\x6c\x70\x22\x4a\x92\x5b\x99\xaf\x0a\x03\xcc\x33\xaa\x7d\x2b\x77\xf0\x35\x4a\xe2\x61\x24\x8b\x1f\xce\x9f\x6a\x55\x3f\x1c\xea\x34\x23\xd5\xf7\x74\x70\xec\x80\x15\xb7\x44\xc6\x69\x17\x30\xce\xe3\x4a\xd7\x06\x3a\xab\x7e\x1a\xcc\x48\x7c\xbd\xe0\x4a\xb6\x1d\x18\x7d\x41\xa7\x38\xa1\x2e\x80\x25\x6a\x18\x75\x04\x75\x07\x8a\x8b\x5c\xd2\x1c\x2f\x8e\xea\x21\xf7\xc1\xcc\x36\xea\xb3\xb0\x50\xc2\x6a\x33\x6b\x02\xae\xc9\xb9\x32\x33\x31\xb9\x79\xc7\x5e\x60\x55\x53\x49\x35\x98\x71\x24\x8b\x1d\xd5\x29\x32\x7e\x82\xc6\xaa\xf6\xd1\xe8\x0a\x1e\xd4\x4f\xed\xed\xb1\x9e\x85\x7f\x8d\x75\xbd\x82\x45\x77\xab\x25\xcb\xbd\x0c\xe1\x8c\xe6\xa4\x9c\xf5\x0f\xf9\x3d\x4d\xc9\x0d\x2b\xb8\xd5\xc3\x5e\xda\x0e\x21\xec\xc4\x7a\x1a\xaf\x89\xf9\xeb\x0f\x2e\x52\x9a\xdd\xa8\x23\x66\xa7\x91\xcf\xf1\xe2\xe8\x6e\x0c\xbf\x69\x54\xd7\x9f\x5e\x5f\xdf\xdc\xea\x16\x11\xec\x97\xdf\xa2\x3b\xfa\xa2\xeb\x41\xcb\xba\xe8\x91\x04\xf5\xe9\x5e\x3b\xea\x7f\xab\xac\xf7\x32\xf9\x37\x89\x9b\x1d\x72\x02\x7a\x82\x25\x5b\x42\x16\xfd\x34\x51\x4a\xb2\xc1\xc2\x5c\x00\x12\x50\x12\xbc\xc1\xef\xde\x0f\x9a\x17\xdc\xde\x22\x32\x9a\x0b\x9a\xd0\xce\xa5\x20\x41\x6e\xe8\x9e\x51\x6f\x9c\xab\x40\x4f\xfe\xfa\xe4\x27\xb0\xfe\x70\xa5\x8e\x14\x3c\xa1\x5c\xdf\x22\x3a\xa8\xad\x64\x60\x52\x1b\xf2\xce\x98\x1a\xd8\x6d\x4a\xb5\x0a\x53\x09\xca\x95\xa6\xa4\x14\x1d\x9d\xa3\x88\xf8\xb8\xf7\x67\xeb\x1b\x44\xc6\x02\xf5\x34\xbe\xfe\x9b\x47\x68\x8e\x6f\xb5\x2b\x7a\xed\xe8\xb7\x05\xa2\x59\x16\x2c\x97\x02\xe6\x4a\x22\xb6\x6c\xe3\xae\x82\x7f\x4e\x66\xfe\xc5\x31\xf7\x46\x90\xe3\x33\xac\x6f\x28\xed\xc4\x05\xad\x00\x54\x2b\x87\x81\xec\xc4\x07\x3a\xb6\xdc\x5c\x6d\xba\x1f\x4b\x97\xd2\xda\x43\x62\x5d\x66\xdf\x17\xc9\xca\x92\xda\x01\xe7\xdf\x68\xbf\xc2\x8b\x19\x20\x67\x45\x62\xae\xe0\x61\x3f\x
2f\xf4\x2c\x6e\x99\x8c\x53\x9c\x80\xe3\xe0\xd0\xf8\xc7\x44\x50\x08\x6e\x68\x2c\x0b\x1e\x4c\x6a\xfd\xd3\x69\xdb\xbb\x82\x76\x18\x63\xdd\x04\x27\x92\x9f\x9e\xc8\x44\xd9\xbd\xea\xac\x9a\x0e\x1e\x0f\x4e\x4f\xd8\x69\xae\x17\xf6\x64\xcc\x4e\x4f\xc6\x32\x51\x3f\xfc\xb4\x09\x1a\xb4\xd3\x77\xfa\x93\xd2\xba\xb8\xb4\x6e\x3c\xe0\x1a\xc0\xd4\x6d\x78\xc1\x2e\xdd\xd3\xb2\x76\x3f\xf6\xf9\x28\x6a\x17\xc5\xf1\x7d\x53\x3b\x6d\x39\x62\x35\x48\xe3\x2e\x55\x53\x33\x4d\x8c\x0b\xe2\xe2\xe8\xb2\xa9\x72\x67\xad\xe7\x89\xf9\xc1\xc7\x35\xfd\x8d\x9f\xe9\xff\x30\xfd\x6f\x7e\x3b\xfd\x6f\xda\xf4\xaf\x53\x33\xcf\xe9\x9d\xd2\x70\x82\xda\x29\x55\xa3\xf7\x49\xa3\xf7\x09\x4e\xe0\xc6\xfa\x7c\x2c\x6e\x9f\xfc\xdb\x30\x0d\xa4\xfd\x69\xdd\xf8\xe2\xd3\xa5\x59\x21\xf8\x4f\xb5\x6a\x6e\xf9\xa1\x5e\xb9\x19\x1f\x9f\x06\xed\x84\xb3\xdf\xc5\x1a\x0e\x26\x3b\x73\x86\xf1\xca\x69\xce\xe8\x1f\x5d\x37\xf1\x46\x72\x57\x62\x13\x23\xb6\x07\x42\xcd\xf6\xfe\x81\xb0\x89\x37\x90\x33\x6b\x7f\xcc\xe1\x96\x41\x8d\xbf\x61\xd2\x7b\x1e\xfc\x90\x8b\xaa\x2c\x0b\x2e\x69\x62\x72\x6c\xd1\xa3\xda\x01\xb2\xf5\x68\xe7\x1b\x5e\x29\xeb\xbb\xaf\xd6\x7e\xca\xc8\x73\x2e\x39\x3a\xd5\x59\x7f\xb1\xaf\x6a\xd5\x17\x1b\x5c\xff\x28\x92\xaf\x41\x80\xe6\x92\xc9\xd5\x1b\x7d\x6f\x07\x27\x16\x3c\x0a\x26\x10\x3c\x22\xcb\xf2\xd8\x26\xba\x9f\x60\x49\x26\xeb\x82\x53\x2c\x58\xd4\x05\x83\x60\x30\x81\xc1\xa3\x7f\x55\x85\x3c\x36\xb7\x6f\x82\x41\xa0\x8a\xbe\xfa\xe6\xcf\x75\xc9\x58\x97\xdc\x3d\x7e\x71\x3c\xa8\xef\xb8\x1b\x25\xdf\xd8\x34\x06\xbd\xe6\xfa\xcf\xc5\xa3\x93\xd3\x60\xf0\x71\x7c\x39\x5e\x8c\x9c\x9b\x1a\xa2\x95\xec\x58\x4f\xe3\x42\x5c\x5a\xd7\xe6\xda\x5b\x95\xf7\xa4\x2f\x43\xb6\x79\xa3\xce\xc6\x78\x5a\x8b\xa9\xba\xb5\x1e\x24\xeb\x5f\x49\x04\xd2\x5c\x51\x40\xc0\xe8\x03\xfb\xe1\xec\x75\xe3\x7b\x74\x5b\xf5\xca\x54\xaf\x81\x76\xa5\xac\x9b\x68\xa9\x57\x6b\x5d\x5e\x38\x14\x49\x12\xad\x95\x83\x79\xed\x0e\xb9\x29\xf8\x8a\x24\xc9\x95\x79\x65\xc3\xdc\x01\xf5\x9a\xeb\x67\x49\x54\xd1\x08\x3e\xaf\x87\x5d\x0d\xa5\x35\x7f\x3b\xa3\x2e\x0d\xd4\xec\x4c\x80\x35\x2b\x62\x34\xf3\x23\x41\x09\xd7\x6f\x42\x05\x41\x6b\xc1\x6c\x98\xc1\x50\x0f\x53\x1c\xde\xdb\xfc\xa9\x7e\x38\x91\xa8\x66\x9a\x3f\xc2\xa3\x61\x24\xca\x8c\xc9\x70\xf0\x68\x50\x67\x7e\x35\x30\x5e\xd2\xac\xac\xcd\xac\xf6\x64\xfe\xde\x6a\x16\xba\x3e\xee\x36\x0c\x3d\xe1\xa6\x8b\x08\x1d\x4c\xb7\x52\xcb\x52\xd9\xa5\x96\x7d\xc7\xcc\x67\x9c\x2e\xae\x5a\x65\x44\x92\x3d\xac\xdf\x10\x73\x1e\x02\x32\x4e\x15\xf3\xc2\x9a\x16\x98\x6a\x65\xb5\xc2\xf9\xc3\xd9\xeb\x66\x69\x87\x4e\xb5\x96\x27\xad\xb5\x1f\xee\x01\x0c\x9b\xc7\x0e\xf5\x7e\xd0\xdc\xd7\xb8\x94\x1f\x9a\xe5\x1d\x1a\x3b\xad\x1b\x3c\xb7\x7e\xf2\xda\x8a\x6b\xee\xe4\x2b\x3a\x8d\xc7\xf0\xf6\xdd\xf9\xf3\x49\xeb\xb6\xd3\x8c\xc2\x35\x2d\x25\xde\x69\x5b\xe5\xb1\xf6\x99\x8e\x2b\xc9\xb2\xb1\x90\xdc\xfe\x8d\x8b\xfc\x26\x5a\x14\x13\x84\xfb\x9a\xe5\xd7\x2f\x0a\xfe\xbc\x0e\x62\xdd\xb3\x06\x35\x3d\xfa\xb7\x2d\x2e\xa7\x16\x3e\x76\xd7\x9a\xe9\x7b\xd1\x9b\x85\xde\x5b\x78\x6b\xc7\x8d\x78\xb5\x76\xbd\xa6\x40\x73\x57\xc9\x46\x0b\x7e\x37\x7b\x3a\x20\xde\xcd\x3e\xd1\x58\x09\xa1\x0e\xaf\x2e\x68\x4e\x39\x91\x9a\x5d\x75\x33\x4f\xe0\x58\xfc\xbd\x78\xdf\xc3\x08\x13\xc9\x42\x07\xb6\xcd\x6c\xd0\xcf\x91\xe9\x80\xf2\x23\xf3\xc6\x4d\xca\x84\x2c\xf8\x0a\x99\x43\x99\x20\x34\xfc\xbc\x1e\x41\x10\x8c\x40\xc7\x36\xbe\x53\x07\xb2\x43\xd4\xad\x7b\xc4\x61\x48\x77\x85\x34\xdf\xf5\xc8\x68\x77\x89\xcc\xb5\xd1\xa6\xd3\x10\x3e\x9b\x69\x2d\xd0\x0d\x80\xed\x7a\x92\x7e\x7a\x29\xdd\x62\x90\x5d\xba\xb4\x25\xe3\xdf\x3d\x31\x56\x43\x73\x65\x46\xcd\x79\x68\x38\xd3\xc4\xef\x82\xb3\xd3\xd3\x7a\x95\xdf\x90\x8c\x25\x3d\x62\x47\xdf\xd0\x74\xc5\x96\xee\x46\x65\x6c\x97\
xfa\x05\x2f\x96\xef\xf4\x00\x06\x40\x77\xb8\x11\x1c\xee\x48\x99\xa8\x19\x5d\x3b\x6a\x61\x0a\xe3\x7f\x2e\x3e\x26\xfb\x1f\xa3\x68\x7f\x1a\xed\x3f\x1c\xff\x3a\x62\xf5\xcc\xd0\xa5\x17\x72\xe4\x79\x55\x66\xd4\xd0\xcb\x4c\xd3\x29\xef\xac\x7d\x53\xd7\x3a\x69\x7e\xf5\xe4\x22\x49\x85\x74\xe1\x1d\xf7\x67\x8e\x6d\x9d\xe4\x7d\xeb\xb1\x81\x3d\x46\x9a\x65\x5f\x35\x72\x46\x9d\xab\x4e\x83\x46\x69\x68\x74\x86\xfe\x23\xb5\xc4\x97\x3c\xdf\xcd\x95\xb4\x45\x78\xde\x55\x6e\x84\xa6\x1f\xfb\x0c\x9d\x21\xeb\x9c\x7c\xf4\xba\xbf\x9b\xeb\x41\x5f\x14\x5c\x41\xb1\x9b\xd4\x45\x67\xe7\x65\x68\x2a\x74\x3e\xb3\xf8\x07\x93\x69\xd8\x41\xd2\x10\xdb\xda\x51\x36\xd1\xec\x3e\x7c\xb6\x53\x62\xdb\x24\x94\x2e\x11\xd3\xf0\x70\x74\xcf\xbc\xb5\xf8\xeb\x05\xd5\x2d\xf4\x0f\x8f\x9d\x68\x52\xeb\x36\x1d\x92\x18\x5a\xb8\xef\xdb\xf8\xd7\x5b\x1b\x5d\xd3\xd9\xdd\xef\xe6\xef\x72\x73\x0a\x77\xf1\xab\xd7\x59\x03\x79\x12\xc7\xd5\xb2\xca\x88\xc4\xcc\xc3\x1d\x84\xc9\x06\x8e\x85\x7d\x93\x88\xdf\x01\x5b\xc7\x1e\x9b\x47\x60\xdb\x17\x40\x9d\xd6\xbf\x7a\xab\x6d\x9e\xfc\x76\x31\xec\xdd\x12\x06\x9f\xb9\xdb\x41\x65\xe9\x2e\x62\xd3\x5b\x59\xda\x4f\xf2\xc4\x26\x4d\x49\xbd\xa2\x5a\x41\x9d\x0e\x9c\x03\xbc\x69\x5e\xbf\x7b\xed\xf6\xbd\x38\xd4\xf7\x88\xdd\xc6\x16\x68\x42\xe3\x22\xa1\x3f\x9c\xbd\x7a\x5a\x2c\xcb\x22\xa7\xb9\xa5\xa5\x07\xe0\xe8\xb2\x31\x9d\x3e\xee\x2b\x9b\x29\x80\x60\x38\x34\x50\xd5\x4e\x72\x51\x98\x42\x20\xc9\xcc\xc9\x4d\xf3\x87\xac\x6f\xa4\x3a\xc5\xfa\x85\x1a\x49\x66\xc0\x04\xc6\x2c\x17\x94\x1b\xc7\x81\xab\x90\x5e\x34\xc3\x5c\xd6\x53\xfd\xd1\x5e\x28\x5e\xf7\x2c\x7f\xf7\xfe\xef\xb6\x45\x6f\xcb\x31\x77\xa9\x1d\x45\xcd\x8c\x12\x2c\x94\x66\xc2\x0c\x9b\x06\x51\x37\xb1\x70\xdb\x78\x3d\xea\x55\x47\x63\x69\x69\x5a\x35\x97\x95\x16\xc3\x7e\x09\xcc\x3c\xe1\xeb\xab\x79\x9a\x2d\xf5\x67\x74\x4d\x57\xc2\x1b\x69\xd8\x65\xd2\xeb\xe6\xc5\x5d\x07\xd2\x85\x41\x61\x1f\xae\xe9\xea\xd2\xea\xaa\x06\xca\x85\x2a\x6b\x52\x82\x5c\x63\x48\xf7\x6e\x39\x14\x94\x19\x6c\x94\x68\x7d\xe1\xe1\x03\x95\x55\x69\x82\x29\x31\x89\x53\x3a\xd1\x0f\x08\x35\x8b\xed\x5d\x8c\xe8\x7d\x73\x47\x48\x22\x59\x3c\xfe\x24\xc6\xda\xd8\xa9\x1f\xac\x4e\xed\x23\xd6\xdf\xdd\x4c\xd5\x22\x7a\x2f\x4f\x9b\x00\x79\xe7\xfa\x03\x66\x2f\xc1\x67\xfb\xea\x83\xf7\x9a\xb4\x71\x13\x5a\xbf\x5a\xfd\xf2\x34\x32\x7c\x93\xf7\x64\xb7\x0c\x13\xcf\x68\xc9\x69\x4c\x24\xd5\xf6\x1c\x9a\xf4\x7e\x2e\x57\xc2\x38\x8d\xe5\x79\xf1\x86\x2d\x14\x8f\x24\xb5\xd5\x0f\x7d\x37\x04\xf0\x21\x7f\xed\x90\xe8\xb1\x01\x42\x27\xa1\x1d\x99\x52\x93\xdb\x77\x03\xae\x1b\x2f\x07\x9a\x56\xe7\x29\x15\x14\xe4\x6d\x61\xee\x9c\x88\x7e\xbc\xf1\xed\xb7\x5e\x74\x87\x0a\x0a\xe1\x14\x48\x92\xd0\x04\x8a\x3c\x5b\xa1\xab\x73\x46\xe2\xeb\x5b\xc2\x13\xbc\x5c\x40\x24\x9b\xb1\x8c\xc9\x95\xb2\xdc\x8a\x2c\xd1\x3c\x62\xc2\xde\x91\xc3\x20\xbd\x24\xdb\xe8\x28\x48\x89\x48\xef\xd1\x6c\x9a\xb7\xa8\xec\xe1\xa7\xa5\x61\xf2\x82\x93\xc5\x52\x47\xa0\x7b\xe4\x63\xdf\x28\x3a\x3a\xc1\x57\xf5\x62\xe0\x9d\x0e\xb3\xf0\x3e\x50\x73\x26\x87\x47\x43\x2d\xf4\x12\x5e\x94\x18\xa8\x52\x70\xe0\x2b\xbc\xb5\x14\x63\xd8\x3b\x74\xb2\x60\xba\x28\x37\x5a\x3a\x57\xe2\x6f\xed\xec\xa3\x0d\x7c\x53\x8b\x8d\xdf\x37\xcd\x1e\x03\xf5\xf7\xcc\xb6\x5f\x34\xb5\xbd\x52\x9e\xe6\x53\xf8\xe2\xb0\x39\x37\x6b\x79\xd8\x23\x96\x55\x1b\x57\xdc\x15\xbb\x48\xba\xfb\x65\x5d\xd1\x12\x73\xe0\xbd\x97\x5d\x4f\x0c\xef\x6f\xf5\x9b\xc3\x2d\x22\x2b\xcc\xc7\x2d\x83\x17\x97\xf6\x61\xa8\x36\xeb\xf0\x78\xef\xbf\x02\x00\x00\xff\xff\x2b\x68\xa7\x71\xac\x63\x00\x00") +var _webUiStaticJsGraphJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x3d\xfd\x77\xdb\x36\x92\xbf\xfb\xaf\x98\xb0\x79\x11\x55\xcb\x94\x9d\xee\xf6\x76\x65\xcb\xbd\x34\x1f\x9b\xec\xe6\x6b\x1d\xb7\xdb\x3e\xc7\xeb\x07\x91\x90\x88\x98\x22\xb9\x00\x68\x5b\x4d\xf4\xbf\xdf\xc3\x00\x20\x01\x92\xb2\xd4\xf6\x6e\xdf\xdd\xbb\xfe\x20\x87\xf8\x18\x0c\x06\x83\xc1\x7c\x01\xbd\x21\x1c\xde\xf3\x62\x49\x65\x4a\x2b\x01\x53\xf7\xe3\xcb\x17\xf8\xbc\x3e\xde\x53\x4d\x16\x9c\x94\xe9\x39\x5d\x96\x19\x91\xf4\x78\x0f\xcb\x3e\x3c\x7f\xfa\xee\xed\x33\x98\xc2\xd1\xe1\xe1\xe1\xf1\xde\x5e\xd3\x33\xfa\x8b\x6a\x0e\x53\x98\x57\x79\x2c\x59\x91\x87\x34\xa3\x4b\x9a\xcb\x11\x14\xa5\xfa\x16\x23\x48\x49\x9e\x64\xf4\x69\x4a\xf2\x05\xb5\x5f\x67\x74\x59\xdc\xd0\x21\x7c\xde\x03\x90\x29\x13\x11\xcd\x60\x0a\xa6\xef\xb1\x2d\x44\x5c\x5e\x9e\xbf\x79\x0d\x53\xc8\xab\x2c\xab\x2b\x0c\x6c\x98\xda\x51\xea\x1a\x77\x30\x98\x7a\x63\xb7\xda\x68\x14\x5c\xd4\x35\x3a\xe0\xa1\x18\xaa\x1e\x43\xd5\x75\x5d\xf7\xe7\x2c\xbe\x16\x29\xb9\xb5\x73\xf7\x50\x4b\x88\x24\x30\x85\x8b\xcb\xe3\x3d\x5b\xc4\x72\x26\x19\xc9\xd8\x2f\x34\x1c\x1e\xef\xad\x7b\x08\x18\x49\xb6\xa4\x2f\x48\x2c\x0b\xae\x26\xa5\xd0\x08\x56\xc1\x04\xbe\x3d\x84\xaf\xf5\xcf\xe3\x3f\xc0\xd7\xf0\xcd\xb7\x7f\x1c\xa9\xaa\xdb\x6e\xd5\x7f\x60\x45\xd2\xaa\xc0\xc2\xb4\x29\xc4\xef\x25\x7e\xe3\x3f\x45\x30\x81\xa3\x7e\x8c\x84\xa4\xe5\x8f\x24\xab\xa8\x42\xe8\x42\x35\x3e\x12\xc1\x08\x82\xa3\x43\xfd\x67\xa9\x7e\xff\x88\xbf\x47\xfa\xcf\x37\x87\xfa\x2b\x55\xbf\x8f\xf1\xf7\x5b\xfc\x3d\xd2\x1f\x47\x09\x56\x24\x01\x0e\x7d\x74\x8b\x5f\xf8\xfb\x07\xfc\xfd\x13\xfe\x1e\xad\xb0\x7c\x15\xec\x5d\xf6\xa1\x95\x57\x4b\xfc\x87\xc2\xaa\x8f\x15\xa3\x92\x17\xb2\x90\xab\x92\x3a\x64\xef\x2e\xb2\xe2\x6a\x41\xb3\x39\x4c\x71\x89\xd4\xea\xa9\xcf\x88\x25\xde\xc6\x68\x0f\xba\xbf\x8f\xab\x3a\x1e\xc3\x07\x2a\x21\xa1\x73\x52\x65\xd2\xf2\x60\x64\x81\xd8\x6f\x04\x66\xc0\x1e\xb7\x2b\xb9\x62\xc9\x2b\x96\x97\x95\xb4\xad\xfa\xaa\xbe\x7c\x41\x8a\xaa\xee\x6c\x0e\xa1\xd7\x4e\x92\x19\x4c\xa7\x53\xa8\xf2\x84\xce\x59\x4e\x13\xcb\xc0\xdd\x56\x70\x84\x2c\x6c\x90\x7f\xc6\xc9\xad\xde\xe8\x10\x17\xb9\xe4\x45\x26\x80\xe4\x09\x7e\x10\x96\x53\x0e\x73\x5e\x2c\xe1\x25\xee\x83\x19\xe1\x02\xa4\x11\x08\xd1\x9e\x21\x5e\xb3\x03\xf5\x90\x83\x92\xc8\xf4\x3d\xa7\x73\x76\x37\x98\xc0\xfb\x27\xe7\x2f\xaf\xde\x9f\x3d\x7f\xf1\xea\xa7\x91\xae\x9e\x55\x2c\x4b\x7e\xa4\x5c\xb0\x22\x1f\x4c\xe0\xfb\x1f\x5e\xbd\x7e\x76\xf5\xe3\xf3\xb3\x0f\xaf\xde\xbd\xb5\x9b\xeb\xd3\xdf\x2b\xca\x57\x11\xbd\x93\x34\x4f\xc2\x5a\x7e\xb8\xb3\x19\xd6\x74\x74\x65\xc3\xc3\xf0\x4d\x25\x24\x89\x53\x1a\x71\x9a\x27\x94\x87\x9e\x14\xab\x65\xd1\xb0\xe9\x4e\xb3\x88\x94\xa5\x1a\xc7\x87\x36\xb4\x0b\xfc\x17\x2a\x81\xd3\x39\xe5\x34\x8f\xa9\x00\x59\x00\xc9\x32\x90\x29\x05\x96\x4b\xca\xa9\x90\x2c\x5f\x58\x89\x25\x80\xe5\x58\xd7\x10\x55\xd3\x91\xe4\x89\x06\x37\x63\x79\x02\xf4\x86\xe6\xd2\x88\x17\x8e\xfc\x52\x4b\xdc\x7f\x70\x85\x0e\xb7\xac\x40\xb3\x68\xce\xf2\x24\x0c\xbe\xc2\xda\xab\x5b\x5d\x1d\xc0\xbe\x65\xa8\x66\x2a\xff\x52\x54\x7b\x51\xf0\x25\x4c\x3d\x58\x06\x82\xae\xbf\x9a\x17\x7c\x19\xe8\xd9\xe9\x11\xee\x4a\xde\xdf\x41\xd2\x3b\x49\x38\x25\x17\x39\x59\xd2\xa9\x6a\x77\x19\x38\x84\xbb\x2b\x79\x74\x4d\x57\x25\xa7\x42\x84\x8d\xd8\xb7\xbc\x37\x1e\xc3\x73\x45\x20\xb8\x25\x02\xb0\x11\x4d\xe0\x96\xc9\xb4\xa8\x24\x92\x48\xa4\x6c\x2e\xe1\x9a\xae\x22\x6c\xaf\xb8\x9a\x46\xb7\x29\x8b\x53\x98\x4e\xe1\xe8\x1b\x78\xf4\x08\x1e\xd0\x08\x9b\xfd\x8d\xae\x2c\xdc\xf6\x64\x23\x51\xcd\x96\x4c\x86\x88\x99\xfa\x8f\x46\x25\x47\x02\x3f\xd3\xdb\xd2\xd6\x20\xd3\x23\x5e\x4f\x2a\x59\x1c\x70\x2a\x94\x44\x50\x98\xa8\x89\x82\x9a\x29\x14\x39\xe0\x76\xd3\x28\x21\x7f\xcf\xe
7\x82\x4a\x23\x1e\x22\xfd\xf5\x92\xb2\x45\x2a\xe1\x40\x97\xc5\x19\xa3\xb9\x29\x3b\xae\xfb\x69\xf0\xe7\x86\x84\xfe\xc1\xd8\x4c\x05\xe0\xa1\xfa\x8e\x62\x21\xc2\x41\x8a\x20\x06\x23\x18\x90\x4a\x16\x83\x76\x29\xcd\x22\x11\xf3\x22\xcb\xcc\xf0\xfb\x06\x37\x3b\x3d\xfd\xe7\xa1\x3e\xa8\xa2\x22\x0f\x07\xd7\x74\x55\x95\x7a\x42\x83\x91\x27\xf9\x5a\xe8\x99\xc3\x0d\xd6\xfa\x80\x6b\x2d\x72\x8c\xa7\xa6\xde\x1f\xee\x39\xea\x30\x11\x4a\xaa\x57\xae\x0c\x6b\xd6\x47\x33\x13\x62\xa1\x39\xc9\x11\x6b\x2e\x43\xa9\x8d\x7b\x4d\x93\xef\x65\xbe\x09\x86\x6d\x72\x35\x93\x79\xb7\xe3\x0e\x23\x9b\x96\xee\xa8\x2c\x17\x94\xcb\x37\x54\x72\x16\x6f\x82\x20\x68\x46\x63\x03\x42\xb7\xbf\x5a\x62\x07\x17\x10\xa7\x73\x4e\x45\xfa\x4a\xf1\xfc\x0d\xc9\x76\x81\x65\xba\x5c\xba\xdb\x31\x2e\x72\x51\x64\xf4\x1c\x85\x75\xdf\x2e\x36\x0d\x82\x96\x04\x54\x1d\x60\x43\x17\x2d\x3a\x6a\x61\xe4\x0e\x27\xc9\x4c\xf4\xf7\x22\x17\x4a\x83\x39\x90\xc5\x62\x91\xd1\xe9\x40\x92\xd9\xc0\x9d\xae\xea\x18\xd1\x7f\x75\x0e\xa2\xa1\xfa\x09\x03\x91\x16\xb7\xed\xd6\x45\xae\xcb\xf3\x68\x86\x4d\x03\x87\x27\x6b\xb1\xa1\xf6\x8e\x24\x7c\x81\x7b\xee\x61\x48\x23\xfd\x61\x98\xbc\xe7\x40\xd3\xf5\x51\x49\x38\xcd\x65\x38\x8c\x58\x9e\xd0\xbb\xd0\x6d\xef\xf2\xac\xad\x50\xd2\xe6\x61\x18\x7c\xa5\x04\xa9\x81\x40\xa4\xe4\x61\x40\x38\x23\x07\xf6\x30\x0c\x86\xc3\x28\x25\xe2\x69\x46\x84\x08\x03\x4e\xb3\x82\x24\xc1\xb0\x25\x89\xb4\xfc\xc1\x23\xab\x11\x35\x7a\x17\x69\x91\x7f\x46\x65\xc5\x73\x50\x5a\xa4\x80\x79\x11\x57\x02\x66\x24\xbe\x56\x47\x09\x0a\x5f\x96\x0b\x49\x49\x02\xc5\x1c\x34\x2c\x75\xa2\x44\x7d\x0c\x1a\xcd\x70\x69\xae\xe9\x2a\x29\x6e\x73\xa5\x1f\x71\x84\xdd\x4b\xc9\x66\x03\xe3\x98\x1e\x49\xb0\xf8\x86\x64\xa1\xff\x35\x34\x6d\x34\xd4\x0d\x92\x74\x3d\x6c\xce\x0e\xce\x8b\x0d\x87\x87\xae\x0b\x86\x51\xca\x12\x43\xf5\x86\x59\x9f\x68\x91\xb8\x99\x57\x95\x50\x6a\x73\xb8\xdd\x51\x35\x04\xaf\x8b\xd3\x7a\xf5\xe4\x8e\x89\x8d\xad\x57\x57\xe4\x8e\x09\xa7\x79\x46\x17\x34\x4f\x36\xa0\xa3\x2b\x5d\x61\x53\xb2\x3c\xa7\x9b\x26\x6d\x6a\xdd\x63\xf2\x86\x64\x1f\x24\x91\x1b\x76\x19\xd6\x5f\x09\xd5\xc0\x3b\x94\xf3\xe4\x19\x91\xb4\xbf\x8f\x23\xd0\x68\x9e\x74\x05\xa9\xe9\xac\x2c\x10\xaa\xec\x89\x92\xc5\xd7\x94\x87\x9a\x2b\xb2\x22\x26\x19\x9d\xc0\x80\xe6\x03\xad\x92\x29\x85\x80\xc8\x09\x0c\x7e\xfe\xf9\xe7\x9f\x0f\xde\xbc\x39\x78\xf6\x0c\x5e\xbe\x9c\x2c\x97\xa6\x5e\x16\x45\x36\x23\xfc\x7d\x46\x62\xd4\x71\x26\x30\x98\x15\x52\x16\xb6\x5e\xb0\x84\x7e\xbf\xfa\xc0\x12\x3a\x01\xc9\x2b\x6a\x4a\xd3\xe2\xf6\xbc\x48\xc8\xea\xfb\x4a\xca\x22\x6f\x57\x3d\xcd\x28\xe1\xdd\xc2\x42\x38\x40\xf4\x39\xd4\xd1\x76\xeb\x39\xfb\x8c\xde\x4c\x9a\x84\x03\xf5\xcf\x73\xb6\xa4\xef\x71\xea\x83\x21\xd2\x62\x13\x18\xad\x11\xb7\xe0\x28\x61\x95\x94\xe6\xec\x0b\x5a\xa7\x67\xcf\xbe\x77\x4f\xcd\xd6\x51\x60\x0f\xd0\x2e\x88\xaa\x54\x78\x9d\xe9\xe6\x16\x48\xbd\xf1\xc5\x87\xfa\x60\xeb\x98\xa6\x66\x87\xba\xe7\x9f\xde\xc1\x68\x08\x0c\x8e\x06\xc6\x52\xb5\x26\x8e\x5c\x65\x14\xc1\xe9\xe3\xb5\x03\x4f\x35\x62\x71\x51\x1f\xbd\xcd\x61\xac\x99\x6e\x10\x2d\xb2\x55\x99\xaa\x26\x03\x47\x84\xfa\x88\x86\x1d\xd1\xd8\x40\x21\x49\x62\xc4\xe8\x4c\xe6\x07\x25\x67\x4b\xc2\x57\x41\xad\xb4\x29\xc0\x4e\x9b\x7a\xb0\x83\x38\xa5\xf1\x75\xab\x1d\x47\x8b\xbc\xd3\xb4\xca\xb1\x31\x4d\x6c\xf3\x35\xd0\x4c\xd0\x8d\x28\x79\x60\x7e\x1d\x56\x9d\xa1\xee\xc7\xcc\x9b\xc4\xda\x9a\x39\xde\xa2\x84\xce\xca\x3b\x38\xc6\x19\x8b\xaf\xc3\xce\x72\xf5\xd1\x5e\xe9\xcb\x8d\xc8\xfb\xeb\x87\x77\x6f\x9b\xd5\x18\x8f\xe1\xd5\xdc\x31\x4c\x94\x4e\x6e\x46\x19\x61\x71\xc1\xd9\x82\xe5\x24\x03\x41\x39\xa3\x02\xd0\x7b\xb1\x28\x24\x2c\x2b\x49\x24\x4d\x1a\x38\xa1\x50\x02\x24\x19\xa2\xa1\x78\x4b\x21\xa7\x34\x51\x47\x
19\xa7\x4a\x33\x91\xbc\x8a\x25\x30\xa9\x0d\x47\x0f\xb2\xc2\x08\xe1\x46\xee\x7a\x18\x37\x89\xd6\x12\x38\xc9\x85\x12\x47\xcf\xd4\x26\x6e\xcd\xa5\x21\x1e\x74\xd9\xbe\x43\x8b\xef\x60\x70\x38\x80\x89\xda\x09\xf6\xdc\x6b\x53\xbb\x06\xa4\x77\x21\x1a\xf6\x61\xad\x00\x77\x8c\x2a\x6b\x67\x74\xd6\xa2\xa5\xb6\x39\xfc\x62\x15\x06\x67\x2c\xab\xab\xdd\xdf\xaa\x47\xa5\x30\x1b\x7e\x4e\x32\x41\x5b\x4a\xba\x39\x74\xea\x93\xb6\x8b\xba\x3e\x37\x66\x28\x89\xad\x1a\x1b\x5f\xa1\x1e\x7e\x19\x0c\x7b\x98\xcc\xaa\x1e\x31\xa7\x44\xd0\x33\xa3\x39\xb9\x83\xde\x07\x3c\xa1\x3b\x00\x4f\x68\x0f\xf0\x5d\x51\xa7\x79\xb2\x0b\xe2\xcf\xf3\xe4\x57\xa2\xbd\x05\xb0\x45\xda\x01\xdc\xab\xa7\xf5\x48\xfc\x96\xf2\xa5\xed\x00\x55\x17\x70\x5a\xaa\xb3\x35\x18\xc1\x67\x65\x89\x4e\x7a\xe0\xa1\x68\x1f\xc1\xb2\x50\x87\x6c\x30\xa3\xf3\x82\xd3\x60\xdd\xd1\xe8\xac\xa2\xa7\xf6\x29\xa7\xf8\xc5\xf2\x45\xc3\xd1\xda\x30\x55\x22\x4a\x1f\x03\x3d\xca\x85\xb5\x4c\x54\x23\xa3\x54\xd4\x3d\x36\x49\x23\x73\xe8\xa1\x9b\xf4\x1e\x76\xb5\x94\x2a\x8b\xb2\xca\x88\xa4\xaf\x70\x86\x64\x96\x51\x3d\x4b\x61\x98\xb7\x16\x6e\x8e\x5e\xea\x8e\xd4\xd9\x1d\xeb\x7e\xcf\x65\xe3\x01\xdc\x38\xe2\x4e\x0e\xc1\x87\x11\xf9\x44\xee\x42\x2b\x4b\xd5\x20\x45\x32\x81\xe0\x2f\xcf\xcf\x83\x91\x29\xac\x78\xe6\x79\xbb\x60\x1f\x82\x31\x29\xd9\xf8\xe6\x68\x9c\x91\x19\xcd\xc6\x57\x57\x8a\xb2\x57\x57\xe3\x1b\x74\xa6\xd6\x3d\x95\x00\x3c\x5f\x95\x6a\x5d\x3f\x89\x22\xaf\xcb\x45\x15\xc7\x54\x88\x49\x83\xa0\xaa\x1e\xa1\xb3\x42\x29\x94\x95\x70\xdd\x08\x8a\x66\xaa\x5e\x49\x45\x59\x09\x78\x30\x9d\x42\x60\x40\x04\x6e\x43\x4b\xc3\xb4\xb8\x7d\xae\x34\xf4\x30\xc0\x3f\xa0\x64\x10\xcb\x17\x40\x6e\x08\xcb\x14\x85\x40\x9b\xb8\xe2\x41\x73\xc4\x35\x0b\xdb\x94\xac\xeb\x7f\x29\xca\x2d\x6b\xb2\x22\x32\x6a\x6e\x4d\xd3\x79\xc1\x21\x44\x45\x03\x7d\xb6\xc0\xe0\xc4\x76\x88\x32\x9a\x2f\x64\x7a\x0c\x6c\x7f\xbf\x07\x5b\x77\x2f\x5c\x1c\x5e\xd6\x3a\x1c\x49\x92\x30\xa7\xb7\xf0\x0e\xbf\x43\x03\xec\x82\x5d\x8e\xa0\xf9\xf7\x70\xe8\x62\xbb\xe7\x01\x9e\x57\xbf\xfc\xb2\x3a\xa3\xa2\xca\x64\xed\xc1\xd4\xff\xa1\xa0\x98\xa0\x4b\x7f\xe4\x4d\x5f\xb5\xed\x96\x2f\x49\x39\x81\xcf\xeb\x8d\x03\x21\x2b\x2b\x5e\x24\x29\x25\x49\xe8\xcd\xb0\xa8\x78\x4c\x27\x16\x63\x17\x2a\x93\x74\x29\x26\x10\x90\x2c\x0b\xfc\xd1\x64\x9c\x52\xee\xf0\x86\x6a\xe9\x13\xce\x1e\xfa\xb7\x14\x52\x72\x43\x0d\xe6\xb8\x08\x71\xc5\x95\xb1\xac\xe7\x38\x02\x71\xcd\x4a\xaf\x63\xbd\x01\x1d\xf2\x68\xc9\x89\x7c\x85\x5e\x2f\xfc\x6c\x8f\xd8\xa5\xaa\xe9\xe6\x76\x3a\xde\xd6\x65\x49\x4a\xb5\x18\xeb\xad\x0d\xb9\x5d\x38\x2c\x8c\xe6\x2c\x93\x94\x87\xcd\x48\x91\x91\xac\xe1\x18\xc6\x8b\x11\x0c\x06\xc3\x9a\x2f\x46\x1d\xcc\x01\x4a\xae\xec\xa2\x13\x21\x79\x91\x2f\x4e\x07\xa3\x6e\x83\x42\x28\xeb\xe7\x64\x6c\x9b\xb4\x5a\xac\x87\x3b\xa2\x1c\xcd\x0b\xfe\x9c\xc4\x69\x23\x4a\x79\x97\x94\xfd\x94\xb9\xe0\x91\xd5\xa8\x2e\x61\x0a\xbc\x3d\x62\x1b\x07\x87\x11\xa1\x91\xcb\x8a\x5d\x80\xe5\xbd\x23\xb8\xfd\xd7\xa3\x3d\x8f\x53\xb9\xec\x70\x9d\x68\x63\x8e\x85\x91\x6a\xdb\x4c\x8f\x8c\x66\xdd\x09\x5a\x51\xd0\x3b\xcd\xd9\x65\x24\xe2\x82\x53\x38\xe8\xaf\x27\xa6\xbe\x3d\x7f\x3b\x41\xb4\x83\x0e\xe1\x3b\x20\x91\x36\x79\x9f\x16\xcb\x92\x70\x1a\xce\x86\x30\x01\xd6\x22\x52\x8b\x68\x0e\x95\xc4\x66\x72\xa4\x6c\x91\x66\x6c\x91\x7a\x34\x81\xde\xad\x68\x00\x3e\x0c\x07\x27\x09\xbb\x39\x1d\x58\xf7\x7d\x7b\x56\xaa\xef\x65\x24\x24\x57\xa2\x78\x5f\xb1\x1a\x36\x1f\xfa\x38\xf4\xa1\x3d\x1e\xc3\x79\xca\x04\xaa\xe3\x18\xa5\x48\x31\xac\x01\x64\x2e\x29\x07\x22\x25\x89\x53\x05\x14\xfd\xdd\x56\x0e\x41\x99\x55\x0b\x96\x8f\x80\x08\x60\xd2\x85\x55\xc8\x94\xf2\x5b\x26\x28\xcc\x38\x25\xd7\xa2\xd5\xcf\xce\x96\x64\x4c\xae\xa2\x1e\x51\xe7\xb9\x9c\x1c\
xa4\xd1\x2b\x34\xe9\xda\x9f\xf0\xbb\x0e\xa6\xb5\x75\x17\x6c\xd1\x03\x16\x54\xbe\xab\xe3\x55\xdb\x0f\xfe\x56\x7c\xab\x31\xa7\x75\x21\xfa\xbb\x6d\x54\x14\x20\x70\xfc\xda\x46\x5a\x07\xb5\x93\xc1\x16\x08\x49\xcb\x76\x09\xda\x2c\xc1\x1e\xc0\xe5\x66\x05\x58\x77\x19\x46\xd4\x93\x1a\xe8\xeb\x1c\xd9\xe0\x93\x6b\xcb\x2b\x5d\xa3\x09\xa4\x47\xea\xd3\x71\x7c\x46\x2c\x7f\xc2\x39\x59\x85\xaa\x7c\xe4\x4d\x67\x08\xa7\x53\x38\x6c\x96\x05\xc3\x32\x06\x0a\x6a\x2e\xe6\xa8\x86\x53\xb7\x15\x58\x3a\xa1\xfa\x78\xe9\x8c\x8c\x7d\xea\x75\xf2\xbc\xa3\x75\x27\x1b\x83\x6a\x29\x7d\x6e\x0b\xed\xeb\x6d\xbb\x7f\xb5\x76\x8a\x5b\xab\x8e\xff\x6f\x53\x05\x09\x17\xf4\x59\xc5\x09\x6e\x56\x87\x0b\x70\xf5\xce\xe9\x9d\x6c\xd8\x01\x8b\xce\x9e\xc3\x14\x94\x92\x71\x46\x17\xcf\xef\xca\x30\xf8\x67\x78\x71\x78\xf0\xe7\xcb\xfd\x61\x78\xb1\xba\x4d\xd2\xa5\xb8\xdc\x1f\x3e\xd4\xbc\x88\x2a\x10\x9e\xcd\x8a\x2d\x6a\x88\x11\x96\x85\x06\x5c\xed\xd5\x7a\x60\x9a\xea\x78\x0c\xaa\x55\x48\x1b\x55\x67\xaa\x2c\xb1\x1f\x4c\xe1\x9b\x96\xeb\xe7\xdb\x43\xeb\xb7\x52\xa3\x22\x99\x61\x0a\x38\xbd\x57\xb9\xb4\x00\x2e\x8e\x2e\x6b\xcc\xaa\x9c\xa9\xc3\xd2\xd6\x3c\xbe\x74\xc8\xa7\xfb\x7f\xdd\x0d\x79\x3b\x09\x09\x17\x0a\xc0\xe5\x56\x0a\x7b\x56\xe3\xce\xfb\x0c\x89\xf3\x81\xc6\x45\x9e\xd4\xbe\x5b\x6f\xad\xc2\x56\xa0\xc9\x71\x58\xf7\x29\x96\xf7\xe4\x31\xf4\x29\x9b\x8a\xe6\x1e\x0a\x27\x7d\x28\xdc\x03\x14\x15\x4d\xdf\xd5\xd4\xc2\x75\x4b\xe7\x63\x67\xc3\x6d\xb0\x7e\xe0\x1e\xff\x40\xa3\x89\xbb\x1a\xfa\x7a\x17\xeb\xc8\xb3\xc4\xff\xfd\x0b\xb6\x7d\xa5\xe0\x00\x8e\xd4\xaa\x9e\xea\xd5\x3d\x38\xd8\xb8\x6a\xa7\xff\x7f\x56\x6d\x41\xe5\xf3\x3a\x4a\xb0\x7d\xc9\x50\xe0\x78\xb1\x85\x2f\x5f\xc0\x2b\xf0\xb1\xe6\x36\x68\xb5\xc4\xb0\x9a\x95\x35\xae\xdf\x79\x17\x97\xfb\x6e\x67\x32\xff\xf0\xeb\x26\xa3\x8a\x12\xdd\x58\x7b\xd5\xea\xee\x4e\xa4\x49\x34\x85\xaa\xed\xd0\x91\x76\x09\xa6\xb4\x6d\x41\x4c\xf4\xe2\x84\xa0\xee\x4d\x1d\xda\x85\x2c\x06\xa1\x1d\x25\xe9\xf3\xbc\x27\x06\xb0\x81\x2c\x39\xbd\x35\x28\x9b\xa5\xb3\x04\x72\x89\x6c\xb6\xa1\x69\x8b\x66\xf4\xce\xfb\x17\xc6\xf0\x78\x04\x03\xa1\x77\xdc\xa0\x97\xde\x06\xb0\x53\xe7\xb3\xfe\x8e\x02\xe9\x7f\x7a\xde\xa2\x9a\x49\x4e\x62\xf9\xbf\x6a\xf2\x4e\xeb\xdd\xd3\xd5\xe2\x8c\x12\xae\xd5\xe6\x61\x6b\xb7\x77\xe4\x51\x23\x69\xd6\x7b\x6d\x17\xb2\xd2\xbe\xc3\x9e\xe0\x65\x44\x97\xa5\x5c\x85\x43\x27\xa0\x44\xb8\x54\x7c\x6d\x94\x23\x4d\x5d\x45\x6f\x55\x18\x0e\xff\x3b\x4e\x09\x93\x46\x53\x64\x95\xd1\xd5\x36\x6b\xc6\x36\xbd\xc3\x2a\xd7\x97\xc1\xd0\x84\xc3\xbe\x7c\x81\x37\x44\xa6\xd1\x92\xdc\x85\xf8\x8f\x79\x56\x14\xdc\x3f\x35\xc6\xf0\xf8\x8f\x87\xc3\x11\x1c\xd5\xc3\x36\xf1\xd7\x8e\x7c\x81\xb1\xcd\x7e\x75\xa4\x3e\x22\xf5\x53\xca\x3d\x3f\xa5\x2d\x8c\xc8\x4c\x19\xc3\x43\x57\x5f\xab\x78\x66\xc7\x32\x5e\x3a\xfb\x59\x12\x4e\x96\x4d\x3e\x5d\x80\x50\x82\x49\x5b\x39\xb6\x41\xa4\x8d\xc9\x80\xb5\x76\xae\x01\x46\xb8\x62\x4a\x31\x37\x53\x3b\xf0\xd6\xe6\xd8\x6d\xaa\xc3\xe1\xa6\xe1\xb1\x0f\x84\x96\x4a\xb3\xad\x57\x45\xd7\x56\x3c\x53\x07\x79\xbf\xfb\x53\xa7\x9d\xe1\x60\x81\x71\x58\xeb\x19\xbb\xec\xdd\xe3\xdb\x74\x93\x37\x70\x93\x9c\x51\x51\x16\xb9\xa0\xdd\xc6\xc7\x9a\x16\x5e\xbc\xcf\x60\x2c\x35\x8f\x36\xfc\x6a\x97\x6f\x37\xbc\x7f\x33\xc6\x4f\x75\x40\x68\x3b\xce\xbe\xc9\xf7\x53\xaa\x0c\xa1\x0d\x1e\xe7\x16\xff\xeb\x84\x15\x5d\x19\x0c\x3d\x4f\x74\xc5\xb3\x6d\xfe\x65\x55\x3e\x31\x54\xfa\x77\xfb\x9c\xb1\x17\xba\x02\x76\xf4\x2d\x1b\xa8\x61\xed\x55\xf6\x49\xb9\xcd\xcb\x70\x97\xf2\x91\x62\xda\xb2\x8d\xbe\x2a\x53\xc6\x55\x80\x5b\xb4\x85\x34\x0a\x02\xee\x79\xd8\x54\x9f\xbb\x94\x47\xdc\x2c\x2b\xc6\x34\x1f\xf4\xa5\xde\xda\xff\x28\x57\x0b\xda\xee\xa3\x27\xef\xb9\x96\xfc\x58\x75\xbb\xb3\x26\xb1
\x32\x26\xbd\x4e\x5b\xdd\xfa\xf4\x8e\xc6\x15\x66\xa8\x1a\x87\x76\x00\xfb\x0a\xec\xb0\x4b\xe5\x9a\x7a\x71\xb1\x2c\x33\x2a\xe9\xce\x04\x9c\x6e\x20\xe0\xfd\xb1\x82\xa4\x31\xc2\xfb\x4e\x10\x38\x68\x36\xed\xb1\xd7\x51\x16\x92\x64\xaa\xf8\x83\x8e\x55\xf7\x90\x17\xc3\xcc\xc6\xd7\x6a\xac\xbd\x96\x93\xaa\x3e\xd9\x52\xb9\xcc\xc2\xe0\x75\x41\x12\x50\xb2\x42\x53\xa8\xc6\x6d\x1f\x82\xa5\x80\x93\x19\x87\xf1\x29\x9c\xd5\x62\x4f\xb7\x72\x0e\xa7\x7d\x08\x6c\x33\x55\x13\x9c\x2b\x1c\x11\xa0\x89\xa8\xeb\x1e\x2d\xd4\x87\x2d\xac\xda\x91\xdc\x86\x6c\x3b\x38\xb7\xea\xb5\x77\xa5\xd4\x52\x2c\xb6\x68\xab\xaa\x47\xa4\x36\x13\xb6\x6d\x95\x5b\x7d\x60\xcb\xd0\x8d\xfa\xf1\x5b\xc7\x1e\x0c\xda\x43\x5b\x1a\x6c\x19\xda\x4b\xa1\xd9\x41\x61\x72\x8f\x4c\xb5\x3c\x45\x25\x5f\x3d\xb3\x6c\x7b\xcb\xf2\xa4\xb8\xd5\xd3\x39\xd7\x95\xed\x96\xb5\xcc\x66\xad\x44\xcf\x3e\xad\xa6\x95\x07\xd4\xa8\x36\xa8\x9f\x59\x08\xbe\xff\xa7\x4e\x99\xb4\x43\xc2\xd4\xe2\x25\xf4\xde\x50\x58\xf5\xc7\x60\x7b\x2c\xcc\xde\x3c\x23\x35\x87\x51\x33\x83\xaf\xcd\xc5\x9e\xed\xd4\xd6\x59\xf5\xaf\xc9\x8c\x66\xde\x61\x88\x21\x4e\xd1\x90\x1c\xbf\x3f\xa0\x1b\x5b\x98\x4b\x30\x8e\xd5\x8f\xb5\xc0\x72\x70\xbb\x69\xa2\xe8\x2a\x25\x91\x6d\xbc\xd4\x91\x29\x2e\xd4\xa8\xac\x44\x1a\x06\x36\x5a\xa3\x36\x97\xee\xbb\x0f\x41\x1d\xa0\x31\xe2\x4e\xc4\xa4\xa4\x2f\xcf\xdf\xbc\x36\x78\x5e\xe0\x9f\x3a\x30\xb8\xf6\x6d\xdb\xcc\xce\x2e\x38\x49\xd8\x0d\xc4\x19\x11\x62\xfa\x31\xd0\xc5\x1f\x83\x66\x28\x8b\xc9\xa7\x82\xe5\x61\x70\x32\xe3\xa7\xc1\x50\x0f\x9f\xb0\x9b\xd3\x60\x2b\x31\xb5\x1f\xfb\xbc\x38\x17\x6f\xb5\xb7\x76\x23\x39\xa5\x6d\x61\x6a\x22\x4b\x1c\xa5\xde\x0e\x06\x38\xea\xe7\xe0\xf8\x3e\xe2\x6f\xa5\xfe\x76\xf2\xf7\xd0\xbf\x26\xf9\xf4\x63\x50\xd3\xc5\xd2\x57\x95\x7f\x0c\x6a\x2f\x3d\x4a\x7f\xf5\x63\x66\xb3\x3f\xed\x23\xe3\x48\xd3\x70\x1d\x38\xe6\xba\xee\xb0\x9b\x6b\xf7\x47\xe3\x08\xad\x69\x89\x9e\xcd\x86\x94\x7a\xc7\x62\xd3\x17\x59\x41\xa4\xa9\xb7\x9b\x92\x89\xb7\xe4\xad\x2a\x1b\x3a\xf7\x18\x82\xfd\x57\xf9\x3c\x18\x41\x70\x60\xfe\xe2\x37\xdc\xb2\x2c\x83\x19\xd5\xc0\x12\xb5\x9d\x0a\x78\x4b\xde\xc2\x6c\xe5\xc2\x1f\x46\x70\x9e\x52\x0b\x2a\x26\xf9\x40\xaa\x4e\x98\x7a\x41\x93\x11\x88\x02\x73\x1f\x41\xa6\x74\x09\x44\xc0\x82\x94\x02\xc2\xbc\xca\xb2\x61\xe4\x7a\x62\xec\xe5\xb2\xb5\xe7\xb4\xdd\x4a\x14\x2f\xa7\xaa\xad\xbf\xde\x6b\x51\x97\x24\xa3\x52\x5a\x03\xef\xcc\xdc\x75\x8b\x9e\x16\x59\xc1\xa3\xf7\xba\xb2\xb1\x36\x51\x33\xd3\x27\xae\xd2\x36\x91\x87\x96\x44\x72\x76\x17\xf8\x22\xaa\xd1\x50\x4c\xdc\x9d\x09\xc8\x0b\x09\xc5\x1c\x74\x7b\x0c\x33\x3d\x80\xf7\x19\x25\x82\x02\xc5\x3b\x24\x04\xe2\x82\x73\x1a\x4b\xcc\x98\xa6\x42\xb0\x22\x8f\x02\x3f\xd7\x44\xf3\xf9\xba\x71\x0f\x11\x9b\x86\xc0\xeb\x00\x5b\x23\x37\xa5\x68\x87\x4b\x8e\xeb\x2f\xcd\xc5\x4d\xbc\x44\x0a\xb3\x57\xd1\xb0\xc2\xa5\xa9\x37\x85\x09\xb4\x04\x22\x26\x19\xe1\xc1\xb1\x2b\xaa\x84\x13\xc6\x6e\xe9\x77\x36\x3e\xd3\x88\x26\xa4\x8e\x2f\x12\x9a\x81\x9b\x1c\x86\x1a\x70\x5d\xe7\x26\xc6\x19\x52\xb8\xa3\x4c\xf0\x77\xe4\x75\x9f\x98\xbf\xbe\x2d\x20\x85\x8e\xd6\x08\x9f\x52\xce\x06\x72\xd5\x3a\x4f\xd3\xbb\x9b\xe8\x08\xc2\xc5\xe1\xa5\x1b\x36\x5f\x4d\x9c\xb3\x11\x77\xa6\x86\x76\x71\x74\xd9\x84\x34\xeb\x38\xff\x7a\xd8\x68\xa0\x99\xd2\xdf\x0d\x07\x46\xf8\x19\xea\x1e\xeb\x26\xf9\xad\x66\x49\xad\x4c\x45\xbf\x50\x5e\xbc\x60\x59\x16\xaa\xe9\xb4\xdc\x7d\x64\x47\x45\xa2\x73\x8d\xf5\x5e\x87\x6a\x9d\xc1\x68\x1d\xd5\xd6\xf0\xf6\xcf\x73\xbc\x03\x80\x97\x4c\x49\xbe\x02\xc9\x49\x4c\x85\xe2\x77\x92\x03\xbd\x63\xfa\x02\x19\xca\x83\xc8\xcf\x49\x6f\xbc\x2e\xce\x70\x4d\x42\x7b\x9c\xb2\x2c\xe1\x34\x0f\x87\x3d\x31\xb9\xa6\x6d\x2b\x33\x0b\x2b\x30\x45\xde\xab\x58\xb7\x73\xed\x4d\xac\xd
a\x9c\x7f\x81\x4e\xb2\x3f\xb5\x01\xe9\xe3\x76\xb2\x7d\xab\xb9\xc9\xb2\xef\xb6\x6f\xd0\xef\x5c\xbb\xdb\xd6\x08\x87\x6a\x5c\x50\x34\x4f\x8c\x03\x6a\xa3\x8f\x46\x51\xfe\x69\x91\xdf\x50\x2e\x41\x16\xf0\xc3\xdb\x57\x3f\xa1\x4e\x2e\x24\x59\x96\xf6\xda\x9d\x63\x87\xec\xee\x07\xfc\xf2\x05\xbe\xf9\xd6\x8c\x70\x94\xda\x1b\xa0\x51\x8f\x77\xcc\xa2\x79\x50\x0f\x54\x4f\x13\x39\xa7\x93\x0a\x22\x9c\x93\xe7\x3d\x49\x30\xf8\x6d\xb2\x72\x6f\x99\x4c\x81\xe5\x37\x4c\xb0\x59\x46\x21\x50\xa2\x28\xd0\x3b\x4f\x00\xd1\xd7\xea\xe2\x22\x9f\xb3\x45\xc5\x69\x02\x77\x07\x6a\x11\x60\x56\x54\x79\x42\x10\x00\xcd\x45\xc5\xa9\xb0\xe0\x65\x4a\xa4\xe6\x3c\x01\x84\x53\x48\x98\x28\x33\xb2\x32\x17\xf5\x80\xc0\x9c\xdd\x35\x70\x90\x0a\xde\x6d\x95\x9c\x94\x25\x26\x15\x14\x38\x74\x1d\xa2\xaf\xe1\xab\x89\xdb\x6e\xd8\xa4\xc9\xff\x45\x86\x46\x12\x5c\x1c\x5e\x46\x77\x70\xda\x50\xcd\x89\xc8\x68\x1a\x55\x39\xde\x02\x0c\x3f\xdf\x4d\x9a\x56\x23\x30\xf9\x59\x6b\x2f\x37\xd8\x81\x2b\xbc\xbd\x79\x00\x47\x6a\x9c\x13\xbb\x22\x9d\x51\x50\xa3\x51\x43\x98\x06\xbd\x03\x34\xd7\x7a\xde\x16\xb7\x10\x73\x4a\xa4\xbe\x44\xa8\x0e\x49\x7f\x13\x77\xae\x87\xbb\xc7\xa8\x4e\x37\xd6\x18\x98\x58\xf9\xc4\x61\xfe\x5a\x90\xea\xeb\x7f\x93\xc6\x89\xe9\x6c\x6c\x34\x16\xf5\x6d\xc0\x70\x38\x52\x2c\x6f\x24\xe8\x2d\x4b\x64\x7a\x4f\x9f\x7f\xa8\x7a\x34\xb1\xff\x74\x38\x82\xc7\x75\x3f\xad\xde\x53\x3e\xe9\xc9\x2e\xff\xce\xa4\x2a\x04\x30\x81\x20\x63\x39\xb5\x2e\x27\x34\x23\xca\x22\x23\xc6\x30\x56\x75\x84\x1b\x3f\x93\x35\x7e\x6b\x7e\xd7\xc5\x4b\xa6\x5a\x92\x4a\x16\xc1\xc8\x4f\x4f\xbd\x33\xf2\xa4\x4b\xac\x08\x65\x16\x3a\x07\x3e\x6b\x4a\x4f\xfa\xe8\xec\xc0\x5a\x6d\x81\xf5\xb3\xa1\xff\x46\x60\x1a\xd9\x82\x33\x9a\xcb\x7a\x7a\x74\x6e\x73\x37\x24\x8b\xaf\x5f\x98\xdb\x33\x35\xfc\x17\xec\x4e\xaa\x3d\x16\xbd\xad\x96\x33\xca\x23\x7d\xbd\xe6\x6f\x6f\xbe\x3f\x1f\xf5\x2c\x36\xa2\x68\x16\xdb\xcd\x91\xf5\xd0\xb0\x97\x99\x9b\x99\xa5\xc5\x0d\xe5\xcf\xa8\x24\x2c\xeb\x9f\xdf\xcb\xa6\xc1\x6e\x93\xd4\x68\xfa\xe9\x5d\x7a\xf1\x46\x70\x37\x02\x27\xd5\xcf\x09\xbe\x0d\x4e\x44\x49\x72\x2b\xf3\x55\x61\x80\xb9\x4d\xb5\x3f\xe7\x0e\xbe\x46\x49\x3c\x8c\x64\xf1\xc3\xf9\x53\xad\xea\x87\x43\x9d\xda\xa4\xfa\x9e\x0e\x8e\x1d\xb0\xe2\x96\xc8\x38\xed\x02\xc6\x79\x5c\xe9\xda\x40\x67\xf2\x4f\x83\x19\x89\xaf\x17\x5c\xc9\xb6\x03\xa3\x2f\xe8\xb4\x2a\xd4\x05\xb0\x44\x0d\xa3\x8e\xa0\xee\x40\x71\x91\x4b\x9a\xe3\x65\x55\x3d\xe4\x3e\x98\xd9\x46\x7d\x16\x16\x4a\x58\x6d\x66\x4d\xc0\x35\x39\x57\x66\x26\x26\x1f\xf0\xd8\x0b\xe6\x6a\x2a\xa9\x06\x33\x8e\x64\xb1\xa3\x3a\x45\xc6\x4f\xd0\x58\xd5\x3e\x1a\x5d\xc1\x83\xfa\xa9\xbd\xb1\xd6\xb3\xf0\xaf\xb1\xae\x57\xb0\xe8\x6e\xb5\x64\xb9\x97\x21\x9c\xd1\x9c\x34\xb7\xfe\x21\xbf\xa7\x29\xb9\x61\x05\xb7\x7a\xd8\x4b\xdb\x21\x84\x9d\x58\x4f\xe3\x35\x31\x7f\xfd\xc1\x45\x4a\xb3\x1b\x75\xc4\xec\x34\xf2\x39\x5e\x56\xdd\x8d\xe1\x37\x8d\xea\xfa\xf0\xeb\x2b\xa3\x5b\xdd\x22\x82\xfd\xf2\x5b\x74\x47\x5f\x74\x3d\x68\x59\x17\x3d\x92\xa0\x3e\xdd\xeb\xe0\xc0\x6f\x95\xf5\xde\xed\x81\x4d\xe2\x66\x87\x3c\x84\x9e\x00\xcd\x96\x30\x49\x3f\x4d\x94\x92\x6c\xb0\x30\x97\x8e\x04\x94\x04\x5f\x0d\x70\xef\x24\xcd\x0b\x6e\x6f\x2e\x19\xcd\x05\x4d\x68\xe7\x22\x92\x20\x37\x74\xcf\xa8\x37\xce\xf5\xa3\x27\x7f\x7d\xf2\x13\x58\x27\xb1\x52\x47\x0a\x9e\x50\xae\x6f\x2e\x1d\xd4\x56\x32\x30\xa9\x0d\x79\x67\x4c\x0d\xec\x36\xa5\x5a\x85\xa9\x04\xe5\x4a\x53\x52\x8a\x8e\xce\x8b\x44\x7c\xdc\x3b\xbb\xf5\xad\x25\x63\x81\x7a\x1a\x5f\xff\x6d\x27\x34\xc7\xb7\xda\x15\xbd\x76\xf4\xdb\x02\xd1\x2c\x0b\x96\x4b\x01\x73\x25\x11\x5b\xb6\x71\x57\xc1\x3f\x27\x33\xff\xb2\x9a\x7b\x0b\xc9\xf1\x19\xd6\xb7\xa2\x76\xe2\x82\x56\xd0\xab\x95\x37\x41\x76\xe2\x03\x1d\xcf\x
6e\xae\x53\xdd\x8f\xa5\x4b\x69\xed\x21\xb1\x2e\xb3\xef\x8b\x64\x65\x49\xed\x80\xf3\x6f\xd1\x5f\xe1\x65\x10\x90\xb3\x22\x31\xd7\xfe\xb0\x9f\x17\xee\x16\xb7\x4c\xc6\x69\xe8\x84\x14\xce\x57\xa5\xd1\x1e\x63\x22\x28\x04\x37\x34\x96\x05\x0f\x26\xb5\xfe\xd9\x0d\x3f\xf8\x2b\x68\x87\x31\xd6\x4d\x70\x22\xf9\xe9\x89\x4c\x94\xdd\xab\xce\xaa\xe9\xe0\xf1\xe0\xf4\x84\x9d\xe6\x7a\x61\x4f\xc6\xec\xf4\x64\x2c\x13\xf5\xc3\x4f\x9b\xac\xd7\x76\xca\x50\x7f\x22\x5c\x4f\x28\xc4\xbf\x65\x81\x6b\x00\x53\xb7\xe1\x05\xbb\x74\x4f\xcb\xda\xfd\xd8\xe7\xa3\xa8\x5d\x14\xc7\xf7\x4d\xed\xb4\xe5\x88\xd5\x20\x8d\xbb\x54\x4d\xcd\x34\x31\x2e\x88\x8b\xa3\xcb\xa6\xca\x9d\xb5\x9e\x27\xe6\x24\x1f\xd7\xf4\x37\x7e\xa6\xff\xc3\xf4\xbf\xf9\xed\xf4\xbf\x69\xd3\xbf\x4e\x07\x3d\xa7\x77\x4a\xc3\x09\x6a\xa7\x54\x8d\xde\x27\x8d\xde\x27\x38\x81\x1b\xeb\xf3\xb1\xb8\x7d\xf2\x6f\xe0\x34\x90\xf6\xa7\x75\xe3\x8b\x4f\x97\x66\x85\xe0\x3f\xd5\xaa\xb9\xe5\x87\x7a\xe5\x66\x7c\x7c\x1a\xb4\x93\xdc\x7e\x17\x6b\x38\x98\xec\xcc\x19\xc6\x2b\xa7\x39\xa3\x7f\x74\xdd\xc4\x1b\xc9\x5d\x89\x4d\x8c\xd8\x1e\x08\x35\xdb\xfb\x07\xc2\x26\xde\x40\xce\xac\xfd\x31\x87\x5b\x06\x35\xfe\x86\x49\xef\x79\xf0\x43\x2e\xaa\xb2\x2c\xb8\xa4\x89\xc9\xeb\x45\x8f\x6a\x07\xc8\xd6\xa3\x9d\x6f\x78\x19\xad\xef\x8e\x5c\xfb\xf9\x24\xcf\xb9\xe4\xe8\x54\x67\xfd\xc5\xbe\xaa\x55\x5f\xa6\x70\xfd\xa3\x48\xbe\x06\x01\x9a\x4b\x26\x57\x6f\xf4\x5d\x21\x9c\x58\xf0\x28\x98\x40\xf0\x88\x2c\xcb\x63\x9b\x5c\x7f\x82\x25\x99\xac\x0b\x4e\xb1\x60\x51\x17\x0c\x82\xc1\x04\x06\x8f\xfe\x55\x15\xf2\xd8\xdc\xf8\x09\x06\x81\x2a\xfa\xea\x9b\x3f\xd7\x25\x63\x5d\x72\xf7\xf8\xc5\xf1\xa0\xbe\x57\x6f\x94\x7c\x63\xd3\x18\xf4\x9a\x2b\x47\x17\x8f\x4e\x4e\x83\xc1\xc7\xf1\xe5\x78\x31\x72\x6e\x87\x88\x56\x82\x65\x3d\x8d\x0b\x71\x69\x5d\x9b\x6b\x6f\x55\xde\x93\xbe\xac\xdc\xe6\x5d\x3c\x1b\xe3\x69\x2d\xa6\xea\xd6\x7a\x04\xad\x7f\x25\x11\x48\x73\x2d\x02\x01\xa3\x0f\xec\x87\xb3\xd7\x8d\xef\xd1\x6d\xd5\x2b\x53\xbd\x06\xda\x95\xb2\x6e\xa2\xa5\x5e\xad\x75\x79\xe1\x50\x24\x49\xb4\x56\x0e\xe6\x85\x3d\xe4\xa6\xe0\x2b\x92\x24\x57\xe6\x65\x0f\x73\xef\xd4\x6b\xae\x9f\x42\x51\x45\x23\xf8\xbc\x1e\x76\x35\x94\xd6\xfc\xed\x8c\xba\x34\x50\xb3\x33\x01\xd6\xac\x88\xd1\xcc\x8f\x04\x25\x5c\xbf\x43\x15\x04\xad\x05\xb3\x61\x06\x43\x3d\x4c\xab\x78\x6f\x73\xb6\xfa\xe1\x44\xa2\x9a\x69\xfe\x08\x8f\x86\x91\x28\x33\x26\xc3\xc1\xa3\x41\x9d\x6d\xd6\xc0\x78\x49\xb3\xb2\x36\xb3\xda\x93\xf9\x7b\xab\x59\xe8\xfa\xb8\xdb\x30\xf4\x84\x9b\x2e\x22\x74\x30\xdd\x4a\x2d\x4b\x65\x97\x5a\xf6\xed\x34\x9f\x71\xba\xb8\x6a\x95\x11\x49\xf6\xb0\x7e\xb7\xcc\x79\x7c\xc8\x38\x55\xcc\xab\x6e\x5a\x60\xaa\x95\xd5\x0a\xe7\x0f\x67\xaf\x9b\xa5\x1d\x3a\xd5\x5a\x9e\xb4\xd6\x7e\xb8\x07\x30\x6c\x1e\x58\xd4\xfb\x41\x73\x5f\xe3\x52\x7e\x68\x96\x77\x68\xec\xb4\x6e\xf0\xdc\xfa\xc9\x6b\x2b\xae\x79\x07\x40\xd1\x69\x3c\x86\xb7\xef\xce\x9f\x4f\x5a\x37\xac\x66\x14\xae\x69\x29\xf1\x1e\xdd\x2a\x8f\xb5\xcf\x74\x5c\x49\x96\x8d\x85\xe4\xf6\x6f\x5c\xe4\x37\xd1\xa2\x98\x20\xdc\xd7\x2c\xbf\x7e\x51\xf0\xe7\x75\x10\xeb\x9e\x35\xa8\xe9\xd1\xbf\x6d\x71\x39\xb5\xf0\xb1\xbb\xd6\x4c\xdf\x8b\xde\x2c\xf4\xde\xc2\x9b\x42\x6e\xc4\xab\xb5\xeb\x35\x05\x9a\xfb\x51\x36\x5a\xf0\xbb\xd9\xd3\x01\xf1\x6e\xf6\x89\xc6\x4a\x08\x75\x78\x75\x41\x73\xca\x89\xd4\xec\xaa\x9b\x79\x02\xc7\xe2\xef\xc5\xfb\x1e\x46\x98\xbc\x16\x3a\xb0\x6d\x66\x83\x7e\x02\x4d\x07\x94\x1f\x99\x77\x75\x52\x26\x64\xc1\x57\xc8\x1c\xca\x04\xa1\xe1\xe7\xf5\x08\x82\x60\x04\x3a\xb6\xf1\x9d\x3a\x90\x1d\xa2\x6e\xdd\x23\x0e\x43\xba\x2b\xa4\xf9\xae\x47\x46\xbb\x4b\x64\xae\xaa\x36\x9d\x86\xf0\xd9\x4c\x6b\x81\x6e\x00\x6c\xd7\x93\xf4\xd3\x4b\xe9\x16\x83\xec\xd2\xa5\x2d\x19\
xff\xee\x89\xb1\x1a\x9a\x2b\x33\x6a\xce\x43\xc3\x99\x26\x7e\x17\x9c\x9d\x9e\xd6\xab\xfc\x86\x64\x2c\xe9\x11\x3b\xfa\x56\xa8\x2b\xb6\x74\x37\x2a\x63\xbb\xd4\x2f\x78\xb1\x7c\xa7\x07\x30\x00\xba\xc3\x8d\xe0\x70\x47\xca\x44\xcd\xe8\xda\x51\x0b\x53\x18\xff\x73\xf1\x31\xd9\xff\x18\x45\xfb\xd3\x68\xff\xe1\xf8\xd7\x11\xab\x67\x86\x2e\xbd\x90\x23\xcf\xab\x32\xa3\x86\x5e\x66\x9a\x4e\x79\x67\xed\x9b\xba\xd6\x49\xf3\xab\x27\x17\x49\x2a\xa4\x0b\xef\xb8\x3f\x73\x6c\xeb\x24\xef\x5b\x8f\x0d\xec\x31\xd2\x2c\xfb\xaa\x91\x33\xea\x5c\x75\x1a\x34\x4a\x43\xa3\x33\xf4\x1f\xa9\x25\xbe\x1e\xfa\x6e\xae\xa4\x2d\xc2\xf3\xae\x8f\x23\x34\xfd\xc0\x68\xe8\x0c\x69\xcf\xd2\x1c\xbd\xee\xef\xe6\x7a\xd0\x17\x05\x57\x50\xec\x26\x75\xd1\xd9\x79\x19\x9a\x0a\x9d\x43\x2d\xfe\xc1\x64\x1a\x76\x90\x34\xc4\xae\xd3\x0d\x0d\x05\xee\xc3\x67\x3b\x25\xb6\x4d\x42\xe9\x12\x31\x0d\x0f\x47\xf7\xcc\x5b\x8b\xbf\x5e\x50\xdd\x42\xff\xf0\xd8\x89\x26\xb5\x6e\xd3\x21\x89\xa1\x85\xfb\xa6\x8e\x7f\xa5\xb6\xd1\x35\x9d\xdd\xfd\x6e\xfe\x2e\x37\xa7\x70\x17\xbf\x7a\x9d\x35\x90\x27\x71\x5c\x2d\xab\x8c\x48\xcc\x3c\xdc\x41\x98\x6c\xe0\x58\xd8\x37\xc9\xff\x1d\xb0\x75\xec\xb1\x79\x78\xb6\x7d\xe9\xd4\x69\xfd\xab\xb7\xda\xe6\xc9\x6f\x17\xc3\xde\xcd\x64\xf0\x99\xbb\x1d\x54\x96\xee\x22\x36\xbd\x95\xa5\xfd\x24\x4f\x6c\xd2\x94\xd4\x2b\xaa\x15\xd4\xe9\xc0\x39\xc0\x9b\xe6\xf5\x5b\xdb\x6e\xdf\x8b\x43\x7d\x77\xd9\x6d\x6c\x81\x26\x34\x2e\x12\xfa\xc3\xd9\xab\xa7\xc5\xb2\x2c\x72\x9a\x5b\x5a\x7a\x00\x8e\x2e\x1b\xd3\xe9\xe3\xbe\xb2\x99\x02\x08\x86\x43\x03\x55\xed\x24\x17\x85\x29\x04\x92\xcc\x9c\xdc\x34\x7f\xc8\xfa\x16\xac\x53\xac\x5f\xc5\x91\x64\x06\x4c\x60\xcc\x72\x41\xb9\x71\x1c\xb8\x0a\xe9\x45\x33\xcc\x65\x3d\xd5\x1f\xed\x25\xe6\x75\xcf\xf2\x77\xef\x1c\x6f\x5b\xf4\xb6\x1c\x73\x97\xda\x51\xd4\xcc\x28\xc1\x42\x69\x26\xcc\xb0\x69\x10\x75\x13\x0b\xb7\x8d\xd7\xa3\x5e\x75\x34\x96\x96\xa6\x55\x73\x59\x69\x31\xec\x97\xc0\xcc\x13\xbe\xbe\x9a\xa7\xd9\x52\x7f\x46\xd7\x74\x25\xbc\x91\x86\x5d\x26\xbd\x6e\x5e\xf9\x75\x20\x5d\x18\x14\xf6\xe1\x9a\xae\x2e\xad\xae\x6a\xa0\x5c\xa8\xb2\x26\x25\xc8\x35\x86\x74\xef\x96\x43\x41\x99\xc1\x46\x89\xd6\x97\x2c\x3e\x50\x59\x95\x26\x98\x12\x93\x38\xa5\x13\xfd\x68\x51\xb3\xd8\xde\x65\x8c\xde\x77\x7e\x84\x24\x92\xc5\xe3\x4f\x62\xac\x8d\x9d\xfa\x91\xec\xd4\x3e\x9c\xfd\xdd\xcd\x54\x2d\xa2\xf7\xda\xb5\x09\x90\x77\xae\x5c\x60\xf6\x12\x7c\xb6\x2f\x4d\x78\x2f\x58\x1b\x37\xa1\xf5\xab\xd5\xaf\x5d\x23\xc3\x37\x79\x4f\x76\xcb\x30\xf1\x8c\x96\x9c\xc6\x44\x52\x6d\xcf\xa1\x49\xef\xe7\x72\x25\x8c\xd3\x58\x9e\x17\x6f\xd8\x42\xf1\x48\x52\x5b\xfd\xd0\x77\x2b\x01\xff\xe7\x01\xda\x21\xd1\x63\x03\x84\x4e\x42\x3b\x32\xa5\x26\xb7\xef\x06\x5c\x37\x5e\x0e\x34\xad\xce\x53\x2a\x28\xc8\xdb\xc2\xdc\x73\x11\xfd\x78\xe3\x7b\x73\xbd\xe8\x0e\x15\x14\xc2\x29\x90\x24\xa1\x09\x14\x79\xb6\x42\x57\xe7\x8c\xc4\xd7\xb7\x84\x27\x78\xa1\x81\x48\x36\x63\x19\x93\x2b\x65\xb9\x15\x59\xa2\x79\xc4\x84\xbd\x23\x87\x41\x7a\x49\xb6\xd1\x51\x90\x12\x91\xde\xa3\xd9\x34\xef\x5f\xd9\xc3\x4f\x4b\xc3\xe4\x05\x27\x8b\xa5\x8e\x40\xf7\xc8\xc7\xbe\x51\x74\x74\x82\xaf\xea\xc5\xc0\x8b\x0e\x66\xe1\x7d\xa0\xe6\x4c\x0e\x8f\x86\x5a\xe8\x25\xbc\x28\x31\x50\xa5\xe0\xc0\x57\x78\x53\x2a\xc6\xb0\x77\xe8\x64\xc1\x74\x51\x6e\xb4\x74\xae\xc4\xdf\xda\xd9\x47\x1b\xf8\xa6\x16\x1b\xbf\x6f\x9a\x3d\x06\xea\xef\x99\x6d\xbf\x68\x6a\x7b\xa5\x3c\xcd\xa7\xf0\xc5\x61\x73\x6e\xd6\xf2\xb0\x47\x2c\xab\x36\xae\xb8\x2b\x76\x91\x74\xf7\xcb\xba\xa2\x25\xe6\xc0\x7b\xa3\xbb\x9e\x18\xde\x19\xeb\x37\x87\x5b\x44\x56\x98\x8f\x5b\x06\x2f\x2e\xed\xc3\x50\x6d\xd6\xe1\xf1\xde\x7f\x05\x00\x00\xff\xff\x0d\x9a\xa6\x4d\x20\x64\x00\x00") func 
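The hex blobs above are regenerated machine output: the web UI assets are gzip-compressed and embedded into a generated Go source file, so editing web/ui/static/js/graph.js also rewrites its embedded byte literal and the bindataFileInfo size and modTime below. As a minimal stand-alone sketch of the decompression step a generated reader like bindataRead performs on such a blob (decompressAsset is an illustrative name, not the generated code):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

// decompressAsset mirrors the essential step of a go-bindata style reader:
// gunzip an embedded byte literal back into the original asset contents.
func decompressAsset(blob []byte) ([]byte, error) {
	gz, err := gzip.NewReader(bytes.NewReader(blob))
	if err != nil {
		return nil, err
	}
	defer gz.Close()
	return ioutil.ReadAll(gz)
}

func main() {
	// Build a stand-in "embedded asset" the same way a generator would:
	// gzip the file contents into an in-memory byte slice.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("console.log('graph.js stand-in');"))
	zw.Close()

	asset, err := decompressAsset(buf.Bytes())
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", asset)
}
```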
 func webUiStaticJsGraphJsBytes() ([]byte, error) {
 	return bindataRead(
@@ -421,7 +421,7 @@ func webUiStaticJsGraphJs() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "web/ui/static/js/graph.js", size: 25516, mode: os.FileMode(420), modTime: time.Unix(1489763294, 0)}
+	info := bindataFileInfo{name: "web/ui/static/js/graph.js", size: 25632, mode: os.FileMode(420), modTime: time.Unix(1491298467, 0)}
 	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -441,7 +441,7 @@ func webUiStaticJsGraph_templateHandlebar() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "web/ui/static/js/graph_template.handlebar", size: 6337, mode: os.FileMode(420), modTime: time.Unix(1489763294, 0)}
+	info := bindataFileInfo{name: "web/ui/static/js/graph_template.handlebar", size: 6337, mode: os.FileMode(420), modTime: time.Unix(1490348605, 0)}
 	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -461,7 +461,7 @@ func webUiStaticJsProm_consoleJs() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "web/ui/static/js/prom_console.js", size: 21651, mode: os.FileMode(420), modTime: time.Unix(1489763294, 0)}
+	info := bindataFileInfo{name: "web/ui/static/js/prom_console.js", size: 21651, mode: os.FileMode(420), modTime: time.Unix(1490348605, 0)}
 	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -701,7 +701,7 @@ func webUiStaticVendorFuzzyFuzzyJs() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "web/ui/static/vendor/fuzzy/fuzzy.js", size: 5669, mode: os.FileMode(420), modTime: time.Unix(1489763294, 0)}
+	info := bindataFileInfo{name: "web/ui/static/vendor/fuzzy/fuzzy.js", size: 5669, mode: os.FileMode(420), modTime: time.Unix(1490348605, 0)}
 	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
diff --git a/web/ui/static/js/graph.js b/web/ui/static/js/graph.js
index fad868a0e2..ec232b2474 100644
--- a/web/ui/static/js/graph.js
+++ b/web/ui/static/js/graph.js
@@ -415,7 +415,8 @@ Prometheus.Graph.prototype.submitQuery = function() {
       return;
     }
     var duration = new Date().getTime() - startTime;
-    self.evalStats.html("Load time: " + duration + "ms <br/> Resolution: " + resolution + "s");
+    var totalTimeSeries = xhr.responseJSON.data.result.length;
+    self.evalStats.html("Load time: " + duration + "ms <br/> Resolution: " + resolution + "s <br/> " + "Total time series: " + totalTimeSeries);
     self.spinner.hide();
   }
 });
diff --git a/web/web.go b/web/web.go
index e54e33f817..382965db3e 100644
--- a/web/web.go
+++ b/web/web.go
@@ -41,6 +41,7 @@ import (
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/notifier"
+	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/retrieval"
 	"github.com/prometheus/prometheus/rules"
@@ -379,6 +380,12 @@ func (h *Handler) targets(w http.ResponseWriter, r *http.Request) {
 		tps[job] = append(tps[job], t)
 	}
 
+	for _, targets := range tps {
+		sort.Slice(targets, func(i, j int) bool {
+			return targets[i].Labels().Get(labels.InstanceName) < targets[j].Labels().Get(labels.InstanceName)
+		})
+	}
+
 	h.executeTemplate(w, "targets.html", struct {
 		TargetPools map[string][]*retrieval.Target
 	}{