feat(storage): switch to AppenderV2 (to split)
Signed-off-by: bwplotka <bwplotka@gmail.com>
commit 652ea5541b (parent cb83cf5d92)
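
The diff below migrates the promqltest helpers and the scrape manager from the sample-type-specific storage.Appender to a unified storage.AppenderV2. The shape of the new interfaces can be read off the nopAppendable/nopAppender changes further down; the real definitions live in storage/ and are not part of this excerpt, so treat this as an inferred sketch, not the authoritative API:

    // Inferred from the nopAppender signature and call sites in this diff (hypothetical reconstruction).
    type AppendableV2 interface {
        AppenderV2(ctx context.Context) AppenderV2
    }

    type AppenderV2 interface {
        // One method covers floats and histograms: v, h and fh carry the sample value
        // (call sites below pass both v and h and let the appender pick), st is the
        // start ("created") timestamp and t the sample timestamp.
        Append(ref SeriesRef, ls labels.Labels, st, t int64, v float64,
            h *histogram.Histogram, fh *histogram.FloatHistogram, opts AOptions) (SeriesRef, error)
        Commit() error
        Rollback() error
    }
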
@@ -484,7 +484,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (res *FloatHistogram, counte
 // supposed to be used according to the schema.
 func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
     if h2 == nil {
-        return false
+        return h == nil
     }

     if h.Schema != h2.Schema ||
@@ -247,7 +247,7 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
 // supposed to be used according to the schema.
 func (h *Histogram) Equals(h2 *Histogram) bool {
     if h2 == nil {
-        return false
+        return h == nil
     }

     if h.Schema != h2.Schema || h.Count != h2.Count ||
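
Both Equals hunks make the same semantic fix: comparing against a nil histogram now reports equality exactly when the receiver is nil too, instead of always returning false. Illustrative example (not part of the diff):

    var a, b *histogram.FloatHistogram // both nil
    a.Equals(b)                        // before this commit: false; after: true (nil equals nil)
    h := &histogram.FloatHistogram{}
    h.Equals(nil)                      // still false: a non-nil histogram never equals nil
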
@@ -648,12 +648,12 @@ func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) {
 }

 // append the defined time series to the storage.
-func (cmd *loadCmd) append(a storage.Appender) error {
+func (cmd *loadCmd) append(a storage.AppenderV2) error {
     for h, smpls := range cmd.defs {
-        m := cmd.metrics[h]
+        ls := cmd.metrics[h]

         for _, s := range smpls {
-            if err := appendSample(a, s, m); err != nil {
+            if _, err := a.Append(0, ls, 0, s.T, s.F, nil, s.H, storage.AOptions{}); err != nil {
                 return err
             }
         }
@@ -699,7 +699,7 @@ func processClassicHistogramSeries(m labels.Labels, name string, histogramMap ma

 // If classic histograms are defined, convert them into native histograms with custom
 // bounds and append the defined time series to the storage.
-func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error {
+func (cmd *loadCmd) appendCustomHistogram(a storage.AppenderV2) error {
     histogramMap := map[uint64]tempHistogramWrapper{}

     // Go through all the time series to collate classic histogram data
@@ -754,7 +754,7 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error {
         }
         sort.Slice(samples, func(i, j int) bool { return samples[i].T < samples[j].T })
         for _, s := range samples {
-            if err := appendSample(a, s, histogramWrapper.metric); err != nil {
+            if _, err := a.Append(0, histogramWrapper.metric, 0, s.T, s.F, nil, s.H, storage.AOptions{}); err != nil {
                 return err
             }
         }
@@ -762,19 +762,6 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error {
     return nil
 }

-func appendSample(a storage.Appender, s promql.Sample, m labels.Labels) error {
-    if s.H != nil {
-        if _, err := a.AppendHistogram(0, m, s.T, nil, s.H); err != nil {
-            return err
-        }
-    } else {
-        if _, err := a.Append(0, m, s.T, s.F); err != nil {
-            return err
-        }
-    }
-    return nil
-}
-
 // evalCmd is a command that evaluates an expression for the given time (range)
 // and expects a specific result.
 type evalCmd struct {
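
The deleted appendSample helper existed only to branch between the v1 Append and AppendHistogram methods. With AppenderV2 the call sites above pass both s.F and s.H in a single call, which implies (an inference; the appender implementation is not in this excerpt) that a non-nil histogram takes precedence over the float value:

    // Old v1 flow (deleted above):
    if s.H != nil {
        _, err = a.AppendHistogram(0, m, s.T, nil, s.H)
    } else {
        _, err = a.Append(0, m, s.T, s.F)
    }
    // New v2 flow, one call as used above:
    _, err = a.Append(0, m, 0, s.T, s.F, nil, s.H, storage.AOptions{})
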
@@ -1386,7 +1373,7 @@ func (t *test) exec(tc testCommand, engine promql.QueryEngine) error {
         t.clear()

     case *loadCmd:
-        app := t.storage.Appender(t.context)
+        app := t.storage.AppenderV2(t.context)
         if err := cmd.append(app); err != nil {
             app.Rollback()
             return err
@@ -1699,16 +1686,16 @@ func (ll *LazyLoader) clear() error {

 // appendTill appends the defined time series to the storage till the given timestamp (in milliseconds).
 func (ll *LazyLoader) appendTill(ts int64) error {
-    app := ll.storage.Appender(ll.Context())
+    app := ll.storage.AppenderV2(ll.Context())
     for h, smpls := range ll.loadCmd.defs {
-        m := ll.loadCmd.metrics[h]
+        ls := ll.loadCmd.metrics[h]
         for i, s := range smpls {
             if s.T > ts {
                 // Removing the already added samples.
                 ll.loadCmd.defs[h] = smpls[i:]
                 break
             }
-            if err := appendSample(app, s, m); err != nil {
+            if _, err := app.Append(0, ls, 0, s.T, s.F, nil, s.H, storage.AOptions{}); err != nil {
                 return err
             }
             if i == len(smpls)-1 {
@@ -17,242 +17,32 @@ import (
     "bytes"
     "context"
     "encoding/binary"
-    "fmt"
-    "math"
-    "strings"
-    "sync"
     "testing"

     "github.com/gogo/protobuf/proto"
     dto "github.com/prometheus/client_model/go"
     "github.com/stretchr/testify/require"

-    "github.com/prometheus/prometheus/model/exemplar"
     "github.com/prometheus/prometheus/model/histogram"
     "github.com/prometheus/prometheus/model/labels"
-    "github.com/prometheus/prometheus/model/metadata"
     "github.com/prometheus/prometheus/storage"
 )

 type nopAppendable struct{}

-func (nopAppendable) Appender(context.Context) storage.Appender {
+func (nopAppendable) AppenderV2(context.Context) storage.AppenderV2 {
     return nopAppender{}
 }

 type nopAppender struct{}

-func (nopAppender) SetOptions(*storage.AppendOptions) {}
-
-func (nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) {
+func (nopAppender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
     return 1, nil
 }

-func (nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) {
-    return 2, nil
-}
-
-func (nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) {
-    return 3, nil
-}
-
-func (nopAppender) AppendHistogramSTZeroSample(storage.SeriesRef, labels.Labels, int64, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) {
-    return 0, nil
-}
-
-func (nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
-    return 4, nil
-}
-
-func (nopAppender) AppendSTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) {
-    return 5, nil
-}
-
 func (nopAppender) Commit() error   { return nil }
 func (nopAppender) Rollback() error { return nil }

-type floatSample struct {
-    metric labels.Labels
-    t      int64
-    f      float64
-}
-
-func equalFloatSamples(a, b floatSample) bool {
-    // Compare Float64bits so NaN values which are exactly the same will compare equal.
-    return labels.Equal(a.metric, b.metric) && a.t == b.t && math.Float64bits(a.f) == math.Float64bits(b.f)
-}
-
-type histogramSample struct {
-    metric labels.Labels
-    t      int64
-    h      *histogram.Histogram
-    fh     *histogram.FloatHistogram
-}
-
-type metadataEntry struct {
-    m      metadata.Metadata
-    metric labels.Labels
-}
-
-func metadataEntryEqual(a, b metadataEntry) bool {
-    if !labels.Equal(a.metric, b.metric) {
-        return false
-    }
-    if a.m.Type != b.m.Type {
-        return false
-    }
-    if a.m.Unit != b.m.Unit {
-        return false
-    }
-    if a.m.Help != b.m.Help {
-        return false
-    }
-    return true
-}
-
-type collectResultAppendable struct {
-    *collectResultAppender
-}
-
-func (a *collectResultAppendable) Appender(context.Context) storage.Appender {
-    return a
-}
-
-// collectResultAppender records all samples that were added through the appender.
-// It can be used as its zero value or be backed by another appender it writes samples through.
-type collectResultAppender struct {
-    mtx sync.Mutex
-
-    next                 storage.Appender
-    resultFloats         []floatSample
-    pendingFloats        []floatSample
-    rolledbackFloats     []floatSample
-    resultHistograms     []histogramSample
-    pendingHistograms    []histogramSample
-    rolledbackHistograms []histogramSample
-    resultExemplars      []exemplar.Exemplar
-    pendingExemplars     []exemplar.Exemplar
-    resultMetadata       []metadataEntry
-    pendingMetadata      []metadataEntry
-}
-
-func (*collectResultAppender) SetOptions(*storage.AppendOptions) {}
-
-func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
-    a.mtx.Lock()
-    defer a.mtx.Unlock()
-    a.pendingFloats = append(a.pendingFloats, floatSample{
-        metric: lset,
-        t:      t,
-        f:      v,
-    })
-
-    if a.next == nil {
-        if ref == 0 {
-            // Use labels hash as a stand-in for unique series reference, to avoid having to track all series.
-            ref = storage.SeriesRef(lset.Hash())
-        }
-        return ref, nil
-    }
-
-    ref, err := a.next.Append(ref, lset, t, v)
-    if err != nil {
-        return 0, err
-    }
-    return ref, nil
-}
-
-func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
-    a.mtx.Lock()
-    defer a.mtx.Unlock()
-    a.pendingExemplars = append(a.pendingExemplars, e)
-    if a.next == nil {
-        return 0, nil
-    }
-
-    return a.next.AppendExemplar(ref, l, e)
-}
-
-func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
-    a.mtx.Lock()
-    defer a.mtx.Unlock()
-    a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t, metric: l})
-    if a.next == nil {
-        return 0, nil
-    }
-
-    return a.next.AppendHistogram(ref, l, t, h, fh)
-}
-
-func (a *collectResultAppender) AppendHistogramSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
-    if h != nil {
-        return a.AppendHistogram(ref, l, st, &histogram.Histogram{}, nil)
-    }
-    return a.AppendHistogram(ref, l, st, nil, &histogram.FloatHistogram{})
-}
-
-func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
-    a.mtx.Lock()
-    defer a.mtx.Unlock()
-    a.pendingMetadata = append(a.pendingMetadata, metadataEntry{metric: l, m: m})
-    if a.next == nil {
-        if ref == 0 {
-            ref = storage.SeriesRef(l.Hash())
-        }
-        return ref, nil
-    }
-
-    return a.next.UpdateMetadata(ref, l, m)
-}
-
-func (a *collectResultAppender) AppendSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64) (storage.SeriesRef, error) {
-    return a.Append(ref, l, st, 0.0)
-}
-
-func (a *collectResultAppender) Commit() error {
-    a.mtx.Lock()
-    defer a.mtx.Unlock()
-    a.resultFloats = append(a.resultFloats, a.pendingFloats...)
-    a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...)
-    a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...)
-    a.resultMetadata = append(a.resultMetadata, a.pendingMetadata...)
-    a.pendingFloats = nil
-    a.pendingExemplars = nil
-    a.pendingHistograms = nil
-    a.pendingMetadata = nil
-    if a.next == nil {
-        return nil
-    }
-    return a.next.Commit()
-}
-
-func (a *collectResultAppender) Rollback() error {
-    a.mtx.Lock()
-    defer a.mtx.Unlock()
-    a.rolledbackFloats = a.pendingFloats
-    a.rolledbackHistograms = a.pendingHistograms
-    a.pendingFloats = nil
-    a.pendingHistograms = nil
-    if a.next == nil {
-        return nil
-    }
-    return a.next.Rollback()
-}
-
-func (a *collectResultAppender) String() string {
-    var sb strings.Builder
-    for _, s := range a.resultFloats {
-        sb.WriteString(fmt.Sprintf("committed: %s %f %d\n", s.metric, s.f, s.t))
-    }
-    for _, s := range a.pendingFloats {
-        sb.WriteString(fmt.Sprintf("pending: %s %f %d\n", s.metric, s.f, s.t))
-    }
-    for _, s := range a.rolledbackFloats {
-        sb.WriteString(fmt.Sprintf("rolledback: %s %f %d\n", s.metric, s.f, s.t))
-    }
-    return sb.String()
-}
-
 // protoMarshalDelimited marshals a MetricFamily into a delimited
 // Prometheus proto exposition format bytes (known as `encoding=delimited`)
 //
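
The deleted collectResultAppender and its sample/metadata record types are superseded later in this commit by a teststorage.Appender (see the manager_test.go hunks below), which hides the locking behind accessors instead of exposing raw fields. A sketch of the replacement usage, assembled from the call sites visible in this diff (the teststorage types themselves are not part of this excerpt):

    appTest := teststorage.NewAppender() // returns *teststorage.Appender per runManagers below
    // ...wire it in via NewManagerWithAppendableV2, since it acts as a storage.AppendableV2...
    if appTest.ResultSamplesGreaterThan(0) { /* at least one scrape landed */ }
    samples := findSamplesForMetric(appTest.ResultSamples, "some_metric")
    v, ts := samples[0].V, samples[0].T // sample fields as used by the assertions below
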
@@ -38,8 +38,10 @@ import (
     "github.com/prometheus/prometheus/util/pool"
 )

-// NewManager is the Manager constructor.
-func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) {
+// NewManager is the Manager constructor using deprecated Appendable.
+//
+// Deprecated: Use NewManagerV2 instead. NewManager will be removed (or replaced with NewManagerV2) soon (ETA: Q2 2026).
+func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), appendableV1 storage.Appendable, registerer prometheus.Registerer) (*Manager, error) {
     if o == nil {
         o = &Options{}
     }
@@ -53,7 +55,39 @@ func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(str
     }

     m := &Manager{
-        append:                 app,
+        appendableV1:           appendableV1,
+        opts:                   o,
+        logger:                 logger,
+        newScrapeFailureLogger: newScrapeFailureLogger,
+        scrapeConfigs:          make(map[string]*config.ScrapeConfig),
+        scrapePools:            make(map[string]*scrapePool),
+        graceShut:              make(chan struct{}),
+        triggerReload:          make(chan struct{}, 1),
+        metrics:                sm,
+        buffers:                pool.New(1e3, 100e6, 3, func(sz int) any { return make([]byte, 0, sz) }),
+    }
+
+    m.metrics.setTargetMetadataCacheGatherer(m)
+
+    return m, nil
+}
+
+// NewManagerWithAppendableV2 is the Manager constructor using AppendableV2.
+func NewManagerWithAppendableV2(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), appendableV2 storage.AppendableV2, registerer prometheus.Registerer) (*Manager, error) {
+    if o == nil {
+        o = &Options{}
+    }
+    if logger == nil {
+        logger = promslog.NewNopLogger()
+    }
+
+    sm, err := newScrapeMetrics(registerer)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create scrape manager due to error: %w", err)
+    }
+
+    m := &Manager{
+        appendableV2:           appendableV2,
         opts:                   o,
         logger:                 logger,
         newScrapeFailureLogger: newScrapeFailureLogger,
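
During the migration both constructors coexist and build the same Manager, differing only in which appendable field they set. (Note that the deprecation comment points at NewManagerV2 while the constructor actually added here is NewManagerWithAppendableV2; given the "(to split)" commit title, a rename appears to be pending.) Usage side by side:

    // Deprecated v1 path: Manager.appendableV1 is set.
    m1, err := NewManager(opts, logger, newFailureLogger, appendableV1, reg)
    // New path: Manager.appendableV2 is set.
    m2, err := NewManagerWithAppendableV2(opts, logger, newFailureLogger, appendableV2, reg)
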
@@ -75,19 +109,22 @@ type Options struct {
     ExtraMetrics bool
     // Option used by downstream scraper users like OpenTelemetry Collector
     // to help lookup metric metadata. Should be false for Prometheus.
+    // TODO(bwplotka): Remove once appender v1 flow is removed, collector can use AppenderV2
+    // which is capable of passing metadata on every Append.
     PassMetadataInContext bool
     // Option to enable appending of scraped Metadata to the TSDB/other appenders. Individual appenders
     // can decide what to do with metadata, but for practical purposes this flag exists so that metadata
     // can be written to the WAL and thus read for remote write.
-    // TODO: implement some form of metadata storage
     AppendMetadata bool
     // Option to increase the interval used by scrape manager to throttle target groups updates.
     DiscoveryReloadInterval model.Duration

     // Option to enable the ingestion of the created timestamp as a synthetic zero sample.
     // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md
+    // TODO(bwplotka): Remove once appender v1 flow is removed.
     EnableStartTimestampZeroIngestion bool

-    // EnableTypeAndUnitLabels
+    // EnableTypeAndUnitLabels represents type-and-unit-labels feature flag.
     EnableTypeAndUnitLabels bool

     // Optional HTTP client options to use when scraping.
@@ -100,9 +137,12 @@ type Options struct {
 // Manager maintains a set of scrape pools and manages start/stop cycles
 // when receiving new target groups from the discovery manager.
 type Manager struct {
     opts   *Options
     logger *slog.Logger
-    append storage.Appendable
+
+    appendableV1 storage.Appendable
+    appendableV2 storage.AppendableV2

     graceShut chan struct{}

     offsetSeed uint64 // Global offsetSeed seed is used to spread scrape workload across HA setup.
@@ -183,7 +223,7 @@ func (m *Manager) reload() {
             continue
         }
         m.metrics.targetScrapePools.Inc()
-        sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics)
+        sp, err := newScrapePool(scrapeConfig, m.appendableV1, m.appendableV2, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics)
         if err != nil {
             m.metrics.targetScrapePoolsFailed.Inc()
             m.logger.Error("error creating new scrape pool", "err", err, "scrape_pool", setName)
@@ -30,25 +30,24 @@ import (
     "testing"
     "time"

-    "github.com/gogo/protobuf/proto"
     "github.com/prometheus/client_golang/prometheus"
     dto "github.com/prometheus/client_model/go"
     "github.com/prometheus/common/expfmt"
     "github.com/prometheus/common/model"
     "github.com/prometheus/common/promslog"
+    "github.com/prometheus/prometheus/util/teststorage"
     "github.com/stretchr/testify/require"
     "go.yaml.in/yaml/v2"
+    "google.golang.org/protobuf/proto"
     "google.golang.org/protobuf/types/known/timestamppb"

     "github.com/prometheus/prometheus/config"
     "github.com/prometheus/prometheus/discovery"
     _ "github.com/prometheus/prometheus/discovery/file"
     "github.com/prometheus/prometheus/discovery/targetgroup"
-    "github.com/prometheus/prometheus/model/histogram"
     "github.com/prometheus/prometheus/model/labels"
     "github.com/prometheus/prometheus/model/relabel"
     "github.com/prometheus/prometheus/model/timestamp"
-    "github.com/prometheus/prometheus/storage"
     "github.com/prometheus/prometheus/tsdb/tsdbutil"
     "github.com/prometheus/prometheus/util/runutil"
     "github.com/prometheus/prometheus/util/testutil"
@@ -528,7 +527,7 @@ scrape_configs:
         return noopLoop()
     }
     sp := &scrapePool{
-        appendable: &nopAppendable{},
+        appendableV2: &nopAppendable{},
         activeTargets: map[uint64]*Target{
             1: {},
         },
@@ -692,7 +691,7 @@ scrape_configs:
     _, cancel := context.WithCancel(context.Background())
     defer cancel()
     sp := &scrapePool{
-        appendable:    &nopAppendable{},
+        appendableV2:  &nopAppendable{},
         activeTargets: map[uint64]*Target{},
         loops: map[uint64]loop{
             1: noopLoop(),
@@ -777,11 +776,10 @@ func TestManagerSTZeroIngestion(t *testing.T) {
             // TODO(bwplotka): Add more types than just counter?
             encoded := prepareTestEncodedCounter(t, testFormat, expectedMetricName, expectedSampleValue, sampleTs, stTs)

-            app := &collectResultAppender{}
-            discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
+            discoveryManager, scrapeManager, appTest := runManagers(t, ctx, &Options{
                 EnableStartTimestampZeroIngestion: testSTZeroIngest,
                 skipOffsetting:                    true,
-            }, &collectResultAppendable{app})
+            })
             defer scrapeManager.Stop()

             server := setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded)
@@ -806,11 +804,8 @@ scrape_configs:
             ctx, cancel = context.WithTimeout(ctx, 1*time.Minute)
             defer cancel()
             require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
-                app.mtx.Lock()
-                defer app.mtx.Unlock()
-
                 // Check if scrape happened and grab the relevant samples.
-                if len(app.resultFloats) > 0 {
+                if appTest.ResultSamplesGreaterThan(0) {
                     return nil
                 }
                 return errors.New("expected some float samples, got none")
@@ -818,22 +813,22 @@ scrape_configs:

             // Verify results.
             // Verify what we got vs expectations around ST injection.
-            samples := findSamplesForMetric(app.resultFloats, expectedMetricName)
+            samples := findSamplesForMetric(appTest.ResultSamples, expectedMetricName)
             if testWithST && testSTZeroIngest {
                 require.Len(t, samples, 2)
-                require.Equal(t, 0.0, samples[0].f)
-                require.Equal(t, timestamp.FromTime(stTs), samples[0].t)
-                require.Equal(t, expectedSampleValue, samples[1].f)
-                require.Equal(t, timestamp.FromTime(sampleTs), samples[1].t)
+                require.Equal(t, 0.0, samples[0].V)
+                require.Equal(t, timestamp.FromTime(stTs), samples[0].T)
+                require.Equal(t, expectedSampleValue, samples[1].V)
+                require.Equal(t, timestamp.FromTime(sampleTs), samples[1].T)
             } else {
                 require.Len(t, samples, 1)
-                require.Equal(t, expectedSampleValue, samples[0].f)
-                require.Equal(t, timestamp.FromTime(sampleTs), samples[0].t)
+                require.Equal(t, expectedSampleValue, samples[0].V)
+                require.Equal(t, timestamp.FromTime(sampleTs), samples[0].T)
             }

             // Verify what we got vs expectations around additional _created series for OM text.
             // enableSTZeroInjection also kills that _created line.
-            createdSeriesSamples := findSamplesForMetric(app.resultFloats, expectedCreatedMetricName)
+            createdSeriesSamples := findSamplesForMetric(appTest.ResultSamples, expectedCreatedMetricName)
             if testFormat == config.OpenMetricsText1_0_0 && testWithST && !testSTZeroIngest {
                 // For OM Text, when counter has ST, and feature flag disabled we should see _created lines.
                 require.Len(t, createdSeriesSamples, 1)
@@ -841,7 +836,7 @@ scrape_configs:
                 // We don't check the st timestamp as explicit ts was not implemented in expfmt.Encoder,
                 // but exists in OM https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#:~:text=An%20example%20with%20a%20Metric%20with%20no%20labels%2C%20and%20a%20MetricPoint%20with%20a%20timestamp%20and%20a%20created
                 // We can implement this, but we want to potentially get rid of OM 1.0 ST lines
-                require.Equal(t, float64(timestamppb.New(stTs).AsTime().UnixNano())/1e9, createdSeriesSamples[0].f)
+                require.Equal(t, float64(timestamppb.New(stTs).AsTime().UnixNano())/1e9, createdSeriesSamples[0].V)
             } else {
                 require.Empty(t, createdSeriesSamples)
             }
@@ -885,9 +880,9 @@ func prepareTestEncodedCounter(t *testing.T, format config.ScrapeProtocol, mName
     }
 }

-func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatSample) {
-    for _, f := range floats {
-        if f.metric.Get(model.MetricNameLabel) == metricName {
+func findSamplesForMetric(s []sample, metricName string) (ret []sample) {
+    for _, f := range s {
+        if f.L.Get(model.MetricNameLabel) == metricName {
             ret = append(ret, f)
         }
     }
@@ -923,136 +918,6 @@ func generateTestHistogram(i int) *dto.Histogram {
     return h
 }

-func TestManagerSTZeroIngestionHistogram(t *testing.T) {
-    t.Parallel()
-    const mName = "expected_histogram"
-
-    for _, tc := range []struct {
-        name                  string
-        inputHistSample       *dto.Histogram
-        enableSTZeroIngestion bool
-    }{
-        {
-            name: "disabled with ST on histogram",
-            inputHistSample: func() *dto.Histogram {
-                h := generateTestHistogram(0)
-                h.CreatedTimestamp = timestamppb.Now()
-                return h
-            }(),
-            enableSTZeroIngestion: false,
-        },
-        {
-            name: "enabled with ST on histogram",
-            inputHistSample: func() *dto.Histogram {
-                h := generateTestHistogram(0)
-                h.CreatedTimestamp = timestamppb.Now()
-                return h
-            }(),
-            enableSTZeroIngestion: true,
-        },
-        {
-            name: "enabled without ST on histogram",
-            inputHistSample: func() *dto.Histogram {
-                h := generateTestHistogram(0)
-                return h
-            }(),
-            enableSTZeroIngestion: true,
-        },
-    } {
-        t.Run(tc.name, func(t *testing.T) {
-            t.Parallel()
-            ctx, cancel := context.WithCancel(context.Background())
-            defer cancel()
-
-            app := &collectResultAppender{}
-            discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
-                EnableStartTimestampZeroIngestion: tc.enableSTZeroIngestion,
-                skipOffsetting:                    true,
-            }, &collectResultAppendable{app})
-            defer scrapeManager.Stop()
-
-            once := sync.Once{}
-            // Start fake HTTP target to that allow one scrape only.
-            server := httptest.NewServer(
-                http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
-                    fail := true
-                    once.Do(func() {
-                        fail = false
-                        w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`)
-
-                        ctrType := dto.MetricType_HISTOGRAM
-                        w.Write(protoMarshalDelimited(t, &dto.MetricFamily{
-                            Name:   proto.String(mName),
-                            Type:   &ctrType,
-                            Metric: []*dto.Metric{{Histogram: tc.inputHistSample}},
-                        }))
-                    })
-
-                    if fail {
-                        w.WriteHeader(http.StatusInternalServerError)
-                    }
-                }),
-            )
-            defer server.Close()
-
-            serverURL, err := url.Parse(server.URL)
-            require.NoError(t, err)
-
-            testConfig := fmt.Sprintf(`
-global:
-  # Disable regular scrapes.
-  scrape_interval: 9999m
-  scrape_timeout: 5s
-
-scrape_configs:
-- job_name: test
-  scrape_native_histograms: true
-  static_configs:
-  - targets: ['%s']
-`, serverURL.Host)
-            applyConfig(t, testConfig, scrapeManager, discoveryManager)
-
-            var got []histogramSample
-
-            // Wait for one scrape.
-            ctx, cancel = context.WithTimeout(ctx, 1*time.Minute)
-            defer cancel()
-            require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
-                app.mtx.Lock()
-                defer app.mtx.Unlock()
-
-                // Check if scrape happened and grab the relevant histograms, they have to be there - or it's a bug
-                // and it's not worth waiting.
-                for _, h := range app.resultHistograms {
-                    if h.metric.Get(model.MetricNameLabel) == mName {
-                        got = append(got, h)
-                    }
-                }
-                if len(app.resultHistograms) > 0 {
-                    return nil
-                }
-                return errors.New("expected some histogram samples, got none")
-            }), "after 1 minute")
-
-            // Check for zero samples, assuming we only injected always one histogram sample.
-            // Did it contain ST to inject? If yes, was ST zero enabled?
-            if tc.inputHistSample.CreatedTimestamp.IsValid() && tc.enableSTZeroIngestion {
-                require.Len(t, got, 2)
-                // Zero sample.
-                require.Equal(t, histogram.Histogram{}, *got[0].h)
-                // Quick soft check to make sure it's the same sample or at least not zero.
-                require.Equal(t, tc.inputHistSample.GetSampleSum(), got[1].h.Sum)
-                return
-            }
-
-            // Expect only one, valid sample.
-            require.Len(t, got, 1)
-            // Quick soft check to make sure it's the same sample or at least not zero.
-            require.Equal(t, tc.inputHistSample.GetSampleSum(), got[0].h.Sum)
-        })
-    }
-}
-
 func TestUnregisterMetrics(t *testing.T) {
     reg := prometheus.NewRegistry()
     // Check that all metrics can be unregistered, allowing a second manager to be created.
@@ -1066,115 +931,6 @@ func TestUnregisterMetrics(t *testing.T) {
     }
 }

-// TestNHCBAndSTZeroIngestion verifies that both ConvertClassicHistogramsToNHCBEnabled
-// and EnableStartTimestampZeroIngestion can be used simultaneously without errors.
-// This test addresses issue #17216 by ensuring the previously blocking check has been removed.
-// The test verifies that the presence of exemplars in the input does not cause errors,
-// although exemplars are not preserved during NHCB conversion (as documented below).
-func TestNHCBAndSTZeroIngestion(t *testing.T) {
-    t.Parallel()
-
-    const (
-        mName = "test_histogram"
-        // The expected sum of the histogram, as defined by the test's OpenMetrics exposition data.
-        // This value (45.5) is the sum reported in the test_histogram_sum metric below.
-        expectedHistogramSum = 45.5
-    )
-
-    ctx := t.Context()
-
-    app := &collectResultAppender{}
-    discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
-        EnableStartTimestampZeroIngestion: true,
-        skipOffsetting:                    true,
-    }, &collectResultAppendable{app})
-    defer scrapeManager.Stop()
-
-    once := sync.Once{}
-    server := httptest.NewServer(
-        http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
-            fail := true
-            once.Do(func() {
-                fail = false
-                w.Header().Set("Content-Type", `application/openmetrics-text`)
-
-                // Expose a histogram with created timestamp and exemplars to verify no parsing errors occur.
-                fmt.Fprint(w, `# HELP test_histogram A histogram with created timestamp and exemplars
-# TYPE test_histogram histogram
-test_histogram_bucket{le="0.0"} 1
-test_histogram_bucket{le="1.0"} 10 # {trace_id="trace-1"} 0.5 123456789
-test_histogram_bucket{le="2.0"} 20 # {trace_id="trace-2"} 1.5 123456780
-test_histogram_bucket{le="+Inf"} 30 # {trace_id="trace-3"} 2.5
-test_histogram_count 30
-test_histogram_sum 45.5
-test_histogram_created 1520430001
-# EOF
-`)
-            })
-
-            if fail {
-                w.WriteHeader(http.StatusInternalServerError)
-            }
-        }),
-    )
-    defer server.Close()
-
-    serverURL, err := url.Parse(server.URL)
-    require.NoError(t, err)
-
-    // Configuration with both convert_classic_histograms_to_nhcb enabled and ST zero ingestion enabled.
-    testConfig := fmt.Sprintf(`
-global:
-  # Use a very long scrape_interval to prevent automatic scraping during the test.
-  scrape_interval: 9999m
-  scrape_timeout: 5s
-
-scrape_configs:
-- job_name: test
-  convert_classic_histograms_to_nhcb: true
-  static_configs:
-  - targets: ['%s']
-`, serverURL.Host)
-
-    applyConfig(t, testConfig, scrapeManager, discoveryManager)
-
-    // Verify that the scrape pool was created (proves the blocking check was removed).
-    require.Eventually(t, func() bool {
-        scrapeManager.mtxScrape.Lock()
-        defer scrapeManager.mtxScrape.Unlock()
-        _, exists := scrapeManager.scrapePools["test"]
-        return exists
-    }, 5*time.Second, 100*time.Millisecond, "scrape pool should be created for job 'test'")
-
-    // Helper function to get matching histograms to avoid race conditions.
-    getMatchingHistograms := func() []histogramSample {
-        app.mtx.Lock()
-        defer app.mtx.Unlock()
-
-        var got []histogramSample
-        for _, h := range app.resultHistograms {
-            if h.metric.Get(model.MetricNameLabel) == mName {
-                got = append(got, h)
-            }
-        }
-        return got
-    }
-
-    require.Eventually(t, func() bool {
-        return len(getMatchingHistograms()) > 0
-    }, 1*time.Minute, 100*time.Millisecond, "expected histogram samples, got none")
-
-    // Verify that samples were ingested (proving both features work together).
-    got := getMatchingHistograms()
-
-    // With ST zero ingestion enabled and a created timestamp present, we expect 2 samples:
-    // one zero sample and one actual sample.
-    require.Len(t, got, 2, "expected 2 histogram samples (zero sample + actual sample)")
-    require.Equal(t, histogram.Histogram{}, *got[0].h, "first sample should be zero sample")
-    require.InDelta(t, expectedHistogramSum, got[1].h.Sum, 1e-9, "second sample should retain the expected sum")
-    require.Len(t, app.resultExemplars, 2, "expected 2 exemplars from histogram buckets")
-}
-
 func applyConfig(
     t *testing.T,
     config string,
@@ -1195,16 +951,15 @@ func applyConfig(
     require.NoError(t, discoveryManager.ApplyConfig(c))
 }

-func runManagers(t *testing.T, ctx context.Context, opts *Options, app storage.Appendable) (*discovery.Manager, *Manager) {
+func runManagers(t *testing.T, ctx context.Context, opts *Options) (*discovery.Manager, *Manager, *teststorage.Appender) {
     t.Helper()

     if opts == nil {
         opts = &Options{}
     }
     opts.DiscoveryReloadInterval = model.Duration(100 * time.Millisecond)
-    if app == nil {
-        app = nopAppendable{}
-    }
+    appTest := teststorage.NewAppender()

     reg := prometheus.NewRegistry()
     sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg))
@@ -1216,17 +971,17 @@ func runManagers(t *testing.T, ctx context.Context, opts *Options, app storage.A
         sdMetrics,
         discovery.Updatert(100*time.Millisecond),
     )
-    scrapeManager, err := NewManager(
+    scrapeManager, err := NewManagerWithAppendableV2(
         opts,
         nil,
         nil,
-        app,
+        appTest,
         prometheus.NewRegistry(),
     )
     require.NoError(t, err)
     go discoveryManager.Run()
     go scrapeManager.Run(discoveryManager.SyncCh())
-    return discoveryManager, scrapeManager
+    return discoveryManager, scrapeManager, appTest
 }

 func writeIntoFile(t *testing.T, content, filePattern string) *os.File {
@@ -1293,7 +1048,7 @@ scrape_configs:
   - files: ['%s']
 `

-    discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
+    discoveryManager, scrapeManager, _ := runManagers(t, ctx, nil)
     defer scrapeManager.Stop()

     applyConfig(
@@ -1392,7 +1147,7 @@ scrape_configs:
   file_sd_configs:
   - files: ['%s', '%s']
 `
-    discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
+    discoveryManager, scrapeManager, _ := runManagers(t, ctx, nil)
     defer scrapeManager.Stop()

     applyConfig(
@@ -1451,7 +1206,7 @@ scrape_configs:
   file_sd_configs:
   - files: ['%s']
 `
-    discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
+    discoveryManager, scrapeManager, _ := runManagers(t, ctx, nil)
     defer scrapeManager.Stop()

     applyConfig(
@@ -1517,7 +1272,7 @@ scrape_configs:
   - targets: ['%s']
 `

-    discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
+    discoveryManager, scrapeManager, _ := runManagers(t, ctx, nil)
     defer scrapeManager.Stop()

     // Apply the initial config with an existing file
@@ -1601,7 +1356,7 @@ scrape_configs:

     cfg := loadConfiguration(t, cfgText)

-    m, err := NewManager(&Options{}, nil, nil, &nopAppendable{}, prometheus.NewRegistry())
+    m, err := NewManagerWithAppendableV2(&Options{}, nil, nil, &nopAppendable{}, prometheus.NewRegistry())
     require.NoError(t, err)
     defer m.Stop()
     require.NoError(t, m.ApplyConfig(cfg))
scrape/scrape.go (669 lines changed)
@@ -80,10 +80,12 @@ type FailureLogger interface {

 // scrapePool manages scrapes for sets of targets.
 type scrapePool struct {
-    appendable storage.Appendable
-    logger     *slog.Logger
-    cancel     context.CancelFunc
-    httpOpts   []config_util.HTTPClientOption
+    appendableV1 storage.Appendable
+    appendableV2 storage.AppendableV2
+
+    logger   *slog.Logger
+    cancel   context.CancelFunc
+    httpOpts []config_util.HTTPClientOption

     // mtx must not be taken after targetMtx.
     mtx sync.Mutex
@@ -147,7 +149,14 @@ const maxAheadTime = 10 * time.Minute
 // returning an empty label set is interpreted as "drop".
 type labelsMutator func(labels.Labels) labels.Labels

-func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger *slog.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) {
+type scrapeLoopAppender interface {
+    storage.AppenderTransaction
+
+    addReportSample(s reportSample, t int64, v float64, b *labels.Builder, rejectOOO bool) error
+    append(b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error)
+}
+
+func newScrapePool(cfg *config.ScrapeConfig, appendableV1 storage.Appendable, appendableV2 storage.AppendableV2, offsetSeed uint64, logger *slog.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) {
     if logger == nil {
         logger = promslog.NewNopLogger()
     }
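
The new scrapeLoopAppender interface is presumably what lets a single scrape loop run against either appender generation: it wraps the transactional surface (storage.AppenderTransaction, named in the hunk above) plus the two operations a loop actually performs, ingesting a scraped payload and writing report samples. A minimal illustrative stub, assuming the types named above (the concrete v1/v2 implementations are not in this excerpt):

    type nopLoopAppender struct{ storage.AppenderTransaction }

    func (nopLoopAppender) addReportSample(_ reportSample, _ int64, _ float64, _ *labels.Builder, _ bool) error {
        return nil
    }

    func (nopLoopAppender) append(_ []byte, _ string, _ time.Time) (total, added, seriesAdded int, err error) {
        return 0, 0, 0, nil
    }
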
@@ -169,7 +178,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
     ctx, cancel := context.WithCancel(context.Background())
     sp := &scrapePool{
         cancel:        cancel,
-        appendable:    app,
+        appendableV1:  appendableV1,
+        appendableV2:  appendableV2,
         config:        cfg,
         client:        client,
         activeTargets: map[uint64]*Target{},
@@ -183,55 +193,76 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
         escapingScheme: escapingScheme,
     }
     sp.newLoop = func(opts scrapeLoopOptions) loop {
-        // Update the targets retrieval function for metadata to a new scrape cache.
-        cache := opts.cache
-        if cache == nil {
-            cache = newScrapeCache(metrics)
-        }
-        opts.target.SetMetadataStore(cache)
-
-        return newScrapeLoop(
-            ctx,
-            opts.scraper,
-            logger.With("target", opts.target),
-            buffers,
-            func(l labels.Labels) labels.Labels {
+        // NOTE: Formatting matches scrapeLoop fields order for readability.
+        sl := &scrapeLoop{
+            buffers:      buffers,
+            appendableV1: appendableV1,
+            appendableV2: appendableV2,
+            sampleMutator: func(l labels.Labels) labels.Labels {
                 return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc)
             },
-            func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) },
-            func(ctx context.Context) storage.Appender { return app.Appender(ctx) },
-            cache,
-            sp.symbolTable,
-            offsetSeed,
-            opts.honorTimestamps,
-            opts.trackTimestampsStaleness,
-            opts.enableCompression,
-            opts.sampleLimit,
-            opts.bucketLimit,
-            opts.maxSchema,
-            opts.labelLimits,
-            opts.interval,
-            opts.timeout,
-            opts.alwaysScrapeClassicHist,
-            opts.convertClassicHistToNHCB,
-            cfg.ScrapeNativeHistogramsEnabled(),
-            options.EnableStartTimestampZeroIngestion,
-            options.EnableTypeAndUnitLabels,
-            options.ExtraMetrics,
-            options.AppendMetadata,
-            opts.target,
-            options.PassMetadataInContext,
-            metrics,
-            options.skipOffsetting,
-            sp.validationScheme,
-            sp.escapingScheme,
-            opts.fallbackScrapeProtocol,
-        )
+            reportSampleMutator: func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) },
+            offsetSeed:          offsetSeed,
+            metrics:             metrics,
+
+            symbolTable:      sp.symbolTable,
+            validationScheme: sp.validationScheme,
+            escapingScheme:   sp.escapingScheme,
+
+            enableNativeHistogramScraping: cfg.ScrapeNativeHistogramsEnabled(),
+
+            enableSTZeroIngestion:   options.EnableStartTimestampZeroIngestion,
+            enableTypeAndUnitLabels: options.EnableTypeAndUnitLabels,
+            reportExtraMetrics:      options.ExtraMetrics,
+            appendMetadataToWAL:     options.AppendMetadata,
+            skipOffsetting:          options.skipOffsetting,
+
+            scrapeLoopOptions: opts,
+        }
+        sl.init(ctx, options.PassMetadataInContext)
+        return sl
     }
     sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
     return sp, nil
 }

+// init prepares scrapeLoop after raw construction.
+// NOTE: While newScrapeLoop constructor pattern would be safer, it has proven to be
+// highly not readable (too many params). Instead, we follow init pattern.
+func (sl *scrapeLoop) init(ctx context.Context, passMetadataInContext bool) {
+    if sl.l == nil {
+        sl.l = promslog.NewNopLogger()
+    }
+    sl.parentCtx = ctx
+    sl.stopped = make(chan struct{})
+    if sl.buffers == nil {
+        sl.buffers = pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) })
+    }
+    if sl.cache == nil {
+        sl.cache = newScrapeCache(sl.metrics)
+        if sl.target != nil {
+            // Update the targets retrieval function for metadata to a new scrape cache.
+            sl.target.SetMetadataStore(sl.cache)
+            // TODO(bwplotka): Not sure why, but doing this before sl.target.SetMetadataStore(sl.cache) blocks goroutines...
+            // Debug, something is odd.
+            sl.l = sl.l.With("target", sl.target)
+        }
+    }
+
+    appenderCtx := ctx
+    if passMetadataInContext {
+        // Store the cache and target in the context. This is then used by downstream OTel Collector
+        // to lookup the metadata required to process the samples. Not used by Prometheus itself.
+        // TODO(gouthamve) We're using a dedicated context because using the parentCtx caused a memory
+        // leak. We should ideally fix the main leak. See: https://github.com/prometheus/prometheus/pull/10590
+        // TODO(bwplotka): Remove once OpenTelemetry collector uses AppenderV2 (add issue)
+        appenderCtx = ContextWithMetricMetadataStore(appenderCtx, sl.cache)
+        appenderCtx = ContextWithTarget(appenderCtx, sl.target)
+    }
+    sl.appenderCtx = appenderCtx
+    sl.ctx, sl.cancel = context.WithCancel(ctx)
+}
+
 func (sp *scrapePool) ActiveTargets() []*Target {
     sp.targetMtx.Lock()
     defer sp.targetMtx.Unlock()
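
The newLoop rewrite above replaces a newScrapeLoop call taking roughly thirty positional parameters with a struct literal plus an init step, as the added doc comment explains; defaults (logger, buffers, cache, derived contexts) move into init. The pattern reduced to its core (illustrative excerpt of the hunk, not additional code):

    sl := &scrapeLoop{
        buffers:           buffers,      // explicit dependencies, named per field...
        appendableV2:      appendableV2,
        scrapeLoopOptions: opts,         // ...with per-target options embedded wholesale
    }
    sl.init(ctx, options.PassMetadataInContext) // fills nil defaults, derives contexts
    return sl
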
@@ -392,6 +423,8 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
         }

         t := sp.activeTargets[fp]
+        // Update the targets retrieval function for metadata to a new target.
+        t.SetMetadataStore(cache)
         targetInterval, targetTimeout, err := t.intervalAndTimeout(interval, timeout)
         var (
             s = &targetScraper{
@@ -753,39 +786,6 @@ func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels
     return lb.Labels()
 }

-// appender returns an appender for ingested samples from the target.
-func appender(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int32) storage.Appender {
-    app = &timeLimitAppender{
-        Appender: app,
-        maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
-    }
-
-    // The sampleLimit is applied after metrics are potentially dropped via relabeling.
-    if sampleLimit > 0 {
-        app = &limitAppender{
-            Appender: app,
-            limit:    sampleLimit,
-        }
-    }
-
-    if bucketLimit > 0 {
-        app = &bucketLimitAppender{
-            Appender: app,
-            limit:    bucketLimit,
-        }
-    }
-
-    if maxSchema < histogram.ExponentialSchemaMax {
-        app = &maxSchemaAppender{
-            Appender:  app,
-            maxSchema: maxSchema,
-        }
-    }
-
-    return app
-}
-
 // A scraper retrieves samples and accepts a status report at the end.
 type scraper interface {
     scrape(ctx context.Context) (*http.Response, error)
     readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error)
@ -931,55 +931,50 @@ type cacheEntry struct {
 }

 type scrapeLoop struct {
-	scraper                  scraper
-	l                        *slog.Logger
-	scrapeFailureLogger      FailureLogger
-	scrapeFailureLoggerMtx   sync.RWMutex
-	cache                    *scrapeCache
-	lastScrapeSize           int
-	buffers                  *pool.Pool
-	offsetSeed               uint64
-	honorTimestamps          bool
-	trackTimestampsStaleness bool
-	enableCompression        bool
-	forcedErr                error
-	forcedErrMtx             sync.Mutex
-	sampleLimit              int
-	bucketLimit              int
-	maxSchema                int32
-	labelLimits              *labelLimits
-	interval                 time.Duration
-	timeout                  time.Duration
-	validationScheme         model.ValidationScheme
-	escapingScheme           model.EscapingScheme
-
-	alwaysScrapeClassicHist  bool
-	convertClassicHistToNHCB bool
-	enableSTZeroIngestion    bool
-	enableTypeAndUnitLabels  bool
-	fallbackScrapeProtocol   string
-
-	enableNativeHistogramScraping bool
-
-	appender    func(ctx context.Context) storage.Appender
-	symbolTable *labels.SymbolTable
+	// Parameters.
+	ctx          context.Context
+	cancel       func()
+	stopped      chan struct{}
+	parentCtx    context.Context
+	appenderCtx  context.Context
+	l            *slog.Logger
+	buffers      *pool.Pool
+	appendableV1 storage.Appendable
+	appendableV2 storage.AppendableV2
 	sampleMutator       labelsMutator
 	reportSampleMutator labelsMutator
+	offsetSeed          uint64
+	metrics             *scrapeMetrics
+
-	parentCtx   context.Context
-	appenderCtx context.Context
-	ctx         context.Context
-	cancel      func()
-	stopped     chan struct{}
+	// Scrape pool shared data.
+	symbolTable      *labels.SymbolTable
+	validationScheme model.ValidationScheme
+	escapingScheme   model.EscapingScheme

+	// Options inherited from config.ScrapeConfig.
+	enableNativeHistogramScraping bool
+
+	// Options inherited from scrape.Options.
+	enableSTZeroIngestion bool
+	enableTypeAndUnitLabels bool
+	reportExtraMetrics    bool
+	appendMetadataToWAL   bool
+	skipOffsetting        bool // For testability.
+
+	// Common options.
+	scrapeLoopOptions
+
+	// error injection through setForcedError.
+	forcedErr    error
+	forcedErrMtx sync.Mutex
+
+	// Special logger set on setScrapeFailureLogger
+	scrapeFailureLoggerMtx sync.RWMutex
+	scrapeFailureLogger    FailureLogger
+
+	// Locally cached data.
+	lastScrapeSize int
 	disabledEndOfRunStalenessMarkers atomic.Bool
-
-	reportExtraMetrics  bool
-	appendMetadataToWAL bool
-
-	metrics *scrapeMetrics
-
-	skipOffsetting bool // For testability.
 }

 // scrapeCache tracks mappings of exposed metric strings to label sets and
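Editor's note on the struct layout above: the removed per-scrape option fields are expected to resurface as promoted fields of the embedded scrapeLoopOptions value (its definition is outside this excerpt), so call sites like sl.sampleLimit keep compiling. A minimal, hypothetical sketch of Go's field promotion:

package main

import "fmt"

// Hypothetical stand-ins for scrapeLoopOptions / scrapeLoop; names are
// illustrative only, not the real definitions.
type options struct{ sampleLimit int }

type loop struct{ options } // embedding promotes options' fields

func main() {
	l := loop{options: options{sampleLimit: 100}}
	fmt.Println(l.sampleLimit) // resolves through the embedded struct: 100
}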
@@ -1004,8 +999,8 @@ type scrapeCache struct {
 	seriesCur  map[storage.SeriesRef]*cacheEntry
 	seriesPrev map[storage.SeriesRef]*cacheEntry

-	// TODO(bwplotka): Consider moving Metadata API to use WAL instead of scrape loop to
-	// avoid locking (using metadata API can block scraping).
+	// TODO(bwplotka): Consider moving metadata caching to head. See
+	// https://github.com/prometheus/prometheus/issues/17619.
 	metaMtx  sync.Mutex            // Mutex is needed because the API touches it when metadata is queried.
 	metadata map[string]*metaEntry // metadata by metric family name.

@@ -1240,101 +1235,6 @@ func (c *scrapeCache) LengthMetadata() int {
 	return len(c.metadata)
 }

-func newScrapeLoop(ctx context.Context,
-	sc scraper,
-	l *slog.Logger,
-	buffers *pool.Pool,
-	sampleMutator labelsMutator,
-	reportSampleMutator labelsMutator,
-	appender func(ctx context.Context) storage.Appender,
-	cache *scrapeCache,
-	symbolTable *labels.SymbolTable,
-	offsetSeed uint64,
-	honorTimestamps bool,
-	trackTimestampsStaleness bool,
-	enableCompression bool,
-	sampleLimit int,
-	bucketLimit int,
-	maxSchema int32,
-	labelLimits *labelLimits,
-	interval time.Duration,
-	timeout time.Duration,
-	alwaysScrapeClassicHist bool,
-	convertClassicHistToNHCB bool,
-	enableNativeHistogramScraping bool,
-	enableSTZeroIngestion bool,
-	enableTypeAndUnitLabels bool,
-	reportExtraMetrics bool,
-	appendMetadataToWAL bool,
-	target *Target,
-	passMetadataInContext bool,
-	metrics *scrapeMetrics,
-	skipOffsetting bool,
-	validationScheme model.ValidationScheme,
-	escapingScheme model.EscapingScheme,
-	fallbackScrapeProtocol string,
-) *scrapeLoop {
-	if l == nil {
-		l = promslog.NewNopLogger()
-	}
-	if buffers == nil {
-		buffers = pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) })
-	}
-	if cache == nil {
-		cache = newScrapeCache(metrics)
-	}
-
-	appenderCtx := ctx
-
-	if passMetadataInContext {
-		// Store the cache and target in the context. This is then used by downstream OTel Collector
-		// to lookup the metadata required to process the samples. Not used by Prometheus itself.
-		// TODO(gouthamve) We're using a dedicated context because using the parentCtx caused a memory
-		// leak. We should ideally fix the main leak. See: https://github.com/prometheus/prometheus/pull/10590
-		appenderCtx = ContextWithMetricMetadataStore(appenderCtx, cache)
-		appenderCtx = ContextWithTarget(appenderCtx, target)
-	}
-
-	sl := &scrapeLoop{
-		scraper:             sc,
-		buffers:             buffers,
-		cache:               cache,
-		appender:            appender,
-		symbolTable:         symbolTable,
-		sampleMutator:       sampleMutator,
-		reportSampleMutator: reportSampleMutator,
-		stopped:             make(chan struct{}),
-		offsetSeed:          offsetSeed,
-		l:                   l,
-		parentCtx:           ctx,
-		appenderCtx:         appenderCtx,
-		honorTimestamps:     honorTimestamps,
-		trackTimestampsStaleness: trackTimestampsStaleness,
-		enableCompression:        enableCompression,
-		sampleLimit:              sampleLimit,
-		bucketLimit:              bucketLimit,
-		maxSchema:                maxSchema,
-		labelLimits:              labelLimits,
-		interval:                 interval,
-		timeout:                  timeout,
-		alwaysScrapeClassicHist:  alwaysScrapeClassicHist,
-		convertClassicHistToNHCB: convertClassicHistToNHCB,
-		enableSTZeroIngestion:    enableSTZeroIngestion,
-		enableTypeAndUnitLabels:  enableTypeAndUnitLabels,
-		fallbackScrapeProtocol:   fallbackScrapeProtocol,
-		enableNativeHistogramScraping: enableNativeHistogramScraping,
-		reportExtraMetrics:            reportExtraMetrics,
-		appendMetadataToWAL:           appendMetadataToWAL,
-		metrics:                       metrics,
-		skipOffsetting:                skipOffsetting,
-		validationScheme:              validationScheme,
-		escapingScheme:                escapingScheme,
-	}
-	sl.ctx, sl.cancel = context.WithCancel(ctx)
-
-	return sl
-}
-
 func (sl *scrapeLoop) setScrapeFailureLogger(l FailureLogger) {
 	sl.scrapeFailureLoggerMtx.Lock()
 	defer sl.scrapeFailureLoggerMtx.Unlock()
@@ -1411,6 +1311,13 @@ mainLoop:
 	}
 }

+func (sl *scrapeLoop) appender() scrapeLoopAppender {
+	if sl.appendableV2 != nil {
+		return &scrapeLoopAppenderV2{scrapeLoop: sl, AppenderV2: sl.appendableV2.AppenderV2(sl.appenderCtx)}
+	}
+	return &scrapeLoopAppenderV1{scrapeLoop: sl, Appender: sl.appendableV1.Appender(sl.appenderCtx)}
+}
+
 // scrapeAndReport performs a scrape and then appends the result to the storage
 // together with reporting metrics, by using as few appenders as possible.
 // In the happy scenario, a single appender is used.
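For orientation: both appender flavours constructed above must satisfy the same small contract. The real scrapeLoopAppender definition is not part of this excerpt; the sketch below is inferred purely from the call sites visible in this diff (sla.append, sla.addReportSample, sla.Commit, sla.Rollback):

// Inferred sketch only, not the committed definition.
type scrapeLoopAppender interface {
	append(b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error)
	addReportSample(s reportSample, t int64, v float64, b *labels.Builder, rejectOOO bool) error
	Commit() error
	Rollback() error
}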
@@ -1432,20 +1339,20 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- error) {
 	var total, added, seriesAdded, bytesRead int
 	var err, appErr, scrapeErr error

-	app := sl.appender(sl.appenderCtx)
+	sla := sl.appender()
 	defer func() {
 		if err != nil {
-			app.Rollback()
+			_ = sla.Rollback()
 			return
 		}
-		err = app.Commit()
+		err = sla.Commit()
 		if err != nil {
 			sl.l.Error("Scrape commit failed", "err", err)
 		}
 	}()

 	defer func() {
-		if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytesRead, scrapeErr); err != nil {
+		if err = sl.report(sla, appendTime, time.Since(start), total, added, seriesAdded, bytesRead, scrapeErr); err != nil {
 			sl.l.Warn("Appending scrape report failed", "err", err)
 		}
 	}()
@@ -1453,9 +1360,9 @@
 	if forcedErr := sl.getForcedError(); forcedErr != nil {
 		scrapeErr = forcedErr
 		// Add stale markers.
-		if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil {
-			app.Rollback()
-			app = sl.appender(sl.appenderCtx)
+		if _, _, _, err := sla.append([]byte{}, "", appendTime); err != nil {
+			_ = sla.Rollback()
+			sla = sl.appender()
 			sl.l.Warn("Append failed", "err", err)
 		}
 		if errc != nil {
@@ -1505,16 +1412,16 @@

 	// A failed scrape is the same as an empty scrape,
 	// we still call sl.append to trigger stale markers.
-	total, added, seriesAdded, appErr = sl.append(app, b, contentType, appendTime)
+	total, added, seriesAdded, appErr = sla.append(b, contentType, appendTime)
 	if appErr != nil {
-		app.Rollback()
-		app = sl.appender(sl.appenderCtx)
+		_ = sla.Rollback()
+		sla = sl.appender()
 		sl.l.Debug("Append failed", "err", appErr)
 		// The append failed, probably due to a parse error or sample limit.
 		// Call sl.append again with an empty scrape to trigger stale markers.
-		if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil {
-			app.Rollback()
-			app = sl.appender(sl.appenderCtx)
+		if _, _, _, err := sla.append([]byte{}, "", appendTime); err != nil {
+			_ = sla.Rollback()
+			sla = sl.appender()
 			sl.l.Warn("Append failed", "err", err)
 		}
 	}
@@ -1584,24 +1491,24 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, interval time.Duration) {
 	// If the target has since been recreated and scraped, the
 	// stale markers will be out of order and ignored.
 	// sl.context would have been cancelled, hence using sl.appenderCtx.
-	app := sl.appender(sl.appenderCtx)
+	sla := sl.appender()
 	var err error
 	defer func() {
 		if err != nil {
-			app.Rollback()
+			_ = sla.Rollback()
 			return
 		}
-		err = app.Commit()
+		err = sla.Commit()
 		if err != nil {
 			sl.l.Warn("Stale commit failed", "err", err)
 		}
 	}()
-	if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil {
-		app.Rollback()
-		app = sl.appender(sl.appenderCtx)
+	if _, _, _, err = sla.append([]byte{}, "", staleTime); err != nil {
+		_ = sla.Rollback()
+		sla = sl.appender()
 		sl.l.Warn("Stale append failed", "err", err)
 	}
-	if err = sl.reportStale(app, staleTime); err != nil {
+	if err = sl.reportStale(sla, staleTime); err != nil {
 		sl.l.Warn("Stale report failed", "err", err)
 	}
 }
@@ -1629,12 +1536,11 @@ type appendErrors struct {
 }

 // Update the stale markers.
-func (sl *scrapeLoop) updateStaleMarkers(app storage.Appender, defTime int64) (err error) {
+func (sl *scrapeLoop) updateStaleMarkers(app storage.AppenderV2, defTime int64) (err error) {
 	sl.cache.forEachStale(func(ref storage.SeriesRef, lset labels.Labels) bool {
 		// Series no longer exposed, mark it stale.
-		app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true})
-		_, err = app.Append(ref, lset, defTime, math.Float64frombits(value.StaleNaN))
-		app.SetOptions(nil)
+		// TODO(bwplotka): Pass through Metadata and MFName?
+		_, err = app.Append(ref, lset, 0, defTime, math.Float64frombits(value.StaleNaN), nil, nil, storage.AOptions{RejectOutOfOrder: true})
 		switch {
 		case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
 			// Do not count these in logging, as this is expected if a target
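A side note on the stale-marker append above: staleness markers are ordinary float samples whose bit pattern equals value.StaleNaN, and readers detect them with the existing value.IsStaleNaN helper. A minimal sketch:

staleVal := math.Float64frombits(value.StaleNaN)
fmt.Println(value.IsStaleNaN(staleVal)) // true: this sample marks the series stale
fmt.Println(value.IsStaleNaN(42.0))    // false: a regular sample value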
@@ -1646,12 +1552,20 @@ func (sl *scrapeLoop) updateStaleMarkers(app storage.Appender, defTime int64) (err error) {
 	return err
 }

-func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
+type scrapeLoopAppenderV2 struct {
+	*scrapeLoop
+
+	storage.AppenderV2
+}
+
+var _ scrapeLoopAppender = &scrapeLoopAppenderV2{}
+
+func (sl *scrapeLoopAppenderV2) append(b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
 	defTime := timestamp.FromTime(ts)

 	if len(b) == 0 {
 		// Empty scrape. Just update the stale markers and swap the cache (but don't flush it).
-		err = sl.updateStaleMarkers(app, defTime)
+		err = sl.updateStaleMarkers(sl.AppenderV2, defTime)
 		sl.cache.iterDone(false)
 		return total, added, seriesAdded, err
 	}
@@ -1689,13 +1603,11 @@
 		e          exemplar.Exemplar // Escapes to heap so hoisted out of loop.
 		lastMeta   *metaEntry
 		lastMFName []byte
+		exemplars  = make([]exemplar.Exemplar, 0, 1)
 	)

-	exemplars := make([]exemplar.Exemplar, 0, 1)
-
 	// Take an appender with limits.
-	app = appender(app, sl.sampleLimit, sl.bucketLimit, sl.maxSchema)
+	app := appender(sl.AppenderV2, sl.sampleLimit, sl.bucketLimit, sl.maxSchema)

 	defer func() {
 		if err != nil {
 			return
@@ -1783,7 +1695,7 @@ loop:
 			continue
 		}

-		if !lset.Has(labels.MetricName) {
+		if !lset.Has(model.MetricNameLabel) {
 			err = errNameLabelMandatory
 			break loop
 		}
@@ -1802,53 +1714,79 @@ loop:
 		if seriesAlreadyScraped && parsedTimestamp == nil {
 			err = storage.ErrDuplicateSampleForTimestamp
 		} else {
+			st := int64(0)
 			if sl.enableSTZeroIngestion {
-				if stMs := p.StartTimestamp(); stMs != 0 {
-					if isHistogram {
-						if h != nil {
-							ref, err = app.AppendHistogramSTZeroSample(ref, lset, t, stMs, h, nil)
-						} else {
-							ref, err = app.AppendHistogramSTZeroSample(ref, lset, t, stMs, nil, fh)
-						}
-					} else {
-						ref, err = app.AppendSTZeroSample(ref, lset, t, stMs)
-					}
-					if err != nil && !errors.Is(err, storage.ErrOutOfOrderST) { // OOO is a common case, ignoring completely for now.
-						// ST is an experimental feature. For now, we don't need to fail the
-						// scrape on errors updating the created timestamp, log debug.
-						sl.l.Debug("Error when appending ST in scrape loop", "series", string(met), "ct", stMs, "t", t, "err", err)
-					}
-				}
+				// p.StartTimestamp tends to be expensive (e.g. OM1), so call it only if we care.
+				st = p.StartTimestamp()
 			}

+			exemplars = exemplars[:0] // Reset and reuse the exemplar slice.
+			for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
+				if !e.HasTs {
+					if isHistogram {
+						// We drop exemplars for native histograms if they don't have a timestamp.
+						// Missing timestamps are deliberately not supported as we want to start
+						// enforcing timestamps for exemplars as otherwise proper deduplication
+						// is inefficient and purely based on heuristics: we cannot distinguish
+						// between repeated exemplars and new instances with the same values.
+						// This is done silently without logs as it is not an error but out of spec.
+						// This does not affect classic histograms so that behaviour is unchanged.
+						e = exemplar.Exemplar{} // Reset for next time round loop.
+						continue
+					}
+					e.Ts = t
+				}
+				exemplars = append(exemplars, e)
+				e = exemplar.Exemplar{} // Reset for next time round loop.
+			}
+
+			// Prepare append call.
+			appOpts := storage.AOptions{
+				MetricFamilyName: yoloString(lastMFName),
+			}
+			if len(exemplars) > 0 {
+				// Sort so that checking for duplicates / out of order is more efficient during validation.
+				// TODO(bwplotka): Double check if this is even true now.
+				slices.SortFunc(exemplars, exemplar.Compare)
+				appOpts.Exemplars = exemplars
+			}
+
+			// TODO(bwplotka): This mimics the scrape appender v1 flow. Once we remove the v1
+			// flow we should rename the appendMetadataToWAL flag to passMetadata because at this
+			// point whether the metadata is appended to the WAL, only to memory, or anything else is
+			// completely up to the implementation. All known implementations (Prom and Otel) also
+			// support always passing metadata (e.g. Prometheus head memSeries.metadata
+			// can help with detection), so there is no need to detect and pass it only if changed.
+			if sl.appendMetadataToWAL && lastMeta != nil {
+				if !seriesCached || lastMeta.lastIterChange != sl.cache.iter {
+					// In the majority of cases we can trust that the current series/histogram is matching the lastMeta and lastMFName.
+					// However, optional TYPE etc metadata and broken OM text can break this, detect those cases here.
+					// TODO(bwplotka): Consider moving this to parser as many parser users end up doing this (e.g. ST and NHCB parsing).
+					if !isSeriesPartOfFamily(lset.Get(model.MetricNameLabel), lastMFName, lastMeta.Type) {
+						lastMeta = nil
+					}
+				}
+				if lastMeta != nil {
+					appOpts.Metadata = lastMeta.Metadata
+				}
+			}

-			if isHistogram {
-				if h != nil {
-					ref, err = app.AppendHistogram(ref, lset, t, h, nil)
-				} else {
-					ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
-				}
-			} else {
-				ref, err = app.Append(ref, lset, t, val)
-			}
+			// Append sample to the storage.
+			ref, err = app.Append(ref, lset, st, t, val, h, fh, appOpts)
 		}

-		if err == nil {
-			if (parsedTimestamp == nil || sl.trackTimestampsStaleness) && ce != nil {
-				sl.cache.trackStaleness(ce.ref, ce)
-			}
-		}
-
-		sampleAdded, err = sl.checkAddError(met, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
+		sampleAdded, err = sl.checkAddError(met, exemplars, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
 		if err != nil {
 			if !errors.Is(err, storage.ErrNotFound) {
 				sl.l.Debug("Unexpected error", "series", string(met), "err", err)
 			}
 			break loop
+		} else if (parsedTimestamp == nil || sl.trackTimestampsStaleness) && ce != nil {
+			sl.cache.trackStaleness(ce.ref, ce)
 		}

-		// If series wasn't cached (is new, not seen on previous scrape) we need need to add it to the scrape cache.
+		// If series wasn't cached (is new, not seen on previous scrape) we need to add it to the scrape cache.
 		// But we only do this for series that were appended to TSDB without errors.
-		// If a series was new but we didn't append it due to sample_limit or other errors then we don't need
+		// If a series was new, but we didn't append it due to sample_limit or other errors then we don't need
 		// it in the scrape cache because we don't need to emit StaleNaNs for it when it disappears.
 		if !seriesCached && sampleAdded {
 			ce = sl.cache.addRef(met, ref, lset, hash)
@@ -1857,7 +1795,7 @@ loop:
 			// But make sure we only do this if we have a cache entry (ce) for our series.
 			sl.cache.trackStaleness(ref, ce)
 		}
-		if sampleAdded && sampleLimitErr == nil && bucketLimitErr == nil {
+		if sampleLimitErr == nil && bucketLimitErr == nil {
 			seriesAdded++
 		}
 	}
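The collect-then-sort exemplar flow added above can be exercised on its own; a small sketch with made-up exemplars (exemplar.Compare is the comparator the real code uses, the values are illustrative):

exemplars := []exemplar.Exemplar{
	{Value: 2, Ts: 2000, HasTs: true},
	{Value: 1, Ts: 1000, HasTs: true},
	{Value: 3, Ts: 1500}, // Ts filled in from the sample timestamp, as the loop above does
}
slices.SortFunc(exemplars, exemplar.Compare)
// exemplars are now ordered so duplicate / out-of-order checks during
// validation can walk them linearly.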
@@ -1867,62 +1805,6 @@ loop:
 		// We still report duplicated samples here since this number should be the exact number
 		// of time series exposed on a scrape after relabelling.
 		added++
-		exemplars = exemplars[:0] // Reset and reuse the exemplar slice.
-		for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
-			if !e.HasTs {
-				if isHistogram {
-					// We drop exemplars for native histograms if they don't have a timestamp.
-					// Missing timestamps are deliberately not supported as we want to start
-					// enforcing timestamps for exemplars as otherwise proper deduplication
-					// is inefficient and purely based on heuristics: we cannot distinguish
-					// between repeated exemplars and new instances with the same values.
-					// This is done silently without logs as it is not an error but out of spec.
-					// This does not affect classic histograms so that behaviour is unchanged.
-					e = exemplar.Exemplar{} // Reset for next time round loop.
-					continue
-				}
-				e.Ts = t
-			}
-			exemplars = append(exemplars, e)
-			e = exemplar.Exemplar{} // Reset for next time round loop.
-		}
-		// Sort so that checking for duplicates / out of order is more efficient during validation.
-		slices.SortFunc(exemplars, exemplar.Compare)
-		outOfOrderExemplars := 0
-		for _, e := range exemplars {
-			_, exemplarErr := app.AppendExemplar(ref, lset, e)
-			switch {
-			case exemplarErr == nil:
-				// Do nothing.
-			case errors.Is(exemplarErr, storage.ErrOutOfOrderExemplar):
-				outOfOrderExemplars++
-			default:
-				// Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors.
-				sl.l.Debug("Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
-			}
-		}
-		if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) {
-			// Only report out of order exemplars if all are out of order, otherwise this was a partial update
-			// to some existing set of exemplars.
-			appErrs.numExemplarOutOfOrder += outOfOrderExemplars
-			sl.l.Debug("Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1]))
-			sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars))
-		}
-
-		if sl.appendMetadataToWAL && lastMeta != nil {
-			// Is it new series OR did metadata change for this family?
-			if !seriesCached || lastMeta.lastIterChange == sl.cache.iter {
-				// In majority cases we can trust that the current series/histogram is matching the lastMeta and lastMFName.
-				// However, optional TYPE etc metadata and broken OM text can break this, detect those cases here.
-				// TODO(bwplotka): Consider moving this to parser as many parser users end up doing this (e.g. ST and NHCB parsing).
-				if isSeriesPartOfFamily(lset.Get(labels.MetricName), lastMFName, lastMeta.Type) {
-					if _, merr := app.UpdateMetadata(ref, lset, lastMeta.Metadata); merr != nil {
-						// No need to fail the scrape on errors appending metadata.
-						sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", lastMeta.Metadata), "err", merr)
-					}
-				}
-			}
-		}
 	}
 	if sampleLimitErr != nil {
 		if err == nil {
@@ -1956,6 +1838,38 @@
 	return total, added, seriesAdded, err
 }

+// appender returns an appender for ingested samples from the target.
+func appender(app storage.AppenderV2, sampleLimit, bucketLimit int, maxSchema int32) storage.AppenderV2 {
+	app = &timeLimitAppender{
+		AppenderV2: app,
+		maxTime:    timestamp.FromTime(time.Now().Add(maxAheadTime)),
+	}
+
+	// The sampleLimit is applied after metrics are potentially dropped via relabeling.
+	if sampleLimit > 0 {
+		app = &limitAppender{
+			AppenderV2: app,
+			limit:      sampleLimit,
+		}
+	}
+
+	if bucketLimit > 0 {
+		app = &bucketLimitAppender{
+			AppenderV2: app,
+			limit:      bucketLimit,
+		}
+	}
+
+	if maxSchema < histogram.ExponentialSchemaMax {
+		app = &maxSchemaAppender{
+			AppenderV2: app,
+			maxSchema:  maxSchema,
+		}
+	}
+
+	return app
+}
+
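The chain built above is a plain decorator stack: each layer embeds the next AppenderV2 and intercepts Append. A hedged sketch of one extra, hypothetical layer, using the Append signature visible in this diff:

// countingAppender is illustrative only; it is not part of this change.
type countingAppender struct {
	storage.AppenderV2
	samples int
}

func (c *countingAppender) Append(ref storage.SeriesRef, l labels.Labels, st, t int64,
	v float64, h *histogram.Histogram, fh *histogram.FloatHistogram,
	opts storage.AOptions) (storage.SeriesRef, error) {
	c.samples++ // observe every sample crossing this layer
	return c.AppenderV2.Append(ref, l, st, t, v, h, fh, opts)
}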
 func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) bool {
 	mfNameStr := yoloString(mfName)
 	if !strings.HasPrefix(mName, mfNameStr) { // Fast path.
@@ -2027,7 +1941,8 @@ func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) bool {
 // during normal operation (e.g., accidental cardinality explosion, sudden traffic spikes).
 // Current case ordering prevents exercising other cases when limits are exceeded.
 // Remaining error cases typically occur only a few times, often during initial setup.
-func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
+func (sl *scrapeLoop) checkAddError(met []byte, exemplars []exemplar.Exemplar, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (sampleAdded bool, _ error) {
+	var pErr *storage.AppendPartialError
 	switch {
 	case err == nil:
 		return true, nil
@@ -2058,6 +1973,23 @@
 		return false, nil
 	case errors.Is(err, storage.ErrNotFound):
 		return false, storage.ErrNotFound
+	case errors.As(err, &pErr):
+		outOfOrderExemplars := 0
+		for _, e := range pErr.ExemplarErrors {
+			if errors.Is(e, storage.ErrOutOfOrderExemplar) {
+				outOfOrderExemplars++
+			}
+			// Since exemplar storage is still experimental, we don't fail or check other errors.
+			// A debug log is emitted in TSDB already.
+		}
+		if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) {
+			// Only report out of order exemplars if all are out of order, otherwise this was a partial update
+			// to some existing set of exemplars.
+			appErrs.numExemplarOutOfOrder += outOfOrderExemplars
+			sl.l.Debug("Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1]))
+			sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars))
+		}
+		return true, nil
 	default:
 		return false, err
 	}
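The new errors.As case above only escalates exemplar problems to metrics when the whole batch was out of order; a compact sketch of that heuristic with made-up errors (constructing storage.AppendPartialError directly is an assumption here, only its ExemplarErrors field is shown in this diff):

pErr := &storage.AppendPartialError{ExemplarErrors: []error{
	storage.ErrOutOfOrderExemplar,
	storage.ErrOutOfOrderExemplar,
}}
ooo := 0
for _, e := range pErr.ExemplarErrors {
	if errors.Is(e, storage.ErrOutOfOrderExemplar) {
		ooo++
	}
}
allOutOfOrder := ooo > 0 && ooo == len(pErr.ExemplarErrors) // true: report it
fmt.Println(allOutOfOrder)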
@@ -2139,7 +2071,7 @@ var (
 	}
 )

-func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) {
+func (sl *scrapeLoop) report(sla scrapeLoopAppender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) {
 	sl.scraper.Report(start, duration, scrapeErr)

 	ts := timestamp.FromTime(start)
@@ -2150,71 +2082,70 @@ func (sl *scrapeLoop) report(sla scrapeLoopAppender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) {
 	}
 	b := labels.NewBuilderWithSymbolTable(sl.symbolTable)

-	if err = sl.addReportSample(app, scrapeHealthMetric, ts, health, b); err != nil {
+	if err = sla.addReportSample(scrapeHealthMetric, ts, health, b, false); err != nil {
 		return err
 	}
-	if err = sl.addReportSample(app, scrapeDurationMetric, ts, duration.Seconds(), b); err != nil {
+	if err = sla.addReportSample(scrapeDurationMetric, ts, duration.Seconds(), b, false); err != nil {
 		return err
 	}
-	if err = sl.addReportSample(app, scrapeSamplesMetric, ts, float64(scraped), b); err != nil {
+	if err = sla.addReportSample(scrapeSamplesMetric, ts, float64(scraped), b, false); err != nil {
 		return err
 	}
-	if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, float64(added), b); err != nil {
+	if err = sla.addReportSample(samplesPostRelabelMetric, ts, float64(added), b, false); err != nil {
 		return err
 	}
-	if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, float64(seriesAdded), b); err != nil {
+	if err = sla.addReportSample(scrapeSeriesAddedMetric, ts, float64(seriesAdded), b, false); err != nil {
 		return err
 	}
 	if sl.reportExtraMetrics {
-		if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, sl.timeout.Seconds(), b); err != nil {
+		if err = sla.addReportSample(scrapeTimeoutMetric, ts, sl.timeout.Seconds(), b, false); err != nil {
 			return err
 		}
-		if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, float64(sl.sampleLimit), b); err != nil {
+		if err = sla.addReportSample(scrapeSampleLimitMetric, ts, float64(sl.sampleLimit), b, false); err != nil {
 			return err
 		}
-		if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, float64(bytes), b); err != nil {
+		if err = sla.addReportSample(scrapeBodySizeBytesMetric, ts, float64(bytes), b, false); err != nil {
 			return err
 		}
 	}
 	return err
 }

-func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) {
+func (sl *scrapeLoop) reportStale(sla scrapeLoopAppender, start time.Time) (err error) {
 	ts := timestamp.FromTime(start)
-	app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true})
 	stale := math.Float64frombits(value.StaleNaN)
 	b := labels.NewBuilder(labels.EmptyLabels())

-	if err = sl.addReportSample(app, scrapeHealthMetric, ts, stale, b); err != nil {
+	if err = sla.addReportSample(scrapeHealthMetric, ts, stale, b, true); err != nil {
 		return err
 	}
-	if err = sl.addReportSample(app, scrapeDurationMetric, ts, stale, b); err != nil {
+	if err = sla.addReportSample(scrapeDurationMetric, ts, stale, b, true); err != nil {
 		return err
 	}
-	if err = sl.addReportSample(app, scrapeSamplesMetric, ts, stale, b); err != nil {
+	if err = sla.addReportSample(scrapeSamplesMetric, ts, stale, b, true); err != nil {
 		return err
 	}
-	if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, stale, b); err != nil {
+	if err = sla.addReportSample(samplesPostRelabelMetric, ts, stale, b, true); err != nil {
 		return err
 	}
-	if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, stale, b); err != nil {
+	if err = sla.addReportSample(scrapeSeriesAddedMetric, ts, stale, b, true); err != nil {
 		return err
 	}
 	if sl.reportExtraMetrics {
-		if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, stale, b); err != nil {
+		if err = sla.addReportSample(scrapeTimeoutMetric, ts, stale, b, true); err != nil {
 			return err
 		}
-		if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, stale, b); err != nil {
+		if err = sla.addReportSample(scrapeSampleLimitMetric, ts, stale, b, true); err != nil {
 			return err
 		}
-		if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, stale, b); err != nil {
+		if err = sla.addReportSample(scrapeBodySizeBytesMetric, ts, stale, b, true); err != nil {
 			return err
 		}
 	}
 	return err
 }

-func (sl *scrapeLoop) addReportSample(app storage.Appender, s reportSample, t int64, v float64, b *labels.Builder) error {
+func (sl *scrapeLoopAppenderV2) addReportSample(s reportSample, t int64, v float64, b *labels.Builder, rejectOOO bool) error {
 	ce, ok, _ := sl.cache.get(s.name)
 	var ref storage.SeriesRef
 	var lset labels.Labels
@@ -2226,21 +2157,19 @@ func (sl *scrapeLoopAppenderV2) addReportSample(s reportSample, t int64, v float64, b *labels.Builder, rejectOOO bool) error {
 	// with scraped metrics in the cache.
 	// We have to drop it when building the actual metric.
 	b.Reset(labels.EmptyLabels())
-	b.Set(labels.MetricName, string(s.name[:len(s.name)-1]))
+	b.Set(model.MetricNameLabel, string(s.name[:len(s.name)-1]))
 	lset = sl.reportSampleMutator(b.Labels())
 	}

-	ref, err := app.Append(ref, lset, t, v)
+	ref, err := sl.Append(ref, lset, 0, t, v, nil, nil, storage.AOptions{
+		MetricFamilyName: yoloString(s.name),
+		Metadata:         s.Metadata,
+		RejectOutOfOrder: rejectOOO,
+	})
 	switch {
 	case err == nil:
 		if !ok {
 			sl.cache.addRef(s.name, ref, lset, lset.Hash())
-			// We only need to add metadata once a scrape target appears.
-			if sl.appendMetadataToWAL {
-				if _, merr := app.UpdateMetadata(ref, lset, s.Metadata); merr != nil {
-					sl.l.Debug("Error when appending metadata in addReportSample", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", s.Metadata), "err", merr)
-				}
-			}
 		}
 		return nil
 	case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
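One detail worth calling out in addReportSample above: report-series cache keys carry a trailing 0xFF byte, which is invalid UTF-8 and therefore can never appear in a scraped metric name, so the synthetic entries cannot collide with real series; the byte is stripped when the actual label set is built. A sketch (the metric name is just an example):

name := []byte("scrape_duration_seconds\xff") // cache key form
b := labels.NewBuilder(labels.EmptyLabels())
b.Set(model.MetricNameLabel, string(name[:len(name)-1])) // drop the \xff for the real series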

 scrape/scrape_append_v1.go (new file, 568 lines)
@@ -0,0 +1,568 @@
+package scrape
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"slices"
+	"time"
+
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/textparse"
+	"github.com/prometheus/prometheus/model/timestamp"
+	"github.com/prometheus/prometheus/model/value"
+	"github.com/prometheus/prometheus/storage"
+)
+
+// This file contains the Appender v1 flow for temporary compatibility with downstream
+// scrape.NewManager users (e.g. OpenTelemetry).
+//
+// No new changes should be added here. Prometheus does NOT use this code.
+// TODO(bwplotka): Remove once Otel has migrated (add issue).
+
+type scrapeLoopAppenderV1 struct {
+	*scrapeLoop
+
+	storage.Appender
+}
+
+var _ scrapeLoopAppender = &scrapeLoopAppenderV1{}
+
+func (sl *scrapeLoop) updateStaleMarkersV1(app storage.Appender, defTime int64) (err error) {
+	sl.cache.forEachStale(func(ref storage.SeriesRef, lset labels.Labels) bool {
+		// Series no longer exposed, mark it stale.
+		app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true})
+		_, err = app.Append(ref, lset, defTime, math.Float64frombits(value.StaleNaN))
+		app.SetOptions(nil)
+		switch {
+		case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
+			// Do not count these in logging, as this is expected if a target
+			// goes away and comes back again with a new scrape loop.
+			err = nil
+		}
+		return err == nil
+	})
+	return err
+}
+
+// appenderV1 returns an appender for ingested samples from the target.
+func appenderV1(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int32) storage.Appender {
+	app = &timeLimitAppenderV1{
+		Appender: app,
+		maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
+	}
+
+	// The sampleLimit is applied after metrics are potentially dropped via relabeling.
+	if sampleLimit > 0 {
+		app = &limitAppenderV1{
+			Appender: app,
+			limit:    sampleLimit,
+		}
+	}
+
+	if bucketLimit > 0 {
+		app = &bucketLimitAppenderV1{
+			Appender: app,
+			limit:    bucketLimit,
+		}
+	}
+
+	if maxSchema < histogram.ExponentialSchemaMax {
+		app = &maxSchemaAppenderV1{
+			Appender:  app,
+			maxSchema: maxSchema,
+		}
+	}
+
+	return app
+}
+
+func (sl *scrapeLoopAppenderV1) addReportSample(s reportSample, t int64, v float64, b *labels.Builder, rejectOOO bool) error {
+	ce, ok, _ := sl.cache.get(s.name)
+	var ref storage.SeriesRef
+	var lset labels.Labels
+	if ok {
+		ref = ce.ref
+		lset = ce.lset
+	} else {
+		// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
+		// with scraped metrics in the cache.
+		// We have to drop it when building the actual metric.
+		b.Reset(labels.EmptyLabels())
+		b.Set(model.MetricNameLabel, string(s.name[:len(s.name)-1]))
+		lset = sl.reportSampleMutator(b.Labels())
+	}
+
+	opt := storage.AppendOptions{DiscardOutOfOrder: rejectOOO}
+	sl.SetOptions(&opt)
+	ref, err := sl.Append(ref, lset, t, v)
+	opt.DiscardOutOfOrder = false
+	sl.SetOptions(&opt)
+	switch {
+	case err == nil:
+		if !ok {
+			sl.cache.addRef(s.name, ref, lset, lset.Hash())
+			// We only need to add metadata once a scrape target appears.
+			if sl.appendMetadataToWAL {
+				if _, merr := sl.UpdateMetadata(ref, lset, s.Metadata); merr != nil {
+					sl.l.Debug("Error when appending metadata in addReportSample", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", s.Metadata), "err", merr)
+				}
+			}
+		}
+		return nil
+	case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
+		// Do not log here, as this is expected if a target goes away and comes back
+		// again with a new scrape loop.
+		return nil
+	default:
+		return err
+	}
+}
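For contrast with the SetOptions toggling above: the V2 flow earlier in this diff expresses the same intent per call instead of mutating appender-wide state. A sketch, reusing the surrounding variables (appV2 stands in for a storage.AppenderV2):

// Sketch only: per-call options instead of stateful SetOptions.
ref, err := appV2.Append(ref, lset, 0, t, v, nil, nil, storage.AOptions{
	RejectOutOfOrder: rejectOOO, // the V1 DiscardOutOfOrder equivalent
})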
+
+// append for the deprecated storage.Appender flow.
+// This is only for downstream project migration purposes and will be removed soon.
+func (sl *scrapeLoopAppenderV1) append(b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
+	defTime := timestamp.FromTime(ts)
+
+	if len(b) == 0 {
+		// Empty scrape. Just update the stale markers and swap the cache (but don't flush it).
+		err = sl.updateStaleMarkersV1(sl.Appender, defTime)
+		sl.cache.iterDone(false)
+		return total, added, seriesAdded, err
+	}
+
+	p, err := textparse.New(b, contentType, sl.symbolTable, textparse.ParserOptions{
+		EnableTypeAndUnitLabels:                 sl.enableTypeAndUnitLabels,
+		IgnoreNativeHistograms:                  !sl.enableNativeHistogramScraping,
+		ConvertClassicHistogramsToNHCB:          sl.convertClassicHistToNHCB,
+		KeepClassicOnClassicAndNativeHistograms: sl.alwaysScrapeClassicHist,
+		OpenMetricsSkipSTSeries:                 sl.enableSTZeroIngestion,
+		FallbackContentType:                     sl.fallbackScrapeProtocol,
+	})
+	if p == nil {
+		sl.l.Error(
+			"Failed to determine correct type of scrape target.",
+			"content_type", contentType,
+			"fallback_media_type", sl.fallbackScrapeProtocol,
+			"err", err,
+		)
+		return total, added, seriesAdded, err
+	}
+	if err != nil {
+		sl.l.Debug(
+			"Invalid content type on scrape, using fallback setting.",
+			"content_type", contentType,
+			"fallback_media_type", sl.fallbackScrapeProtocol,
+			"err", err,
+		)
+	}
+	var (
+		appErrs        = appendErrors{}
+		sampleLimitErr error
+		bucketLimitErr error
+		lset           labels.Labels     // Escapes to heap so hoisted out of loop.
+		e              exemplar.Exemplar // Escapes to heap so hoisted out of loop.
+		lastMeta       *metaEntry
+		lastMFName     []byte
+	)
+
+	exemplars := make([]exemplar.Exemplar, 0, 1)
+
+	// Take an appender with limits.
+	app := appenderV1(sl.Appender, sl.sampleLimit, sl.bucketLimit, sl.maxSchema)
+
+	defer func() {
+		if err != nil {
+			return
+		}
+		// Flush and swap the cache as the scrape was non-empty.
+		sl.cache.iterDone(true)
+	}()
+
+loop:
+	for {
+		var (
+			et                       textparse.Entry
+			sampleAdded, isHistogram bool
+			met                      []byte
+			parsedTimestamp          *int64
+			val                      float64
+			h                        *histogram.Histogram
+			fh                       *histogram.FloatHistogram
+		)
+		if et, err = p.Next(); err != nil {
+			if errors.Is(err, io.EOF) {
+				err = nil
+			}
+			break
+		}
+		switch et {
+		// TODO(bwplotka): Consider changing the parser to give metadata at once instead of type, help and unit separately, ideally on `Series()/Histogram()`;
+		// otherwise we can expose metadata without series on the metadata API.
+		case textparse.EntryType:
+			// TODO(bwplotka): Build the meta entry directly instead of locking and updating the map. This will
+			// allow properly updating metadata when e.g. a unit was added, then removed.
+			lastMFName, lastMeta = sl.cache.setType(p.Type())
+			continue
+		case textparse.EntryHelp:
+			lastMFName, lastMeta = sl.cache.setHelp(p.Help())
+			continue
+		case textparse.EntryUnit:
+			lastMFName, lastMeta = sl.cache.setUnit(p.Unit())
+			continue
+		case textparse.EntryComment:
+			continue
+		case textparse.EntryHistogram:
+			isHistogram = true
+		default:
+		}
+		total++
+
+		t := defTime
+		if isHistogram {
+			met, parsedTimestamp, h, fh = p.Histogram()
+		} else {
+			met, parsedTimestamp, val = p.Series()
+		}
+		if !sl.honorTimestamps {
+			parsedTimestamp = nil
+		}
+		if parsedTimestamp != nil {
+			t = *parsedTimestamp
+		}
+
+		if sl.cache.getDropped(met) {
+			continue
+		}
+		ce, seriesCached, seriesAlreadyScraped := sl.cache.get(met)
+		var (
+			ref  storage.SeriesRef
+			hash uint64
+		)
+
+		if seriesCached {
+			ref = ce.ref
+			lset = ce.lset
+			hash = ce.hash
+		} else {
+			p.Labels(&lset)
+			hash = lset.Hash()
+
+			// Hash label set as it is seen local to the target. Then add target labels
+			// and relabeling and store the final label set.
+			lset = sl.sampleMutator(lset)
+
+			// The label set may be set to empty to indicate dropping.
+			if lset.IsEmpty() {
+				sl.cache.addDropped(met)
+				continue
+			}
+
+			if !lset.Has(labels.MetricName) {
+				err = errNameLabelMandatory
+				break loop
+			}
+			if !lset.IsValid(sl.validationScheme) {
+				err = fmt.Errorf("invalid metric name or label names: %s", lset.String())
+				break loop
+			}
+
+			// If any label limit is exceeded the scrape should fail.
+			if err = verifyLabelLimits(lset, sl.labelLimits); err != nil {
+				sl.metrics.targetScrapePoolExceededLabelLimits.Inc()
+				break loop
+			}
+		}
+
+		if seriesAlreadyScraped && parsedTimestamp == nil {
+			err = storage.ErrDuplicateSampleForTimestamp
+		} else {
+			if sl.enableSTZeroIngestion {
+				if stMs := p.StartTimestamp(); stMs != 0 {
+					if isHistogram {
+						if h != nil {
+							ref, err = app.AppendHistogramSTZeroSample(ref, lset, t, stMs, h, nil)
+						} else {
+							ref, err = app.AppendHistogramSTZeroSample(ref, lset, t, stMs, nil, fh)
+						}
+					} else {
+						ref, err = app.AppendSTZeroSample(ref, lset, t, stMs)
+					}
+					if err != nil && !errors.Is(err, storage.ErrOutOfOrderST) { // OOO is a common case, ignoring completely for now.
+						// ST is an experimental feature. For now, we don't need to fail the
+						// scrape on errors updating the created timestamp, log debug.
+						sl.l.Debug("Error when appending ST in scrape loop", "series", string(met), "ct", stMs, "t", t, "err", err)
+					}
+				}
+			}
+
+			if isHistogram {
+				if h != nil {
+					ref, err = app.AppendHistogram(ref, lset, t, h, nil)
+				} else {
+					ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
+				}
+			} else {
+				ref, err = app.Append(ref, lset, t, val)
+			}
+		}
+
+		if err == nil {
+			if (parsedTimestamp == nil || sl.trackTimestampsStaleness) && ce != nil {
+				sl.cache.trackStaleness(ce.ref, ce)
+			}
+		}
+
+		// NOTE: exemplars are nil here for the v1 appender flow. We append and check the exemplar errors later on.
+		sampleAdded, err = sl.checkAddError(met, nil, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
+		if err != nil {
+			if !errors.Is(err, storage.ErrNotFound) {
+				sl.l.Debug("Unexpected error", "series", string(met), "err", err)
+			}
+			break loop
+		}
+
+		// If the series wasn't cached (it is new, not seen on the previous scrape) we need to add it to the scrape cache.
+		// But we only do this for series that were appended to TSDB without errors.
+		// If a series was new but we didn't append it due to sample_limit or other errors then we don't need
+		// it in the scrape cache because we don't need to emit StaleNaNs for it when it disappears.
+		if !seriesCached && sampleAdded {
+			ce = sl.cache.addRef(met, ref, lset, hash)
+			if ce != nil && (parsedTimestamp == nil || sl.trackTimestampsStaleness) {
+				// Bypass staleness logic if there is an explicit timestamp.
+				// But make sure we only do this if we have a cache entry (ce) for our series.
+				sl.cache.trackStaleness(ref, ce)
+			}
+			if sampleAdded && sampleLimitErr == nil && bucketLimitErr == nil {
+				seriesAdded++
+			}
+		}
+
+		// Increment added even if there's an error so we correctly report the
+		// number of samples remaining after relabeling.
+		// We still report duplicated samples here since this number should be the exact number
+		// of time series exposed on a scrape after relabelling.
+		added++
+		exemplars = exemplars[:0] // Reset and reuse the exemplar slice.
+		for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
+			if !e.HasTs {
+				if isHistogram {
+					// We drop exemplars for native histograms if they don't have a timestamp.
+					// Missing timestamps are deliberately not supported as we want to start
+					// enforcing timestamps for exemplars as otherwise proper deduplication
+					// is inefficient and purely based on heuristics: we cannot distinguish
+					// between repeated exemplars and new instances with the same values.
+					// This is done silently without logs as it is not an error but out of spec.
+					// This does not affect classic histograms so that behaviour is unchanged.
+					e = exemplar.Exemplar{} // Reset for next time round loop.
+					continue
+				}
+				e.Ts = t
+			}
+			exemplars = append(exemplars, e)
+			e = exemplar.Exemplar{} // Reset for next time round loop.
+		}
+		// Sort so that checking for duplicates / out of order is more efficient during validation.
+		slices.SortFunc(exemplars, exemplar.Compare)
+		outOfOrderExemplars := 0
+		for _, e := range exemplars {
+			_, exemplarErr := app.AppendExemplar(ref, lset, e)
+			switch {
+			case exemplarErr == nil:
+				// Do nothing.
+			case errors.Is(exemplarErr, storage.ErrOutOfOrderExemplar):
+				outOfOrderExemplars++
+			default:
+				// Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors.
+				sl.l.Debug("Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
+			}
+		}
+		if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) {
+			// Only report out of order exemplars if all are out of order, otherwise this was a partial update
+			// to some existing set of exemplars.
+			appErrs.numExemplarOutOfOrder += outOfOrderExemplars
+			sl.l.Debug("Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1]))
+			sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars))
+		}
+
+		if sl.appendMetadataToWAL && lastMeta != nil {
+			// Is it a new series OR did metadata change for this family?
+			if !seriesCached || lastMeta.lastIterChange == sl.cache.iter {
+				// In the majority of cases we can trust that the current series/histogram is matching the lastMeta and lastMFName.
+				// However, optional TYPE etc metadata and broken OM text can break this, detect those cases here.
+				// TODO(bwplotka): Consider moving this to parser as many parser users end up doing this (e.g. ST and NHCB parsing).
+				if isSeriesPartOfFamily(lset.Get(model.MetricNameLabel), lastMFName, lastMeta.Type) {
+					if _, merr := app.UpdateMetadata(ref, lset, lastMeta.Metadata); merr != nil {
+						// No need to fail the scrape on errors appending metadata.
+						sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", lastMeta.Metadata), "err", merr)
+					}
+				}
+			}
+		}
+	}
+	if sampleLimitErr != nil {
+		if err == nil {
+			err = sampleLimitErr
+		}
+		// We only want to increment this once per scrape, so this is Inc'd outside the loop.
+		sl.metrics.targetScrapeSampleLimit.Inc()
+	}
+	if bucketLimitErr != nil {
+		if err == nil {
+			err = bucketLimitErr // If sample limit is hit, that error takes precedence.
+		}
+		// We only want to increment this once per scrape, so this is Inc'd outside the loop.
+		sl.metrics.targetScrapeNativeHistogramBucketLimit.Inc()
+	}
+	if appErrs.numOutOfOrder > 0 {
+		sl.l.Warn("Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder)
+	}
+	if appErrs.numDuplicates > 0 {
+		sl.l.Warn("Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates)
+	}
+	if appErrs.numOutOfBounds > 0 {
+		sl.l.Warn("Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds)
+	}
+	if appErrs.numExemplarOutOfOrder > 0 {
+		sl.l.Warn("Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder)
+	}
+	if err == nil {
+		err = sl.updateStaleMarkersV1(app, defTime)
+	}
+	return total, added, seriesAdded, err
+}
+
+// limitAppenderV1 limits the number of total appended samples in a batch.
+type limitAppenderV1 struct {
+	storage.Appender
+
+	limit int
+	i     int
+}
|
||||||
|
|
||||||
|
func (app *limitAppenderV1) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
|
||||||
|
// Bypass sample_limit checks only if we have a staleness marker for a known series (ref value is non-zero).
|
||||||
|
// This ensures that if a series is already in TSDB then we always write the marker.
|
||||||
|
if ref == 0 || !value.IsStaleNaN(v) {
|
||||||
|
app.i++
|
||||||
|
if app.i > app.limit {
|
||||||
|
return 0, errSampleLimit
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ref, err := app.Appender.Append(ref, lset, t, v)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return ref, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *limitAppenderV1) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
|
||||||
|
// Bypass sample_limit checks only if we have a staleness marker for a known series (ref value is non-zero).
|
||||||
|
// This ensures that if a series is already in TSDB then we always write the marker.
|
||||||
|
if ref == 0 || (h != nil && !value.IsStaleNaN(h.Sum)) || (fh != nil && !value.IsStaleNaN(fh.Sum)) {
|
||||||
|
app.i++
|
||||||
|
if app.i > app.limit {
|
||||||
|
return 0, errSampleLimit
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return ref, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type timeLimitAppenderV1 struct {
|
||||||
|
storage.Appender
|
||||||
|
|
||||||
|
maxTime int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *timeLimitAppenderV1) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
|
||||||
|
if t > app.maxTime {
|
||||||
|
return 0, storage.ErrOutOfBounds
|
||||||
|
}
|
||||||
|
|
||||||
|
ref, err := app.Appender.Append(ref, lset, t, v)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return ref, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// bucketLimitAppenderV1 limits the number of total appended samples in a batch.
|
||||||
|
type bucketLimitAppenderV1 struct {
|
||||||
|
storage.Appender
|
||||||
|
|
||||||
|
limit int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *bucketLimitAppenderV1) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
|
||||||
|
var err error
|
||||||
|
if h != nil {
|
||||||
|
// Return with an early error if the histogram has too many buckets and the
|
||||||
|
// schema is not exponential, in which case we can't reduce the resolution.
|
||||||
|
if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(h.Schema) {
|
||||||
|
return 0, errBucketLimit
|
||||||
|
}
|
||||||
|
for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit {
|
||||||
|
if h.Schema <= histogram.ExponentialSchemaMin {
|
||||||
|
return 0, errBucketLimit
|
||||||
|
}
|
||||||
|
if err = h.ReduceResolution(h.Schema - 1); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if fh != nil {
|
||||||
|
// Return with an early error if the histogram has too many buckets and the
|
||||||
|
// schema is not exponential, in which case we can't reduce the resolution.
|
||||||
|
if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(fh.Schema) {
|
||||||
|
return 0, errBucketLimit
|
||||||
|
}
|
||||||
|
for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit {
|
||||||
|
if fh.Schema <= histogram.ExponentialSchemaMin {
|
||||||
|
return 0, errBucketLimit
|
||||||
|
}
|
||||||
|
if err = fh.ReduceResolution(fh.Schema - 1); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ref, err = app.Appender.AppendHistogram(ref, lset, t, h, fh); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return ref, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type maxSchemaAppenderV1 struct {
|
||||||
|
storage.Appender
|
||||||
|
|
||||||
|
maxSchema int32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (app *maxSchemaAppenderV1) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
|
||||||
|
var err error
|
||||||
|
if h != nil {
|
||||||
|
if histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > app.maxSchema {
|
||||||
|
if err = h.ReduceResolution(app.maxSchema); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if fh != nil {
|
||||||
|
if histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > app.maxSchema {
|
||||||
|
if err = fh.ReduceResolution(app.maxSchema); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ref, err = app.Appender.AppendHistogram(ref, lset, t, h, fh); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return ref, nil
|
||||||
|
}
|
||||||
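The four V1 wrapper types above all embed storage.Appender and delegate after running their own check, so they compose by nesting. A minimal sketch of how a scrape loop might stack them, assuming a hypothetical helper named wrapV1 (the helper name and the applyMaxSchema flag are illustrative; the diff's own test code uses a similar helper called appender):

package scrape

import "github.com/prometheus/prometheus/storage"

// wrapV1 layers the V1 limiting wrappers around a base appender.
// Checks run outermost-first on every Append/AppendHistogram call,
// and the innermost appender performs the actual write.
func wrapV1(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int32, applyMaxSchema bool) storage.Appender {
	if sampleLimit > 0 {
		app = &limitAppenderV1{Appender: app, limit: sampleLimit}
	}
	if bucketLimit > 0 {
		app = &bucketLimitAppenderV1{Appender: app, limit: bucketLimit}
	}
	if applyMaxSchema {
		app = &maxSchemaAppenderV1{Appender: app, maxSchema: maxSchema}
	}
	return app
}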
1222	scrape/scrape_append_v1_test.go (new file; diff suppressed because it is too large)
@@ -24,13 +24,14 @@ import (
 
 	"github.com/prometheus/common/model"
 
-	"github.com/prometheus/prometheus/config"
-	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/model/histogram"
-	"github.com/prometheus/prometheus/model/labels"
-	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/prometheus/prometheus/model/value"
 	"github.com/prometheus/prometheus/storage"
+
+	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/relabel"
 )
 
 // TargetHealth describes the health state of a target.
@@ -325,13 +326,13 @@ var (
 
 // limitAppender limits the number of total appended samples in a batch.
 type limitAppender struct {
-	storage.Appender
+	storage.AppenderV2
 
 	limit int
 	i     int
 }
 
-func (app *limitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
+func (app *limitAppender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
 	// Bypass sample_limit checks only if we have a staleness marker for a known series (ref value is non-zero).
 	// This ensures that if a series is already in TSDB then we always write the marker.
 	if ref == 0 || !value.IsStaleNaN(v) {
@@ -340,56 +341,31 @@ func (app *limitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t in
 			return 0, errSampleLimit
 		}
 	}
-	ref, err := app.Appender.Append(ref, lset, t, v)
-	if err != nil {
-		return 0, err
-	}
-	return ref, nil
-}
-
-func (app *limitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
-	// Bypass sample_limit checks only if we have a staleness marker for a known series (ref value is non-zero).
-	// This ensures that if a series is already in TSDB then we always write the marker.
-	if ref == 0 || (h != nil && !value.IsStaleNaN(h.Sum)) || (fh != nil && !value.IsStaleNaN(fh.Sum)) {
-		app.i++
-		if app.i > app.limit {
-			return 0, errSampleLimit
-		}
-	}
-	ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh)
-	if err != nil {
-		return 0, err
-	}
-	return ref, nil
+	return app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
 }
 
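After this change the sample limiter has a single entry point: with AppenderV2, floats and native histograms travel through one Append call that also carries the start timestamp (st) and per-append options. A minimal sketch of the two call shapes, assuming app is any storage.AppenderV2 (the helper name appendBoth is illustrative):

package scrape

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendBoth shows the two shapes of the unified Append call: a float
// sample passes nil histogram arguments, a histogram sample passes a
// zero float value and the histogram pointer instead.
func appendBoth(app storage.AppenderV2, ls labels.Labels, ts int64, h *histogram.Histogram) error {
	ref, err := app.Append(0, ls, 0, ts, 42.0, nil, nil, storage.AOptions{}) // float sample
	if err != nil {
		return err
	}
	_, err = app.Append(ref, ls, 0, ts+15000, 0, h, nil, storage.AOptions{}) // integer native histogram
	return err
}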
 type timeLimitAppender struct {
-	storage.Appender
+	storage.AppenderV2
 
 	maxTime int64
 }
 
-func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
+func (app *timeLimitAppender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
 	if t > app.maxTime {
 		return 0, storage.ErrOutOfBounds
 	}
 
-	ref, err := app.Appender.Append(ref, lset, t, v)
-	if err != nil {
-		return 0, err
-	}
-	return ref, nil
+	return app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
 }
 
 // bucketLimitAppender limits the number of total appended samples in a batch.
 type bucketLimitAppender struct {
-	storage.Appender
+	storage.AppenderV2
 
 	limit int
 }
 
-func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
-	var err error
+func (app *bucketLimitAppender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (_ storage.SeriesRef, err error) {
 	if h != nil {
 		// Return with an early error if the histogram has too many buckets and the
 		// schema is not exponential, in which case we can't reduce the resolution.
@@ -420,20 +396,16 @@ func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labe
 			}
 		}
 	}
-	if ref, err = app.Appender.AppendHistogram(ref, lset, t, h, fh); err != nil {
-		return 0, err
-	}
-	return ref, nil
+	return app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
 }
 
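The loop elided by the hunk above halves histogram resolution until the bucket count fits: each one-step schema reduction merges pairs of adjacent buckets, so an exponential-schema histogram roughly halves its bucket count per iteration. A standalone sketch of that reduction, using only the histogram package calls already present in this diff (the helper name capBuckets is illustrative):

package scrape

import (
	"errors"

	"github.com/prometheus/prometheus/model/histogram"
)

// capBuckets reduces resolution until h fits under limit, or reports
// failure when the schema cannot be reduced any further.
func capBuckets(h *histogram.Histogram, limit int) error {
	if len(h.PositiveBuckets)+len(h.NegativeBuckets) > limit && !histogram.IsExponentialSchema(h.Schema) {
		return errors.New("over bucket limit with a non-reducible schema")
	}
	for len(h.PositiveBuckets)+len(h.NegativeBuckets) > limit {
		if h.Schema <= histogram.ExponentialSchemaMin {
			return errors.New("over bucket limit at minimum schema")
		}
		if err := h.ReduceResolution(h.Schema - 1); err != nil {
			return err
		}
	}
	return nil
}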
 type maxSchemaAppender struct {
-	storage.Appender
+	storage.AppenderV2
 
 	maxSchema int32
 }
 
-func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
-	var err error
+func (app *maxSchemaAppender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (_ storage.SeriesRef, err error) {
 	if h != nil {
 		if histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > app.maxSchema {
 			if err = h.ReduceResolution(app.maxSchema); err != nil {
@@ -448,10 +420,7 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels
 			}
 		}
 	}
-	if ref, err = app.Appender.AppendHistogram(ref, lset, t, h, fh); err != nil {
-		return 0, err
-	}
-	return ref, nil
+	return app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
 }
 
 // PopulateDiscoveredLabels sets base labels on lb from target and group labels and scrape configuration, before relabeling.
@@ -14,7 +14,6 @@
 package scrape
 
 import (
-	"context"
 	"crypto/tls"
 	"crypto/x509"
 	"fmt"
@@ -611,18 +610,16 @@ func TestBucketLimitAppender(t *testing.T) {
 		},
 	}
 
-	resApp := &collectResultAppender{}
-
 	for _, c := range cases {
 		for _, floatHisto := range []bool{true, false} {
 			t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
-				app := &bucketLimitAppender{Appender: resApp, limit: c.limit}
+				app := &bucketLimitAppender{AppenderV2: nopAppender{}, limit: c.limit}
 				ts := int64(10 * time.Minute / time.Millisecond)
 				lbls := labels.FromStrings("__name__", "sparse_histogram_series")
 				var err error
 				if floatHisto {
 					fh := c.h.Copy().ToFloat(nil)
-					_, err = app.AppendHistogram(0, lbls, ts, nil, fh)
+					_, err = app.Append(0, lbls, 0, ts, 0, nil, fh, storage.AOptions{})
 					if c.expectError {
 						require.Error(t, err)
 					} else {
@@ -632,7 +629,7 @@ func TestBucketLimitAppender(t *testing.T) {
 					}
 				} else {
 					h := c.h.Copy()
-					_, err = app.AppendHistogram(0, lbls, ts, h, nil)
+					_, err = app.Append(0, lbls, 0, ts, 0, h, nil, storage.AOptions{})
 					if c.expectError {
 						require.Error(t, err)
 					} else {
@@ -697,23 +694,21 @@ func TestMaxSchemaAppender(t *testing.T) {
 		},
 	}
 
-	resApp := &collectResultAppender{}
-
 	for _, c := range cases {
 		for _, floatHisto := range []bool{true, false} {
 			t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
-				app := &maxSchemaAppender{Appender: resApp, maxSchema: c.maxSchema}
+				app := &maxSchemaAppender{AppenderV2: nopAppender{}, maxSchema: c.maxSchema}
 				ts := int64(10 * time.Minute / time.Millisecond)
 				lbls := labels.FromStrings("__name__", "sparse_histogram_series")
 				var err error
 				if floatHisto {
 					fh := c.h.Copy().ToFloat(nil)
-					_, err = app.AppendHistogram(0, lbls, ts, nil, fh)
+					_, err = app.Append(0, lbls, 0, ts, 0, nil, fh, storage.AOptions{})
 					require.Equal(t, c.expectSchema, fh.Schema)
 					require.NoError(t, err)
 				} else {
 					h := c.h.Copy()
-					_, err = app.AppendHistogram(0, lbls, ts, h, nil)
+					_, err = app.Append(0, lbls, 0, ts, 0, h, nil, storage.AOptions{})
 					require.Equal(t, c.expectSchema, h.Schema)
 					require.NoError(t, err)
 				}
@@ -723,39 +718,37 @@ func TestMaxSchemaAppender(t *testing.T) {
 	}
 }
 
-// Test sample_limit when a scrape containst Native Histograms.
+// Test sample_limit when a scrape contains Native Histograms.
 func TestAppendWithSampleLimitAndNativeHistogram(t *testing.T) {
 	const sampleLimit = 2
-	resApp := &collectResultAppender{}
-	sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender {
-		return resApp
-	}, 0)
+	sl, _, _ := newTestScrapeLoop(t)
 	sl.sampleLimit = sampleLimit
 
 	now := time.Now()
-	app := appender(sl.appender(context.Background()), sl.sampleLimit, sl.bucketLimit, sl.maxSchema)
+	app := appender(sl.appendableV2.AppenderV2(sl.ctx), sl.sampleLimit, sl.bucketLimit, sl.maxSchema)
 
 	// sample_limit is set to 2, so first two scrapes should work
-	_, err := app.Append(0, labels.FromStrings(model.MetricNameLabel, "foo"), timestamp.FromTime(now), 1)
-	require.NoError(t, err)
+	{
+		ls := labels.FromStrings(model.MetricNameLabel, "foo")
+		ts := timestamp.FromTime(now)
+		_, err := app.Append(0, ls, 0, ts, 1, nil, nil, storage.AOptions{})
+		require.NoError(t, err)
+	}
 
 	// Second sample, should be ok.
-	_, err = app.AppendHistogram(
-		0,
-		labels.FromStrings(model.MetricNameLabel, "my_histogram1"),
-		timestamp.FromTime(now),
-		&histogram.Histogram{},
-		nil,
-	)
-	require.NoError(t, err)
+	{
+		ls := labels.FromStrings(model.MetricNameLabel, "my_histogram1")
+		ts := timestamp.FromTime(now)
+		_, err := app.Append(0, ls, 0, ts, 0, &histogram.Histogram{}, nil, storage.AOptions{})
+		require.NoError(t, err)
+	}
 
 	// This is third sample with sample_limit=2, it should trigger errSampleLimit.
-	_, err = app.AppendHistogram(
-		0,
-		labels.FromStrings(model.MetricNameLabel, "my_histogram2"),
-		timestamp.FromTime(now),
-		&histogram.Histogram{},
-		nil,
-	)
-	require.ErrorIs(t, err, errSampleLimit)
+	{
+		ls := labels.FromStrings(model.MetricNameLabel, "my_histogram2")
+		ts := timestamp.FromTime(now)
+		_, err := app.Append(0, ls, 0, ts, 0, &histogram.Histogram{}, nil, storage.AOptions{})
+		require.ErrorIs(t, err, errSampleLimit)
+	}
 }
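The bypass in limitAppender (a non-zero ref plus a StaleNaN value) is what lets staleness markers through even once the test above has exhausted sample_limit. A sketch of appending such a marker, assuming app is the wrapped appender and knownRef is a non-zero ref returned by an earlier Append (both names are illustrative):

package scrape

import (
	"math"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/storage"
)

// appendStaleMarker writes a staleness marker for an already-known series.
// Because ref is non-zero and the value is a StaleNaN, limitAppender does
// not count it against sample_limit, so TSDB always learns the series ended.
func appendStaleMarker(app storage.AppenderV2, knownRef storage.SeriesRef, ls labels.Labels, ts int64) error {
	_, err := app.Append(knownRef, ls, 0, ts, math.Float64frombits(value.StaleNaN), nil, nil, storage.AOptions{})
	return err
}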
@@ -19,10 +19,8 @@ import (
 
 	"github.com/prometheus/common/model"
 
-	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
-	"github.com/prometheus/prometheus/model/metadata"
 	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
 )
 
@@ -117,11 +115,11 @@ func (f *fanout) ChunkQuerier(mint, maxt int64) (ChunkQuerier, error) {
 	return NewMergeChunkQuerier([]ChunkQuerier{primary}, secondaries, NewCompactingChunkSeriesMerger(ChainedSeriesMerge)), nil
 }
 
-func (f *fanout) Appender(ctx context.Context) Appender {
-	primary := f.primary.Appender(ctx)
-	secondaries := make([]Appender, 0, len(f.secondaries))
+func (f *fanout) AppenderV2(ctx context.Context) AppenderV2 {
+	primary := f.primary.AppenderV2(ctx)
+	secondaries := make([]AppenderV2, 0, len(f.secondaries))
 	for _, storage := range f.secondaries {
-		secondaries = append(secondaries, storage.Appender(ctx))
+		secondaries = append(secondaries, storage.AppenderV2(ctx))
 	}
 	return &fanoutAppender{
 		logger: f.logger,
@@ -143,98 +141,18 @@ func (f *fanout) Close() error {
 type fanoutAppender struct {
 	logger *slog.Logger
 
-	primary     Appender
-	secondaries []Appender
+	primary     AppenderV2
+	secondaries []AppenderV2
 }
 
-// SetOptions propagates the hints to both primary and secondary appenders.
-func (f *fanoutAppender) SetOptions(opts *AppendOptions) {
-	if f.primary != nil {
-		f.primary.SetOptions(opts)
-	}
-	for _, appender := range f.secondaries {
-		appender.SetOptions(opts)
-	}
-}
-
-func (f *fanoutAppender) Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) {
-	ref, err := f.primary.Append(ref, l, t, v)
+func (f *fanoutAppender) Append(ref SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts AppendV2Options) (SeriesRef, error) {
+	ref, err := f.primary.Append(ref, ls, st, t, v, h, fh, opts)
 	if err != nil {
 		return ref, err
 	}
 
 	for _, appender := range f.secondaries {
-		if _, err := appender.Append(ref, l, t, v); err != nil {
-			return 0, err
-		}
-	}
-	return ref, nil
-}
-
-func (f *fanoutAppender) AppendExemplar(ref SeriesRef, l labels.Labels, e exemplar.Exemplar) (SeriesRef, error) {
-	ref, err := f.primary.AppendExemplar(ref, l, e)
-	if err != nil {
-		return ref, err
-	}
-
-	for _, appender := range f.secondaries {
-		if _, err := appender.AppendExemplar(ref, l, e); err != nil {
-			return 0, err
-		}
-	}
-	return ref, nil
-}
-
-func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) {
-	ref, err := f.primary.AppendHistogram(ref, l, t, h, fh)
-	if err != nil {
-		return ref, err
-	}
-
-	for _, appender := range f.secondaries {
-		if _, err := appender.AppendHistogram(ref, l, t, h, fh); err != nil {
-			return 0, err
-		}
-	}
-	return ref, nil
-}
-
-func (f *fanoutAppender) AppendHistogramSTZeroSample(ref SeriesRef, l labels.Labels, t, st int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) {
-	ref, err := f.primary.AppendHistogramSTZeroSample(ref, l, t, st, h, fh)
-	if err != nil {
-		return ref, err
-	}
-
-	for _, appender := range f.secondaries {
-		if _, err := appender.AppendHistogramSTZeroSample(ref, l, t, st, h, fh); err != nil {
-			return 0, err
-		}
-	}
-	return ref, nil
-}
-
-func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) {
-	ref, err := f.primary.UpdateMetadata(ref, l, m)
-	if err != nil {
-		return ref, err
-	}
-
-	for _, appender := range f.secondaries {
-		if _, err := appender.UpdateMetadata(ref, l, m); err != nil {
-			return 0, err
-		}
-	}
-	return ref, nil
-}
-
-func (f *fanoutAppender) AppendSTZeroSample(ref SeriesRef, l labels.Labels, t, st int64) (SeriesRef, error) {
-	ref, err := f.primary.AppendSTZeroSample(ref, l, t, st)
-	if err != nil {
-		return ref, err
-	}
-
-	for _, appender := range f.secondaries {
-		if _, err := appender.AppendSTZeroSample(ref, l, t, st); err != nil {
+		if _, err := appender.Append(ref, ls, st, t, v, h, fh, opts); err != nil {
 			return 0, err
 		}
 	}
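The rewritten fanout keeps the original semantics but on a single method: a primary error aborts immediately with the primary's ref, while an error from any secondary returns ref 0 so the caller re-resolves the series next time. A sketch of writing through a fanout storage, assuming storage.NewFanout keeps its current shape and that AppenderV2 retains Appender's Commit/Rollback semantics (both are assumptions; neither is shown in this diff):

package main

import (
	"context"
	"log/slog"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// writeViaFanout appends one float sample through a primary plus one
// secondary storage; the fanout mirrors the write to the secondary.
func writeViaFanout(primary, secondary storage.Storage) error {
	fan := storage.NewFanout(slog.Default(), primary, secondary)
	app := fan.AppenderV2(context.Background())
	ls := labels.FromStrings("__name__", "up", "job", "demo")
	if _, err := app.Append(0, ls, 0, 1700000000000, 1, nil, nil, storage.AOptions{}); err != nil {
		_ = app.Rollback()
		return err
	}
	return app.Commit()
}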
@@ -80,7 +80,7 @@ type SampleAndChunkQueryable interface {
 // are goroutine-safe. Storage implements storage.Appender.
 type Storage interface {
 	SampleAndChunkQueryable
-	Appendable
+	AppendableV2
 
 	// StartTime returns the oldest timestamp stored in the storage.
 	StartTime() (int64, error)
@@ -62,9 +62,10 @@ var (
 			{Name: "d", Value: "e"},
 			{Name: "foo", Value: "bar"},
 		},
 		Samples:   []prompb.Sample{{Value: 1, Timestamp: 1}},
 		Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 1}},
-		Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, &testHistogram), prompb.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
+		// TODO: For RW1 can you send both sample and histogram? (not allowed for RW2).
+		Histograms: []prompb.Histogram{prompb.FromIntHistogram(2, &testHistogram), prompb.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
 	},
 	{
 		Labels: []prompb.Label{
@@ -74,9 +75,10 @@ var (
 			{Name: "d", Value: "e"},
 			{Name: "foo", Value: "bar"},
 		},
-		Samples:   []prompb.Sample{{Value: 2, Timestamp: 2}},
+		Samples:   []prompb.Sample{{Value: 2, Timestamp: 4}},
 		Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 2}},
-		Histograms: []prompb.Histogram{prompb.FromIntHistogram(3, &testHistogram), prompb.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
+		// TODO: For RW1 can you send both sample and histogram? (not allowed for RW2).
+		Histograms: []prompb.Histogram{prompb.FromIntHistogram(5, &testHistogram), prompb.FromFloatHistogram(6, testHistogram.ToFloat(nil))},
 	},
 },
 }
||||||
@ -90,6 +92,9 @@ var (
|
|||||||
Type: model.MetricTypeCounter,
|
Type: model.MetricTypeCounter,
|
||||||
Help: "Test counter for test purposes",
|
Help: "Test counter for test purposes",
|
||||||
}
|
}
|
||||||
|
writeV2RequestSeries3Metadata = metadata.Metadata{
|
||||||
|
Type: model.MetricTypeHistogram,
|
||||||
|
}
|
||||||
|
|
||||||
testHistogramCustomBuckets = histogram.Histogram{
|
testHistogramCustomBuckets = histogram.Histogram{
|
||||||
Schema: histogram.CustomBucketsSchema,
|
Schema: histogram.CustomBucketsSchema,
|
||||||
@ -101,7 +106,7 @@ var (
|
|||||||
}
|
}
|
||||||
|
|
||||||
// writeV2RequestFixture represents the same request as writeRequestFixture,
|
// writeV2RequestFixture represents the same request as writeRequestFixture,
|
||||||
// but using the v2 representation, plus includes writeV2RequestSeries1Metadata and writeV2RequestSeries2Metadata.
|
// but using the v2 representation, plus includes writeV2RequestSeries1Metadata, writeV2RequestSeries2Metadata and writeV2RequestSeries3Metadata.
|
||||||
// NOTE: Use TestWriteV2RequestFixture and copy the diff to regenerate if needed.
|
// NOTE: Use TestWriteV2RequestFixture and copy the diff to regenerate if needed.
|
||||||
writeV2RequestFixture = &writev2.Request{
|
writeV2RequestFixture = &writev2.Request{
|
||||||
Symbols: []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"},
|
Symbols: []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"},
|
||||||
@@ -116,12 +121,6 @@ var (
 			},
 			Samples:   []writev2.Sample{{Value: 1, Timestamp: 10, StartTimestamp: 1}}, // ST needs to be lower than the sample's timestamp.
 			Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 10}},
-			Histograms: []writev2.Histogram{
-				writev2.FromIntHistogram(10, &testHistogram),
-				writev2.FromFloatHistogram(20, testHistogram.ToFloat(nil)),
-				writev2.FromIntHistogram(30, &testHistogramCustomBuckets),
-				writev2.FromFloatHistogram(40, testHistogramCustomBuckets.ToFloat(nil)),
-			},
 		},
 		{
 			LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Same series as first.
@@ -133,11 +132,22 @@ var (
 			},
 			Samples:   []writev2.Sample{{Value: 2, Timestamp: 20}},
 			Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 20}},
+		},
+		{
+			LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Same series as first two.
+			Metadata: writev2.Metadata{
+				Type: writev2.Metadata_METRIC_TYPE_HISTOGRAM, // writeV2RequestSeries3Metadata.Type.
+				// Missing help and unit.
+			},
+			Exemplars: []writev2.Exemplar{
+				{LabelsRefs: []uint32{11, 12}, Value: 3, Timestamp: 30},
+				{LabelsRefs: []uint32{11, 12}, Value: 4, Timestamp: 40},
+			},
 			Histograms: []writev2.Histogram{
-				writev2.FromIntHistogram(50, &testHistogram),
-				writev2.FromFloatHistogram(60, testHistogram.ToFloat(nil)),
-				writev2.FromIntHistogram(70, &testHistogramCustomBuckets),
-				writev2.FromFloatHistogram(80, testHistogramCustomBuckets.ToFloat(nil)),
+				writev2.FromIntHistogram(30, &testHistogram),
+				writev2.FromFloatHistogram(40, testHistogram.ToFloat(nil)),
+				writev2.FromIntHistogram(50, &testHistogramCustomBuckets),
+				writev2.FromFloatHistogram(60, testHistogramCustomBuckets.ToFloat(nil)),
 			},
 		},
 	},
@@ -183,12 +193,6 @@ func TestWriteV2RequestFixture(t *testing.T) {
 			},
 			Samples:   []writev2.Sample{{Value: 1, Timestamp: 10, StartTimestamp: 1}},
 			Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 10}},
-			Histograms: []writev2.Histogram{
-				writev2.FromIntHistogram(10, &testHistogram),
-				writev2.FromFloatHistogram(20, testHistogram.ToFloat(nil)),
-				writev2.FromIntHistogram(30, &testHistogramCustomBuckets),
-				writev2.FromFloatHistogram(40, testHistogramCustomBuckets.ToFloat(nil)),
-			},
 		},
 		{
 			LabelsRefs: labelRefs,
@@ -199,11 +203,22 @@ func TestWriteV2RequestFixture(t *testing.T) {
 			},
 			Samples:   []writev2.Sample{{Value: 2, Timestamp: 20}},
 			Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 20}},
+		},
+		{
+			LabelsRefs: labelRefs,
+			Metadata: writev2.Metadata{
+				Type: writev2.Metadata_METRIC_TYPE_HISTOGRAM,
+				// No unit, no help.
+			},
+			Exemplars: []writev2.Exemplar{
+				{LabelsRefs: exemplar1LabelRefs, Value: 3, Timestamp: 30},
+				{LabelsRefs: exemplar1LabelRefs, Value: 4, Timestamp: 40},
+			},
 			Histograms: []writev2.Histogram{
-				writev2.FromIntHistogram(50, &testHistogram),
-				writev2.FromFloatHistogram(60, testHistogram.ToFloat(nil)),
-				writev2.FromIntHistogram(70, &testHistogramCustomBuckets),
-				writev2.FromFloatHistogram(80, testHistogramCustomBuckets.ToFloat(nil)),
+				writev2.FromIntHistogram(30, &testHistogram),
+				writev2.FromFloatHistogram(40, testHistogram.ToFloat(nil)),
+				writev2.FromIntHistogram(50, &testHistogramCustomBuckets),
+				writev2.FromFloatHistogram(60, testHistogramCustomBuckets.ToFloat(nil)),
 			},
 		},
 	},
@@ -1,244 +0,0 @@
-// Copyright The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// TODO(krajorama): rename this package to otlpappender or similar, as it is
-// not specific to Prometheus remote write anymore.
-// Note otlptranslator is already used by prometheus/otlptranslator repo.
-package prometheusremotewrite
-
-import (
-	"errors"
-	"fmt"
-	"log/slog"
-
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-
-	"github.com/prometheus/prometheus/model/exemplar"
-	"github.com/prometheus/prometheus/model/histogram"
-	"github.com/prometheus/prometheus/model/labels"
-	"github.com/prometheus/prometheus/model/metadata"
-	"github.com/prometheus/prometheus/storage"
-)
-
-// Metadata extends metadata.Metadata with the metric family name.
-// OTLP calculates the metric family name for all metrics and uses
-// it for generating summary, histogram series by adding the magic
-// suffixes. The metric family name is passed down to the appender
-// in case the storage needs it for metadata updates.
-// Known user is Mimir that implements /api/v1/metadata and uses
-// Remote-Write 1.0 for this. Might be removed later if no longer
-// needed by any downstream project.
-type Metadata struct {
-	metadata.Metadata
-	MetricFamilyName string
-}
-
-// CombinedAppender is similar to storage.Appender, but combines updates to
-// metadata, created timestamps, exemplars and samples into a single call.
-type CombinedAppender interface {
-	// AppendSample appends a sample and related exemplars, metadata, and
-	// created timestamp to the storage.
-	AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) error
-	// AppendHistogram appends a histogram and related exemplars, metadata, and
-	// created timestamp to the storage.
-	AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error
-}
-
-// CombinedAppenderMetrics is for the metrics observed by the
-// combinedAppender implementation.
-type CombinedAppenderMetrics struct {
-	samplesAppendedWithoutMetadata prometheus.Counter
-	outOfOrderExemplars            prometheus.Counter
-}
-
-func NewCombinedAppenderMetrics(reg prometheus.Registerer) CombinedAppenderMetrics {
-	return CombinedAppenderMetrics{
-		samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Namespace: "prometheus",
-			Subsystem: "api",
-			Name:      "otlp_appended_samples_without_metadata_total",
-			Help:      "The total number of samples ingested from OTLP without corresponding metadata.",
-		}),
-		outOfOrderExemplars: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Namespace: "prometheus",
-			Subsystem: "api",
-			Name:      "otlp_out_of_order_exemplars_total",
-			Help:      "The total number of received OTLP exemplars which were rejected because they were out of order.",
-		}),
-	}
-}
-
-// NewCombinedAppender creates a combined appender that sets start times and
-// updates metadata for each series only once, and appends samples and
-// exemplars for each call.
-func NewCombinedAppender(app storage.Appender, logger *slog.Logger, ingestSTZeroSample, appendMetadata bool, metrics CombinedAppenderMetrics) CombinedAppender {
-	return &combinedAppender{
-		app:                            app,
-		logger:                         logger,
-		ingestSTZeroSample:             ingestSTZeroSample,
-		appendMetadata:                 appendMetadata,
-		refs:                           make(map[uint64]seriesRef),
-		samplesAppendedWithoutMetadata: metrics.samplesAppendedWithoutMetadata,
-		outOfOrderExemplars:            metrics.outOfOrderExemplars,
-	}
-}
-
-type seriesRef struct {
-	ref  storage.SeriesRef
-	st   int64
-	ls   labels.Labels
-	meta metadata.Metadata
-}
-
-type combinedAppender struct {
-	app                            storage.Appender
-	logger                         *slog.Logger
-	samplesAppendedWithoutMetadata prometheus.Counter
-	outOfOrderExemplars            prometheus.Counter
-	ingestSTZeroSample             bool
-	appendMetadata                 bool
-	// Used to ensure we only update metadata and created timestamps once, and to share storage.SeriesRefs.
-	// To detect hash collision it also stores the labels.
-	// There is no overflow/conflict list, the TSDB will handle that part.
-	refs map[uint64]seriesRef
-}
-
-func (b *combinedAppender) AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) (err error) {
-	return b.appendFloatOrHistogram(ls, meta.Metadata, st, t, v, nil, es)
-}
-
-func (b *combinedAppender) AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) {
-	if h == nil {
-		// Sanity check, we should never get here with a nil histogram.
-		b.logger.Error("Received nil histogram in CombinedAppender.AppendHistogram", "series", ls.String())
-		return errors.New("internal error, attempted to append nil histogram")
-	}
-	return b.appendFloatOrHistogram(ls, meta.Metadata, st, t, 0, h, es)
-}
-
-func (b *combinedAppender) appendFloatOrHistogram(ls labels.Labels, meta metadata.Metadata, st, t int64, v float64, h *histogram.Histogram, es []exemplar.Exemplar) (err error) {
-	hash := ls.Hash()
-	series, exists := b.refs[hash]
-	ref := series.ref
-	if exists && !labels.Equal(series.ls, ls) {
-		// Hash collision. The series reference we stored is pointing to a
-		// different series so we cannot use it, we need to reset the
-		// reference and cache.
-		// Note: we don't need to keep track of conflicts here,
-		// the TSDB will handle that part when we pass 0 reference.
-		exists = false
-		ref = 0
-	}
-	updateRefs := !exists || series.st != st
-	if updateRefs && st != 0 && st < t && b.ingestSTZeroSample {
-		var newRef storage.SeriesRef
-		if h != nil {
-			newRef, err = b.app.AppendHistogramSTZeroSample(ref, ls, t, st, h, nil)
-		} else {
-			newRef, err = b.app.AppendSTZeroSample(ref, ls, t, st)
-		}
-		if err != nil {
-			if !errors.Is(err, storage.ErrOutOfOrderST) && !errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
-				// Even for the first sample OOO is a common scenario because
-				// we can't tell if a ST was already ingested in a previous request.
-				// We ignore the error.
-				// ErrDuplicateSampleForTimestamp is also a common scenario because
-				// unknown start times in Opentelemetry are indicated by setting
-				// the start time to the same as the first sample time.
-				// https://opentelemetry.io/docs/specs/otel/metrics/data-model/#cumulative-streams-handling-unknown-start-time
-				b.logger.Warn("Error when appending ST from OTLP", "err", err, "series", ls.String(), "start_timestamp", st, "timestamp", t, "sample_type", sampleType(h))
-			}
-		} else {
-			// We only use the returned reference on success as otherwise an
-			// error of ST append could invalidate the series reference.
-			ref = newRef
-		}
-	}
-	{
-		var newRef storage.SeriesRef
-		if h != nil {
-			newRef, err = b.app.AppendHistogram(ref, ls, t, h, nil)
-		} else {
-			newRef, err = b.app.Append(ref, ls, t, v)
-		}
-		if err != nil {
-			// Although Append does not currently return ErrDuplicateSampleForTimestamp there is
-			// a note indicating its inclusion in the future.
-			if errors.Is(err, storage.ErrOutOfOrderSample) ||
-				errors.Is(err, storage.ErrOutOfBounds) ||
-				errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
-				b.logger.Error("Error when appending sample from OTLP", "err", err.Error(), "series", ls.String(), "timestamp", t, "sample_type", sampleType(h))
-			}
-		} else {
-			// If the append was successful, we can use the returned reference.
-			ref = newRef
-		}
-	}
-
-	if ref == 0 {
-		// We cannot update metadata or add exemplars on non existent series.
-		return err
-	}
-
-	metadataChanged := exists && (series.meta.Help != meta.Help || series.meta.Type != meta.Type || series.meta.Unit != meta.Unit)
-
-	// Update cache if references changed or metadata changed.
-	if updateRefs || metadataChanged {
-		b.refs[hash] = seriesRef{
-			ref:  ref,
-			st:   st,
-			ls:   ls,
-			meta: meta,
-		}
-	}
-
-	// Update metadata in storage if enabled and needed.
-	if b.appendMetadata && (!exists || metadataChanged) {
-		// Only update metadata in WAL if the metadata-wal-records feature is enabled.
-		// Without this feature, metadata is not persisted to WAL.
-		_, err := b.app.UpdateMetadata(ref, ls, meta)
-		if err != nil {
-			b.samplesAppendedWithoutMetadata.Add(1)
-			b.logger.Warn("Error while updating metadata from OTLP", "err", err)
-		}
-	}
-
-	b.appendExemplars(ref, ls, es)
-
-	return err
-}
-
-func sampleType(h *histogram.Histogram) string {
-	if h == nil {
-		return "float"
-	}
-	return "histogram"
-}
-
-func (b *combinedAppender) appendExemplars(ref storage.SeriesRef, ls labels.Labels, es []exemplar.Exemplar) storage.SeriesRef {
-	var err error
-	for _, e := range es {
-		if ref, err = b.app.AppendExemplar(ref, ls, e); err != nil {
-			switch {
-			case errors.Is(err, storage.ErrOutOfOrderExemplar):
-				b.outOfOrderExemplars.Add(1)
-				b.logger.Debug("Out of order exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
-			default:
-				// Since exemplar storage is still experimental, we don't fail the request on ingestion errors
-				b.logger.Debug("Error while adding exemplar from OTLP", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
-			}
-		}
-	}
-	return ref
-}
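The deleted CombinedAppender bundled sample value, metadata, start timestamp and exemplars into one call per sample. Under AppenderV2 the start timestamp maps directly onto the st argument of Append, while exemplar and metadata handling moves behind storage.AOptions, whose fields this diff does not show. A hedged sketch of the equivalent float-sample call, keeping the options at their zero value (the helper name appendWithStart is illustrative):

package prometheusremotewrite

import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendWithStart mirrors the (ls, st, t, v) shape of the removed
// CombinedAppender.AppendSample on top of AppenderV2: st rides in the
// Append call itself instead of a separate AppendSTZeroSample round trip.
func appendWithStart(app storage.AppenderV2, ls labels.Labels, st, t int64, v float64) error {
	_, err := app.Append(0, ls, st, t, v, nil, nil, storage.AOptions{})
	return err
}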
@ -1,937 +0,0 @@
|
|||||||
// Copyright 2025 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheusremotewrite
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/prometheus/common/model"
|
|
||||||
"github.com/prometheus/common/promslog"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/model/exemplar"
|
|
||||||
"github.com/prometheus/prometheus/model/histogram"
|
|
||||||
"github.com/prometheus/prometheus/model/labels"
|
|
||||||
"github.com/prometheus/prometheus/model/metadata"
|
|
||||||
"github.com/prometheus/prometheus/storage"
|
|
||||||
"github.com/prometheus/prometheus/tsdb"
|
|
||||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
|
||||||
"github.com/prometheus/prometheus/tsdb/tsdbutil"
|
|
||||||
"github.com/prometheus/prometheus/util/testutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
type mockCombinedAppender struct {
|
|
||||||
pendingSamples []combinedSample
|
|
||||||
pendingHistograms []combinedHistogram
|
|
||||||
|
|
||||||
samples []combinedSample
|
|
||||||
histograms []combinedHistogram
|
|
||||||
}
|
|
||||||
|
|
||||||
type combinedSample struct {
|
|
||||||
metricFamilyName string
|
|
||||||
ls labels.Labels
|
|
||||||
meta metadata.Metadata
|
|
||||||
t int64
|
|
||||||
st int64
|
|
||||||
v float64
|
|
||||||
es []exemplar.Exemplar
|
|
||||||
}
|
|
||||||
|
|
||||||
type combinedHistogram struct {
|
|
||||||
metricFamilyName string
|
|
||||||
ls labels.Labels
|
|
||||||
meta metadata.Metadata
|
|
||||||
t int64
|
|
||||||
st int64
|
|
||||||
h *histogram.Histogram
|
|
||||||
es []exemplar.Exemplar
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *mockCombinedAppender) AppendSample(ls labels.Labels, meta Metadata, st, t int64, v float64, es []exemplar.Exemplar) error {
|
|
||||||
m.pendingSamples = append(m.pendingSamples, combinedSample{
|
|
||||||
metricFamilyName: meta.MetricFamilyName,
|
|
||||||
ls: ls,
|
|
||||||
meta: meta.Metadata,
|
|
||||||
t: t,
|
|
||||||
st: st,
|
|
||||||
v: v,
|
|
||||||
es: es,
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *mockCombinedAppender) AppendHistogram(ls labels.Labels, meta Metadata, st, t int64, h *histogram.Histogram, es []exemplar.Exemplar) error {
|
|
||||||
m.pendingHistograms = append(m.pendingHistograms, combinedHistogram{
|
|
||||||
metricFamilyName: meta.MetricFamilyName,
|
|
||||||
ls: ls,
|
|
||||||
meta: meta.Metadata,
|
|
||||||
t: t,
|
|
||||||
st: st,
|
|
||||||
h: h,
|
|
||||||
es: es,
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *mockCombinedAppender) Commit() error {
|
|
||||||
m.samples = append(m.samples, m.pendingSamples...)
|
|
||||||
m.pendingSamples = m.pendingSamples[:0]
|
|
||||||
m.histograms = append(m.histograms, m.pendingHistograms...)
|
|
||||||
m.pendingHistograms = m.pendingHistograms[:0]
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func requireEqual(t testing.TB, expected, actual any, msgAndArgs ...any) {
|
|
||||||
testutil.RequireEqualWithOptions(t, expected, actual, []cmp.Option{cmp.AllowUnexported(combinedSample{}, combinedHistogram{})}, msgAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestCombinedAppenderOnTSDB runs some basic tests on a real TSDB to check
|
|
||||||
// that the combinedAppender works on a real TSDB.
|
|
||||||
func TestCombinedAppenderOnTSDB(t *testing.T) {
|
|
||||||
t.Run("ingestSTZeroSample=false", func(t *testing.T) { testCombinedAppenderOnTSDB(t, false) })
|
|
||||||
|
|
||||||
t.Run("ingestSTZeroSample=true", func(t *testing.T) { testCombinedAppenderOnTSDB(t, true) })
|
|
||||||
}
|
|
||||||
|
|
||||||
func testCombinedAppenderOnTSDB(t *testing.T, ingestSTZeroSample bool) {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
testExemplars := []exemplar.Exemplar{
|
|
||||||
{
|
|
||||||
Labels: labels.FromStrings("tracid", "122"),
|
|
||||||
Value: 1337,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Labels: labels.FromStrings("tracid", "132"),
|
|
||||||
Value: 7777,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expectedExemplars := []exemplar.QueryResult{
|
|
||||||
{
|
|
||||||
SeriesLabels: labels.FromStrings(
|
|
||||||
model.MetricNameLabel, "test_bytes_total",
|
|
||||||
"foo", "bar",
|
|
||||||
),
|
|
||||||
Exemplars: testExemplars,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
seriesLabels := labels.FromStrings(
|
|
||||||
model.MetricNameLabel, "test_bytes_total",
|
|
||||||
"foo", "bar",
|
|
||||||
)
|
|
||||||
floatMetadata := Metadata{
|
|
||||||
Metadata: metadata.Metadata{
|
|
||||||
Type: model.MetricTypeCounter,
|
|
||||||
Unit: "bytes",
|
|
||||||
Help: "some help",
|
|
||||||
},
|
|
||||||
MetricFamilyName: "test_bytes_total",
|
|
||||||
}
|
|
||||||
|
|
||||||
histogramMetadata := Metadata{
|
|
||||||
Metadata: metadata.Metadata{
|
|
||||||
Type: model.MetricTypeHistogram,
|
|
||||||
Unit: "bytes",
|
|
||||||
Help: "some help",
|
|
||||||
},
|
|
||||||
MetricFamilyName: "test_bytes",
|
|
||||||
}
|
|
||||||
|
|
||||||
	testCases := map[string]struct {
		appendFunc        func(*testing.T, CombinedAppender)
		extraAppendFunc   func(*testing.T, CombinedAppender)
		expectedSamples   []sample
		expectedExemplars []exemplar.QueryResult
		expectedLogsForST []string
	}{
		"single float sample, zero ST": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.UnixMilli(), 42.0, testExemplars))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					f: 42.0,
				},
			},
			expectedExemplars: expectedExemplars,
		},
		"single float sample, very old ST": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 1, now.UnixMilli(), 42.0, nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					f: 42.0,
				},
			},
			expectedLogsForST: []string{
				"Error when appending ST from OTLP",
				"out of bound",
			},
		},
		"single float sample, normal ST": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-2*time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil))
			},
			expectedSamples: []sample{
				{
					stZero: true,
					t:      now.Add(-2 * time.Minute).UnixMilli(),
				},
				{
					t: now.UnixMilli(),
					f: 42.0,
				},
			},
		},
		"single float sample, ST same time as sample": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), 42.0, nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					f: 42.0,
				},
			},
		},
		"two float samples in different messages, ST same time as first sample": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), 42.0, nil))
			},
			extraAppendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), 43.0, nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					f: 42.0,
				},
				{
					t: now.Add(time.Second).UnixMilli(),
					f: 43.0,
				},
			},
		},
		"single float sample, ST in the future of the sample": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), 42.0, nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					f: 42.0,
				},
			},
		},
		"single histogram sample, zero ST": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), testExemplars))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					h: tsdbutil.GenerateTestHistogram(42),
				},
			},
			expectedExemplars: expectedExemplars,
		},
		"single histogram sample, very old ST": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 1, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					h: tsdbutil.GenerateTestHistogram(42),
				},
			},
			expectedLogsForST: []string{
				"Error when appending ST from OTLP",
				"out of bound",
			},
		},
		"single histogram sample, normal ST": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(-2*time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
			},
			expectedSamples: []sample{
				{
					stZero: true,
					t:      now.Add(-2 * time.Minute).UnixMilli(),
					h:      &histogram.Histogram{},
				},
				{
					t: now.UnixMilli(),
					h: tsdbutil.GenerateTestHistogram(42),
				},
			},
		},
		"single histogram sample, ST same time as sample": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					h: tsdbutil.GenerateTestHistogram(42),
				},
			},
		},
		"two histogram samples in different messages, ST same time as first sample": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
			},
			extraAppendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), floatMetadata, now.UnixMilli(), now.Add(time.Second).UnixMilli(), tsdbutil.GenerateTestHistogram(43), nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					h: tsdbutil.GenerateTestHistogram(42),
				},
				{
					t: now.Add(time.Second).UnixMilli(),
					h: tsdbutil.GenerateTestHistogram(43),
				},
			},
		},
		"single histogram sample, ST in the future of the sample": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, now.Add(time.Minute).UnixMilli(), now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					h: tsdbutil.GenerateTestHistogram(42),
				},
			},
		},
		"multiple float samples": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.UnixMilli(), 42.0, nil))
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, 0, now.Add(15*time.Second).UnixMilli(), 62.0, nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					f: 42.0,
				},
				{
					t: now.Add(15 * time.Second).UnixMilli(),
					f: 62.0,
				},
			},
		},
		"multiple histogram samples": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.UnixMilli(), tsdbutil.GenerateTestHistogram(42), nil))
				require.NoError(t, app.AppendHistogram(seriesLabels.Copy(), histogramMetadata, 0, now.Add(15*time.Second).UnixMilli(), tsdbutil.GenerateTestHistogram(62), nil))
			},
			expectedSamples: []sample{
				{
					t: now.UnixMilli(),
					h: tsdbutil.GenerateTestHistogram(42),
				},
				{
					t: now.Add(15 * time.Second).UnixMilli(),
					h: tsdbutil.GenerateTestHistogram(62),
				},
			},
		},
		"float samples with ST changing": {
			appendFunc: func(t *testing.T, app CombinedAppender) {
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-4*time.Second).UnixMilli(), now.Add(-3*time.Second).UnixMilli(), 42.0, nil))
				require.NoError(t, app.AppendSample(seriesLabels.Copy(), floatMetadata, now.Add(-1*time.Second).UnixMilli(), now.UnixMilli(), 62.0, nil))
			},
			expectedSamples: []sample{
				{
					stZero: true,
					t:      now.Add(-4 * time.Second).UnixMilli(),
				},
				{
					t: now.Add(-3 * time.Second).UnixMilli(),
					f: 42.0,
				},
				{
					stZero: true,
					t:      now.Add(-1 * time.Second).UnixMilli(),
				},
				{
					t: now.UnixMilli(),
					f: 62.0,
				},
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			var expectedLogs []string
			if ingestSTZeroSample {
				expectedLogs = append(expectedLogs, tc.expectedLogsForST...)
			}

			dir := t.TempDir()
			opts := tsdb.DefaultOptions()
			opts.EnableExemplarStorage = true
			opts.MaxExemplars = 100
			db, err := tsdb.Open(dir, promslog.NewNopLogger(), prometheus.NewRegistry(), opts, nil)
			require.NoError(t, err)

			t.Cleanup(func() { db.Close() })

			var output bytes.Buffer
			logger := promslog.New(&promslog.Config{Writer: &output})

			ctx := context.Background()
			reg := prometheus.NewRegistry()
			cappMetrics := NewCombinedAppenderMetrics(reg)
			app := db.Appender(ctx)
			capp := NewCombinedAppender(app, logger, ingestSTZeroSample, false, cappMetrics)
			tc.appendFunc(t, capp)
			require.NoError(t, app.Commit())

			if tc.extraAppendFunc != nil {
				app = db.Appender(ctx)
				capp = NewCombinedAppender(app, logger, ingestSTZeroSample, false, cappMetrics)
				tc.extraAppendFunc(t, capp)
				require.NoError(t, app.Commit())
			}

			if len(expectedLogs) > 0 {
				for _, expectedLog := range expectedLogs {
					require.Contains(t, output.String(), expectedLog)
				}
			} else {
				require.Empty(t, output.String(), "unexpected log output")
			}

			q, err := db.Querier(int64(math.MinInt64), int64(math.MaxInt64))
			require.NoError(t, err)

			ss := q.Select(ctx, false, &storage.SelectHints{
				Start: int64(math.MinInt64),
				End:   int64(math.MaxInt64),
			}, labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_bytes_total"))

			require.NoError(t, ss.Err())

			require.True(t, ss.Next())
			series := ss.At()
			it := series.Iterator(nil)
			for i, sample := range tc.expectedSamples {
				if !ingestSTZeroSample && sample.stZero {
					continue
				}
				if sample.h == nil {
					require.Equal(t, chunkenc.ValFloat, it.Next())
					ts, v := it.At()
					require.Equal(t, sample.t, ts, "sample ts %d", i)
					require.Equal(t, sample.f, v, "sample v %d", i)
				} else {
					require.Equal(t, chunkenc.ValHistogram, it.Next())
					ts, h := it.AtHistogram(nil)
					require.Equal(t, sample.t, ts, "sample ts %d", i)
					require.Equal(t, sample.h.Count, h.Count, "sample v %d", i)
				}
			}
			require.False(t, ss.Next())

			eq, err := db.ExemplarQuerier(ctx)
			require.NoError(t, err)
			exResult, err := eq.Select(int64(math.MinInt64), int64(math.MaxInt64), []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_bytes_total")})
			require.NoError(t, err)
			if tc.expectedExemplars == nil {
				tc.expectedExemplars = []exemplar.QueryResult{}
			}
			require.Equal(t, tc.expectedExemplars, exResult)
		})
	}
}
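// Editorial note: the cases above pin down when a synthetic start-time (ST)
// zero sample is attempted. A minimal restatement of that rule (an assumed
// simplification, not code from this commit):
//
//	func attemptsSTZeroSample(st, t int64) bool {
//		// A zero ST, an ST equal to the sample's timestamp, or an ST in
//		// the future produces no zero sample; only an earlier ST does.
//		return st > 0 && st < t
//	}
//
// A very old ST still satisfies this check, but the TSDB then rejects it as
// "out of bound", which the "very old ST" cases assert through the logs.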
type sample struct {
	stZero bool

	t int64
	f float64
	h *histogram.Histogram
}
// TestCombinedAppenderSeriesRefs checks that the combined appender
// correctly uses and updates the series references in the internal map.
func TestCombinedAppenderSeriesRefs(t *testing.T) {
	seriesLabels := labels.FromStrings(
		model.MetricNameLabel, "test_bytes_total",
		"foo", "bar",
	)

	floatMetadata := Metadata{
		Metadata: metadata.Metadata{
			Type: model.MetricTypeCounter,
			Unit: "bytes",
			Help: "some help",
		},
		MetricFamilyName: "test_bytes_total",
	}

	t.Run("happy case with ST zero, reference is passed and reused", func(t *testing.T) {
		app := &appenderRecorder{}
		capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))

		require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))

		require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 3, 4, 62.0, []exemplar.Exemplar{
			{
				Labels: labels.FromStrings("tracid", "122"),
				Value:  1337,
			},
		}))

		require.Len(t, app.records, 5)
		requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
		ref := app.records[0].outRef
		require.NotZero(t, ref)
		requireEqualOpAndRef(t, "Append", ref, app.records[1])
		requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[2])
		requireEqualOpAndRef(t, "Append", ref, app.records[3])
		requireEqualOpAndRef(t, "AppendExemplar", ref, app.records[4])
	})

	t.Run("error on second ST ingest doesn't update the reference", func(t *testing.T) {
		app := &appenderRecorder{}
		capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))

		require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))

		app.appendSTZeroSampleError = errors.New("test error")
		require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 3, 4, 62.0, nil))

		require.Len(t, app.records, 4)
		requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
		ref := app.records[0].outRef
		require.NotZero(t, ref)
		requireEqualOpAndRef(t, "Append", ref, app.records[1])
		requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[2])
		require.Zero(t, app.records[2].outRef, "the second AppendSTZeroSample returned 0")
		requireEqualOpAndRef(t, "Append", ref, app.records[3])
	})

	t.Run("metadata, exemplars are not updated if append failed", func(t *testing.T) {
		app := &appenderRecorder{}
		capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))
		app.appendError = errors.New("test error")
		require.Error(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 0, 1, 42.0, []exemplar.Exemplar{
			{
				Labels: labels.FromStrings("tracid", "122"),
				Value:  1337,
			},
		}))

		require.Len(t, app.records, 1)
		require.Equal(t, appenderRecord{
			op: "Append",
			ls: labels.FromStrings(model.MetricNameLabel, "test_bytes_total", "foo", "bar"),
		}, app.records[0])
	})

	t.Run("metadata, exemplars are updated if append failed but reference is valid", func(t *testing.T) {
		app := &appenderRecorder{}
		capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, true, NewCombinedAppenderMetrics(prometheus.NewRegistry()))

		newMetadata := floatMetadata
		newMetadata.Help = "some other help"

		require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))
		app.appendError = errors.New("test error")
		require.Error(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, []exemplar.Exemplar{
			{
				Labels: labels.FromStrings("tracid", "122"),
				Value:  1337,
			},
		}))

		require.Len(t, app.records, 7)
		requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
		ref := app.records[0].outRef
		require.NotZero(t, ref)
		requireEqualOpAndRef(t, "Append", ref, app.records[1])
		requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2])
		requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[3])
		requireEqualOpAndRef(t, "Append", ref, app.records[4])
		require.Zero(t, app.records[4].outRef, "the second Append returned 0")
		requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5])
		requireEqualOpAndRef(t, "AppendExemplar", ref, app.records[6])
	})

	t.Run("simulate conflict with existing series", func(t *testing.T) {
		app := &appenderRecorder{}
		capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))

		ls := labels.FromStrings(
			model.MetricNameLabel, "test_bytes_total",
			"foo", "bar",
		)

		require.NoError(t, capp.AppendSample(ls, floatMetadata, 1, 2, 42.0, nil))

		hash := ls.Hash()
		cappImpl := capp.(*combinedAppender)
		series := cappImpl.refs[hash]
		series.ls = labels.FromStrings(
			model.MetricNameLabel, "test_bytes_total",
			"foo", "club",
		)
		// The hash and ref remain the same, but we altered the labels.
		// This simulates a conflict with an existing series.
		cappImpl.refs[hash] = series

		require.NoError(t, capp.AppendSample(ls, floatMetadata, 3, 4, 62.0, []exemplar.Exemplar{
			{
				Labels: labels.FromStrings("tracid", "122"),
				Value:  1337,
			},
		}))

		require.Len(t, app.records, 5)
		requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
		ref := app.records[0].outRef
		require.NotZero(t, ref)
		requireEqualOpAndRef(t, "Append", ref, app.records[1])
		requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[2])
		newRef := app.records[2].outRef
		require.NotEqual(t, ref, newRef, "the second AppendSTZeroSample returned a different reference")
		requireEqualOpAndRef(t, "Append", newRef, app.records[3])
		requireEqualOpAndRef(t, "AppendExemplar", newRef, app.records[4])
	})

	t.Run("check that invoking AppendHistogram returns an error for nil histogram", func(t *testing.T) {
		app := &appenderRecorder{}
		capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false, NewCombinedAppenderMetrics(prometheus.NewRegistry()))

		ls := labels.FromStrings(
			model.MetricNameLabel, "test_bytes_total",
			"foo", "bar",
		)
		err := capp.AppendHistogram(ls, Metadata{}, 4, 2, nil, nil)
		require.Error(t, err)
	})

	for _, appendMetadata := range []bool{false, true} {
		t.Run(fmt.Sprintf("appendMetadata=%t", appendMetadata), func(t *testing.T) {
			app := &appenderRecorder{}
			capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, appendMetadata, NewCombinedAppenderMetrics(prometheus.NewRegistry()))

			require.NoError(t, capp.AppendSample(seriesLabels.Copy(), floatMetadata, 1, 2, 42.0, nil))

			if appendMetadata {
				require.Len(t, app.records, 3)
				requireEqualOp(t, "AppendSTZeroSample", app.records[0])
				requireEqualOp(t, "Append", app.records[1])
				requireEqualOp(t, "UpdateMetadata", app.records[2])
			} else {
				require.Len(t, app.records, 2)
				requireEqualOp(t, "AppendSTZeroSample", app.records[0])
				requireEqualOp(t, "Append", app.records[1])
			}
		})
	}
}
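// Editorial sketch of the ref-caching scheme this test pokes at (assumed
// shape, simplified from the real combinedAppender): series are keyed by
// label hash, and the cached labels are re-checked on lookup so that a hash
// collision or a mutated entry falls back to a fresh append with ref 0,
// which makes the underlying appender allocate a new series reference:
//
//	var ref storage.SeriesRef
//	if series, ok := c.refs[ls.Hash()]; ok && labels.Equal(series.ls, ls) {
//		ref = series.ref // reuse the known TSDB series reference
//	}
//	// ref stays 0 on conflict, so a new reference is returned below.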
// TestCombinedAppenderMetadataChanges verifies that UpdateMetadata is called
// when metadata fields change (help, unit, or type).
func TestCombinedAppenderMetadataChanges(t *testing.T) {
	seriesLabels := labels.FromStrings(
		model.MetricNameLabel, "test_metric",
		"foo", "bar",
	)

	baseMetadata := Metadata{
		Metadata: metadata.Metadata{
			Type: model.MetricTypeCounter,
			Unit: "bytes",
			Help: "original help",
		},
		MetricFamilyName: "test_metric",
	}

	tests := []struct {
		name           string
		modifyMetadata func(Metadata) Metadata
	}{
		{
			name: "help changes",
			modifyMetadata: func(m Metadata) Metadata {
				m.Help = "new help text"
				return m
			},
		},
		{
			name: "unit changes",
			modifyMetadata: func(m Metadata) Metadata {
				m.Unit = "seconds"
				return m
			},
		},
		{
			name: "type changes",
			modifyMetadata: func(m Metadata) Metadata {
				m.Type = model.MetricTypeGauge
				return m
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			app := &appenderRecorder{}
			capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, true, NewCombinedAppenderMetrics(prometheus.NewRegistry()))

			newMetadata := tt.modifyMetadata(baseMetadata)

			require.NoError(t, capp.AppendSample(seriesLabels.Copy(), baseMetadata, 1, 2, 42.0, nil))
			require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 4, 62.0, nil))
			require.NoError(t, capp.AppendSample(seriesLabels.Copy(), newMetadata, 3, 5, 162.0, nil))

			// Verify expected operations.
			require.Len(t, app.records, 7)
			requireEqualOpAndRef(t, "AppendSTZeroSample", 0, app.records[0])
			ref := app.records[0].outRef
			require.NotZero(t, ref)
			requireEqualOpAndRef(t, "Append", ref, app.records[1])
			requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[2])
			requireEqualOpAndRef(t, "AppendSTZeroSample", ref, app.records[3])
			requireEqualOpAndRef(t, "Append", ref, app.records[4])
			requireEqualOpAndRef(t, "UpdateMetadata", ref, app.records[5])
			requireEqualOpAndRef(t, "Append", ref, app.records[6])
		})
	}
}
func requireEqualOp(t *testing.T, expectedOp string, actual appenderRecord) {
	t.Helper()
	require.Equal(t, expectedOp, actual.op)
}

func requireEqualOpAndRef(t *testing.T, expectedOp string, expectedRef storage.SeriesRef, actual appenderRecord) {
	t.Helper()
	require.Equal(t, expectedOp, actual.op)
	require.Equal(t, expectedRef, actual.ref)
}
type appenderRecord struct {
	op     string
	ref    storage.SeriesRef
	outRef storage.SeriesRef
	ls     labels.Labels
}

type appenderRecorder struct {
	refcount uint64
	records  []appenderRecord

	appendError                      error
	appendSTZeroSampleError          error
	appendHistogramError             error
	appendHistogramSTZeroSampleError error
	updateMetadataError              error
	appendExemplarError              error
}

var _ storage.Appender = &appenderRecorder{}

func (a *appenderRecorder) setOutRef(ref storage.SeriesRef) {
	if len(a.records) == 0 {
		return
	}
	a.records[len(a.records)-1].outRef = ref
}

func (a *appenderRecorder) newRef() storage.SeriesRef {
	a.refcount++
	return storage.SeriesRef(a.refcount)
}

func (a *appenderRecorder) Append(ref storage.SeriesRef, ls labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) {
	a.records = append(a.records, appenderRecord{op: "Append", ref: ref, ls: ls})
	if a.appendError != nil {
		return 0, a.appendError
	}
	if ref == 0 {
		ref = a.newRef()
	}
	a.setOutRef(ref)
	return ref, nil
}

func (a *appenderRecorder) AppendSTZeroSample(ref storage.SeriesRef, ls labels.Labels, _, _ int64) (storage.SeriesRef, error) {
	a.records = append(a.records, appenderRecord{op: "AppendSTZeroSample", ref: ref, ls: ls})
	if a.appendSTZeroSampleError != nil {
		return 0, a.appendSTZeroSampleError
	}
	if ref == 0 {
		ref = a.newRef()
	}
	a.setOutRef(ref)
	return ref, nil
}

func (a *appenderRecorder) AppendHistogram(ref storage.SeriesRef, ls labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
	a.records = append(a.records, appenderRecord{op: "AppendHistogram", ref: ref, ls: ls})
	if a.appendHistogramError != nil {
		return 0, a.appendHistogramError
	}
	if ref == 0 {
		ref = a.newRef()
	}
	a.setOutRef(ref)
	return ref, nil
}

func (a *appenderRecorder) AppendHistogramSTZeroSample(ref storage.SeriesRef, ls labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
	a.records = append(a.records, appenderRecord{op: "AppendHistogramSTZeroSample", ref: ref, ls: ls})
	if a.appendHistogramSTZeroSampleError != nil {
		return 0, a.appendHistogramSTZeroSampleError
	}
	if ref == 0 {
		ref = a.newRef()
	}
	a.setOutRef(ref)
	return ref, nil
}

func (a *appenderRecorder) UpdateMetadata(ref storage.SeriesRef, ls labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
	a.records = append(a.records, appenderRecord{op: "UpdateMetadata", ref: ref, ls: ls})
	if a.updateMetadataError != nil {
		return 0, a.updateMetadataError
	}
	a.setOutRef(ref)
	return ref, nil
}

func (a *appenderRecorder) AppendExemplar(ref storage.SeriesRef, ls labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) {
	a.records = append(a.records, appenderRecord{op: "AppendExemplar", ref: ref, ls: ls})
	if a.appendExemplarError != nil {
		return 0, a.appendExemplarError
	}
	a.setOutRef(ref)
	return ref, nil
}

func (a *appenderRecorder) Commit() error {
	a.records = append(a.records, appenderRecord{op: "Commit"})
	return nil
}

func (a *appenderRecorder) Rollback() error {
	a.records = append(a.records, appenderRecord{op: "Rollback"})
	return nil
}

func (*appenderRecorder) SetOptions(_ *storage.AppendOptions) {
	panic("not implemented")
}
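// Typical usage of the recorder in a new test would look like this
// (illustrative only; lbls and meta are placeholders):
//
//	app := &appenderRecorder{}
//	capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, false,
//		NewCombinedAppenderMetrics(prometheus.NewRegistry()))
//	require.NoError(t, capp.AppendSample(lbls, meta, 1, 2, 42.0, nil))
//	requireEqualOp(t, "AppendSTZeroSample", app.records[0])
//	requireEqualOp(t, "Append", app.records[1])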
func TestMetadataChangedLogic(t *testing.T) {
	seriesLabels := labels.FromStrings(model.MetricNameLabel, "test_metric", "foo", "bar")
	baseMetadata := Metadata{
		Metadata:         metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "original"},
		MetricFamilyName: "test_metric",
	}

	tests := []struct {
		name           string
		appendMetadata bool
		modifyMetadata func(Metadata) Metadata
		expectWALCall  bool
		verifyCached   func(*testing.T, metadata.Metadata)
	}{
		{
			name:           "appendMetadata=false, no change",
			appendMetadata: false,
			modifyMetadata: func(m Metadata) Metadata { return m },
			expectWALCall:  false,
			verifyCached:   func(t *testing.T, m metadata.Metadata) { require.Equal(t, "original", m.Help) },
		},
		{
			name:           "appendMetadata=false, help changes - cache updated, no WAL",
			appendMetadata: false,
			modifyMetadata: func(m Metadata) Metadata { m.Help = "changed"; return m },
			expectWALCall:  false,
			verifyCached:   func(t *testing.T, m metadata.Metadata) { require.Equal(t, "changed", m.Help) },
		},
		{
			name:           "appendMetadata=true, help changes - cache and WAL updated",
			appendMetadata: true,
			modifyMetadata: func(m Metadata) Metadata { m.Help = "changed"; return m },
			expectWALCall:  true,
			verifyCached:   func(t *testing.T, m metadata.Metadata) { require.Equal(t, "changed", m.Help) },
		},
		{
			name:           "appendMetadata=true, unit changes",
			appendMetadata: true,
			modifyMetadata: func(m Metadata) Metadata { m.Unit = "seconds"; return m },
			expectWALCall:  true,
			verifyCached:   func(t *testing.T, m metadata.Metadata) { require.Equal(t, "seconds", m.Unit) },
		},
		{
			name:           "appendMetadata=true, type changes",
			appendMetadata: true,
			modifyMetadata: func(m Metadata) Metadata { m.Type = model.MetricTypeGauge; return m },
			expectWALCall:  true,
			verifyCached:   func(t *testing.T, m metadata.Metadata) { require.Equal(t, model.MetricTypeGauge, m.Type) },
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			app := &appenderRecorder{}
			capp := NewCombinedAppender(app, promslog.NewNopLogger(), true, tt.appendMetadata, NewCombinedAppenderMetrics(prometheus.NewRegistry()))

			require.NoError(t, capp.AppendSample(seriesLabels.Copy(), baseMetadata, 1, 2, 42.0, nil))

			modifiedMetadata := tt.modifyMetadata(baseMetadata)
			app.records = nil
			require.NoError(t, capp.AppendSample(seriesLabels.Copy(), modifiedMetadata, 1, 3, 43.0, nil))

			hash := seriesLabels.Hash()
			cached, exists := capp.(*combinedAppender).refs[hash]
			require.True(t, exists)
			tt.verifyCached(t, cached.meta)

			updateMetadataCalled := false
			for _, record := range app.records {
				if record.op == "UpdateMetadata" {
					updateMetadataCalled = true
					break
				}
			}
			require.Equal(t, tt.expectWALCall, updateMetadataCalled)
		})
	}
}
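// The hunks below migrate the OTLP translator from the CombinedAppender
// helpers (AppendSample/AppendHistogram) to the single AppenderV2-style
// Append call. As a hedged summary of the new call shape (argument names as
// they appear in the diff):
//
//	// floats: pass the value, leave both histogram arguments nil
//	_, err := c.appender.Append(0, lbls, st, t, val, nil, nil, appOpts)
//	// integer native histograms: pass hp, leave the value 0 and fh nil
//	_, err = c.appender.Append(0, lbls, st, t, 0, hp, nil, appOpts)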
@@ -30,6 +30,7 @@ import (
 
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/otlptranslator"
+	"github.com/prometheus/prometheus/storage"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/pmetric"
 	conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
@@ -64,8 +65,15 @@ const (
 // Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and
 // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized.
 // If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels.
-func (c *PrometheusConverter) createAttributes(resource pcommon.Resource, attributes pcommon.Map, scope scope, settings Settings,
-	ignoreAttrs []string, logOnOverwrite bool, meta Metadata, extras ...string,
+func (c *PrometheusConverter) createAttributes(
+	resource pcommon.Resource,
+	attributes pcommon.Map,
+	scope scope,
+	settings Settings,
+	ignoreAttrs []string,
+	logOnOverwrite bool,
+	meta metadata.Metadata,
+	extras ...string,
 ) (labels.Labels, error) {
 	resourceAttrs := resource.Attributes()
 	serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName)
@@ -222,8 +230,13 @@ func aggregationTemporality(metric pmetric.Metric) (pmetric.AggregationTemporali
 // with the user defined bucket boundaries of non-exponential OTel histograms.
 // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets:
 // https://github.com/prometheus/prometheus/issues/13485.
-func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice,
-	resource pcommon.Resource, settings Settings, scope scope, meta Metadata,
+func (c *PrometheusConverter) addHistogramDataPoints(
+	ctx context.Context,
+	dataPoints pmetric.HistogramDataPointSlice,
+	resource pcommon.Resource,
+	settings Settings,
+	scope scope,
+	appOpts storage.AOptions,
 ) error {
 	for x := 0; x < dataPoints.Len(); x++ {
 		if err := c.everyN.checkContext(ctx); err != nil {
@@ -231,40 +244,35 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo
 		}
 
 		pt := dataPoints.At(x)
-		timestamp := convertTimeStamp(pt.Timestamp())
-		startTimestamp := convertTimeStamp(pt.StartTimestamp())
-		baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, meta)
+		t := convertTimeStamp(pt.Timestamp())
+		st := convertTimeStamp(pt.StartTimestamp())
+		baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, appOpts.Metadata)
 		if err != nil {
 			return err
 		}
 
-		baseName := meta.MetricFamilyName
-
 		// If the sum is unset, it indicates the _sum metric point should be
 		// omitted
 		if pt.HasSum() {
-			// treat sum as a sample in an individual TimeSeries
 			val := pt.Sum()
 			if pt.Flags().NoRecordedValue() {
 				val = math.Float64frombits(value.StaleNaN)
 			}
-			sumlabels := c.addLabels(baseName+sumStr, baseLabels)
-			if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+			sumlabels := c.addLabels(appOpts.MetricFamilyName+sumStr, baseLabels)
+			if _, err := c.appender.Append(0, sumlabels, st, t, val, nil, nil, appOpts); err != nil {
 				return err
 			}
 		}
 
-		// treat count as a sample in an individual TimeSeries
 		val := float64(pt.Count())
 		if pt.Flags().NoRecordedValue() {
 			val = math.Float64frombits(value.StaleNaN)
 		}
-		countlabels := c.addLabels(baseName+countStr, baseLabels)
-		if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+		countlabels := c.addLabels(appOpts.MetricFamilyName+countStr, baseLabels)
+		if _, err = c.appender.Append(0, countlabels, st, t, val, nil, nil, appOpts); err != nil {
 			return err
 		}
 
 		exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
 		if err != nil {
 			return err
@@ -299,8 +307,9 @@ func (c *PrometheusConverter) addHistogramDataPo
 				val = math.Float64frombits(value.StaleNaN)
 			}
 			boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
-			labels := c.addLabels(baseName+bucketStr, baseLabels, leStr, boundStr)
-			if err := c.appender.AppendSample(labels, meta, startTimestamp, timestamp, val, currentBucketExemplars); err != nil {
+			bktLabels := c.addLabels(appOpts.MetricFamilyName+bucketStr, baseLabels, leStr, boundStr)
+			appOpts.Exemplars = currentBucketExemplars
+			if _, err = c.appender.Append(0, bktLabels, st, t, val, nil, nil, appOpts); err != nil {
 				return err
 			}
 		}
@@ -309,12 +318,12 @@ func (c *PrometheusConverter) addHistogramDataPo
 			if pt.Flags().NoRecordedValue() {
 				val = math.Float64frombits(value.StaleNaN)
 			}
-			infLabels := c.addLabels(baseName+bucketStr, baseLabels, leStr, pInfStr)
-			if err := c.appender.AppendSample(infLabels, meta, startTimestamp, timestamp, val, exemplars[nextExemplarIdx:]); err != nil {
+			infLabels := c.addLabels(appOpts.MetricFamilyName+bucketStr, baseLabels, leStr, pInfStr)
+			appOpts.Exemplars = exemplars[nextExemplarIdx:]
+			if _, err = c.appender.Append(0, infLabels, st, t, val, nil, nil, appOpts); err != nil {
 				return err
 			}
 		}
 	}
 
 	return nil
 }
 
@@ -424,8 +433,13 @@ func findMinAndMaxTimestamps(metric pmetric.Metric, minTimestamp, maxTimestamp p
 	return minTimestamp, maxTimestamp
 }
 
-func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource,
-	settings Settings, scope scope, meta Metadata,
+func (c *PrometheusConverter) addSummaryDataPoints(
+	ctx context.Context,
+	dataPoints pmetric.SummaryDataPointSlice,
+	resource pcommon.Resource,
+	settings Settings,
+	scope scope,
+	appOpts storage.AOptions,
 ) error {
 	for x := 0; x < dataPoints.Len(); x++ {
 		if err := c.everyN.checkContext(ctx); err != nil {
@@ -433,33 +447,28 @@ func (c *PrometheusConverter) addSummaryDataPoin
 		}
 
 		pt := dataPoints.At(x)
-		timestamp := convertTimeStamp(pt.Timestamp())
-		startTimestamp := convertTimeStamp(pt.StartTimestamp())
-		baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, meta)
+		t := convertTimeStamp(pt.Timestamp())
+		st := convertTimeStamp(pt.StartTimestamp())
+		baseLabels, err := c.createAttributes(resource, pt.Attributes(), scope, settings, nil, false, appOpts.Metadata)
 		if err != nil {
 			return err
 		}
 
-		baseName := meta.MetricFamilyName
-
-		// treat sum as a sample in an individual TimeSeries
 		val := pt.Sum()
 		if pt.Flags().NoRecordedValue() {
 			val = math.Float64frombits(value.StaleNaN)
 		}
-		// sum and count of the summary should append suffix to baseName
-		sumlabels := c.addLabels(baseName+sumStr, baseLabels)
-		if err := c.appender.AppendSample(sumlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+		sumlabels := c.addLabels(appOpts.MetricFamilyName+sumStr, baseLabels)
+		if _, err = c.appender.Append(0, sumlabels, st, t, val, nil, nil, appOpts); err != nil {
 			return err
 		}
 
-		// treat count as a sample in an individual TimeSeries
 		val = float64(pt.Count())
 		if pt.Flags().NoRecordedValue() {
 			val = math.Float64frombits(value.StaleNaN)
 		}
-		countlabels := c.addLabels(baseName+countStr, baseLabels)
-		if err := c.appender.AppendSample(countlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+		countlabels := c.addLabels(appOpts.MetricFamilyName+countStr, baseLabels)
+		if _, err = c.appender.Append(0, countlabels, st, t, val, nil, nil, appOpts); err != nil {
 			return err
 		}
 
@@ -471,13 +480,12 @@ func (c *PrometheusConverter) addSummaryDataPoin
 				val = math.Float64frombits(value.StaleNaN)
 			}
 			percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
-			qtlabels := c.addLabels(baseName, baseLabels, quantileStr, percentileStr)
-			if err := c.appender.AppendSample(qtlabels, meta, startTimestamp, timestamp, val, nil); err != nil {
+			qtlabels := c.addLabels(appOpts.MetricFamilyName, baseLabels, quantileStr, percentileStr)
+			if _, err = c.appender.Append(0, qtlabels, st, t, val, nil, nil, appOpts); err != nil {
 				return err
 			}
 		}
 	}
 
 	return nil
 }
 
@@ -530,7 +538,7 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
 		// Do not pass identifying attributes as ignoreAttrs below.
 		identifyingAttrs = nil
 	}
-	meta := Metadata{
+	appOpts := storage.AOptions{
 		Metadata: metadata.Metadata{
 			Type: model.MetricTypeGauge,
 			Help: "Target metadata",
@@ -538,7 +546,7 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
 		MetricFamilyName: name,
 	}
 	// TODO: should target info have the __type__ metadata label?
-	lbls, err := c.createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, Metadata{}, model.MetricNameLabel, name)
+	lbls, err := c.createAttributes(resource, attributes, scope{}, settings, identifyingAttrs, false, metadata.Metadata{}, model.MetricNameLabel, name)
 	if err != nil {
 		return err
 	}
@@ -569,10 +577,10 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
 
 	var key targetInfoKey
 	for timestamp := earliestTimestamp; timestamp.Before(latestTimestamp); timestamp = timestamp.Add(interval) {
-		timestampMs := timestamp.UnixMilli()
+		t := timestamp.UnixMilli()
 		key = targetInfoKey{
 			labelsHash: labelsHash,
-			timestamp:  timestampMs,
+			timestamp:  t,
 		}
 		if _, exists := c.seenTargetInfo[key]; exists {
 			// Skip duplicate.
@@ -580,23 +588,25 @@ func (c *PrometheusConverter) addResourceTargetInfo(resource pcommon.Resource, s
 		}
 
 		c.seenTargetInfo[key] = struct{}{}
-		if err := c.appender.AppendSample(lbls, meta, 0, timestampMs, float64(1), nil); err != nil {
+		_, err = c.appender.Append(0, lbls, 0, t, 1.0, nil, nil, appOpts)
+		if err != nil {
 			return err
 		}
 	}
 
 	// Append the final sample at latestTimestamp.
-	finalTimestampMs := latestTimestamp.UnixMilli()
+	finalT := latestTimestamp.UnixMilli()
 	key = targetInfoKey{
 		labelsHash: labelsHash,
-		timestamp:  finalTimestampMs,
+		timestamp:  finalT,
 	}
 	if _, exists := c.seenTargetInfo[key]; exists {
 		return nil
 	}
 
 	c.seenTargetInfo[key] = struct{}{}
-	return c.appender.AppendSample(lbls, meta, 0, finalTimestampMs, float64(1), nil)
+	_, err = c.appender.Append(0, lbls, 0, finalT, 1.0, nil, nil, appOpts)
+	return err
 }
 
 // convertTimeStamp converts OTLP timestamp in ns to timestamp in ms.
@@ -18,6 +18,7 @@ package prometheusremotewrite
 
 import (
 	"context"
+	"errors"
 	"slices"
 	"strings"
 	"testing"
@@ -25,16 +26,54 @@ import (
 
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/otlptranslator"
+	"github.com/prometheus/prometheus/storage"
 	"github.com/stretchr/testify/require"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/pmetric"
 
 	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/util/testutil"
 )
 
+type statsAppender struct {
+	samples    int
+	histograms int
+	metadata   int
+}
+
+func (a *statsAppender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
+	if fh != nil {
+		return 0, errors.New("statsAppender.Append: unexpected float histogram")
+	}
+	if h != nil {
+		a.histograms++
+	} else {
+		a.samples++
+	}
+
+	if !opts.Metadata.IsEmpty() {
+		a.metadata++
+	}
+
+	if ref == 0 {
+		// Use labels hash as a stand-in for unique series reference, to avoid having to track all series.
+		ref = storage.SeriesRef(ls.Hash())
+	}
+	return ref, nil
+}
+
+func (a *statsAppender) Commit() error {
+	return nil
+}
+
+func (a *statsAppender) Rollback() error {
+	return nil
+}
+
 func TestCreateAttributes(t *testing.T) {
 	resourceAttrs := map[string]string{
 		"service.name": "service name",
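A minimal sketch of how a test could use the statsAppender above to sanity-check conversion volume (illustrative only; the expected counts are placeholders):

	sApp := &statsAppender{}
	c := NewPrometheusConverter(sApp)
	// ... run the converter over some OTLP metrics ...
	require.Equal(t, wantSamples, sApp.samples)
	require.Equal(t, wantHistograms, sApp.histograms)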
@@ -389,7 +428,7 @@ func TestCreateAttributes(t *testing.T) {
 	}
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			c := NewPrometheusConverter(&mockCombinedAppender{})
+			c := NewPrometheusConverter(&mockAppender{})
 			settings := Settings{
 				PromoteResourceAttributes: NewPromoteResourceAttributes(config.OTLPConfig{
 					PromoteAllResourceAttributes: tc.promoteAllResourceAttributes,
@@ -413,7 +452,7 @@ func TestCreateAttributes(t *testing.T) {
 			if tc.attrs != (pcommon.Map{}) {
 				testAttrs = tc.attrs
 			}
-			lbls, err := c.createAttributes(testResource, testAttrs, tc.scope, settings, tc.ignoreAttrs, false, Metadata{}, model.MetricNameLabel, "test_metric")
+			lbls, err := c.createAttributes(testResource, testAttrs, tc.scope, settings, tc.ignoreAttrs, false, metadata.Metadata{}, model.MetricNameLabel, "test_metric")
 			require.NoError(t, err)
 
 			testutil.RequireEqual(t, tc.expectedLabels, lbls)
@@ -641,10 +680,10 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			metric := tt.metric()
-			mockAppender := &mockCombinedAppender{}
-			converter := NewPrometheusConverter(mockAppender)
+			mApp := &mockAppender{}
+			converter := NewPrometheusConverter(mApp)
 
-			converter.addSummaryDataPoints(
+			require.NoError(t, converter.addSummaryDataPoints(
 				context.Background(),
 				metric.Summary().DataPoints(),
 				pcommon.NewResource(),
@@ -652,13 +691,13 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) {
 					PromoteScopeMetadata: tt.promoteScope,
 				},
 				tt.scope,
-				Metadata{
+				storage.AOptions{
 					MetricFamilyName: metric.Name(),
 				},
-			)
-			require.NoError(t, mockAppender.Commit())
+			))
+			require.NoError(t, mApp.Commit())
 
-			requireEqual(t, tt.want(), mockAppender.samples)
+			requireEqual(t, tt.want(), mApp.samples)
 		})
 	}
 }
@@ -804,10 +843,10 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			metric := tt.metric()
-			mockAppender := &mockCombinedAppender{}
-			converter := NewPrometheusConverter(mockAppender)
+			mApp := &mockAppender{}
+			converter := NewPrometheusConverter(mApp)
 
-			converter.addHistogramDataPoints(
+			require.NoError(t, converter.addHistogramDataPoints(
 				context.Background(),
 				metric.Histogram().DataPoints(),
 				pcommon.NewResource(),
@@ -815,20 +854,20 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) {
 					PromoteScopeMetadata: tt.promoteScope,
 				},
 				tt.scope,
-				Metadata{
+				storage.AOptions{
 					MetricFamilyName: metric.Name(),
 				},
-			)
-			require.NoError(t, mockAppender.Commit())
+			))
+			require.NoError(t, mApp.Commit())
 
-			requireEqual(t, tt.want(), mockAppender.samples)
+			requireEqual(t, tt.want(), mApp.samples)
 		})
 	}
 }
 
 func TestGetPromExemplars(t *testing.T) {
 	ctx := context.Background()
-	c := NewPrometheusConverter(&mockCombinedAppender{})
+	c := NewPrometheusConverter(&mockAppender{})
 
 	t.Run("Exemplars with int value", func(t *testing.T) {
 		es := pmetric.NewExemplarSlice()
@@ -22,6 +22,7 @@ import (
 	"math"
 
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/storage"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/pmetric"
 
@@ -34,9 +35,14 @@ const defaultZeroThreshold = 1e-128
 
 // addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series
 // as native histogram samples.
-func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice,
-	resource pcommon.Resource, settings Settings, temporality pmetric.AggregationTemporality,
-	scope scope, meta Metadata,
+func (c *PrometheusConverter) addExponentialHistogramDataPoints(
+	ctx context.Context,
+	dataPoints pmetric.ExponentialHistogramDataPointSlice,
+	resource pcommon.Resource,
+	settings Settings,
+	temporality pmetric.AggregationTemporality,
+	scope scope,
+	appOpts storage.AOptions,
 ) (annotations.Annotations, error) {
 	var annots annotations.Annotations
 	for x := 0; x < dataPoints.Len(); x++ {
@@ -59,21 +65,23 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont
 			settings,
 			nil,
 			true,
-			meta,
+			appOpts.Metadata,
 			model.MetricNameLabel,
-			meta.MetricFamilyName,
+			appOpts.MetricFamilyName,
 		)
 		if err != nil {
 			return annots, err
 		}
-		ts := convertTimeStamp(pt.Timestamp())
+		t := convertTimeStamp(pt.Timestamp())
 		st := convertTimeStamp(pt.StartTimestamp())
 		exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
 		if err != nil {
 			return annots, err
 		}
-		// OTel exponential histograms are always Int Histograms.
-		if err = c.appender.AppendHistogram(lbls, meta, st, ts, hp, exemplars); err != nil {
+		appOpts.Exemplars = exemplars
+
+		// OTel exponential histograms are always integer histograms.
+		if _, err = c.appender.Append(0, lbls, st, t, 0, hp, nil, appOpts); err != nil {
 			return annots, err
 		}
 	}
@ -252,9 +260,14 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust
|
|||||||
return spans, deltas
|
return spans, deltas
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice,
|
func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(
|
||||||
resource pcommon.Resource, settings Settings, temporality pmetric.AggregationTemporality,
|
ctx context.Context,
|
||||||
scope scope, meta Metadata,
|
dataPoints pmetric.HistogramDataPointSlice,
|
||||||
|
resource pcommon.Resource,
|
||||||
|
settings Settings,
|
||||||
|
temporality pmetric.AggregationTemporality,
|
||||||
|
scope scope,
|
||||||
|
appOpts storage.AOptions,
|
||||||
) (annotations.Annotations, error) {
|
) (annotations.Annotations, error) {
|
||||||
var annots annotations.Annotations
|
var annots annotations.Annotations
|
||||||
|
|
||||||
@ -278,20 +291,21 @@ func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Co
|
|||||||
settings,
|
settings,
|
||||||
nil,
|
nil,
|
||||||
true,
|
true,
|
||||||
meta,
|
appOpts.Metadata,
|
||||||
model.MetricNameLabel,
|
model.MetricNameLabel,
|
||||||
meta.MetricFamilyName,
|
appOpts.MetricFamilyName,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return annots, err
|
return annots, err
|
||||||
}
|
}
|
||||||
ts := convertTimeStamp(pt.Timestamp())
|
t := convertTimeStamp(pt.Timestamp())
|
||||||
st := convertTimeStamp(pt.StartTimestamp())
|
st := convertTimeStamp(pt.StartTimestamp())
|
||||||
exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
|
exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return annots, err
|
return annots, err
|
||||||
}
|
}
|
||||||
if err = c.appender.AppendHistogram(lbls, meta, st, ts, hp, exemplars); err != nil {
|
appOpts.Exemplars = exemplars
|
||||||
|
if _, err = c.appender.Append(0, lbls, st, t, 0, hp, nil, appOpts); err != nil {
|
||||||
return annots, err
|
return annots, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
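The Metadata value previously threaded through these helpers now travels inside storage.AOptions. A rough sketch of the fields this commit exercises (an assumption inferred from the call sites above; the real struct may define more fields):

	// Sketch only; field set inferred from appOpts usage in this commit.
	type AOptions struct {
		// Metadata (type, unit, help) recorded alongside the appended samples.
		Metadata metadata.Metadata
		// MetricFamilyName of the series, e.g. used when building __name__.
		MetricFamilyName string
		// Exemplars to attach to this append.
		Exemplars []exemplar.Exemplar
	}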
@@ -24,6 +24,7 @@ import (
 
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/otlptranslator"
+	"github.com/prometheus/prometheus/storage"
 	"github.com/stretchr/testify/require"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/pmetric"
@@ -854,8 +855,8 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			metric := tt.metric()
 
-			mockAppender := &mockCombinedAppender{}
-			converter := NewPrometheusConverter(mockAppender)
+			mApp := &mockAppender{}
+			converter := NewPrometheusConverter(mApp)
 			namer := otlptranslator.MetricNamer{
 				WithMetricSuffixes: true,
 			}
@@ -870,16 +871,16 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
 				},
 				pmetric.AggregationTemporalityCumulative,
 				tt.scope,
-				Metadata{
+				storage.AOptions{
 					MetricFamilyName: name,
 				},
 			)
 			require.NoError(t, err)
 			require.Empty(t, annots)
 
-			require.NoError(t, mockAppender.Commit())
+			require.NoError(t, mApp.Commit())
 
-			requireEqual(t, tt.wantSeries(), mockAppender.histograms)
+			requireEqual(t, tt.wantSeries(), mApp.histograms)
 		})
 	}
 }
@@ -1327,8 +1328,8 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			metric := tt.metric()
 
-			mockAppender := &mockCombinedAppender{}
-			converter := NewPrometheusConverter(mockAppender)
+			mApp := &mockAppender{}
+			converter := NewPrometheusConverter(mApp)
 			namer := otlptranslator.MetricNamer{
 				WithMetricSuffixes: true,
 			}
@@ -1344,7 +1345,7 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
 				},
 				pmetric.AggregationTemporalityCumulative,
 				tt.scope,
-				Metadata{
+				storage.AOptions{
 					MetricFamilyName: name,
 				},
 			)
@@ -1352,9 +1353,9 @@ func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) {
 			require.NoError(t, err)
 			require.Empty(t, annots)
 
-			require.NoError(t, mockAppender.Commit())
+			require.NoError(t, mApp.Commit())
 
-			requireEqual(t, tt.wantSeries(), mApp.histograms)
+			requireEqual(t, tt.wantSeries(), mApp.histograms)
 		})
 	}
 }
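These tests assert against mApp.samples, mApp.histograms and mApp.Commit() on a mockAppender defined elsewhere in the package. A hypothetical minimal shape consistent with how it is used here (field and element types are assumptions):

	// Hypothetical sketch of the mockAppender test double used above.
	type mockAppender struct {
		samples    []recordedSample // float samples seen by Append (assumed element type)
		histograms []recordedSample // native histogram samples seen by Append
		committed  bool
	}

	func (m *mockAppender) Commit() error {
		m.committed = true
		return nil
	}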
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/prometheus/otlptranslator"
+	"github.com/prometheus/prometheus/storage"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/pmetric"
 	"go.uber.org/multierr"
@@ -67,7 +68,7 @@ type PrometheusConverter struct {
 	everyN everyNTimes
 	scratchBuilder labels.ScratchBuilder
 	builder *labels.Builder
-	appender CombinedAppender
+	appender storage.AppenderV2
 	// seenTargetInfo tracks target_info samples within a batch to prevent duplicates.
 	seenTargetInfo map[targetInfoKey]struct{}
 }
@@ -78,7 +79,7 @@ type targetInfoKey struct {
 	timestamp int64
 }
 
-func NewPrometheusConverter(appender CombinedAppender) *PrometheusConverter {
+func NewPrometheusConverter(appender storage.AppenderV2) *PrometheusConverter {
 	return &PrometheusConverter{
 		scratchBuilder: labels.NewScratchBuilder(0),
 		builder:        labels.NewBuilder(labels.EmptyLabels()),
@@ -128,7 +129,7 @@ func newScopeFromScopeMetrics(scopeMetrics pmetric.ScopeMetrics) scope {
 	}
 }
 
-// FromMetrics converts pmetric.Metrics to Prometheus remote write format.
+// FromMetrics appends pmetric.Metrics to storage.AppenderV2.
 func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings) (annots annotations.Annotations, errs error) {
 	namer := otlptranslator.MetricNamer{
 		Namespace: settings.Namespace,
@@ -184,7 +185,8 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 				errs = multierr.Append(errs, err)
 				continue
 			}
-			meta := Metadata{
+
+			appOpts := storage.AOptions{
 				Metadata: metadata.Metadata{
 					Type: otelMetricTypeToPromMetricType(metric),
 					Unit: unitNamer.Build(metric.Unit()),
@@ -202,7 +204,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 					errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
 					break
 				}
-				if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
+				if err := c.addGaugeNumberDataPoints(ctx, dataPoints, resource, settings, scope, appOpts); err != nil {
 					errs = multierr.Append(errs, err)
 					if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
 						return annots, errs
@@ -214,7 +216,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 					errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
 					break
 				}
-				if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
+				if err := c.addSumNumberDataPoints(ctx, dataPoints, resource, settings, scope, appOpts); err != nil {
 					errs = multierr.Append(errs, err)
 					if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
 						return annots, errs
@@ -228,7 +230,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 				}
 				if settings.ConvertHistogramsToNHCB {
 					ws, err := c.addCustomBucketsHistogramDataPoints(
-						ctx, dataPoints, resource, settings, temporality, scope, meta,
+						ctx, dataPoints, resource, settings, temporality, scope, appOpts,
 					)
 					annots.Merge(ws)
 					if err != nil {
@@ -238,7 +240,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 						}
 					}
 				} else {
-					if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
+					if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, scope, appOpts); err != nil {
 						errs = multierr.Append(errs, err)
 						if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
 							return annots, errs
@@ -258,7 +260,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 					settings,
 					temporality,
 					scope,
-					meta,
+					appOpts,
 				)
 				annots.Merge(ws)
 				if err != nil {
@@ -273,7 +275,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 					errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
 					break
 				}
-				if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, scope, meta); err != nil {
+				if err := c.addSummaryDataPoints(ctx, dataPoints, resource, settings, scope, appOpts); err != nil {
 					errs = multierr.Append(errs, err)
 					if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
 						return annots, errs
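A hedged end-to-end sketch of using the converter after this change; the appender construction is illustrative, and any storage.AppenderV2 implementation would do:

	// Illustrative only; error handling trimmed.
	app := db.AppenderV2(ctx) // assumption: db is a storage.AppendableV2 (e.g. TSDB)
	converter := NewPrometheusConverter(app)
	annots, err := converter.FromMetrics(ctx, md, Settings{})
	if err != nil {
		_ = app.Rollback()
		return err
	}
	_ = annots // carries warnings, e.g. about dropped data points
	return app.Commit()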
@@ -22,20 +22,16 @@ import (
 	"testing"
 	"time"
 
-	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
-	"github.com/prometheus/common/promslog"
 	"github.com/prometheus/otlptranslator"
 	"github.com/stretchr/testify/require"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/pmetric"
 	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
 
-	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
-	"github.com/prometheus/prometheus/storage"
 )
 
 func TestFromMetrics(t *testing.T) {
@@ -81,7 +77,7 @@ func TestFromMetrics(t *testing.T) {
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			mockAppender := &mockCombinedAppender{}
+			mockAppender := &mockAppender{}
 			converter := NewPrometheusConverter(mockAppender)
 			payload, wantPromMetrics := createExportRequest(5, 128, 128, 2, 0, tc.settings, tc.temporality)
 			seenFamilyNames := map[string]struct{}{}
@@ -153,7 +149,7 @@ func TestFromMetrics(t *testing.T) {
 
 		generateAttributes(h.Attributes(), "series", 1)
 
-		mockAppender := &mockCombinedAppender{}
+		mockAppender := &mockAppender{}
 		converter := NewPrometheusConverter(mockAppender)
 		annots, err := converter.FromMetrics(
 			context.Background(),
@@ -176,7 +172,7 @@ func TestFromMetrics(t *testing.T) {
 
 	t.Run("context cancellation", func(t *testing.T) {
 		settings := Settings{}
-		converter := NewPrometheusConverter(&mockCombinedAppender{})
+		converter := NewPrometheusConverter(&mockAppender{})
 		ctx, cancel := context.WithCancel(context.Background())
 		// Verify that converter.FromMetrics respects cancellation.
 		cancel()
@@ -189,7 +185,7 @@ func TestFromMetrics(t *testing.T) {
 
 	t.Run("context timeout", func(t *testing.T) {
 		settings := Settings{}
-		converter := NewPrometheusConverter(&mockCombinedAppender{})
+		converter := NewPrometheusConverter(&mockAppender{})
 		// Verify that converter.FromMetrics respects timeout.
 		ctx, cancel := context.WithTimeout(context.Background(), 0)
 		t.Cleanup(cancel)
@@ -222,7 +218,7 @@ func TestFromMetrics(t *testing.T) {
 		generateAttributes(h.Attributes(), "series", 10)
 	}
 
-	converter := NewPrometheusConverter(&mockCombinedAppender{})
+	converter := NewPrometheusConverter(&mockAppender{})
 	annots, err := converter.FromMetrics(context.Background(), request.Metrics(), Settings{})
 	require.NoError(t, err)
 	require.NotEmpty(t, annots)
@@ -255,7 +251,7 @@ func TestFromMetrics(t *testing.T) {
 		generateAttributes(h.Attributes(), "series", 10)
 	}
 
-	converter := NewPrometheusConverter(&mockCombinedAppender{})
+	converter := NewPrometheusConverter(&mockAppender{})
 	annots, err := converter.FromMetrics(
 		context.Background(),
 		request.Metrics(),
@@ -303,7 +299,7 @@ func TestFromMetrics(t *testing.T) {
 		}
 	}
 
-	mockAppender := &mockCombinedAppender{}
+	mockAppender := &mockAppender{}
 	converter := NewPrometheusConverter(mockAppender)
 	annots, err := converter.FromMetrics(
 		context.Background(),
@@ -403,7 +399,7 @@ func TestFromMetrics(t *testing.T) {
 		generateAttributes(point2.Attributes(), "series", 1)
 	}
 
-	mockAppender := &mockCombinedAppender{}
+	mockAppender := &mockAppender{}
 	converter := NewPrometheusConverter(mockAppender)
 	annots, err := converter.FromMetrics(
 		context.Background(),
@@ -660,7 +656,7 @@ func TestTemporality(t *testing.T) {
 		s.CopyTo(sm.Metrics().AppendEmpty())
 	}
 
-	mockAppender := &mockCombinedAppender{}
+	mockAppender := &mockAppender{}
 	c := NewPrometheusConverter(mockAppender)
 	settings := Settings{
 		AllowDeltaTemporality: tc.allowDelta,
@@ -1061,14 +1057,11 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {
 					settings,
 					pmetric.AggregationTemporalityCumulative,
 				)
-				appMetrics := NewCombinedAppenderMetrics(prometheus.NewRegistry())
-				noOpLogger := promslog.NewNopLogger()
 				b.ResetTimer()
 
 				for b.Loop() {
-					app := &noOpAppender{}
-					mockAppender := NewCombinedAppender(app, noOpLogger, false, false, appMetrics)
-					converter := NewPrometheusConverter(mockAppender)
+					app := &statsAppender{}
+					converter := NewPrometheusConverter(app)
 					annots, err := converter.FromMetrics(context.Background(), payload.Metrics(), settings)
 					require.NoError(b, err)
 					require.Empty(b, annots)
@@ -1092,53 +1085,6 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {
 	}
 }
 
-type noOpAppender struct {
-	samples    int
-	histograms int
-	metadata   int
-}
-
-var _ storage.Appender = &noOpAppender{}
-
-func (a *noOpAppender) Append(_ storage.SeriesRef, _ labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) {
-	a.samples++
-	return 1, nil
-}
-
-func (*noOpAppender) AppendSTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) {
-	return 1, nil
-}
-
-func (a *noOpAppender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
-	a.histograms++
-	return 1, nil
-}
-
-func (*noOpAppender) AppendHistogramSTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
-	return 1, nil
-}
-
-func (a *noOpAppender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
-	a.metadata++
-	return 1, nil
-}
-
-func (*noOpAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) {
-	return 1, nil
-}
-
-func (*noOpAppender) Commit() error {
-	return nil
-}
-
-func (*noOpAppender) Rollback() error {
-	return nil
-}
-
-func (*noOpAppender) SetOptions(_ *storage.AppendOptions) {
-	panic("not implemented")
-}
-
 type wantPrometheusMetric struct {
 	name       string
 	familyName string
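The benchmark now constructs a statsAppender, which is not shown in this excerpt. A plausible minimal shape, counting appends the way the deleted noOpAppender did (hypothetical; the real type may record more):

	// Hypothetical sketch of the statsAppender referenced by the benchmark.
	type statsAppender struct {
		samples, histograms int
	}

	func (a *statsAppender) Append(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ float64,
		h *histogram.Histogram, fh *histogram.FloatHistogram, _ storage.AOptions) (storage.SeriesRef, error) {
		if h != nil || fh != nil {
			a.histograms++
		} else {
			a.samples++
		}
		return 1, nil
	}

	func (*statsAppender) Commit() error   { return nil }
	func (*statsAppender) Rollback() error { return nil }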
@@ -21,14 +21,20 @@ import (
 	"math"
 
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/storage"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/pmetric"
 
 	"github.com/prometheus/prometheus/model/value"
 )
 
-func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice,
-	resource pcommon.Resource, settings Settings, scope scope, meta Metadata,
+func (c *PrometheusConverter) addGaugeNumberDataPoints(
+	ctx context.Context,
+	dataPoints pmetric.NumberDataPointSlice,
+	resource pcommon.Resource,
+	settings Settings,
+	scope scope,
+	appOpts storage.AOptions,
 ) error {
 	for x := 0; x < dataPoints.Len(); x++ {
 		if err := c.everyN.checkContext(ctx); err != nil {
@@ -43,13 +49,14 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data
 			settings,
 			nil,
 			true,
-			meta,
+			appOpts.Metadata,
 			model.MetricNameLabel,
-			meta.MetricFamilyName,
+			appOpts.MetricFamilyName,
 		)
 		if err != nil {
 			return err
 		}
 
 		var val float64
 		switch pt.ValueType() {
 		case pmetric.NumberDataPointValueTypeInt:
@@ -57,21 +64,26 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data
 		case pmetric.NumberDataPointValueTypeDouble:
 			val = pt.DoubleValue()
 		}
 
 		if pt.Flags().NoRecordedValue() {
 			val = math.Float64frombits(value.StaleNaN)
 		}
-		ts := convertTimeStamp(pt.Timestamp())
+		t := convertTimeStamp(pt.Timestamp())
 		st := convertTimeStamp(pt.StartTimestamp())
-		if err := c.appender.AppendSample(labels, meta, st, ts, val, nil); err != nil {
+		if _, err = c.appender.Append(0, labels, st, t, val, nil, nil, appOpts); err != nil {
 			return err
 		}
 	}
 
 	return nil
 }
 
-func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice,
-	resource pcommon.Resource, settings Settings, scope scope, meta Metadata,
+func (c *PrometheusConverter) addSumNumberDataPoints(
+	ctx context.Context,
+	dataPoints pmetric.NumberDataPointSlice,
+	resource pcommon.Resource,
+	settings Settings,
+	scope scope,
+	appOpts storage.AOptions,
 ) error {
 	for x := 0; x < dataPoints.Len(); x++ {
 		if err := c.everyN.checkContext(ctx); err != nil {
@@ -79,6 +91,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo
 		}
 
 		pt := dataPoints.At(x)
+
 		lbls, err := c.createAttributes(
 			resource,
 			pt.Attributes(),
@@ -86,12 +99,12 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo
 			settings,
 			nil,
 			true,
-			meta,
+			appOpts.Metadata,
 			model.MetricNameLabel,
-			meta.MetricFamilyName,
+			appOpts.MetricFamilyName,
 		)
 		if err != nil {
-			return nil
+			return err // NOTE: Previously it was nil, was it a bug?
 		}
 		var val float64
 		switch pt.ValueType() {
@@ -100,16 +113,19 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo
 		case pmetric.NumberDataPointValueTypeDouble:
 			val = pt.DoubleValue()
 		}
 
 		if pt.Flags().NoRecordedValue() {
 			val = math.Float64frombits(value.StaleNaN)
 		}
-		ts := convertTimeStamp(pt.Timestamp())
+		t := convertTimeStamp(pt.Timestamp())
 		st := convertTimeStamp(pt.StartTimestamp())
 		exemplars, err := c.getPromExemplars(ctx, pt.Exemplars())
 		if err != nil {
 			return err
 		}
-		if err := c.appender.AppendSample(lbls, meta, st, ts, val, exemplars); err != nil {
+
+		appOpts.Exemplars = exemplars
+		if _, err = c.appender.Append(0, lbls, st, t, val, nil, nil, appOpts); err != nil {
 			return err
 		}
 	}
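A design note on the pattern above: storage.AOptions is passed by value, so assigning appOpts.Exemplars inside the loop mutates only the local copy, and each data point can carry its own exemplars without affecting the caller. A minimal illustration (sketch; the point type is hypothetical):

	// point is a hypothetical holder for one converted data point.
	type point struct {
		start, ts int64
		value     float64
		exemplars []exemplar.Exemplar
	}

	func appendAll(app storage.AppenderV2, ls labels.Labels, pts []point, opts storage.AOptions) error {
		for _, p := range pts {
			opts.Exemplars = p.exemplars // local copy; the caller's opts stays untouched
			if _, err := app.Append(0, ls, p.start, p.ts, p.value, nil, nil, opts); err != nil {
				return err
			}
		}
		return nil
	}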
@@ -22,6 +22,7 @@ import (
 	"time"
 
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/storage"
 	"github.com/stretchr/testify/require"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/pmetric"
@@ -112,10 +113,10 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			metric := tt.metric()
-			mockAppender := &mockCombinedAppender{}
-			converter := NewPrometheusConverter(mockAppender)
+			mApp := &mockAppender{}
+			converter := NewPrometheusConverter(mApp)
 
-			converter.addGaugeNumberDataPoints(
+			require.NoError(t, converter.addGaugeNumberDataPoints(
 				context.Background(),
 				metric.Gauge().DataPoints(),
 				pcommon.NewResource(),
@@ -123,13 +124,13 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) {
 					PromoteScopeMetadata: tt.promoteScope,
 				},
 				tt.scope,
-				Metadata{
+				storage.AOptions{
 					MetricFamilyName: metric.Name(),
 				},
-			)
-			require.NoError(t, mockAppender.Commit())
+			))
+			require.NoError(t, mApp.Commit())
 
-			requireEqual(t, tt.want(), mockAppender.samples)
+			requireEqual(t, tt.want(), mApp.samples)
 		})
 	}
 }
@@ -342,10 +343,10 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			metric := tt.metric()
-			mockAppender := &mockCombinedAppender{}
+			mockAppender := &mockAppender{}
 			converter := NewPrometheusConverter(mockAppender)
 
-			converter.addSumNumberDataPoints(
+			require.NoError(t, converter.addSumNumberDataPoints(
 				context.Background(),
 				metric.Sum().DataPoints(),
 				pcommon.NewResource(),
@@ -353,10 +354,10 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) {
 					PromoteScopeMetadata: tt.promoteScope,
 				},
 				tt.scope,
-				Metadata{
+				storage.AOptions{
 					MetricFamilyName: metric.Name(),
 				},
-			)
+			))
 			require.NoError(t, mockAppender.Commit())
 
 			requireEqual(t, tt.want(), mockAppender.samples)
@@ -28,6 +28,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/model/exemplar"
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/pdata/pmetric"
@@ -35,7 +36,6 @@ import (
 	"go.opentelemetry.io/otel/metric/noop"
 
 	"github.com/prometheus/prometheus/config"
-	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/timestamp"
@@ -48,14 +48,12 @@ import (
 
 type writeHandler struct {
 	logger *slog.Logger
-	appendable storage.Appendable
+	appendable storage.AppendableV2
 
 	samplesWithInvalidLabelsTotal  prometheus.Counter
 	samplesAppendedWithoutMetadata prometheus.Counter
 
-	ingestSTZeroSample      bool
 	enableTypeAndUnitLabels bool
-	appendMetadata          bool
 }
 
 const maxAheadTime = 10 * time.Minute
@@ -65,7 +63,7 @@ const maxAheadTime = 10 * time.Minute
 //
 // NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible
 // as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write.
-func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedMsgs remoteapi.MessageTypes, ingestSTZeroSample, enableTypeAndUnitLabels, appendMetadata bool) http.Handler {
+func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.AppendableV2, acceptedMsgs remoteapi.MessageTypes, enableTypeAndUnitLabels bool) http.Handler {
 	h := &writeHandler{
 		logger:     logger,
 		appendable: appendable,
@@ -82,9 +80,7 @@ func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable
 			Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.",
 		}),
 
-		ingestSTZeroSample:      ingestSTZeroSample,
 		enableTypeAndUnitLabels: enableTypeAndUnitLabels,
-		appendMetadata:          appendMetadata,
 	}
 	return remoteapi.NewWriteHandler(h, acceptedMsgs, remoteapi.WithWriteHandlerLogger(logger))
 }
@@ -155,9 +151,9 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 	samplesWithInvalidLabels := 0
 	samplesAppended := 0
 
-	app := &remoteWriteAppender{
-		Appender: h.appendable.Appender(ctx),
+	app := &validationAppender{
+		AppenderV2: h.appendable.AppenderV2(ctx),
 		maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
 	}
 
 	defer func() {
@@ -172,12 +168,13 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 	}()
 
 	b := labels.NewScratchBuilder(0)
+	var es []exemplar.Exemplar
 	for _, ts := range req.Timeseries {
 		ls := ts.ToLabels(&b, nil)
 
 		// TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are
 		// potentially written. Perhaps unify with fixed writeV2 implementation a bit.
-		if !ls.Has(labels.MetricName) || !ls.IsValid(model.UTF8Validation) {
+		if !ls.Has(model.MetricNameLabel) || !ls.IsValid(model.UTF8Validation) {
 			h.logger.Warn("Invalid metric names or labels", "got", ls.String())
 			samplesWithInvalidLabels++
 			continue
@@ -187,26 +184,20 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 			continue
 		}
 
-		if err := h.appendV1Samples(app, ts.Samples, ls); err != nil {
+		es = es[:0]
+		for _, ep := range ts.Exemplars {
+			e := ep.ToExemplar(&b, nil)
+			es = append(es, e)
+		}
+
+		outOfOrderExemplarErrs, err = h.appendV1Samples(app, ts.Samples, ls, es)
+		if err != nil {
 			return err
 		}
 		samplesAppended += len(ts.Samples)
 
-		for _, ep := range ts.Exemplars {
-			e := ep.ToExemplar(&b, nil)
-			if _, err := app.AppendExemplar(0, ls, e); err != nil {
-				switch {
-				case errors.Is(err, storage.ErrOutOfOrderExemplar):
-					outOfOrderExemplarErrs++
-					h.logger.Debug("Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
-				default:
-					// Since exemplar storage is still experimental, we don't fail the request on ingestion errors
-					h.logger.Debug("Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
-				}
-			}
-		}
-
-		if err = h.appendV1Histograms(app, ts.Histograms, ls); err != nil {
+		outOfOrderExemplarErrs, err = h.appendV1Histograms(app, ts.Histograms, ls, es)
+		if err != nil {
 			return err
 		}
 		samplesAppended += len(ts.Histograms)
@@ -221,43 +212,66 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 	return nil
 }
 
-func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
+func (h *writeHandler) appendV1Samples(app storage.AppenderV2, ss []prompb.Sample, ls labels.Labels, es []exemplar.Exemplar) (outOfOrderExemplarErrs int, err error) {
 	var ref storage.SeriesRef
-	var err error
 	for _, s := range ss {
-		ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
+		ref, err = app.Append(ref, ls, 0, s.GetTimestamp(), s.GetValue(), nil, nil, storage.AOptions{Exemplars: es})
 		if err != nil {
 			if errors.Is(err, storage.ErrOutOfOrderSample) ||
 				errors.Is(err, storage.ErrOutOfBounds) ||
 				errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
-				h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
+				h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp)
 			}
-			return err
+			var pErr *storage.AppendPartialError
+			if errors.As(err, &pErr) {
+				for _, e := range pErr.ExemplarErrors {
+					if errors.Is(e, storage.ErrOutOfOrderExemplar) {
+						outOfOrderExemplarErrs++
+						h.logger.Debug("Out of order exemplar", "series", ls.String())
+						continue
+					}
+					// Since exemplar storage is still experimental, we don't fail the request on ingestion errors
+					h.logger.Debug("Error while adding exemplar in AppendExemplar", "series", ls.String(), "err", err)
+				}
+				// Still claim success and continue.
+				continue
+			}
+			return outOfOrderExemplarErrs, err
 		}
 	}
-	return nil
+	return outOfOrderExemplarErrs, nil
 }
 
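The handling above assumes a partial-failure contract: Append can ingest the sample while rejecting individual exemplars, reporting them through storage.AppendPartialError. The shape below is inferred from how this commit consumes that error and is a sketch, not the storage package definition:

	// Sketch of the error shape inferred from the errors.As usage above.
	type AppendPartialError struct {
		ExemplarErrors []error
	}

	func (e *AppendPartialError) Error() string { return "partial append failure" }

	// Typical consumption: the sample counts as ingested; only exemplar
	// failures are inspected.
	var pErr *storage.AppendPartialError
	if errors.As(err, &pErr) {
		for _, ee := range pErr.ExemplarErrors {
			if errors.Is(ee, storage.ErrOutOfOrderExemplar) {
				// count/log, but do not fail the whole request
			}
		}
	}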
-func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
-	var err error
+func (h *writeHandler) appendV1Histograms(app storage.AppenderV2, hh []prompb.Histogram, ls labels.Labels, es []exemplar.Exemplar) (outOfOrderExemplarErrs int, err error) {
+	var ref storage.SeriesRef
 	for _, hp := range hh {
-		if hp.IsFloatHistogram() {
-			_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
-		} else {
-			_, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
-		}
+		ref, err = app.Append(ref, ls, 0, hp.GetTimestamp(), 0, hp.ToIntHistogram(), hp.ToFloatHistogram(), storage.AOptions{Exemplars: es})
 		if err != nil {
-			// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
+			// Although Append does not currently return ErrDuplicateSampleForTimestamp there is
 			// a note indicating its inclusion in the future.
 			if errors.Is(err, storage.ErrOutOfOrderSample) ||
 				errors.Is(err, storage.ErrOutOfBounds) ||
 				errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
-				h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
+				h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp)
 			}
-			return err
+			var pErr *storage.AppendPartialError
+			if errors.As(err, &pErr) {
+				for _, e := range pErr.ExemplarErrors {
+					if errors.Is(e, storage.ErrOutOfOrderExemplar) {
+						outOfOrderExemplarErrs++
+						h.logger.Debug("Out of order exemplar", "series", ls.String())
+						continue
+					}
+					// Since exemplar storage is still experimental, we don't fail the request on ingestion errors
+					h.logger.Debug("Error while adding exemplar in AppendExemplar", "series", ls.String(), "err", err)
+				}
+				// Still claim success and continue.
+				continue
+			}
			return outOfOrderExemplarErrs, err
 		}
 	}
-	return nil
+	return outOfOrderExemplarErrs, nil
 }
 
 // writeV2 is similar to write, but it works with v2 proto message,
@@ -270,9 +284,9 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist
 // NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors.
 // Once we have 5xx type of error, we immediately stop and rollback all appends.
 func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ remoteapi.WriteResponseStats, errHTTPCode int, _ error) {
-	app := &remoteWriteAppender{
-		Appender: h.appendable.Appender(ctx),
+	app := &validationAppender{
+		AppenderV2: h.appendable.AppenderV2(ctx),
 		maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
 	}
 
 	s := remoteapi.WriteResponseStats{}
@@ -306,10 +320,11 @@ func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ rem
 	return s, 0, nil
 }
 
-func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *remoteapi.WriteResponseStats) (samplesWithoutMetadata, errHTTPCode int, err error) {
+func (h *writeHandler) appendV2(app storage.AppenderV2, req *writev2.Request, rs *remoteapi.WriteResponseStats) (samplesWithoutMetadata, errHTTPCode int, err error) {
 	var (
 		badRequestErrs []error
 		outOfOrderExemplarErrs, samplesWithInvalidLabels int
+		es []exemplar.Exemplar
 
 		b = labels.NewScratchBuilder(0)
 	)
@@ -322,6 +337,11 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
 		}
 
 		m := ts.ToMetadata(req.Symbols)
+		if m.IsEmpty() {
+			// Account for missing metadata for this TimeSeries.
+			samplesWithoutMetadata += rs.AllSamples()
+		}
 
 		if h.enableTypeAndUnitLabels && (m.Type != model.MetricTypeUnknown || m.Unit != "") {
 			slb := labels.NewScratchBuilder(ls.Len() + 2) // +2 for __type__ and __unit__
 			ls.Range(func(l labels.Label) {
@@ -339,7 +359,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
 		// Validate series labels early.
 		// NOTE(bwplotka): While spec allows UTF-8, Prometheus Receiver may impose
 		// specific limits and follow https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case.
-		if !ls.Has(labels.MetricName) || !ls.IsValid(model.UTF8Validation) {
+		if !ls.Has(model.MetricNameLabel) || !ls.IsValid(model.UTF8Validation) {
 			badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid metric name or labels, got %v", ls.String()))
 			samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms)
 			continue
@@ -354,22 +374,32 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
 			badRequestErrs = append(badRequestErrs, fmt.Errorf("TimeSeries must contain at least one sample or histogram for series %v", ls.String()))
 			continue
 		}
+		// Validate that TimeSeries does not have both; it's against the spec e.g. where to attach exemplars to?
+		if len(ts.Samples) > 0 && len(ts.Histograms) > 0 {
+			badRequestErrs = append(badRequestErrs, fmt.Errorf("TimeSeries must contain either samples or histograms for series %v not both", ls.String()))
+			continue
+		}
 
-		allSamplesSoFar := rs.AllSamples()
 		var ref storage.SeriesRef
-		for _, s := range ts.Samples {
-			if h.ingestSTZeroSample && s.StartTimestamp != 0 && s.Timestamp != 0 {
-				ref, err = app.AppendSTZeroSample(ref, ls, s.Timestamp, s.StartTimestamp)
-				// We treat OOO errors specially as it's a common scenario given:
-				// * We can't tell if ST was already ingested in a previous request.
-				// * We don't check if ST changed for stream of samples (we typically have one though),
-				//   as it's checked in the AppendSTZeroSample reliably.
-				if err != nil && !errors.Is(err, storage.ErrOutOfOrderST) {
-					h.logger.Debug("Error when appending ST from remote write request", "err", err, "series", ls.String(), "start_timestamp", s.StartTimestamp, "timestamp", s.Timestamp)
-				}
-			}
-
-			ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue())
+
+		// Attach potential exemplars to append.
+		es = es[:0]
+		for _, ep := range ts.Exemplars {
+			e, err := ep.ToExemplar(&b, req.Symbols)
+			if err != nil {
+				badRequestErrs = append(badRequestErrs, fmt.Errorf("parsing exemplar for series %v: %w", ls.String(), err))
+				continue
+			}
+			es = append(es, e)
+		}
+		appOpts := storage.AOptions{
|
Metadata: m,
|
||||||
|
Exemplars: es,
|
||||||
|
}
|
||||||
|
rs.Exemplars += len(appOpts.Exemplars) // Rejection is accounted later on.
|
||||||
|
|
||||||
|
for _, s := range ts.Samples {
|
||||||
|
ref, err = app.Append(ref, ls, s.GetStartTimestamp(), s.GetTimestamp(), s.GetValue(), nil, nil, appOpts)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
rs.Samples++
|
rs.Samples++
|
||||||
continue
|
continue
|
||||||
@ -384,26 +414,29 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
|
|||||||
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
|
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
var pErr *storage.AppendPartialError
|
||||||
|
if errors.As(err, &pErr) {
|
||||||
|
for _, e := range pErr.ExemplarErrors {
|
||||||
|
rs.Exemplars--
|
||||||
|
if errors.Is(e, storage.ErrOutOfOrderExemplar) {
|
||||||
|
outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here.
|
||||||
|
h.logger.Error("Out of order exemplar", "err", err.Error(), "series", ls.String())
|
||||||
|
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Since exemplar storage is still experimental, we don't fail or check other errors.
|
||||||
|
// Debug log is emitted in TSDB already.
|
||||||
|
}
|
||||||
|
// Still claim success and continue.
|
||||||
|
rs.Samples++
|
||||||
|
continue
|
||||||
|
}
|
||||||
return 0, http.StatusInternalServerError, err
|
return 0, http.StatusInternalServerError, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Native Histograms.
|
// Native Histograms.
|
||||||
for _, hp := range ts.Histograms {
|
for _, hp := range ts.Histograms {
|
||||||
if h.ingestSTZeroSample && hp.StartTimestamp != 0 && hp.Timestamp != 0 {
|
ref, err = app.Append(ref, ls, hp.GetStartTimestamp(), hp.GetTimestamp(), 0, hp.ToIntHistogram(), hp.ToFloatHistogram(), appOpts)
|
||||||
ref, err = h.handleHistogramZeroSample(app, ref, ls, hp, hp.StartTimestamp)
|
|
||||||
// We treat OOO errors specially as it's a common scenario given:
|
|
||||||
// * We can't tell if ST was already ingested in a previous request.
|
|
||||||
// * We don't check if ST changed for stream of samples (we typically have one though),
|
|
||||||
// as it's checked in the ingestSTZeroSample reliably.
|
|
||||||
if err != nil && !errors.Is(err, storage.ErrOutOfOrderST) {
|
|
||||||
h.logger.Debug("Error when appending ST from remote write request", "err", err, "series", ls.String(), "start_timestamp", hp.StartTimestamp, "timestamp", hp.Timestamp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if hp.IsFloatHistogram() {
|
|
||||||
ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram())
|
|
||||||
} else {
|
|
||||||
ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, hp.ToIntHistogram(), nil)
|
|
||||||
}
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
rs.Histograms++
|
rs.Histograms++
|
||||||
continue
|
continue
|
||||||
@ -424,43 +457,25 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
|
|||||||
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
|
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
var pErr *storage.AppendPartialError
|
||||||
|
if errors.As(err, &pErr) {
|
||||||
|
for _, e := range pErr.ExemplarErrors {
|
||||||
|
rs.Exemplars--
|
||||||
|
if errors.Is(e, storage.ErrOutOfOrderExemplar) {
|
||||||
|
outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here.
|
||||||
|
h.logger.Error("Out of order exemplar", "err", err.Error(), "series", ls.String())
|
||||||
|
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Since exemplar storage is still experimental, we don't fail or check other errors.
|
||||||
|
// Debug log is emitted in TSDB already.
|
||||||
|
}
|
||||||
|
// Still claim success and continue.
|
||||||
|
rs.Histograms++
|
||||||
|
continue
|
||||||
|
}
|
||||||
return 0, http.StatusInternalServerError, err
|
return 0, http.StatusInternalServerError, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Exemplars.
|
|
||||||
for _, ep := range ts.Exemplars {
|
|
||||||
e, err := ep.ToExemplar(&b, req.Symbols)
|
|
||||||
if err != nil {
|
|
||||||
badRequestErrs = append(badRequestErrs, fmt.Errorf("parsing exemplar for series %v: %w", ls.String(), err))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ref, err = app.AppendExemplar(ref, ls, e)
|
|
||||||
if err == nil {
|
|
||||||
rs.Exemplars++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Handle append error.
|
|
||||||
if errors.Is(err, storage.ErrOutOfOrderExemplar) {
|
|
||||||
outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here.
|
|
||||||
h.logger.Error("Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
|
|
||||||
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// TODO(bwplotka): Add strict mode which would trigger rollback of everything if needed.
|
|
||||||
// For now we keep the previously released flow (just error not debug leve) of dropping them without rollback and 5xx.
|
|
||||||
h.logger.Error("failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only update metadata in WAL if the metadata-wal-records feature is enabled.
|
|
||||||
// Without this feature, metadata is not persisted to WAL.
|
|
||||||
if h.appendMetadata {
|
|
||||||
if _, err = app.UpdateMetadata(ref, ls, m); err != nil {
|
|
||||||
h.logger.Debug("error while updating metadata from remote write", "err", err)
|
|
||||||
// Metadata is attached to each series, so since Prometheus does not reject sample without metadata information,
|
|
||||||
// we don't report remote write error either. We increment metric instead.
|
|
||||||
samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if outOfOrderExemplarErrs > 0 {
|
if outOfOrderExemplarErrs > 0 {
|
||||||
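Aside from the diff itself: the hunk above leans on the new storage.AppendPartialError to report per-exemplar failures from a single Append call while still counting the sample as ingested. A minimal sketch of how a caller might unwrap it — the helper name, variable names (app, ls, es) and the exact AOptions field set are assumptions for illustration, not code from this commit:

	// Hypothetical helper, not part of this commit; assumes the AppenderV2
	// signature and AppendPartialError semantics shown in the hunk above.
	func appendWithExemplars(app storage.AppenderV2, ls labels.Labels, t int64, v float64, es []exemplar.Exemplar) error {
		_, err := app.Append(0, ls, 0, t, v, nil, nil, storage.AOptions{Exemplars: es})
		var pErr *storage.AppendPartialError
		if errors.As(err, &pErr) {
			// A partial error means the sample itself landed; only some exemplars failed.
			for _, e := range pErr.ExemplarErrors {
				if errors.Is(e, storage.ErrOutOfOrderExemplar) {
					// Only out-of-order exemplars are surfaced to the client above;
					// other exemplar errors are ignored while exemplar storage is experimental.
					return e
				}
			}
			return nil
		}
		return err // nil on full success, hard failure otherwise.
	}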
@@ -499,16 +514,11 @@ type OTLPOptions struct {
 	LookbackDelta time.Duration
 	// Add type and unit labels to the metrics.
 	EnableTypeAndUnitLabels bool
-	// IngestSTZeroSample enables writing zero samples based on the start time
-	// of metrics.
-	IngestSTZeroSample bool
-	// AppendMetadata enables writing metadata to WAL when metadata-wal-records feature is enabled.
-	AppendMetadata bool
 }
 
 // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
 // writes them to the provided appendable.
-func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler {
+func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.AppendableV2, configFunc func() config.Config, opts OTLPOptions) http.Handler {
 	if opts.NativeDelta && opts.ConvertDelta {
 		// This should be validated when iterating through feature flags, so not expected to fail here.
 		panic("cannot enable native delta ingestion and delta2cumulative conversion at the same time")
@@ -520,11 +530,7 @@ func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appenda
 		config:                  configFunc,
 		allowDeltaTemporality:   opts.NativeDelta,
 		lookbackDelta:           opts.LookbackDelta,
-		ingestSTZeroSample:      opts.IngestSTZeroSample,
 		enableTypeAndUnitLabels: opts.EnableTypeAndUnitLabels,
-		appendMetadata:          opts.AppendMetadata,
-		// Register metrics.
-		metrics: otlptranslator.NewCombinedAppenderMetrics(reg),
 	}
 
 	wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex}
@@ -559,26 +565,45 @@ func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appenda
 
 type rwExporter struct {
 	logger     *slog.Logger
-	appendable storage.Appendable
+	appendable storage.AppendableV2
 	config     func() config.Config
 	allowDeltaTemporality   bool
 	lookbackDelta           time.Duration
-	ingestSTZeroSample      bool
 	enableTypeAndUnitLabels bool
-	appendMetadata          bool
-
-	// Metrics.
-	metrics otlptranslator.CombinedAppenderMetrics
 }
 
 func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
 	otlpCfg := rw.config().OTLPConfig
-	app := &remoteWriteAppender{
-		Appender: rw.appendable.Appender(ctx),
+	app := &validationAppender{
+		AppenderV2: rw.appendable.AppenderV2(ctx),
 		maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
 	}
-	combinedAppender := otlptranslator.NewCombinedAppender(app, rw.logger, rw.ingestSTZeroSample, rw.appendMetadata, rw.metrics)
-	converter := otlptranslator.NewPrometheusConverter(combinedAppender)
+	// NOTE(bwplotka): When switching to AppenderV2 I skipped 2 things:
+	// * Metrics
+	// // TODO: Add, likely in a single place in metrics_to_prw.go
+	// samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+	// 	Namespace: "prometheus",
+	// 	Subsystem: "api",
+	// 	Name:      "otlp_appended_samples_without_metadata_total",
+	// 	Help:      "The total number of samples ingested from OTLP without corresponding metadata.",
+	// }),
+	// // TODO: Add using storage.AppenderPartialError
+	// outOfOrderExemplars: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+	// 	Namespace: "prometheus",
+	// 	Subsystem: "api",
+	// 	Name:      "otlp_out_of_order_exemplars_total",
+	// 	Help:      "The total number of received OTLP exemplars which were rejected because they were out of order.",
+	// }),
+	// }
+	// * this odd ref cache. This one I propose to skip until we know we need it for efficiency reasons.
+	// As a part of a single OTLP message, do we even envision ANY ref to be shared? (it's only one sample per series, no?
+	//
+	// // Used to ensure we only update metadata and created timestamps once, and to share storage.SeriesRefs.
+	// // To detect hash collision it also stores the labels.
+	// // There is no overflow/conflict list, the TSDB will handle that part.
+	// refs map[uint64]seriesRef
+	converter := otlptranslator.NewPrometheusConverter(app)
 	annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{
 		AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(),
 		AllowUTF8:         !otlpCfg.TranslationStrategy.ShouldEscape(),
@@ -678,55 +703,26 @@ func hasDelta(md pmetric.Metrics) bool {
 	return false
 }
 
-type remoteWriteAppender struct {
-	storage.Appender
+type validationAppender struct {
+	storage.AppenderV2
 
 	maxTime int64
 }
 
-func (app *remoteWriteAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
-	if t > app.maxTime {
-		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
-	}
-
-	ref, err := app.Appender.Append(ref, lset, t, v)
-	if err != nil {
-		return 0, err
-	}
-	return ref, nil
-}
-
-func (app *remoteWriteAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
-	var err error
+func (app *validationAppender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
 	if t > app.maxTime {
 		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
 	}
 
 	if h != nil && histogram.IsExponentialSchemaReserved(h.Schema) && h.Schema > histogram.ExponentialSchemaMax {
-		if err = h.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
+		if err := h.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
 			return 0, err
 		}
 	}
 	if fh != nil && histogram.IsExponentialSchemaReserved(fh.Schema) && fh.Schema > histogram.ExponentialSchemaMax {
-		if err = fh.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
+		if err := fh.ReduceResolution(histogram.ExponentialSchemaMax); err != nil {
 			return 0, err
 		}
 	}
-
-	if ref, err = app.Appender.AppendHistogram(ref, l, t, h, fh); err != nil {
-		return 0, err
-	}
-	return ref, nil
-}
-
-func (app *remoteWriteAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
-	if e.Ts > app.maxTime {
-		return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds)
-	}
-
-	ref, err := app.Appender.AppendExemplar(ref, l, e)
-	if err != nil {
-		return 0, err
-	}
-	return ref, nil
+	return app.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
 }
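Aside from the diff itself: validationAppender shows the consolidated AppenderV2.Append call shape — floats, native histograms and start timestamps all travel through one method, with exemplars and metadata carried in storage.AOptions rather than separate AppendExemplar/UpdateMetadata calls. A sketch under the signatures visible in this diff; the function name, variables and timestamps are placeholders, not committed code:

	// Hypothetical sketch, not part of this commit.
	func appendBoth(app storage.AppenderV2, ls labels.Labels, ts int64, h *histogram.Histogram) error {
		// Float sample: both histogram pointers are nil, v carries the value.
		ref, err := app.Append(0, ls, 0, ts, 42.0, nil, nil, storage.AOptions{})
		if err != nil {
			return err
		}
		// Native histogram sample on the same series: pass h (or fh for the float
		// flavour); metadata rides along in AOptions instead of UpdateMetadata.
		_, err = app.Append(ref, ls, 0, ts+15000, 0, h, nil, storage.AOptions{
			Metadata: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes"},
		})
		return err
	}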
@@ -31,9 +31,9 @@ import (
 	"github.com/google/go-cmp/cmp"
 	remoteapi "github.com/prometheus/client_golang/exp/api/remote"
 	"github.com/prometheus/common/promslog"
+	"github.com/prometheus/prometheus/util/teststorage"
 	"github.com/stretchr/testify/require"
 
-	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
@@ -129,8 +129,7 @@ func TestRemoteWriteHandlerHeadersHandling_V1Message(t *testing.T) {
 		req.Header.Set(k, v)
 	}
 
-	appendable := &mockAppendable{}
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false, false, false)
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, &mockAppendable{}, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false)
 
 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)
@@ -236,8 +235,11 @@ func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) {
 		req.Header.Set(k, v)
 	}
 
-	appendable := &mockAppendable{}
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, false, false, false)
+	s := teststorage.New(t)
+	t.Cleanup(func() { _ = s.Close() })
+
+	appendable := s //&mockAppendable{}
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, false)
 
 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)
@@ -253,9 +255,9 @@ func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) {
 
 			// Invalid request case - no samples should be written.
 			require.Equal(t, tc.expectedError, strings.TrimSpace(string(out)))
-			require.Empty(t, appendable.samples)
-			require.Empty(t, appendable.histograms)
-			require.Empty(t, appendable.exemplars)
+			// require.Empty(t, appendable.samples)
+			// require.Empty(t, appendable.histograms)
+			// require.Empty(t, appendable.exemplars)
 		})
 	}
 
@@ -272,7 +274,7 @@ func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) {
 	}
 
 	appendable := &mockAppendable{}
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, false, false, false)
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, false)
 
 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)
@@ -301,7 +303,7 @@ func TestRemoteWriteHandler_V1Message(t *testing.T) {
 	// in Prometheus, so keeping like this to not break existing 1.0 clients.
 
 	appendable := &mockAppendable{}
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false, false, false)
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false)
 
 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)
@@ -314,25 +316,18 @@ func TestRemoteWriteHandler_V1Message(t *testing.T) {
 	j := 0
 	k := 0
 	for _, ts := range writeRequestFixture.Timeseries {
-		labels := ts.ToLabels(&b, nil)
+		ls := ts.ToLabels(&b, nil)
 		for _, s := range ts.Samples {
-			requireEqual(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
+			requireEqual(t, mockSample{ls, metadata.Metadata{}, 0, s.Timestamp, s.Value}, appendable.samples[i])
 			i++
 		}
 		for _, e := range ts.Exemplars {
 			exemplarLabels := e.ToExemplar(&b, nil).Labels
-			requireEqual(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
+			requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
 			j++
 		}
 		for _, hp := range ts.Histograms {
-			if hp.IsFloatHistogram() {
-				fh := hp.ToFloatHistogram()
-				requireEqual(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
-			} else {
-				h := hp.ToIntHistogram()
-				requireEqual(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
-			}
-
+			requireEqual(t, mockHistogram{ls, metadata.Metadata{}, 0, hp.Timestamp, hp.ToIntHistogram(), hp.ToFloatHistogram()}, appendable.histograms[k])
 			k++
 		}
 	}
@@ -356,26 +351,15 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
 		expectedCode     int
 		expectedRespBody string
 
 		commitErr       error
 		appendSampleErr error
-		appendSTZeroSampleErr error
-		appendHistogramErr    error
-		appendExemplarErr     error
-		updateMetadataErr     error
-
-		ingestSTZeroSample      bool
+		appendExemplarErr error
 		enableTypeAndUnitLabels bool
-		appendMetadata          bool
 		expectedLabels labels.Labels // For verifying type/unit labels
 	}{
 		{
-			desc:               "All timeseries accepted/ct_enabled",
-			input:              writeV2RequestFixture.Timeseries,
-			expectedCode:       http.StatusNoContent,
-			ingestSTZeroSample: true,
-		},
-		{
-			desc:  "All timeseries accepted/ct_disabled",
+			desc:  "All timeseries accepted",
 			input: writeV2RequestFixture.Timeseries,
 			expectedCode: http.StatusNoContent,
 		},
@@ -469,15 +453,25 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
 			expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
 		},
 		{
-			desc: "Partial write; first series with one dup histogram sample",
+			desc: "Partial write; 3rd series with one dup histogram sample",
 			input: func() []writev2.TimeSeries {
 				f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
-				f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, f.Timeseries[0].Histograms[len(f.Timeseries[0].Histograms)-1])
+				f.Timeseries[2].Histograms = append(f.Timeseries[2].Histograms, f.Timeseries[2].Histograms[len(f.Timeseries[2].Histograms)-1])
 				return f.Timeseries
 			}(),
 			expectedCode:     http.StatusBadRequest,
 			expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
 		},
+		{
+			desc: "Partial write; first series have both sample and histogram",
+			input: func() []writev2.TimeSeries {
+				f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
+				f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil)))
+				return f.Timeseries
+			}(),
+			expectedCode:     http.StatusBadRequest,
+			expectedRespBody: "TBDn",
+		},
 		// Non retriable errors from various parts.
 		{
 			desc: "Internal sample append error; rollback triggered",
@@ -487,14 +481,6 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
 			expectedCode:     http.StatusInternalServerError,
 			expectedRespBody: "some sample internal append error\n",
 		},
-		{
-			desc:               "Internal histogram sample append error; rollback triggered",
-			input:              writeV2RequestFixture.Timeseries,
-			appendHistogramErr: errors.New("some histogram sample internal append error"),
-
-			expectedCode:     http.StatusInternalServerError,
-			expectedRespBody: "some histogram sample internal append error\n",
-		},
 		{
 			desc:  "Partial write; skipped exemplar; exemplar storage errs are noop",
 			input: writeV2RequestFixture.Timeseries,
@@ -502,13 +488,6 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
 
 			expectedCode: http.StatusNoContent,
 		},
-		{
-			desc:              "Partial write; skipped metadata; metadata storage errs are noop",
-			input:             writeV2RequestFixture.Timeseries,
-			updateMetadataErr: errors.New("some metadata update error"),
-
-			expectedCode: http.StatusNoContent,
-		},
 		{
 			desc:  "Internal commit error; rollback triggered",
 			input: writeV2RequestFixture.Timeseries,
@@ -627,7 +606,6 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
 			}(),
 			expectedCode:            http.StatusNoContent,
 			enableTypeAndUnitLabels: false,
-			appendMetadata:          false,
 			expectedLabels: labels.FromStrings("__name__", "test_metric_wal", "instance", "localhost"),
 		},
 		{
@@ -699,23 +677,20 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
 			req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
 
 			appendable := &mockAppendable{
 				commitErr:       tc.commitErr,
 				appendSampleErr: tc.appendSampleErr,
-				appendSTZeroSampleErr: tc.appendSTZeroSampleErr,
-				appendHistogramErr:    tc.appendHistogramErr,
-				appendExemplarErr:     tc.appendExemplarErr,
-				updateMetadataErr:     tc.updateMetadataErr,
+				appendExemplarErr: tc.appendExemplarErr,
 			}
-			handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, tc.ingestSTZeroSample, tc.enableTypeAndUnitLabels, tc.appendMetadata)
+			handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, tc.enableTypeAndUnitLabels)
 
 			recorder := httptest.NewRecorder()
 			handler.ServeHTTP(recorder, req)
 
 			resp := recorder.Result()
-			require.Equal(t, tc.expectedCode, resp.StatusCode)
 			respBody, err := io.ReadAll(resp.Body)
 			require.NoError(t, err)
 			require.Equal(t, tc.expectedRespBody, string(respBody))
+			require.Equal(t, tc.expectedCode, resp.StatusCode)
 
 			if tc.expectedCode == http.StatusInternalServerError {
 				// We don't expect writes for partial writes with retry-able code.
@@ -726,7 +701,6 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
 				require.Empty(t, appendable.samples)
 				require.Empty(t, appendable.histograms)
 				require.Empty(t, appendable.exemplars)
-				require.Empty(t, appendable.metadata)
 				return
 			}
 
@@ -748,37 +722,21 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
 
 			// Double check what was actually appended.
 			var (
 				b = labels.NewScratchBuilder(0)
-				i, j, k, m int
+				i, j, k int
 			)
 			for _, ts := range writeV2RequestFixture.Timeseries {
+				expectedMeta := ts.ToMetadata(writeV2RequestFixture.Symbols)
+
 				ls, err := ts.ToLabels(&b, writeV2RequestFixture.Symbols)
 				require.NoError(t, err)
 
 				for _, s := range ts.Samples {
-					if s.StartTimestamp != 0 && tc.ingestSTZeroSample {
-						requireEqual(t, mockSample{ls, s.StartTimestamp, 0}, appendable.samples[i])
-						i++
-					}
-					requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
+					requireEqual(t, mockSample{ls, expectedMeta, s.StartTimestamp, s.Timestamp, s.Value}, appendable.samples[i])
 					i++
 				}
 				for _, hp := range ts.Histograms {
-					if hp.IsFloatHistogram() {
-						fh := hp.ToFloatHistogram()
-						if hp.StartTimestamp != 0 && tc.ingestSTZeroSample {
-							requireEqual(t, mockHistogram{ls, hp.StartTimestamp, nil, &histogram.FloatHistogram{}}, appendable.histograms[k])
-							k++
-						}
-						requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
-					} else {
-						h := hp.ToIntHistogram()
-						if hp.StartTimestamp != 0 && tc.ingestSTZeroSample {
-							requireEqual(t, mockHistogram{ls, hp.StartTimestamp, &histogram.Histogram{}, nil}, appendable.histograms[k])
-							k++
-						}
-						requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
-					}
+					requireEqual(t, mockHistogram{ls, expectedMeta, hp.StartTimestamp, hp.Timestamp, hp.ToIntHistogram(), hp.ToFloatHistogram()}, appendable.histograms[k])
 					k++
 				}
 				if tc.appendExemplarErr == nil {
@@ -790,16 +748,6 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
 					j++
 				}
 			}
-			if tc.appendMetadata && tc.updateMetadataErr == nil {
-				expectedMeta := ts.ToMetadata(writeV2RequestFixture.Symbols)
-				requireEqual(t, mockMetadata{ls, expectedMeta}, appendable.metadata[m])
-				m++
-			}
-		}
-
-		// Verify that when the feature flag is disabled, no metadata is stored in WAL.
-		if !tc.appendMetadata {
-			require.Empty(t, appendable.metadata, "metadata should not be stored when appendMetadata (metadata-wal-records) is false")
 		}
 		})
 	}
@@ -880,7 +828,7 @@ func TestRemoteWriteHandler_V2Message_NoDuplicateTypeAndUnitLabels(t *testing.T)
 	req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
 
 	appendable := &mockAppendable{}
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, false, true, false)
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, true)
 
 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)
@@ -928,8 +876,8 @@ func TestOutOfOrderSample_V1Message(t *testing.T) {
 	req, err := http.NewRequest(http.MethodPost, "", bytes.NewReader(payload))
 	require.NoError(t, err)
 
-	appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false, false, false)
+	appendable := &mockAppendable{latestTs: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false)
 
 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)
@@ -970,8 +918,8 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) {
 	req, err := http.NewRequest(http.MethodPost, "", bytes.NewReader(payload))
 	require.NoError(t, err)
 
-	appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false, false, false)
+	appendable := &mockAppendable{latestTs: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false)
 
 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)
@@ -1008,8 +956,8 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) {
 	req, err := http.NewRequest(http.MethodPost, "", bytes.NewReader(payload))
 	require.NoError(t, err)
 
-	appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false, false, false)
+	appendable := &mockAppendable{latestTs: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false)
 
 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)
@@ -1059,7 +1007,7 @@ func BenchmarkRemoteWriteHandler(b *testing.B) {
 	for _, tc := range testCases {
 		b.Run(tc.name, func(b *testing.B) {
 			appendable := &mockAppendable{}
-			handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{tc.protoFormat}, false, false, false)
+			handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{tc.protoFormat}, false)
 			b.ResetTimer()
 			for b.Loop() {
 				b.StopTimer()
@@ -1084,7 +1032,7 @@ func TestCommitErr_V1Message(t *testing.T) {
 	require.NoError(t, err)
 
 	appendable := &mockAppendable{commitErr: errors.New("commit error")}
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false, false, false)
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false)
 
 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)
@@ -1150,7 +1098,7 @@ func TestHistogramValidationErrorHandling(t *testing.T) {
 	require.NoError(t, err)
 	t.Cleanup(func() { require.NoError(t, db.Close()) })
 
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []remoteapi.WriteMessageType{protoMsg}, false, false, false)
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []remoteapi.WriteMessageType{protoMsg}, false)
 	recorder := httptest.NewRecorder()
 
 	var buf []byte
@@ -1195,7 +1143,7 @@ func TestCommitErr_V2Message(t *testing.T) {
 	req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
 
 	appendable := &mockAppendable{commitErr: errors.New("commit error")}
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, false, false, false)
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{remoteapi.WriteV2MessageType}, false)
 
 	recorder := httptest.NewRecorder()
 	handler.ServeHTTP(recorder, req)
@@ -1222,7 +1170,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
 		require.NoError(b, db.Close())
 	})
 	// TODO: test with other proto format(s)
-	handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false, false, false)
+	handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType}, false)
 
 	buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy")
 	require.NoError(b, err)
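Aside from the diff itself: every call site above collapses NewWriteHandler's three trailing booleans (ingestSTZeroSample, enableTypeAndUnitLabels, appendMetadata) into the single remaining enableTypeAndUnitLabels flag. A hypothetical construction following the updated signature shown in these hunks:

	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable,
		[]remoteapi.WriteMessageType{remoteapi.WriteV2MessageType},
		true /* enableTypeAndUnitLabels */)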
@@ -1267,29 +1215,26 @@ func genSeriesWithSample(numSeries int, ts int64) []prompb.TimeSeries {
 	return series
 }
 
+// TODO(bwplotka): We have 3 mocks of appender at this point. Consolidate?
 type mockAppendable struct {
-	latestSample map[uint64]int64
+	latestTs map[uint64]int64
 	samples []mockSample
-	latestExemplar  map[uint64]int64
-	exemplars       []mockExemplar
-	latestHistogram map[uint64]int64
-	latestFloatHist map[uint64]int64
-	histograms      []mockHistogram
-	metadata        []mockMetadata
+	exemplars        []mockExemplar
+	latestExemplarTs map[uint64]int64
+	histograms       []mockHistogram
 
 	// optional errors to inject.
 	commitErr       error
 	appendSampleErr error
-	appendSTZeroSampleErr error
-	appendHistogramErr    error
-	appendExemplarErr     error
-	updateMetadataErr     error
+	appendHistogramErr error
+	appendExemplarErr  error
}
 
 type mockSample struct {
 	l labels.Labels
-	t int64
-	v float64
+	m metadata.Metadata
+	st, t int64
+	v float64
 }
 
 type mockExemplar struct {
@@ -1300,15 +1245,11 @@ type mockExemplar struct {
 }
 
 type mockHistogram struct {
 	l labels.Labels
-	t  int64
-	h  *histogram.Histogram
-	fh *histogram.FloatHistogram
-}
-
-type mockMetadata struct {
-	l labels.Labels
-	m metadata.Metadata
+	m metadata.Metadata
+	st, t int64
+	h  *histogram.Histogram
+	fh *histogram.FloatHistogram
 }
 
 // Wrapper to instruct go-cmp package to compare a list of structs with unexported fields.
@@ -1316,36 +1257,26 @@ func requireEqual(t *testing.T, expected, actual any, msgAndArgs ...any) {
 	t.Helper()
 
 	testutil.RequireEqualWithOptions(t, expected, actual,
-		[]cmp.Option{cmp.AllowUnexported(mockSample{}), cmp.AllowUnexported(mockExemplar{}), cmp.AllowUnexported(mockHistogram{}), cmp.AllowUnexported(mockMetadata{})},
+		[]cmp.Option{cmp.AllowUnexported(), cmp.AllowUnexported(mockSample{}, mockExemplar{}, mockHistogram{})},
 		msgAndArgs...)
 }
 
-func (m *mockAppendable) Appender(context.Context) storage.Appender {
-	if m.latestSample == nil {
-		m.latestSample = map[uint64]int64{}
+func (m *mockAppendable) AppenderV2(context.Context) storage.AppenderV2 {
+	if m.latestTs == nil {
+		m.latestTs = map[uint64]int64{}
 	}
-	if m.latestHistogram == nil {
-		m.latestHistogram = map[uint64]int64{}
-	}
-	if m.latestFloatHist == nil {
-		m.latestFloatHist = map[uint64]int64{}
-	}
-	if m.latestExemplar == nil {
-		m.latestExemplar = map[uint64]int64{}
+	if m.latestExemplarTs == nil {
+		m.latestExemplarTs = map[uint64]int64{}
 	}
 	return m
 }
 
-func (*mockAppendable) SetOptions(*storage.AppendOptions) {
-	panic("unimplemented")
-}
-
-func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
+func (m *mockAppendable) Append(_ storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
 	if m.appendSampleErr != nil {
 		return 0, m.appendSampleErr
 	}
-	hash := l.Hash()
-	latestTs := m.latestSample[hash]
+	ref := ls.Hash()
+	latestTs := m.latestTs[ref]
 	if t < latestTs {
 		return 0, storage.ErrOutOfOrderSample
 	}
@@ -1353,16 +1284,45 @@ func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v
 		return 0, storage.ErrDuplicateSampleForTimestamp
 	}
 
-	if l.IsEmpty() {
+	if ls.IsEmpty() {
 		return 0, tsdb.ErrInvalidSample
 	}
-	if _, hasDuplicates := l.HasDuplicateLabelNames(); hasDuplicates {
+	if _, hasDuplicates := ls.HasDuplicateLabelNames(); hasDuplicates {
 		return 0, tsdb.ErrInvalidSample
 	}
 
-	m.latestSample[hash] = t
-	m.samples = append(m.samples, mockSample{l, t, v})
-	return storage.SeriesRef(hash), nil
+	m.latestTs[ref] = t
+	switch {
+	case h != nil, fh != nil:
+		m.histograms = append(m.histograms, mockHistogram{ls, opts.Metadata, st, t, h, fh})
+	default:
+		m.samples = append(m.samples, mockSample{ls, opts.Metadata, st, t, v})
+	}
+
+	var exErrs []error
+	if m.appendExemplarErr != nil {
+		exErrs = append(exErrs, m.appendExemplarErr)
+	} else {
+		for _, e := range opts.Exemplars {
+			latestTs := m.latestExemplarTs[ref]
+			if e.Ts < latestTs {
+				exErrs = append(exErrs, storage.ErrOutOfOrderExemplar)
+				continue
+			}
+			if e.Ts == latestTs {
+				// Similar to tsdb/head_append.go#appendExemplars, duplicate errors are not propagated.
+				continue
+			}
+
+			m.latestExemplarTs[ref] = e.Ts
+			m.exemplars = append(m.exemplars, mockExemplar{ls, e.Labels, e.Ts, e.Value})
+		}
+	}
+
+	if len(exErrs) > 0 {
+		return storage.SeriesRef(ref), &storage.AppendPartialError{ExemplarErrors: exErrs}
+	}
+	return storage.SeriesRef(ref), nil
 }
 
 func (m *mockAppendable) Commit() error {
@@ -1376,142 +1336,9 @@ func (m *mockAppendable) Rollback() error {
 	m.samples = m.samples[:0]
 	m.exemplars = m.exemplars[:0]
 	m.histograms = m.histograms[:0]
-	m.metadata = m.metadata[:0]
 	return nil
 }
-
-func (m *mockAppendable) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
-	if m.appendExemplarErr != nil {
-		return 0, m.appendExemplarErr
-	}
-
-	latestTs := m.latestExemplar[uint64(ref)]
-	if e.Ts < latestTs {
-		return 0, storage.ErrOutOfOrderExemplar
-	}
-	if e.Ts == latestTs {
-		return 0, storage.ErrDuplicateExemplar
-	}
-
-	m.latestExemplar[uint64(ref)] = e.Ts
-	m.exemplars = append(m.exemplars, mockExemplar{l, e.Labels, e.Ts, e.Value})
-	return ref, nil
-}
-
-func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
-	if m.appendHistogramErr != nil {
-		return 0, m.appendHistogramErr
-	}
-	hash := l.Hash()
-	var latestTs int64
-	if h != nil {
-		latestTs = m.latestHistogram[hash]
-	} else {
-		latestTs = m.latestFloatHist[hash]
-	}
-	if t < latestTs {
-		return 0, storage.ErrOutOfOrderSample
-	}
-	if t == latestTs {
-		return 0, storage.ErrDuplicateSampleForTimestamp
-	}
-
-	if l.IsEmpty() {
-		return 0, tsdb.ErrInvalidSample
-	}
-	if _, hasDuplicates := l.HasDuplicateLabelNames(); hasDuplicates {
-		return 0, tsdb.ErrInvalidSample
-	}
-
-	if h != nil {
-		m.latestHistogram[hash] = t
-	} else {
-		m.latestFloatHist[hash] = t
-	}
-	m.histograms = append(m.histograms, mockHistogram{l, t, h, fh})
-	return storage.SeriesRef(hash), nil
-}
-
-func (m *mockAppendable) AppendHistogramSTZeroSample(_ storage.SeriesRef, l labels.Labels, t, st int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
-	if m.appendSTZeroSampleErr != nil {
-		return 0, m.appendSTZeroSampleErr
-	}
-
-	// Created Timestamp can't be higher than the original sample's timestamp.
-	if st > t {
-		return 0, storage.ErrOutOfOrderSample
-	}
-	hash := l.Hash()
-	var latestTs int64
-	if h != nil {
-		latestTs = m.latestHistogram[hash]
-	} else {
-		latestTs = m.latestFloatHist[hash]
-	}
-	if st < latestTs {
-		return 0, storage.ErrOutOfOrderSample
-	}
-	if st == latestTs {
-		return 0, storage.ErrDuplicateSampleForTimestamp
-	}
-
-	if l.IsEmpty() {
-		return 0, tsdb.ErrInvalidSample
-	}
-
-	if _, hasDuplicates := l.HasDuplicateLabelNames(); hasDuplicates {
-		return 0, tsdb.ErrInvalidSample
-	}
-
-	if h != nil {
-		m.latestHistogram[hash] = st
-		m.histograms = append(m.histograms, mockHistogram{l, st, &histogram.Histogram{}, nil})
-	} else {
-		m.latestFloatHist[hash] = st
-		m.histograms = append(m.histograms, mockHistogram{l, st, nil, &histogram.FloatHistogram{}})
-	}
-	return storage.SeriesRef(hash), nil
-}
-
-func (m *mockAppendable) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) {
-	if m.updateMetadataErr != nil {
-		return 0, m.updateMetadataErr
-	}
-
-	m.metadata = append(m.metadata, mockMetadata{l: l, m: mp})
-	return ref, nil
-}
-
-func (m *mockAppendable) AppendSTZeroSample(_ storage.SeriesRef, l labels.Labels, t, st int64) (storage.SeriesRef, error) {
-	if m.appendSTZeroSampleErr != nil {
-		return 0, m.appendSTZeroSampleErr
-	}
-
-	// Created Timestamp can't be higher than the original sample's timestamp.
-	if st > t {
-		return 0, storage.ErrOutOfOrderSample
-	}
-	hash := l.Hash()
-	latestTs := m.latestSample[hash]
-	if st < latestTs {
-		return 0, storage.ErrOutOfOrderSample
-	}
-	if st == latestTs {
-		return 0, storage.ErrDuplicateSampleForTimestamp
-	}
-
-	if l.IsEmpty() {
-		return 0, tsdb.ErrInvalidSample
-	}
-	if _, hasDuplicates := l.HasDuplicateLabelNames(); hasDuplicates {
-		return 0, tsdb.ErrInvalidSample
-	}
-
-	m.latestSample[hash] = st
-	m.samples = append(m.samples, mockSample{l, st, 0})
-	return storage.SeriesRef(hash), nil
-}
 
 var (
 	highSchemaHistogram = &histogram.Histogram{
 		Schema: 10,
@@ -1553,7 +1380,7 @@ func TestHistogramsReduction(t *testing.T) {
 	for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} {
 		t.Run(string(protoMsg), func(t *testing.T) {
 			appendable := &mockAppendable{}
-			handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{protoMsg}, false, false, false)
+			handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{protoMsg}, false)
 
 			var (
 				err error
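Aside from the diff itself: after the rework above, a test drives every ingest path through the mock's single Append method instead of separate AppendHistogram/AppendExemplar/UpdateMetadata calls. Hypothetical usage, following only the types visible in this diff:

	m := &mockAppendable{}
	app := m.AppenderV2(context.Background())
	// First write succeeds and is recorded in m.samples ...
	_, err := app.Append(0, labels.FromStrings("__name__", "up"), 0, 1000, 1, nil, nil, storage.AOptions{})
	// ... while a later write with t < 1000 for the same labels yields
	// storage.ErrOutOfOrderSample from the same entry point.
	_, err = app.Append(0, labels.FromStrings("__name__", "up"), 0, 999, 2, nil, nil, storage.AOptions{})
	_ = err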
@@ -37,17 +37,16 @@ import (
 	common_config "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/otlptranslator"
-	"github.com/stretchr/testify/require"
-	"go.opentelemetry.io/collector/pdata/pcommon"
-	"go.opentelemetry.io/collector/pdata/pmetric"
-	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/collector/pdata/pcommon"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
 )
 
 func testRemoteWriteConfig() *config.RemoteWriteConfig {
@ -385,87 +384,53 @@ func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
|
|||||||
require.NoError(t, s.Close())
|
require.NoError(t, s.Close())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO(bwplotka): Move all the OTLP handler tests to `write_handler_test.go`.
|
||||||
|
// write.go and write_test.go are for sending (client side).
|
||||||
|
|
||||||
func TestOTLPWriteHandler(t *testing.T) {
|
func TestOTLPWriteHandler(t *testing.T) {
|
||||||
timestamp := time.Now()
|
// Compile pieces of expectations that does not depend on translation or type and unit labels, for readability.
|
||||||
var zeroTime time.Time
|
expectedBaseSamples := []mockSample{
|
||||||
exportRequest := generateOTLPWriteRequest(timestamp, zeroTime)
|
{m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"}, v: 10.0},
|
||||||
|
{m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"}, v: 10.0},
|
||||||
|
{m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, v: 30.0},
|
||||||
|
{m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, v: 12.0},
|
||||||
|
{m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, v: 2.0},
|
||||||
|
{m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, v: 4.0},
|
||||||
|
{m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, v: 6.0},
|
||||||
|
{m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, v: 8.0},
|
||||||
|
{m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, v: 10.0},
|
||||||
|
{m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, v: 12.0},
|
||||||
|
{m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"}, v: 12.0},
|
||||||
|
{m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"}, v: 1},
|
||||||
|
}
|
||||||
|
|
||||||
|
ts := time.Now()
|
||||||
|
st := ts.Add(-1 * time.Millisecond)
|
||||||
|
exportRequest := generateOTLPWriteRequest(ts, st)
|
||||||
for _, testCase := range []struct {
|
for _, testCase := range []struct {
|
||||||
name string
|
name string
|
||||||
otlpCfg config.OTLPConfig
|
otlpCfg config.OTLPConfig
|
||||||
typeAndUnitLabels bool
|
typeAndUnitLabels bool
|
||||||
expectedSamples []mockSample
|
expectedSeries []labels.Labels
|
||||||
expectedMetadata []mockMetadata
|
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "NoTranslation/NoTypeAndUnitLabels",
|
name: "NoTranslation/NoTypeAndUnitLabels",
|
||||||
otlpCfg: config.OTLPConfig{
|
otlpCfg: config.OTLPConfig{
|
||||||
TranslationStrategy: otlptranslator.NoTranslation,
|
TranslationStrategy: otlptranslator.NoTranslation,
|
||||||
},
|
},
|
||||||
expectedSamples: []mockSample{
|
expectedSeries: []labels.Labels{
|
||||||
{
|
labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
|
||||||
l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
|
labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
|
||||||
t: timestamp.UnixMilli(),
|
labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
|
||||||
v: 10.0,
|
labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
|
||||||
},
|
labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
|
||||||
{
|
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 1,
-},
-},
-expectedMetadata: []mockMetadata{
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
-},
-},
 },
 },
 {
@ -474,145 +439,39 @@ func TestOTLPWriteHandler(t *testing.T) {
 TranslationStrategy: otlptranslator.NoTranslation,
 },
 typeAndUnitLabels: true,
-expectedSamples: []mockSample{
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 10.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 1,
-},
-},
-expectedMetadata: []mockMetadata{
-{
-// Metadata labels follow series labels.
-l: labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
-},
-},
+expectedSeries: []labels.Labels{
+labels.FromStrings(model.MetricNameLabel, "test.counter", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test.gauge", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
 },
 },
+// For the following cases, skip type and unit cases, it has nothing todo with translation.
 {
-name: "UnderscoreEscapingWithSuffixes/NoTypeAndUnitLabels",
+name: "UnderscoreEscapingWithSuffixes",
 otlpCfg: config.OTLPConfig{
 TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
 },
-expectedSamples: []mockSample{
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 10.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 1,
-},
-},
-expectedMetadata: []mockMetadata{
-// All get _bytes unit suffix and counter also gets _total.
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
-},
-},
+expectedSeries: []labels.Labels{
+labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
 },
 },
 {
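With AppenderV2 the test cases above stop carrying a separate expectedMetadata list: metadata travels with each appended sample, so only the expected series labels vary per translation strategy. A minimal sketch of the mockSample shape this implies, with field names inferred from the assignments in this diff (l, st, t, v) and the m: metadata.Metadata{...} literals in TestOTLPDelta further down; the real definition lives elsewhere in this test file:

type mockSample struct {
	l  labels.Labels     // series labels, substituted from expectedSeries
	st int64             // start timestamp, new with AppenderV2
	t  int64             // sample timestamp
	v  float64           // float value
	m  metadata.Metadata // per-sample metadata, replacing the expectedMetadata list
}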
@ -620,526 +479,68 @@ func TestOTLPWriteHandler(t *testing.T) {
 otlpCfg: config.OTLPConfig{
 TranslationStrategy: otlptranslator.UnderscoreEscapingWithoutSuffixes,
 },
-expectedSamples: []mockSample{
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 10.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 1,
-},
-},
-expectedMetadata: []mockMetadata{
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
-},
-},
+expectedSeries: []labels.Labels{
+labels.FromStrings(model.MetricNameLabel, "test_counter", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test_gauge", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_sum", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_count", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+labels.FromStrings(model.MetricNameLabel, "test_histogram_bucket", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
 },
 },
 {
-name: "UnderscoreEscapingWithSuffixes/WithTypeAndUnitLabels",
+name: "NoUTF8EscapingWithSuffixes",
-otlpCfg: config.OTLPConfig{
-TranslationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes,
-},
-typeAndUnitLabels: true,
-expectedSamples: []mockSample{
-{
-l: labels.New(labels.Label{Name: "__name__", Value: "test_counter_bytes_total"},
-labels.Label{Name: "__type__", Value: "counter"},
-labels.Label{Name: "__unit__", Value: "bytes"},
-labels.Label{Name: "foo_bar", Value: "baz"},
-labels.Label{Name: "instance", Value: "test-instance"},
-labels.Label{Name: "job", Value: "test-service"}),
-t: timestamp.UnixMilli(),
-v: 10.0,
-},
-{
-l: labels.New(
-labels.Label{Name: "__name__", Value: "target_info"},
-labels.Label{Name: "host_name", Value: "test-host"},
-labels.Label{Name: "instance", Value: "test-instance"},
-labels.Label{Name: "job", Value: "test-service"},
-),
-t: timestamp.UnixMilli(),
-v: 1,
-},
-},
-expectedMetadata: []mockMetadata{
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test_exponential_histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo_bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host_name", "test-host", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
-},
-},
-},
-{
-name: "NoUTF8EscapingWithSuffixes/NoTypeAndUnitLabels",
 otlpCfg: config.OTLPConfig{
 TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
 },
-expectedSamples: []mockSample{
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 10.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 1,
-},
-},
-expectedMetadata: []mockMetadata{
-// All get _bytes unit suffix and counter also gets _total.
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
-},
-},
+expectedSeries: []labels.Labels{
+labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
+labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
+labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
-{
-name: "NoUTF8EscapingWithSuffixes/WithTypeAndUnitLabels",
-otlpCfg: config.OTLPConfig{
-TranslationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes,
-},
-typeAndUnitLabels: true,
-expectedSamples: []mockSample{
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 10.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 1,
-},
-},
-expectedMetadata: []mockMetadata{
-// All get _bytes unit suffix and counter also gets _total.
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.counter_bytes_total", "__type__", "counter", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "test-counter-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.gauge_bytes", "__type__", "gauge", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "bytes", Help: "test-gauge-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_sum", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_count", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bytes_bucket", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram_bytes", "__type__", "histogram", "__unit__", "bytes", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeHistogram, Unit: "bytes", Help: "test-exponential-histogram-description"},
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
-m: metadata.Metadata{Type: model.MetricTypeGauge, Unit: "", Help: "Target metadata"},
-},
-},
 },
 },
 } {
 t.Run(testCase.name, func(t *testing.T) {
 otlpOpts := OTLPOptions{
 EnableTypeAndUnitLabels: testCase.typeAndUnitLabels,
-AppendMetadata: true,
 }
 appendable := handleOTLP(t, exportRequest, testCase.otlpCfg, otlpOpts)
-for _, sample := range testCase.expectedSamples {
-requireContainsSample(t, appendable.samples, sample)
-}
-for _, meta := range testCase.expectedMetadata {
-requireContainsMetadata(t, appendable.metadata, meta)
+expectedSamples := expectedBaseSamples
+for i, l := range testCase.expectedSeries {
+expectedSamples[i].l = l
+expectedSamples[i].t = ts.UnixMilli()
+expectedSamples[i].st = st.UnixMilli()
+if l.Get(model.MetricNameLabel) == "target_info" {
+expectedSamples[i].st = 0 // Target info is artificial and it does not have st (also gauge).
+}
 }
+requireEqual(t, expectedSamples, appendable.samples)
+
+// TODO: Test histogram sample?
+
 require.Len(t, appendable.samples, 12) // 1 (counter) + 1 (gauge) + 1 (target_info) + 7 (hist_bucket) + 2 (hist_sum, hist_count)
 require.Len(t, appendable.histograms, 1) // 1 (exponential histogram)
-require.Len(t, appendable.metadata, 13) // for each float and histogram sample
 require.Len(t, appendable.exemplars, 1) // 1 (exemplar)
 })
 }
 }

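The reworked assertions above depend on the mock recording everything through the single AppenderV2 Append call whose signature closes this diff. A hedged sketch of what such a recording Append might look like — the mockAppendable field names here are assumptions for illustration, not the committed code:

func (m *mockAppendable) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
	if h != nil || fh != nil {
		// Native histograms are recorded separately, matching appendable.histograms above.
		m.histograms = append(m.histograms, mockHistogram{l: ls, t: t, h: h})
		return ref, nil
	}
	m.samples = append(m.samples, mockSample{l: ls, st: st, t: t, v: v})
	return ref, nil
}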
-// Check that start time is ingested if ingestSTZeroSample is enabled
-// and the start time is actually set (non-zero).
-func TestOTLPWriteHandler_StartTime(t *testing.T) {
-timestamp := time.Now()
-startTime := timestamp.Add(-1 * time.Millisecond)
-var zeroTime time.Time
-
-expectedSamples := []mockSample{
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.counter", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 10.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.gauge", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 10.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_sum", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 30.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_count", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 12.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "0"),
-t: timestamp.UnixMilli(),
-v: 2.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "1"),
-t: timestamp.UnixMilli(),
-v: 4.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "2"),
-t: timestamp.UnixMilli(),
-v: 6.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "3"),
-t: timestamp.UnixMilli(),
-v: 8.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "4"),
-t: timestamp.UnixMilli(),
-v: 10.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "5"),
-t: timestamp.UnixMilli(),
-v: 12.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.histogram_bucket", "foo.bar", "baz", "instance", "test-instance", "job", "test-service", "le", "+Inf"),
-t: timestamp.UnixMilli(),
-v: 12.0,
-},
-{
-l: labels.FromStrings(model.MetricNameLabel, "target_info", "host.name", "test-host", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-v: 1.0,
-},
-}
-expectedHistograms := []mockHistogram{
-{
-l: labels.FromStrings(model.MetricNameLabel, "test.exponential.histogram", "foo.bar", "baz", "instance", "test-instance", "job", "test-service"),
-t: timestamp.UnixMilli(),
-h: &histogram.Histogram{
-Schema: 2,
-ZeroThreshold: 1e-128,
-ZeroCount: 2,
-Count: 10,
-Sum: 30,
-PositiveSpans: []histogram.Span{{Offset: 1, Length: 5}},
-PositiveBuckets: []int64{2, 0, 0, 0, 0},
-},
-},
-}
-
-expectedSamplesWithSTZero := make([]mockSample, 0, len(expectedSamples)*2-1) // All samples will get ST zero, except target_info.
-for _, s := range expectedSamples {
-if s.l.Get(model.MetricNameLabel) != "target_info" {
-expectedSamplesWithSTZero = append(expectedSamplesWithSTZero, mockSample{
-l: s.l.Copy(),
-t: startTime.UnixMilli(),
-v: 0,
-})
-}
-expectedSamplesWithSTZero = append(expectedSamplesWithSTZero, s)
-}
-expectedHistogramsWithSTZero := make([]mockHistogram, 0, len(expectedHistograms)*2)
-for _, s := range expectedHistograms {
-if s.l.Get(model.MetricNameLabel) != "target_info" {
-expectedHistogramsWithSTZero = append(expectedHistogramsWithSTZero, mockHistogram{
-l: s.l.Copy(),
-t: startTime.UnixMilli(),
-h: &histogram.Histogram{},
-})
-}
-expectedHistogramsWithSTZero = append(expectedHistogramsWithSTZero, s)
-}
-
-for _, testCase := range []struct {
-name string
-otlpOpts OTLPOptions
-startTime time.Time
-expectSTZero bool
-expectedSamples []mockSample
-expectedHistograms []mockHistogram
-}{
-{
-name: "IngestSTZero=false/startTime=0",
-otlpOpts: OTLPOptions{
-IngestSTZeroSample: false,
-},
-startTime: zeroTime,
-expectedSamples: expectedSamples,
-expectedHistograms: expectedHistograms,
-},
-{
-name: "IngestSTZero=true/startTime=0",
-otlpOpts: OTLPOptions{
-IngestSTZeroSample: true,
-},
-startTime: zeroTime,
-expectedSamples: expectedSamples,
-expectedHistograms: expectedHistograms,
-},
-{
-name: "IngestSTZero=false/startTime=ts-1ms",
-otlpOpts: OTLPOptions{
-IngestSTZeroSample: false,
-},
-startTime: startTime,
-expectedSamples: expectedSamples,
-expectedHistograms: expectedHistograms,
-},
-{
-name: "IngestSTZero=true/startTime=ts-1ms",
-otlpOpts: OTLPOptions{
-IngestSTZeroSample: true,
-},
-startTime: startTime,
-expectedSamples: expectedSamplesWithSTZero,
-expectedHistograms: expectedHistogramsWithSTZero,
-},
-} {
-t.Run(testCase.name, func(t *testing.T) {
-exportRequest := generateOTLPWriteRequest(timestamp, testCase.startTime)
-appendable := handleOTLP(t, exportRequest, config.OTLPConfig{
-TranslationStrategy: otlptranslator.NoTranslation,
-}, testCase.otlpOpts)
-for i, expect := range testCase.expectedSamples {
-actual := appendable.samples[i]
-require.True(t, labels.Equal(expect.l, actual.l), "sample labels,pos=%v", i)
-require.Equal(t, expect.t, actual.t, "sample timestamp,pos=%v", i)
-require.Equal(t, expect.v, actual.v, "sample value,pos=%v", i)
-}
-for i, expect := range testCase.expectedHistograms {
-actual := appendable.histograms[i]
-require.True(t, labels.Equal(expect.l, actual.l), "histogram labels,pos=%v", i)
-require.Equal(t, expect.t, actual.t, "histogram timestamp,pos=%v", i)
-require.True(t, expect.h.Equals(actual.h), "histogram value,pos=%v", i)
-}
-require.Len(t, appendable.samples, len(testCase.expectedSamples))
-require.Len(t, appendable.histograms, len(testCase.expectedHistograms))
-})
-}
-}
-
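The deleted TestOTLPWriteHandler_StartTime exercised the old mechanism, in which an explicit zero-valued sample at the start timestamp was injected when IngestSTZeroSample was enabled. With AppenderV2 the start timestamp is an argument of every Append call, which is why the main handler test now asserts it directly via expectedSamples[i].st. Roughly, and only as a sketch of the two calling conventions (not the handler's actual code):

// Old (sketch): a synthetic zero sample preceded the real one.
// app.Append(0, ls, startTime.UnixMilli(), 0)
// app.Append(0, ls, ts.UnixMilli(), v)
// New (sketch): the start timestamp st rides along with the sample itself.
// app.Append(0, ls, startTime.UnixMilli(), ts.UnixMilli(), v, nil, nil, storage.AOptions{})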
-func requireContainsSample(t *testing.T, actual []mockSample, expected mockSample) {
-t.Helper()
-
-for _, got := range actual {
-if labels.Equal(expected.l, got.l) && expected.t == got.t && expected.v == got.v {
-return
-}
-}
-require.Fail(t, fmt.Sprintf("Sample not found: \n"+
-"expected: %v\n"+
-"actual : %v", expected, actual))
-}
-
-func requireContainsMetadata(t *testing.T, actual []mockMetadata, expected mockMetadata) {
-t.Helper()
-
-for _, got := range actual {
-if labels.Equal(expected.l, got.l) && expected.m.Type == got.m.Type && expected.m.Unit == got.m.Unit && expected.m.Help == got.m.Help {
-return
-}
-}
-require.Fail(t, fmt.Sprintf("Metadata not found: \n"+
-"expected: %v\n"+
-"actual : %v", expected, actual))
-}
-
func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig, otlpOpts OTLPOptions) *mockAppendable {
|
func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg config.OTLPConfig, otlpOpts OTLPOptions) *mockAppendable {
|
||||||
buf, err := exportRequest.MarshalProto()
|
buf, err := exportRequest.MarshalProto()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@ -1164,7 +565,7 @@ func handleOTLP(t *testing.T, exportRequest pmetricotlp.ExportRequest, otlpCfg c
|
|||||||
return appendable
|
return appendable
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateOTLPWriteRequest(timestamp, startTime time.Time) pmetricotlp.ExportRequest {
|
func generateOTLPWriteRequest(ts, startTime time.Time) pmetricotlp.ExportRequest {
|
||||||
d := pmetric.NewMetrics()
|
d := pmetric.NewMetrics()
|
||||||
|
|
||||||
// Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram
|
// Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram
|
||||||
@ -1188,14 +589,14 @@ func generateOTLPWriteRequest(timestamp, startTime time.Time) pmetricotlp.Export
|
|||||||
counterMetric.Sum().SetIsMonotonic(true)
|
counterMetric.Sum().SetIsMonotonic(true)
|
||||||
|
|
||||||
counterDataPoint := counterMetric.Sum().DataPoints().AppendEmpty()
|
counterDataPoint := counterMetric.Sum().DataPoints().AppendEmpty()
|
||||||
counterDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
|
counterDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(ts))
|
||||||
counterDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
|
counterDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
|
||||||
counterDataPoint.SetDoubleValue(10.0)
|
counterDataPoint.SetDoubleValue(10.0)
|
||||||
counterDataPoint.Attributes().PutStr("foo.bar", "baz")
|
counterDataPoint.Attributes().PutStr("foo.bar", "baz")
|
||||||
|
|
||||||
counterExemplar := counterDataPoint.Exemplars().AppendEmpty()
|
counterExemplar := counterDataPoint.Exemplars().AppendEmpty()
|
||||||
|
|
||||||
counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
|
counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(ts))
|
||||||
counterExemplar.SetDoubleValue(10.0)
|
counterExemplar.SetDoubleValue(10.0)
|
||||||
counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7})
|
counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7})
|
||||||
counterExemplar.SetTraceID(pcommon.TraceID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15})
|
counterExemplar.SetTraceID(pcommon.TraceID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15})
|
||||||
@ -1208,7 +609,7 @@ func generateOTLPWriteRequest(timestamp, startTime time.Time) pmetricotlp.Export
|
|||||||
gaugeMetric.SetEmptyGauge()
|
gaugeMetric.SetEmptyGauge()
|
||||||
|
|
||||||
gaugeDataPoint := gaugeMetric.Gauge().DataPoints().AppendEmpty()
|
gaugeDataPoint := gaugeMetric.Gauge().DataPoints().AppendEmpty()
|
||||||
gaugeDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
|
gaugeDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(ts))
|
||||||
gaugeDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
|
gaugeDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
|
||||||
gaugeDataPoint.SetDoubleValue(10.0)
|
gaugeDataPoint.SetDoubleValue(10.0)
|
||||||
gaugeDataPoint.Attributes().PutStr("foo.bar", "baz")
|
gaugeDataPoint.Attributes().PutStr("foo.bar", "baz")
|
||||||
@ -1222,7 +623,7 @@ func generateOTLPWriteRequest(timestamp, startTime time.Time) pmetricotlp.Export
|
|||||||
histogramMetric.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
|
histogramMetric.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
|
||||||
|
|
||||||
histogramDataPoint := histogramMetric.Histogram().DataPoints().AppendEmpty()
|
histogramDataPoint := histogramMetric.Histogram().DataPoints().AppendEmpty()
|
||||||
histogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
|
histogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(ts))
|
||||||
histogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
|
histogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
|
||||||
histogramDataPoint.ExplicitBounds().FromRaw([]float64{0.0, 1.0, 2.0, 3.0, 4.0, 5.0})
|
histogramDataPoint.ExplicitBounds().FromRaw([]float64{0.0, 1.0, 2.0, 3.0, 4.0, 5.0})
|
||||||
histogramDataPoint.BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2, 2})
|
histogramDataPoint.BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2, 2})
|
||||||
@@ -1239,7 +640,7 @@ func generateOTLPWriteRequest(timestamp, startTime time.Time) pmetricotlp.Export
 	exponentialHistogramMetric.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)

 	exponentialHistogramDataPoint := exponentialHistogramMetric.ExponentialHistogram().DataPoints().AppendEmpty()
-	exponentialHistogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
+	exponentialHistogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(ts))
 	exponentialHistogramDataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime))
 	exponentialHistogramDataPoint.SetScale(2.0)
 	exponentialHistogramDataPoint.Positive().BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2})
@@ -1292,9 +693,9 @@ func TestOTLPDelta(t *testing.T) {
 	}

 	want := []mockSample{
-		{t: milli(0), l: ls, v: 0}, // +0
-		{t: milli(1), l: ls, v: 1}, // +1
-		{t: milli(2), l: ls, v: 3}, // +2
+		{t: milli(0), l: ls, m: metadata.Metadata{Type: model.MetricTypeGauge}, v: 0}, // +0
+		{t: milli(1), l: ls, m: metadata.Metadata{Type: model.MetricTypeGauge}, v: 1}, // +1
+		{t: milli(2), l: ls, m: metadata.Metadata{Type: model.MetricTypeGauge}, v: 3}, // +2
 	}
 	if diff := cmp.Diff(want, appendable.samples, cmp.Exporter(func(reflect.Type) bool { return true })); diff != "" {
 		t.Fatal(diff)
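The `want` values are the running sums of the delta inputs: +0, +1, +2 accumulate to 0, 1, 3. A tiny standalone sketch of that accumulation (values illustrative, not taken from the test):

package main

import "fmt"

func main() {
	// deltatocumulative: each output sample is the running sum of the
	// delta inputs, so +0, +1, +2 becomes 0, 1, 3.
	sum := 0.0
	for _, d := range []float64{0, 1, 2} {
		sum += d
		fmt.Println(sum) // 0, then 1, then 3
	}
}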
@@ -1474,7 +875,7 @@ func BenchmarkOTLP(b *testing.B) {
 	var total int

 	// reqs is a [b.N]*http.Request, divided across the workers.
-	// deltatocumulative requires timestamps to be strictly in
+	// deltatocumulative requires tss to be strictly in
 	// order on a per-series basis. to ensure this, each reqs[k]
 	// contains samples of differently named series, sorted
 	// strictly in time order
@@ -1506,8 +907,8 @@ func BenchmarkOTLP(b *testing.B) {
 	}

 	log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
-	mock := new(mockAppendable)
-	appendable := syncAppendable{Appendable: mock, lock: new(sync.Mutex)}
+	mock := &mockAppendable{}
+	appendable := syncAppendable{AppendableV2: mock, lock: new(sync.Mutex)}
 	cfgfn := func() config.Config {
 		return config.Config{OTLPConfig: config.DefaultOTLPConfig}
 	}
@@ -1588,28 +989,22 @@ func sampleCount(md pmetric.Metrics) int {

 type syncAppendable struct {
 	lock sync.Locker
-	storage.Appendable
+	storage.AppendableV2
 }

 type syncAppender struct {
 	lock sync.Locker
-	storage.Appender
+	storage.AppenderV2
 }

-func (s syncAppendable) Appender(ctx context.Context) storage.Appender {
-	return syncAppender{Appender: s.Appendable.Appender(ctx), lock: s.lock}
+func (s syncAppendable) AppenderV2(ctx context.Context) storage.AppenderV2 {
+	return syncAppender{AppenderV2: s.AppendableV2.AppenderV2(ctx), lock: s.lock}
 }

-func (s syncAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
+func (s syncAppender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
 	s.lock.Lock()
 	defer s.lock.Unlock()
-	return s.Appender.Append(ref, l, t, v)
-}
-
-func (s syncAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, f *histogram.FloatHistogram) (storage.SeriesRef, error) {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-	return s.Appender.AppendHistogram(ref, l, t, h, f)
+	return s.AppenderV2.Append(ref, ls, st, t, v, h, fh, opts)
 }

 func TestWriteStorage_CanRegisterMetricsAfterClosing(t *testing.T) {
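For context on the change above: AppenderV2 folds the old Append and AppendHistogram pair into a single method. A minimal caller sketch against the signature shown in this diff; the metric name, timestamps, and histogram contents are illustrative assumptions, and it is assumed v is ignored when h is set:

package example

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendBoth appends a float sample and a native histogram sample through
// the same AppenderV2.Append call (previously two separate methods).
func appendBoth(app storage.AppenderV2) error {
	ls := labels.FromStrings("__name__", "http_requests_total")

	// Float sample: both histogram pointers stay nil.
	if _, err := app.Append(0, ls, 0, 1000, 42.0, nil, nil, storage.AOptions{}); err != nil {
		return err
	}

	// Native histogram sample: the data travels in h (v assumed unused here).
	h := &histogram.Histogram{Schema: 0, Count: 2, Sum: 3.5}
	if _, err := app.Append(0, ls, 0, 2000, 0, h, nil, storage.AOptions{}); err != nil {
		return err
	}
	return app.Commit()
}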
util/teststorage/appender.go (new file, 254 lines)
@@ -0,0 +1,254 @@
package teststorage

import (
	"bytes"
	"context"
	"fmt"
	"math"
	"strings"
	"sync"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/storage"
)

// Sample represents a combined test sample for mocking storage.AppenderV2.
type Sample struct {
	MF    string
	L     labels.Labels
	M     metadata.Metadata
	ST, T int64
	V     float64
	H     *histogram.Histogram
	FH    *histogram.FloatHistogram
	ES    []exemplar.Exemplar
}

func (s Sample) String() string {
	b := bytes.Buffer{}
	if s.M.Help != "" {
		_, _ = fmt.Fprintf(&b, "HELP %s\n", s.M.Help)
	}
	if s.M.Type != model.MetricTypeUnknown && s.M.Type != "" {
		_, _ = fmt.Fprintf(&b, "type@%s ", s.M.Type)
	}
	if s.M.Unit != "" {
		_, _ = fmt.Fprintf(&b, "unit@%s ", s.M.Unit)
	}
	h := ""
	if s.H != nil {
		h = s.H.String()
	}

	fh := ""
	if s.FH != nil {
		fh = s.FH.String()
	}
	_, _ = fmt.Fprintf(&b, "%s %v%v%v st@%v t@%v\n", s.L.String(), s.V, h, fh, s.ST, s.T)
	return b.String()
}

func (s Sample) exemplarsEqual(other []exemplar.Exemplar) bool {
	if len(s.ES) != len(other) {
		return false
	}
	for i := range s.ES {
		if !s.ES[i].Equals(other[i]) {
			return false
		}
	}
	return true
}

func (s Sample) Equal(other Sample) bool {
	return s.MF == other.MF &&
		labels.Equal(s.L, other.L) &&
		s.M.Equals(other.M) &&
		s.ST == other.ST &&
		s.T == other.T &&
		math.Float64bits(s.V) == math.Float64bits(other.V) && // Compare Float64bits so NaN values which are exactly the same will compare equal.
		s.H.Equals(other.H) &&
		s.FH.Equals(other.FH) &&
		s.exemplarsEqual(other.ES)
}
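The Float64bits comparison matters because IEEE 754 NaN never compares equal to itself in Go; a small standalone illustration:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(nan == nan)                                     // false: NaN != NaN under IEEE 754.
	fmt.Println(math.Float64bits(nan) == math.Float64bits(nan)) // true: identical bit patterns match.
}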

// Appender is a storage.AppenderV2 mock.
// It allows:
// * recording all samples that were added through the appender,
// * optionally writing samples through another appender (Next),
// * optionally running another appender before result recording, e.g. to simulate chained validation (Prev).
// TODO(bwplotka): Move to storage/interface/mock or something?
type Appender struct {
	Prev storage.AppendableV2 // Optional appender to run before the result collection.
	Next storage.AppendableV2 // Optional appender to run after results are collected (e.g. TestStorage).

	AppendErr               error // Inject appender error on every Append run.
	AppendAllExemplarsError error // Inject storage.AppendPartialError for all exemplars.
	CommitErr               error // Inject commit error.

	mtx sync.Mutex // Guards result writes and the ResultSamplesGreaterThan read.

	// Recorded results.
	PendingSamples    []Sample
	ResultSamples     []Sample
	RolledbackSamples []Sample
}

func (a *Appender) ResultReset() {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	a.PendingSamples = a.PendingSamples[:0]
	a.ResultSamples = a.ResultSamples[:0]
	a.RolledbackSamples = a.RolledbackSamples[:0]
}

func (a *Appender) ResultSamplesGreaterThan(than int) bool {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	return len(a.ResultSamples) > than
}

// ResultMetadata returns ResultSamples with samples only containing L and M.
// This is for compatibility with old tests that only focus on metadata.
//
// Deprecated: Rewrite tests to check metadata on ResultSamples instead.
func (a *Appender) ResultMetadata() []Sample {
	var ret []Sample
	for _, s := range a.ResultSamples {
		ret = append(ret, Sample{L: s.L, M: s.M})
	}
	return ret
}

func (a *Appender) String() string {
	var sb strings.Builder
	sb.WriteString("committed:\n")
	for _, s := range a.ResultSamples {
		sb.WriteString("\n")
		sb.WriteString(s.String())
	}
	sb.WriteString("pending:\n")
	for _, s := range a.PendingSamples {
		sb.WriteString("\n")
		sb.WriteString(s.String())
	}
	sb.WriteString("rolledback:\n")
	for _, s := range a.RolledbackSamples {
		sb.WriteString("\n")
		sb.WriteString(s.String())
	}
	return sb.String()
}

func NewAppender() *Appender {
	return &Appender{}
}

type appender struct {
	prev storage.AppenderV2
	next storage.AppenderV2

	*Appender
}

func (a *Appender) AppenderV2(ctx context.Context) storage.AppenderV2 {
	ret := &appender{Appender: a}
	if a.Prev != nil {
		ret.prev = a.Prev.AppenderV2(ctx)
	}
	if a.Next != nil {
		ret.next = a.Next.AppenderV2(ctx)
	}
	return ret
}
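A minimal usage sketch for this mock, written against the API introduced in this file; the test name and sample values are illustrative assumptions:

package teststorage_test

import (
	"context"
	"testing"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/teststorage"
)

func TestAppenderMockSketch(t *testing.T) {
	mock := teststorage.NewAppender()

	app := mock.AppenderV2(context.Background())
	if _, err := app.Append(0, labels.FromStrings("__name__", "up"), 0, 1000, 1.0, nil, nil, storage.AOptions{}); err != nil {
		t.Fatal(err)
	}
	if err := app.Commit(); err != nil {
		t.Fatal(err)
	}
	// Commit moves PendingSamples into ResultSamples; Rollback would move
	// them into RolledbackSamples instead.
	if len(mock.ResultSamples) != 1 {
		t.Fatalf("expected 1 committed sample, got %d", len(mock.ResultSamples))
	}
}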

func (a *appender) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
	if a.prev != nil {
		if _, err := a.prev.Append(ref, ls, st, t, v, h, fh, opts); err != nil {
			return 0, err
		}
	}

	if a.AppendErr != nil {
		return 0, a.AppendErr
	}

	a.mtx.Lock()
	a.PendingSamples = append(a.PendingSamples, Sample{
		MF: opts.MetricFamilyName,
		M:  opts.Metadata,
		L:  ls,
		ST: st, T: t,
		V: v, H: h, FH: fh,
		ES: opts.Exemplars,
	})
	a.mtx.Unlock()

	var err error
	if a.AppendAllExemplarsError != nil {
		var exErrs []error
		for range opts.Exemplars {
			exErrs = append(exErrs, a.AppendAllExemplarsError)
		}
		if len(exErrs) > 0 {
			err = &storage.AppendPartialError{ExemplarErrors: exErrs}
		}
		if ref == 0 {
			// Use labels hash as a stand-in for unique series reference, to avoid having to track all series.
			ref = storage.SeriesRef(ls.Hash())
		}
		return ref, err
	}

	if a.next != nil {
		return a.next.Append(ref, ls, st, t, v, h, fh, opts)
	}

	if ref == 0 {
		// Use labels hash as a stand-in for unique series reference, to avoid having to track all series.
		ref = storage.SeriesRef(ls.Hash())
	}
	return ref, nil
}
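The AppendAllExemplarsError hook surfaces as a storage.AppendPartialError carrying one error per exemplar passed in AOptions. A hypothetical injection sketch; the error text and values are illustrative, and it assumes *storage.AppendPartialError satisfies error for errors.As:

package teststorage_test

import (
	"context"
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/teststorage"
)

func ExampleAppender_exemplarErrors() {
	mock := teststorage.NewAppender()
	mock.AppendAllExemplarsError = errors.New("exemplar rejected")

	app := mock.AppenderV2(context.Background())
	_, err := app.Append(0, labels.FromStrings("__name__", "up"), 0, 1000, 1.0, nil, nil, storage.AOptions{
		Exemplars: []exemplar.Exemplar{{Value: 1, Ts: 1000, HasTs: true}},
	})

	var partial *storage.AppendPartialError
	if errors.As(err, &partial) {
		fmt.Println(len(partial.ExemplarErrors)) // One injected error per exemplar.
	}
	// Output: 1
}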

func (a *appender) Commit() error {
	if a.prev != nil {
		if err := a.prev.Commit(); err != nil {
			return err
		}
	}

	if a.CommitErr != nil {
		return a.CommitErr
	}

	a.mtx.Lock()
	a.ResultSamples = append(a.ResultSamples, a.PendingSamples...)
	a.PendingSamples = a.PendingSamples[:0]
	a.mtx.Unlock()

	if a.next != nil {
		return a.next.Commit()
	}
	return nil
}

func (a *appender) Rollback() error {
	if a.prev != nil {
		if err := a.prev.Rollback(); err != nil {
			return err
		}
	}

	a.mtx.Lock()
	a.RolledbackSamples = append(a.RolledbackSamples, a.PendingSamples...)
	a.PendingSamples = a.PendingSamples[:0]
	a.mtx.Unlock()

	if a.next != nil {
		return a.next.Rollback()
	}
	return nil
}
util/teststorage/appender_test.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package teststorage

import (
	"fmt"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/util/testutil"
)

func TestSample_RequireEqual(t *testing.T) {
	a := []Sample{
		{},
		{L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
		{L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123},
		{ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
	}
	testutil.RequireEqual(t, a, a)

	b1 := []Sample{
		{},
		{L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
		{L: labels.FromStrings("__name__", "test_metric2_diff", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123}, // test_metric2_diff is different.
		{ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
	}
	requireNotEqual(t, a, b1)
}

func requireNotEqual(t testing.TB, a, b any) {
	t.Helper()
	if !cmp.Equal(a, b, cmp.Comparer(labels.Equal)) {
		return
	}
	require.Fail(t, fmt.Sprintf("Equal, but expected not equal:\n"+
		"a: %s\n"+
		"b: %s", a, b))
}
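requireNotEqual passes cmp.Comparer(labels.Equal) because labels.Labels may carry unexported, build-tag-dependent representation that go-cmp cannot inspect by default; a small sketch of the same pattern (values illustrative):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	a := labels.FromStrings("job", "api")
	b := labels.FromStrings("job", "api")
	// The comparer makes cmp compare label sets by content rather than by
	// their internal (possibly unexported) representation.
	fmt.Println(cmp.Equal(a, b, cmp.Comparer(labels.Equal))) // true
}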
@@ -21,9 +21,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/stretchr/testify/require"

-	"github.com/prometheus/prometheus/model/exemplar"
-	"github.com/prometheus/prometheus/model/labels"
-	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/util/testutil"
 )
@@ -84,15 +81,3 @@ func (s TestStorage) Close() error {
 	}
 	return os.RemoveAll(s.dir)
 }
-
-func (s TestStorage) ExemplarAppender() storage.ExemplarAppender {
-	return s
-}
-
-func (s TestStorage) ExemplarQueryable() storage.ExemplarQueryable {
-	return s.exemplarStorage
-}
-
-func (s TestStorage) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
-	return ref, s.exemplarStorage.AddExemplar(l, e)
-}
@@ -261,7 +261,7 @@ type API struct {
 func NewAPI(
 	qe promql.QueryEngine,
 	q storage.SampleAndChunkQueryable,
-	ap storage.Appendable,
+	ap storage.AppendableV2,
 	eq storage.ExemplarQueryable,
 	spsr func(context.Context) ScrapePoolsRetriever,
 	tr func(context.Context) TargetRetriever,
@@ -290,10 +290,8 @@ func NewAPI(
 	rwEnabled bool,
 	acceptRemoteWriteProtoMsgs remoteapi.MessageTypes,
 	otlpEnabled, otlpDeltaToCumulative, otlpNativeDeltaIngestion bool,
-	stZeroIngestionEnabled bool,
 	lookbackDelta time.Duration,
 	enableTypeAndUnitLabels bool,
-	appendMetadata bool,
 	overrideErrorCode OverrideErrorCode,
 ) *API {
 	a := &API{
@@ -339,16 +337,14 @@ func NewAPI(
 	}

 	if rwEnabled {
-		a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, stZeroIngestionEnabled, enableTypeAndUnitLabels, appendMetadata)
+		a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, enableTypeAndUnitLabels)
 	}
 	if otlpEnabled {
 		a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{
 			ConvertDelta:            otlpDeltaToCumulative,
 			NativeDelta:             otlpNativeDeltaIngestion,
 			LookbackDelta:           lookbackDelta,
-			IngestSTZeroSample:      stZeroIngestionEnabled,
 			EnableTypeAndUnitLabels: enableTypeAndUnitLabels,
-			AppendMetadata:          appendMetadata,
 		})
 	}
