Phase out native histogram feature flag
The detailed plan for this is laid out in https://github.com/prometheus/prometheus/issues/16572 .

This commit adds a global and local scrape config option `scrape_native_histograms`, which has to be set to true to ingest native histograms. To ease the transition, the feature flag is changed to simply set the default of `scrape_native_histograms` to true.

Further implications:

- The default scrape protocols now depend on the `scrape_native_histograms` setting.
- Everywhere else, histograms are now "on by default".

Documentation beyond that for the feature flag and the scrape config is deliberately left out. See https://github.com/prometheus/prometheus/pull/17232 for that.

Signed-off-by: beorn7 <beorn@grafana.com>
Parent: 72960c076d
Commit: ad7d1aed99
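For orientation, a minimal configuration sketch of the option this commit introduces; the job names and targets below are placeholders, not taken from the commit:

```yaml
global:
  # Ingest native histograms for all scrape jobs by default.
  scrape_native_histograms: true

scrape_configs:
  - job_name: example               # hypothetical job
    static_configs:
      - targets: ['localhost:9100']
  - job_name: classic-only          # hypothetical job
    # Per-job override: ignore the native parts of histograms here.
    scrape_native_histograms: false
```

With `scrape_native_histograms` enabled and `scrape_protocols` left unset, the negotiation order defaults to the protobuf-first list, which is what the old feature flag used to configure.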
@@ -254,12 +254,11 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
parser.ExperimentalDurationExpr = true
logger.Info("Experimental duration expression parsing enabled.")
case "native-histograms":
c.tsdb.EnableNativeHistograms = true
c.scrape.EnableNativeHistogramsIngestion = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
logger.Info("Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
t := true
config.DefaultConfig.GlobalConfig.ScrapeNativeHistograms = &t
config.DefaultGlobalConfig.ScrapeNativeHistograms = &t
logger.Warn("This option for --enable-feature is being phased out. It currently changes the default for the scrape_native_histograms scrape config setting to true, but will become a no-op in v3.9+. Stop using this option and set scrape_native_histograms in the scrape config instead.", "option", o)
case "ooo-native-histograms":
logger.Warn("This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
case "created-timestamp-zero-ingestion":

@@ -1875,7 +1874,6 @@ type tsdbOptions struct {
EnableExemplarStorage bool
MaxExemplars int64
EnableMemorySnapshotOnShutdown bool
EnableNativeHistograms bool
EnableDelayedCompaction bool
CompactionDelayMaxPercent int
EnableOverlappingCompaction bool

@@ -1898,7 +1896,6 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
EnableExemplarStorage: opts.EnableExemplarStorage,
MaxExemplars: opts.MaxExemplars,
EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
EnableNativeHistograms: opts.EnableNativeHistograms,
OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow,
EnableDelayedCompaction: opts.EnableDelayedCompaction,
CompactionDelayMaxPercent: opts.CompactionDelayMaxPercent,
@@ -157,15 +157,22 @@ var (
OTLPConfig: DefaultOTLPConfig,
}

f bool
// DefaultGlobalConfig is the default global configuration.
DefaultGlobalConfig = GlobalConfig{
ScrapeInterval: model.Duration(1 * time.Minute),
ScrapeTimeout: model.Duration(10 * time.Second),
EvaluationInterval: model.Duration(1 * time.Minute),
RuleQueryOffset: model.Duration(0 * time.Minute),
// When native histogram feature flag is enabled, ScrapeProtocols default
// changes to DefaultNativeHistogramScrapeProtocols.
ScrapeProtocols: DefaultScrapeProtocols,
// This is nil to be able to distinguish between the case when
// the normal default should be used and the case when a
// new default is needed due to an enabled feature flag.
// E.g. set to `DefaultProtoFirstScrapeProtocols` when
// the feature flag `created-timestamp-zero-ingestion` is set.
ScrapeProtocols: nil,
// When the native histogram feature flag is enabled,
// ScrapeNativeHistograms default changes to true.
ScrapeNativeHistograms: &f,
ConvertClassicHistogramsToNHCB: false,
AlwaysScrapeClassicHistograms: false,
MetricNameValidationScheme: model.UTF8Validation,
@@ -455,7 +462,7 @@ type GlobalConfig struct {
// The protocols to negotiate during a scrape. It tells clients what
// protocol are accepted by Prometheus and with what weight (most wanted is first).
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
// OpenMetricsText1.0.0, PrometheusText0.0.4.
// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4
ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
// How frequently to evaluate rules by default.
EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`

@@ -495,6 +502,8 @@ type GlobalConfig struct {
// blank in config files but must have a value if a ScrapeConfig is created
// programmatically.
MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"`
// Whether to scrape native histograms.
ScrapeNativeHistograms *bool `yaml:"scrape_native_histograms,omitempty"`
// Whether to convert all scraped classic histograms into native histograms with custom buckets.
ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
@@ -635,12 +644,26 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error {
if gc.EvaluationInterval == 0 {
gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval
}

if gc.ScrapeProtocols == nil {
gc.ScrapeProtocols = DefaultGlobalConfig.ScrapeProtocols
if gc.ScrapeNativeHistograms == nil {
gc.ScrapeNativeHistograms = DefaultGlobalConfig.ScrapeNativeHistograms
}
if err := validateAcceptScrapeProtocols(gc.ScrapeProtocols); err != nil {
return fmt.Errorf("%w for global config", err)
if gc.ScrapeProtocols == nil {
if DefaultGlobalConfig.ScrapeProtocols != nil {
// This is the case where the defaults are set due to a feature flag.
// E.g. if the created-timestamp-zero-ingestion feature flag is
// used.
gc.ScrapeProtocols = DefaultGlobalConfig.ScrapeProtocols
}
// Otherwise, we leave ScrapeProtocols at nil for now. In the
// per-job scrape config, we have to recognize the unset case to
// correctly set the default depending on the local value of
// ScrapeNativeHistograms.
}
if gc.ScrapeProtocols != nil {
// Only validate if not-nil at this point.
if err := validateAcceptScrapeProtocols(gc.ScrapeProtocols); err != nil {
return fmt.Errorf("%w for global config", err)
}
}

*c = *gc
@@ -657,6 +680,7 @@ func (c *GlobalConfig) isZero() bool {
c.QueryLogFile == "" &&
c.ScrapeFailureLogFile == "" &&
c.ScrapeProtocols == nil &&
c.ScrapeNativeHistograms == nil &&
!c.ConvertClassicHistogramsToNHCB &&
!c.AlwaysScrapeClassicHistograms
}
@@ -719,6 +743,8 @@ type ScrapeConfig struct {
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"`
// Whether to scrape native histograms.
ScrapeNativeHistograms *bool `yaml:"scrape_native_histograms,omitempty"`
// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
AlwaysScrapeClassicHistograms *bool `yaml:"always_scrape_classic_histograms,omitempty"`
// Whether to convert all scraped classic histograms into a native histogram with custom buckets.
@@ -863,9 +889,23 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
if c.ScrapeFailureLogFile == "" {
c.ScrapeFailureLogFile = globalConfig.ScrapeFailureLogFile
}
if c.ScrapeNativeHistograms == nil {
c.ScrapeNativeHistograms = globalConfig.ScrapeNativeHistograms
}

if c.ScrapeProtocols == nil {
c.ScrapeProtocols = globalConfig.ScrapeProtocols
switch {
case globalConfig.ScrapeProtocols != nil:
// global ScrapeProtocols either set explicitly or via a
// default triggered by a feature flag. This overrides
// the selection based on locally active scraping of
// native histograms.
c.ScrapeProtocols = globalConfig.ScrapeProtocols
case c.ScrapeNativeHistogramsEnabled():
c.ScrapeProtocols = DefaultProtoFirstScrapeProtocols
default:
c.ScrapeProtocols = DefaultScrapeProtocols
}
}
if err := validateAcceptScrapeProtocols(c.ScrapeProtocols); err != nil {
return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
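To illustrate the selection logic above, a hedged configuration sketch (job names and targets are placeholders): with no `scrape_protocols` set anywhere, each job's protocol order follows its effective `scrape_native_histograms` value.

```yaml
scrape_configs:
  # Effective scrape_native_histograms is false: classic text-first default order.
  - job_name: text-only                # hypothetical job
    static_configs:
      - targets: ['localhost:9100']

  # scrape_native_histograms is true and scrape_protocols unset:
  # protobuf-first order, so native histograms can actually be transferred.
  - job_name: native                   # hypothetical job
    scrape_native_histograms: true
    static_configs:
      - targets: ['localhost:9101']
```

An explicit `scrape_protocols` list, either global or per job, takes precedence over this automatic choice.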
@@ -985,6 +1025,11 @@ func ToEscapingScheme(s string, v model.ValidationScheme) (model.EscapingScheme,
return model.ToEscapingScheme(s)
}

// ScrapeNativeHistogramsEnabled returns whether to scrape native histograms.
func (c *ScrapeConfig) ScrapeNativeHistogramsEnabled() bool {
return c.ScrapeNativeHistograms != nil && *c.ScrapeNativeHistograms
}

// ConvertClassicHistogramsToNHCBEnabled returns whether to convert classic histograms to NHCB.
func (c *ScrapeConfig) ConvertClassicHistogramsToNHCBEnabled() bool {
return c.ConvertClassicHistogramsToNHCB != nil && *c.ConvertClassicHistogramsToNHCB
@ -105,7 +105,7 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: false,
|
||||
ConvertClassicHistogramsToNHCB: false,
|
||||
MetricNameValidationScheme: model.UTF8Validation,
|
||||
@ -225,11 +225,12 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFallbackProtocol: PrometheusText0_0_4,
|
||||
ScrapeFailureLogFile: "testdata/fail_prom.log",
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -353,6 +354,7 @@ var expectedConf = &Config{
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -458,10 +460,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -519,10 +522,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -557,10 +561,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -601,10 +606,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -645,10 +651,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -679,10 +686,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -721,10 +729,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -760,10 +769,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -806,10 +816,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -842,10 +853,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -881,10 +893,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -913,10 +926,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -948,10 +962,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -983,10 +998,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1018,10 +1034,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1050,10 +1067,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1090,10 +1108,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1129,10 +1148,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1165,10 +1185,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1200,10 +1221,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1239,10 +1261,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1281,10 +1304,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultProtoFirstScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(true),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1343,10 +1367,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1375,10 +1400,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1418,10 +1444,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1467,10 +1494,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1506,10 +1534,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1546,10 +1575,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1581,10 +1611,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@ -1618,10 +1649,11 @@ var expectedConf = &Config{
|
||||
LabelLimit: globLabelLimit,
|
||||
LabelNameLengthLimit: globLabelNameLengthLimit,
|
||||
LabelValueLengthLimit: globLabelValueLengthLimit,
|
||||
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
|
||||
ScrapeProtocols: DefaultScrapeProtocols,
|
||||
ScrapeFailureLogFile: globScrapeFailureLogFile,
|
||||
MetricNameValidationScheme: DefaultGlobalConfig.MetricNameValidationScheme,
|
||||
MetricNameEscapingScheme: DefaultGlobalConfig.MetricNameEscapingScheme,
|
||||
ScrapeNativeHistograms: boolPtr(false),
|
||||
AlwaysScrapeClassicHistograms: boolPtr(false),
|
||||
ConvertClassicHistogramsToNHCB: boolPtr(false),
|
||||
|
||||
@@ -2595,6 +2627,8 @@ type ScrapeConfigOptions struct {
JobName string
ScrapeInterval model.Duration
ScrapeTimeout model.Duration
ScrapeProtocols []ScrapeProtocol // Set to DefaultScrapeProtocols by default.
ScrapeNativeHistograms bool
AlwaysScrapeClassicHistograms bool
ConvertClassicHistToNHCB bool
}
@@ -2602,12 +2636,12 @@ type ScrapeConfigOptions struct {
func TestGetScrapeConfigs(t *testing.T) {
// Helper function to create a scrape config with the given options.
sc := func(opts ScrapeConfigOptions) *ScrapeConfig {
return &ScrapeConfig{
sc := ScrapeConfig{
JobName: opts.JobName,
HonorTimestamps: true,
ScrapeInterval: opts.ScrapeInterval,
ScrapeTimeout: opts.ScrapeTimeout,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeProtocols: opts.ScrapeProtocols,
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,

@@ -2627,9 +2661,14 @@ func TestGetScrapeConfigs(t *testing.T) {
},
},
},
ScrapeNativeHistograms: boolPtr(opts.ScrapeNativeHistograms),
AlwaysScrapeClassicHistograms: boolPtr(opts.AlwaysScrapeClassicHistograms),
ConvertClassicHistogramsToNHCB: boolPtr(opts.ConvertClassicHistToNHCB),
}
if opts.ScrapeProtocols == nil {
sc.ScrapeProtocols = DefaultScrapeProtocols
}
return &sc
}

testCases := []struct {
@@ -2639,22 +2678,57 @@ func TestGetScrapeConfigs(t *testing.T) {
expectedError string
}{
{
name: "An included config file should be a valid global config.",
configFile: "testdata/scrape_config_files.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})},
name: "An included config file should be a valid global config.",
configFile: "testdata/scrape_config_files.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{
JobName: "prometheus",
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeTimeout: model.Duration(10 * time.Second),
ScrapeNativeHistograms: false,
AlwaysScrapeClassicHistograms: false,
ConvertClassicHistToNHCB: false,
})},
},
{
name: "A global config that only include a scrape config file.",
configFile: "testdata/scrape_config_files_only.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})},
name: "A global config that only include a scrape config file.",
configFile: "testdata/scrape_config_files_only.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{
JobName: "prometheus",
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeTimeout: model.Duration(10 * time.Second),
ScrapeNativeHistograms: false,
AlwaysScrapeClassicHistograms: false,
ConvertClassicHistToNHCB: false,
})},
},
{
name: "A global config that combine scrape config files and scrape configs.",
configFile: "testdata/scrape_config_files_combined.good.yml",
expectedResult: []*ScrapeConfig{
sc(ScrapeConfigOptions{JobName: "node", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}),
sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}),
sc(ScrapeConfigOptions{JobName: "alertmanager", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}),
sc(ScrapeConfigOptions{
JobName: "node",
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeTimeout: model.Duration(10 * time.Second),
ScrapeNativeHistograms: false,
AlwaysScrapeClassicHistograms: false,
ConvertClassicHistToNHCB: false,
}),
sc(ScrapeConfigOptions{
JobName: "prometheus",
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeTimeout: model.Duration(10 * time.Second),
ScrapeNativeHistograms: false,
AlwaysScrapeClassicHistograms: false,
ConvertClassicHistToNHCB: false,
}),
sc(ScrapeConfigOptions{
JobName: "alertmanager",
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeTimeout: model.Duration(10 * time.Second),
ScrapeNativeHistograms: false,
AlwaysScrapeClassicHistograms: false,
ConvertClassicHistToNHCB: false,
}),
},
},
{
@@ -2667,9 +2741,10 @@ func TestGetScrapeConfigs(t *testing.T) {
HonorTimestamps: true,
ScrapeInterval: model.Duration(60 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeProtocols: DefaultScrapeProtocols,
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

@@ -2704,9 +2779,10 @@ func TestGetScrapeConfigs(t *testing.T) {
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
ScrapeProtocols: DefaultScrapeProtocols,
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
@@ -2791,6 +2867,36 @@ func TestGetScrapeConfigs(t *testing.T) {
configFile: "testdata/local_disable_always_scrape_classic_hist.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})},
},
{
name: "A global config that enables scrape native histograms",
configFile: "testdata/global_enable_scrape_native_hist.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: true, ScrapeProtocols: DefaultProtoFirstScrapeProtocols})},
},
{
name: "A global config that enables scrape native histograms and sets scrape protocols explicitly",
configFile: "testdata/global_enable_scrape_native_hist_and_scrape_protocols.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: true, ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}})},
},
{
name: "A local config that enables scrape native histograms",
configFile: "testdata/local_enable_scrape_native_hist.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: true, ScrapeProtocols: DefaultProtoFirstScrapeProtocols})},
},
{
name: "A local config that enables scrape native histograms and sets scrape protocols explicitly",
configFile: "testdata/local_enable_scrape_native_hist_and_scrape_protocols.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: true, ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}})},
},
{
name: "A global config that enables scrape native histograms and scrape config that disables it",
configFile: "testdata/local_disable_scrape_native_hist.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: false, ScrapeProtocols: DefaultScrapeProtocols})},
},
{
name: "A global config that enables scrape native histograms and scrape protocols and scrape config that disables scrape native histograms but does not change scrape protocols",
configFile: "testdata/global_scrape_protocols_and_local_disable_scrape_native_hist.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: false, ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}})},
},
}

for _, tc := range testCases {
config/testdata/conf.good.yml
@@ -369,6 +369,7 @@ scrape_configs:
      key_file: valid_key_file

  - job_name: hetzner
    scrape_native_histograms: true
    relabel_configs:
      - action: uppercase
        source_labels: [instance]
config/testdata/global_enable_scrape_native_hist.good.yml (new file)
@@ -0,0 +1,6 @@
global:
  scrape_native_histograms: true
scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['localhost:8080']

config/testdata/global_enable_scrape_native_hist_and_scrape_protocols.good.yml (new file)
@@ -0,0 +1,7 @@
global:
  scrape_native_histograms: true
  scrape_protocols: ['PrometheusText0.0.4']
scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['localhost:8080']

config/testdata/global_scrape_protocols_and_local_disable_scrape_native_hist.good.yml (new file)
@@ -0,0 +1,8 @@
global:
  scrape_native_histograms: true
  scrape_protocols: ['PrometheusText0.0.4']
scrape_configs:
  - job_name: prometheus
    scrape_native_histograms: false
    static_configs:
      - targets: ['localhost:8080']

config/testdata/local_disable_scrape_native_hist.good.yml (new file)
@@ -0,0 +1,7 @@
global:
  scrape_native_histograms: true
scrape_configs:
  - job_name: prometheus
    scrape_native_histograms: false
    static_configs:
      - targets: ['localhost:8080']

config/testdata/local_enable_scrape_native_hist.good.yml (new file)
@@ -0,0 +1,6 @@
global:
scrape_configs:
  - job_name: prometheus
    scrape_native_histograms: true
    static_configs:
      - targets: ['localhost:8080']

config/testdata/local_enable_scrape_native_hist_and_scrape_protocols.good.yml (new file)
@@ -0,0 +1,7 @@
global:
scrape_configs:
  - job_name: prometheus
    scrape_native_histograms: true
    scrape_protocols: ['PrometheusText0.0.4']
    static_configs:
      - targets: ['localhost:8080']
@@ -62,10 +62,15 @@ global:

# The protocols to negotiate during a scrape with the client.
# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
# OpenMetricsText1.0.0, PrometheusText0.0.4.
# The default value changes to [ PrometheusProto, OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4 ]
# when native_histogram feature flag is set.
[ scrape_protocols: [<string>, ...] | default = [ OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4 ] ]
# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
# If left unset both here and in an individual scrape config, the
# negotiation order used in that scrape config depends on the effective
# value of scrape_native_histograms for that scrape config.
# If scrape_native_histograms is false, the order is
# [ OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText1.0.0, PrometheusText0.0.4 ].
# If scrape_native_histograms is true, the order is
# [ PrometheusProto, OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText1.0.0, PrometheusText0.0.4 ].
[ scrape_protocols: [<string>, ...] ]

# How frequently to evaluate rules.
[ evaluation_interval: <duration> | default = 1m ]
@@ -138,14 +143,57 @@ global:
# and underscores.
[ metric_name_validation_scheme: <string> | default "utf8" ]

# Specifies whether to convert all scraped classic histograms into native
# histograms with custom buckets.
[ convert_classic_histograms_to_nhcb: <bool> | default = false]
# If true, native histograms exposed by a target are recognized during
# scraping and ingested as such. If false, any native parts of histograms
# are ignored and only the classic parts are recognized (possibly as
# a classic histogram with only the +Inf bucket if no explicit classic
# buckets are part of the histogram).
[ scrape_native_histograms: <bool> | default = false ]

# Specifies whether to scrape a classic histogram, even if it is also exposed as a native
# histogram (has no effect without --enable-feature=native-histograms).
# Specifies whether to convert scraped classic histograms into native
# histograms with custom buckets.
[ convert_classic_histograms_to_nhcb: <bool> | default = false ]

# Specifies whether to additionally scrape the classic parts of a histogram,
# even if it is also exposed with native parts or it is converted into a
# native histogram with custom buckets.
[ always_scrape_classic_histograms: <boolean> | default = false ]

# The following explains the various combinations of the last three options
# in various exposition cases.
#
# CASE 1: A histogram is solely exposed as a classic histogram. (Note that
# this also applies if the used scrape protocol (also see the
# scrape_protocols setting) does not support native histograms.) In this
# case, the scrape_native_histograms setting has no effect. If
# convert_classic_histograms_to_nhcb is false, the histogram is ingested as
# a classic histogram. If convert_classic_histograms_to_nhcb is true, the
# histogram is converted to an NHCB. In this case,
# always_scrape_classic_histograms determines whether it is also ingested
# as a classic histogram or not.
#
# CASE 2: A histogram is solely exposed as a native histogram, i.e. it has
# no classic buckets except the optional +Inf bucket but it is marked as a
# native histogram (by some "native parts", at the very least by a no-op
# span). If scrape_native_histograms is false, this case is handled like case
# 1, but the resulting classic histogram or NHCB only has a sole bucket, the
# +Inf bucket. If scrape_native_histograms is true, however, the histogram is
# recognized as a pure native histogram and ingested as such. There will be
# no classic histogram ingested, no matter what
# always_scrape_classic_histograms is set to, and there will be no
# conversion to an NHCB, no matter what convert_classic_histograms_to_nhcb
# is set to.
#
# CASE 3: A histogram is exposed as both a native and a classic histogram,
# i.e. it has "native parts" (at the very least a no-op span) and it has at
# least one classic bucket that is not the +Inf bucket. If
# scrape_native_histograms is false, this case is handled like case 1. The
# native parts are ignored, and there will be either a classic histogram, an
# NHCB, or both. If scrape_native_histograms is true, the histogram is
# ingested as a native histogram. There will be no NHCB, no matter what
# convert_classic_histograms_to_nhcb is set to (it would collide with the
# actual native histogram). However, there will be a classic histogram if (and
# only if) always_scrape_classic_histograms is set to true.

runtime:
# Configure the Go garbage collector GOGC parameter
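To make the three cases above concrete, a hedged example of the global settings (not part of the diff; values are illustrative):

```yaml
global:
  # Histograms exposed with native parts are ingested as native histograms
  # (cases 2 and 3 above).
  scrape_native_histograms: true
  # Purely classic histograms (case 1) are converted to NHCB ...
  convert_classic_histograms_to_nhcb: true
  # ... and their classic series are kept alongside the conversion.
  always_scrape_classic_histograms: true
```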
@@ -263,7 +311,12 @@ job_name: <job_name>
# The protocols to negotiate during a scrape with the client.
# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
[ scrape_protocols: [<string>, ...] | default = <global_config.scrape_protocols> ]
# If not set in the global config, the default value depends on the
# setting of scrape_native_histograms. If false, it is
# [ OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText1.0.0, PrometheusText0.0.4 ].
# If true, it is
# [ PrometheusProto, OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText1.0.0, PrometheusText0.0.4 ].
[ scrape_protocols: [<string>, ...] | default = <dynamic> ]

# Fallback protocol to use if a scrape returns blank, unparsable, or otherwise
# invalid Content-Type.

@@ -271,11 +324,6 @@ job_name: <job_name>
# OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
[ fallback_scrape_protocol: <string> ]

# Whether to scrape a classic histogram, even if it is also exposed as a native
# histogram (has no effect without --enable-feature=native-histograms).
[ always_scrape_classic_histograms: <boolean> |
default = <global.always_scrape_classic_histograms> ]

# The HTTP resource path on which to fetch metrics from targets.
[ metrics_path: <path> | default = /metrics ]
@@ -570,10 +618,25 @@ metric_relabel_configs:
# schema 8, but might change in the future).
[ native_histogram_min_bucket_factor: <float> | default = 0 ]

# If true, native histograms exposed by a target are recognized during
# scraping and ingested as such. If false, any native parts of histograms
# are ignored and only the classic parts are recognized (possibly as
# a classic histogram with only the +Inf bucket if no explicit classic
# buckets are part of the histogram).
[ scrape_native_histograms: <bool> | default = <global.scrape_native_histograms> ]

# Specifies whether to convert classic histograms into native histograms with
# custom buckets (has no effect without --enable-feature=native-histograms).
[ convert_classic_histograms_to_nhcb: <bool> | default =
<global.convert_classic_histograms_to_nhcb>]
# custom buckets.
[ convert_classic_histograms_to_nhcb: <bool> | default = <global.convert_classic_histograms_to_nhcb>]

# Specifies whether to additionally scrape the classic parts of a histogram,
# even if it is also exposed with native parts or it is converted into a
# native histogram with custom buckets.
[ always_scrape_classic_histograms: <boolean> | default = <global.always_scrape_classic_histograms> ]

# See global configuration above for further explanations of how the last three
# options combine their effects.

```

Where `<job_name>` must be unique across all scrape configurations.
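A hedged per-job sketch of how these job-level options interact with the global ones (the job and target are placeholders):

```yaml
scrape_configs:
  - job_name: app                    # hypothetical job
    # Overrides global.scrape_native_histograms for this job only.
    scrape_native_histograms: true
    # scrape_protocols left unset: with native histogram scraping enabled and
    # no global scrape_protocols, this job negotiates the protobuf-first order.
    static_configs:
      - targets: ['localhost:9102']
```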
@@ -49,25 +49,15 @@ computed at all.

`--enable-feature=native-histograms`

When enabled, Prometheus will ingest native histograms (formerly also known as
sparse histograms or high-res histograms). Native histograms are still highly
experimental. Expect breaking changes to happen (including those rendering the
TSDB unreadable).
_This feature flag is being phased out. You should not use it anymore._

Native histograms are currently only supported in the traditional Prometheus
protobuf exposition format. This feature flag therefore also enables a new (and
also experimental) protobuf parser, through which _all_ metrics are ingested
(i.e. not only native histograms). Prometheus will try to negotiate the
protobuf format first. The instrumented target needs to support the protobuf
format, too, _and_ it needs to expose native histograms. The protobuf format
allows to expose classic and native histograms side by side. With this feature
flag disabled, Prometheus will continue to parse the classic histogram (albeit
via the text format). With this flag enabled, Prometheus will still ingest
those classic histograms that do not come with a corresponding native
histogram. However, if a native histogram is present, Prometheus will ignore
the corresponding classic histogram, with the notable exception of exemplars,
which are always ingested. To keep the classic histograms as well, enable
`always_scrape_classic_histograms` in the scrape job.
Native histograms are a stable feature by now. However, to scrape native
histograms, a scrape config setting `scrape_native_histograms` is required. To
ease the transition, this feature flag sets the default value of
`scrape_native_histograms` to `true`. From v3.9 on, this feature flag will be a
true no-op, and the default value of `scrape_native_histograms` will always be
`false`. If you are still using this feature flag while running v3.8, update
your scrape configs and stop using the feature flag before upgrading to v3.9.

## Experimental PromQL functions
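As a migration sketch (not part of the diff): the setting below is what the deprecated flag now defaults for you, so adding it explicitly lets you drop `--enable-feature=native-histograms`:

```yaml
global:
  scrape_native_histograms: true
```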
@ -83,7 +73,12 @@ entirely.
|
||||
|
||||
Enables ingestion of created timestamp. Created timestamps are injected as 0 valued samples when appropriate. See [PromCon talk](https://youtu.be/nWf0BfQ5EEA) for details.
|
||||
|
||||
Currently Prometheus supports created timestamps only on the traditional Prometheus Protobuf protocol (WIP for other protocols). As a result, when enabling this feature, the Prometheus protobuf scrape protocol will be prioritized (See `scrape_config.scrape_protocols` settings for more details).
|
||||
Currently Prometheus supports created timestamps only on the traditional
|
||||
Prometheus Protobuf protocol (WIP for other protocols). Therefore, enabling
|
||||
this feature pre-sets the global `scrape_protocols` configuration option to
|
||||
`[ PrometheusProto, OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4 ]`,
|
||||
resulting in negotiating the Prometheus Protobuf protocol with first priority
|
||||
(unless the `scrape_protocols` option is set to a different value explicitly).
|
||||
|
||||
Besides enabling this feature in Prometheus, created timestamps need to be exposed by the application being scraped.

@ -340,4 +335,4 @@ Example query:

For more details, see the [design doc](https://github.com/prometheus/proposals/blob/main/proposals/2025-04-04_extended-range-selectors-semantics.md).

**Note**: Extended Range Selectors are not supported for subqueries.
**Note**: Extended Range Selectors are not supported for subqueries.
@ -149,7 +149,7 @@ func benchParse(b *testing.B, data []byte, parser string) {
}
case "promproto":
newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
return NewProtobufParser(b, true, false, false, st)
return NewProtobufParser(b, false, true, false, false, st)
}
case "omtext":
newParserFn = func(b []byte, st *labels.SymbolTable) Parser {
@ -276,7 +276,7 @@ func BenchmarkCreatedTimestampPromProto(b *testing.B) {
data := createTestProtoBuf(b).Bytes()

st := labels.NewSymbolTable()
p := NewProtobufParser(data, true, false, false, st)
p := NewProtobufParser(data, false, true, false, false, st)

found := false
Inner:
@ -127,6 +127,17 @@ type ParserOptions struct {
// in the parsed metrics.
EnableTypeAndUnitLabels bool

// IgnoreNativeHistograms causes the parser to completely ignore all
// parts of native histograms, but to keep the ability to convert
// classic histograms to NHCB. This has the implication that even a
// histogram that has some native parts but not a single classic bucket
// will be parsed as a classic histogram (with only the +Inf bucket and
// count and sum). Setting this also allows converting a classic
// histogram that already has a native representation to an NHCB. This
// option has no effect on parsers for formats that do not support
// native histograms.
IgnoreNativeHistograms bool

// ConvertClassicHistogramsToNHCB enables conversion of classic histograms
// to native histogram custom buckets (NHCB) format.
ConvertClassicHistogramsToNHCB bool
@ -168,7 +179,14 @@ func New(b []byte, contentType string, st *labels.SymbolTable, opts ParserOption
o.enableTypeAndUnitLabels = opts.EnableTypeAndUnitLabels
})
case "application/vnd.google.protobuf":
return NewProtobufParser(b, opts.KeepClassicOnClassicAndNativeHistograms, opts.ConvertClassicHistogramsToNHCB, opts.EnableTypeAndUnitLabels, st), err
return NewProtobufParser(
b,
opts.IgnoreNativeHistograms,
opts.KeepClassicOnClassicAndNativeHistograms,
opts.ConvertClassicHistogramsToNHCB,
opts.EnableTypeAndUnitLabels,
st,
), err
case "text/plain":
baseParser = NewPromParser(b, st, opts.EnableTypeAndUnitLabels)
default:
@ -77,6 +77,8 @@ type ProtobufParser struct {
// that we have to decode the next MetricDescriptor.
state Entry

// Whether to completely ignore any native parts of histograms.
ignoreNativeHistograms bool
// Whether to also parse a classic histogram that is also present as a
// native histogram.
parseClassicHistograms bool
@ -93,7 +95,11 @@ type ProtobufParser struct {
}

// NewProtobufParser returns a parser for the payload in the byte slice.
func NewProtobufParser(b []byte, parseClassicHistograms, convertClassicHistogramsToNHCB, enableTypeAndUnitLabels bool, st *labels.SymbolTable) Parser {
func NewProtobufParser(
b []byte,
ignoreNativeHistograms, parseClassicHistograms, convertClassicHistogramsToNHCB, enableTypeAndUnitLabels bool,
st *labels.SymbolTable,
) Parser {
builder := labels.NewScratchBuilderWithSymbolTable(st, 16)
builder.SetUnsafeAdd(true)
return &ProtobufParser{
@ -102,6 +108,7 @@ func NewProtobufParser(b []byte, parseClassicHistograms, convertClassicHistogram
builder: builder,

state: EntryInvalid,
ignoreNativeHistograms: ignoreNativeHistograms,
parseClassicHistograms: parseClassicHistograms,
enableTypeAndUnitLabels: enableTypeAndUnitLabels,
convertClassicHistogramsToNHCB: convertClassicHistogramsToNHCB,
@ -196,7 +203,7 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his
h = p.dec.GetHistogram()
)

if !isNativeHistogram(h) {
if p.ignoreNativeHistograms || !isNativeHistogram(h) {
// This only happens if we have a classic histogram and
// we converted it to NHCB already in Next.
if *ts != 0 {
@ -494,7 +501,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
case EntryType:
t := p.dec.GetType()
if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
if !isNativeHistogram(p.dec.GetHistogram()) {
if p.ignoreNativeHistograms || !isNativeHistogram(p.dec.GetHistogram()) {
p.state = EntrySeries
p.fieldPos = -3 // We have not returned anything, let p.Next() increment it to -2.
return p.Next()
@ -515,7 +522,8 @@ func (p *ProtobufParser) Next() (Entry, error) {
t == dto.MetricType_GAUGE_HISTOGRAM {
// Non-trivial series (complex metrics, with magic suffixes).

isClassicHistogram := (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && !isNativeHistogram(p.dec.GetHistogram())
isClassicHistogram := (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
(p.ignoreNativeHistograms || !isNativeHistogram(p.dec.GetHistogram()))
skipSeries := p.convertClassicHistogramsToNHCB && isClassicHistogram && !p.parseClassicHistograms

// Did we iterate over all the classic representations fields?
@ -591,10 +599,11 @@ func (p *ProtobufParser) Next() (Entry, error) {
return EntryInvalid, err
}

// If this is a metric family does not contain native
// histograms, it means we are here thanks to NHCB conversion.
// Return to classic histograms for the consistent flow.
if !isNativeHistogram(p.dec.GetHistogram()) {
// If this metric is not a native histograms or we are ignoring
// native histograms, it means we are here thanks to NHCB
// conversion. Return to classic histograms for the consistent
// flow.
if p.ignoreNativeHistograms || !isNativeHistogram(p.dec.GetHistogram()) {
return switchToClassic()
}

File diff suppressed because it is too large
@ -3960,7 +3960,6 @@ func TestInconsistentHistogramCount(t *testing.T) {
dir := t.TempDir()

opts := tsdb.DefaultHeadOptions()
opts.EnableNativeHistograms.Store(true)
opts.ChunkDirRoot = dir
// We use TSDB head only. By using full TSDB DB, and appending samples to it, closing it would cause unnecessary HEAD compaction, which slows down the test.
head, err := tsdb.NewHead(nil, nil, nil, nil, opts, nil)

@ -86,8 +86,6 @@ type Options struct {
// Option to enable the ingestion of the created timestamp as a synthetic zero sample.
// See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md
EnableCreatedTimestampZeroIngestion bool
// Option to enable the ingestion of native histograms.
EnableNativeHistogramsIngestion bool

// EnableTypeAndUnitLabels
EnableTypeAndUnitLabels bool

@ -967,7 +967,6 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) {
app := &collectResultAppender{}
discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion,
EnableNativeHistogramsIngestion: true,
skipOffsetting: true,
}, &collectResultAppendable{app})
defer scrapeManager.Stop()
@ -1007,6 +1006,7 @@ global:

scrape_configs:
- job_name: test
scrape_native_histograms: true
static_configs:
- targets: ['%s']
`, serverURL.Host)
@ -1087,7 +1087,6 @@ func TestNHCBAndCTZeroIngestion(t *testing.T) {
app := &collectResultAppender{}
discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
EnableCreatedTimestampZeroIngestion: true,
EnableNativeHistogramsIngestion: true,
skipOffsetting: true,
}, &collectResultAppendable{app})
defer scrapeManager.Stop()
@ -131,6 +131,7 @@ type scrapeLoopOptions struct {
trackTimestampsStaleness bool
interval time.Duration
timeout time.Duration
scrapeNativeHist bool
alwaysScrapeClassicHist bool
convertClassicHistToNHCB bool
fallbackScrapeProtocol string
@ -212,7 +213,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
opts.timeout,
opts.alwaysScrapeClassicHist,
opts.convertClassicHistToNHCB,
options.EnableNativeHistogramsIngestion,
cfg.ScrapeNativeHistogramsEnabled(),
options.EnableCreatedTimestampZeroIngestion,
options.EnableTypeAndUnitLabels,
options.ExtraMetrics,
@ -371,6 +372,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
mrc = sp.config.MetricRelabelConfigs
fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType()
scrapeNativeHist = sp.config.ScrapeNativeHistogramsEnabled()
alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled()
convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled()
)
@ -415,6 +417,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
interval: targetInterval,
timeout: targetTimeout,
fallbackScrapeProtocol: fallbackScrapeProtocol,
scrapeNativeHist: scrapeNativeHist,
alwaysScrapeClassicHist: alwaysScrapeClassicHist,
convertClassicHistToNHCB: convertClassicHistToNHCB,
})
@ -527,6 +530,7 @@ func (sp *scrapePool) sync(targets []*Target) {
trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
mrc = sp.config.MetricRelabelConfigs
fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType()
scrapeNativeHist = sp.config.ScrapeNativeHistogramsEnabled()
alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled()
convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled()
)
@ -564,6 +568,7 @@ func (sp *scrapePool) sync(targets []*Target) {
mrc: mrc,
interval: interval,
timeout: timeout,
scrapeNativeHist: scrapeNativeHist,
alwaysScrapeClassicHist: alwaysScrapeClassicHist,
convertClassicHistToNHCB: convertClassicHistToNHCB,
fallbackScrapeProtocol: fallbackScrapeProtocol,
@ -939,8 +944,7 @@ type scrapeLoop struct {
enableTypeAndUnitLabels bool
fallbackScrapeProtocol string

// Feature flagged options.
enableNativeHistogramIngestion bool
enableNativeHistogramScraping bool

appender func(ctx context.Context) storage.Appender
symbolTable *labels.SymbolTable
@ -1248,7 +1252,7 @@ func newScrapeLoop(ctx context.Context,
timeout time.Duration,
alwaysScrapeClassicHist bool,
convertClassicHistToNHCB bool,
enableNativeHistogramIngestion bool,
enableNativeHistogramScraping bool,
enableCTZeroIngestion bool,
enableTypeAndUnitLabels bool,
reportExtraMetrics bool,
@ -1283,39 +1287,39 @@ func newScrapeLoop(ctx context.Context,
}

sl := &scrapeLoop{
scraper: sc,
buffers: buffers,
cache: cache,
appender: appender,
symbolTable: symbolTable,
sampleMutator: sampleMutator,
reportSampleMutator: reportSampleMutator,
stopped: make(chan struct{}),
offsetSeed: offsetSeed,
l: l,
parentCtx: ctx,
appenderCtx: appenderCtx,
honorTimestamps: honorTimestamps,
trackTimestampsStaleness: trackTimestampsStaleness,
enableCompression: enableCompression,
sampleLimit: sampleLimit,
bucketLimit: bucketLimit,
maxSchema: maxSchema,
labelLimits: labelLimits,
interval: interval,
timeout: timeout,
alwaysScrapeClassicHist: alwaysScrapeClassicHist,
convertClassicHistToNHCB: convertClassicHistToNHCB,
enableCTZeroIngestion: enableCTZeroIngestion,
enableTypeAndUnitLabels: enableTypeAndUnitLabels,
fallbackScrapeProtocol: fallbackScrapeProtocol,
enableNativeHistogramIngestion: enableNativeHistogramIngestion,
reportExtraMetrics: reportExtraMetrics,
appendMetadataToWAL: appendMetadataToWAL,
metrics: metrics,
skipOffsetting: skipOffsetting,
validationScheme: validationScheme,
escapingScheme: escapingScheme,
scraper: sc,
buffers: buffers,
cache: cache,
appender: appender,
symbolTable: symbolTable,
sampleMutator: sampleMutator,
reportSampleMutator: reportSampleMutator,
stopped: make(chan struct{}),
offsetSeed: offsetSeed,
l: l,
parentCtx: ctx,
appenderCtx: appenderCtx,
honorTimestamps: honorTimestamps,
trackTimestampsStaleness: trackTimestampsStaleness,
enableCompression: enableCompression,
sampleLimit: sampleLimit,
bucketLimit: bucketLimit,
maxSchema: maxSchema,
labelLimits: labelLimits,
interval: interval,
timeout: timeout,
alwaysScrapeClassicHist: alwaysScrapeClassicHist,
convertClassicHistToNHCB: convertClassicHistToNHCB,
enableCTZeroIngestion: enableCTZeroIngestion,
enableTypeAndUnitLabels: enableTypeAndUnitLabels,
fallbackScrapeProtocol: fallbackScrapeProtocol,
enableNativeHistogramScraping: enableNativeHistogramScraping,
reportExtraMetrics: reportExtraMetrics,
appendMetadataToWAL: appendMetadataToWAL,
metrics: metrics,
skipOffsetting: skipOffsetting,
validationScheme: validationScheme,
escapingScheme: escapingScheme,
}
sl.ctx, sl.cancel = context.WithCancel(ctx)
@ -1637,6 +1641,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,

p, err := textparse.New(b, contentType, sl.symbolTable, textparse.ParserOptions{
EnableTypeAndUnitLabels: sl.enableTypeAndUnitLabels,
IgnoreNativeHistograms: !sl.enableNativeHistogramScraping,
ConvertClassicHistogramsToNHCB: sl.convertClassicHistToNHCB,
KeepClassicOnClassicAndNativeHistograms: sl.alwaysScrapeClassicHist,
OpenMetricsSkipCTSeries: sl.enableCTZeroIngestion,
@ -1663,8 +1668,8 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
appErrs = appendErrors{}
sampleLimitErr error
bucketLimitErr error
lset labels.Labels // escapes to heap so hoisted out of loop
e exemplar.Exemplar // escapes to heap so hoisted out of loop
lset labels.Labels // Escapes to heap so hoisted out of loop.
e exemplar.Exemplar // Escapes to heap so hoisted out of loop.
lastMeta *metaEntry
lastMFName []byte
)
@ -1734,7 +1739,7 @@ loop:
t = *parsedTimestamp
}

if sl.cache.getDropped(met) || isHistogram && !sl.enableNativeHistogramIngestion {
if sl.cache.getDropped(met) {
continue
}
ce, seriesCached, seriesAlreadyScraped := sl.cache.get(met)
@ -2188,7 +2188,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
app := &bucketLimitAppender{Appender: resApp, limit: 2}

sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
sl.enableNativeHistogramIngestion = true
sl.enableNativeHistogramScraping = true
sl.sampleMutator = func(l labels.Labels) labels.Labels {
if l.Has("deleteme") {
return labels.EmptyLabels()
@ -2929,6 +2929,11 @@ metric: <
>

`,
floats: []floatSample{
{metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175},
{metric: labels.FromStrings("__name__", "test_histogram_sum"), t: 1234568, f: 0.0008280461746287094},
{metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175},
},
},
}

@ -2941,7 +2946,7 @@ metric: <
}

sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
sl.enableNativeHistogramIngestion = test.enableNativeHistogramsIngestion
sl.enableNativeHistogramScraping = test.enableNativeHistogramsIngestion
sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, false, nil)
}
@ -4832,7 +4837,7 @@ metric: <
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return simpleStorage.Appender(ctx) }, 0)
sl.alwaysScrapeClassicHist = tc.alwaysScrapeClassicHistograms
sl.convertClassicHistToNHCB = tc.convertClassicHistToNHCB
sl.enableNativeHistogramIngestion = true
sl.enableNativeHistogramScraping = true
app := simpleStorage.Appender(context.Background())

var content []byte
@ -5349,6 +5354,7 @@ global:
scrape_timeout: 25ms
scrape_configs:
- job_name: test
scrape_native_histograms: true
%s
static_configs:
- targets: [%s]
@ -5356,10 +5362,9 @@ scrape_configs:

s := teststorage.New(t)
defer s.Close()
s.EnableNativeHistograms()
reg := prometheus.NewRegistry()

mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, nil, nil, s, reg)
mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond)}, nil, nil, s, reg)
require.NoError(t, err)
cfg, err := config.Load(configStr, promslog.NewNopLogger())
require.NoError(t, err)

@ -400,7 +400,6 @@ func testCombinedAppenderOnTSDB(t *testing.T, ingestCTZeroSample bool) {
opts := tsdb.DefaultOptions()
opts.EnableExemplarStorage = true
opts.MaxExemplars = 100
opts.EnableNativeHistograms = true
db, err := tsdb.Open(dir, promslog.NewNopLogger(), prometheus.NewRegistry(), opts, nil)
require.NoError(t, err)

@ -871,7 +871,6 @@ func TestHistogramValidationErrorHandling(t *testing.T) {
t.Run(testName, func(t *testing.T) {
dir := t.TempDir()
opts := tsdb.DefaultOptions()
opts.EnableNativeHistograms = true

db, err := tsdb.Open(dir, nil, nil, opts, nil)
require.NoError(t, err)

@ -71,7 +71,6 @@ func (w *BlockWriter) initHead() error {
opts := DefaultHeadOptions()
opts.ChunkRange = w.blockSize
opts.ChunkDirRoot = w.chunkDir
opts.EnableNativeHistograms.Store(true)
h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats())
if err != nil {
return fmt.Errorf("tsdb.NewHead: %w", err)
14 tsdb/db.go

@ -176,9 +176,6 @@ type Options struct {
// Disables isolation between reads and in-flight appends.
IsolationDisabled bool

// EnableNativeHistograms enables the ingestion of native histograms.
EnableNativeHistograms bool

// OutOfOrderTimeWindow specifies how much out of order is allowed, if any.
// This can change during run-time, so this value from here should only be used
// while initialising.
@ -964,7 +961,6 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
headOpts.EnableExemplarStorage = opts.EnableExemplarStorage
headOpts.MaxExemplars.Store(opts.MaxExemplars)
headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown
headOpts.EnableNativeHistograms.Store(opts.EnableNativeHistograms)
headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow)
headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax)
headOpts.EnableSharding = opts.EnableSharding
@ -1191,16 +1187,6 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
return nil
}

// EnableNativeHistograms enables the native histogram feature.
func (db *DB) EnableNativeHistograms() {
db.head.EnableNativeHistograms()
}

// DisableNativeHistograms disables the native histogram feature.
func (db *DB) DisableNativeHistograms() {
db.head.DisableNativeHistograms()
}

// dbAppender wraps the DB's head appender and triggers compactions on commit
// if necessary.
type dbAppender struct {
105 tsdb/db_test.go

@ -86,7 +86,6 @@ func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
if opts == nil {
opts = DefaultOptions()
}
opts.EnableNativeHistograms = true

if len(rngs) == 0 {
db, err = Open(tmpdir, nil, nil, opts, nil)
@ -4517,7 +4516,6 @@ func testOOOWALWrite(t *testing.T,

db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
db.EnableNativeHistograms()

t.Cleanup(func() {
require.NoError(t, db.Close())
@ -4934,7 +4932,6 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) {

db := openTestDB(t, opts, nil)
db.DisableCompactions()
db.EnableNativeHistograms()
defer func() {
require.NoError(t, db.Close())
}()
@ -5087,7 +5084,6 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample
opts := DefaultOptions()
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
opts.EnableNativeHistograms = true

db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
@ -5295,7 +5291,6 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen
db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
db.DisableCompactions() // We want to manually call it.
db.EnableNativeHistograms()
t.Cleanup(func() {
require.NoError(t, db.Close())
})
@ -5403,12 +5398,10 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
opts.WALSegmentSize = -1 // disabled WAL and WBL
opts.EnableNativeHistograms = true

db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
db.DisableCompactions() // We want to manually call it.
db.EnableNativeHistograms()
t.Cleanup(func() {
require.NoError(t, db.Close())
})
@ -5515,7 +5508,6 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa
opts.OutOfOrderCapMax = 10
opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
opts.EnableMemorySnapshotOnShutdown = true
opts.EnableNativeHistograms = true

db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
@ -5880,7 +5872,6 @@ func testQuerierOOOQuery(t *testing.T,
opts.OutOfOrderCapMax = tc.oooCap
db := openTestDB(t, opts, nil)
db.DisableCompactions()
db.EnableNativeHistograms()
defer func() {
require.NoError(t, db.Close())
}()
@ -6210,7 +6201,6 @@ func testChunkQuerierOOOQuery(t *testing.T,
opts.OutOfOrderCapMax = tc.oooCap
db := openTestDB(t, opts, nil)
db.DisableCompactions()
db.EnableNativeHistograms()
defer func() {
require.NoError(t, db.Close())
}()
@ -6729,7 +6719,6 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) {

db := openTestDB(t, opts, nil)
db.DisableCompactions()
db.EnableNativeHistograms()
t.Cleanup(func() {
require.NoError(t, db.Close())
})
@ -6861,7 +6850,6 @@ func testOOODisabled(t *testing.T, scenario sampleTypeScenario) {
opts.OutOfOrderTimeWindow = 0
db := openTestDB(t, opts, nil)
db.DisableCompactions()
db.EnableNativeHistograms()
t.Cleanup(func() {
require.NoError(t, db.Close())
})
@ -6934,7 +6922,6 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
opts := DefaultOptions()
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 4 * time.Hour.Milliseconds()
opts.EnableNativeHistograms = true

db := openTestDB(t, opts, nil)
db.DisableCompactions()
@ -7127,7 +7114,6 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) {
db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
db.DisableCompactions() // We want to manually call it.
db.EnableNativeHistograms()
t.Cleanup(func() {
require.NoError(t, db.Close())
})
@ -7488,7 +7474,6 @@ func TestInterleavedInOrderAndOOOHistogramCompactionWithCounterResets(t *testing
db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
db.DisableCompactions() // We want to manually call it.
db.EnableNativeHistograms()
t.Cleanup(func() {
require.NoError(t, db.Close())
})
@ -7604,7 +7589,6 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) {
db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
db.DisableCompactions() // We want to manually call it.
db.EnableNativeHistograms()
t.Cleanup(func() {
require.NoError(t, db.Close())
})
@ -7892,7 +7876,6 @@ func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) {
opts := DefaultOptions()
opts.OutOfOrderCapMax = 10
opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
opts.EnableNativeHistograms = true

db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
@ -8027,7 +8010,6 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) {

opts := DefaultOptions()
opts.OutOfOrderTimeWindow = oooTimeWindow
opts.EnableNativeHistograms = true

db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
@ -8322,7 +8304,6 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {

opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 30 * time.Minute.Milliseconds()
opts.EnableNativeHistograms = true

db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
@ -8381,7 +8362,6 @@ func testWblReplayAfterOOODisableAndRestart(t *testing.T, scenario sampleTypeSce

opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
opts.EnableNativeHistograms = true

db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
@ -8449,7 +8429,6 @@ func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) {

opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
opts.EnableNativeHistograms = true

db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
@ -8509,7 +8488,6 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari

opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
opts.EnableNativeHistograms = true

db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
@ -8999,59 +8977,6 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
}
}

func TestNativeHistogramFlag(t *testing.T) {
dir := t.TempDir()
db, err := Open(dir, nil, nil, nil, nil)
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, db.Close())
})
h := &histogram.Histogram{
Count: 9,
ZeroCount: 4,
ZeroThreshold: 0.001,
Sum: 35.5,
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
}

l := labels.FromStrings("foo", "bar")

app := db.Appender(context.Background())

// Disabled by default.
_, err = app.AppendHistogram(0, l, 100, h, nil)
require.Equal(t, storage.ErrNativeHistogramsDisabled, err)
_, err = app.AppendHistogram(0, l, 105, nil, h.ToFloat(nil))
require.Equal(t, storage.ErrNativeHistogramsDisabled, err)

// Enable and append.
db.EnableNativeHistograms()
_, err = app.AppendHistogram(0, l, 200, h, nil)
require.NoError(t, err)
_, err = app.AppendHistogram(0, l, 205, nil, h.ToFloat(nil))
require.NoError(t, err)

db.DisableNativeHistograms()
_, err = app.AppendHistogram(0, l, 300, h, nil)
require.Equal(t, storage.ErrNativeHistogramsDisabled, err)
_, err = app.AppendHistogram(0, l, 305, nil, h.ToFloat(nil))
require.Equal(t, storage.ErrNativeHistogramsDisabled, err)

require.NoError(t, app.Commit())

q, err := db.Querier(math.MinInt, math.MaxInt64)
require.NoError(t, err)
act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
require.Equal(t, map[string][]chunks.Sample{
l.String(): {sample{t: 200, h: h}, sample{t: 205, fh: h.ToFloat(nil)}},
}, act)
}

func TestOOONativeHistogramsSettings(t *testing.T) {
h := &histogram.Histogram{
Count: 9,
@ -9076,8 +9001,6 @@ func TestOOONativeHistogramsSettings(t *testing.T) {
require.NoError(t, db.Close())
}()

db.EnableNativeHistograms()

app := db.Appender(context.Background())
_, err := app.AppendHistogram(0, l, 100, h, nil)
require.NoError(t, err)
@ -9094,32 +9017,6 @@ func TestOOONativeHistogramsSettings(t *testing.T) {
l.String(): {sample{t: 100, h: h}},
}, act)
})
t.Run("Test OOO Native Histograms if OOO is enabled and Native Histograms are disabled", func(t *testing.T) {
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 100
db := openTestDB(t, opts, []int64{100})
defer func() {
require.NoError(t, db.Close())
}()

db.DisableNativeHistograms()

// Attempt to add an in-order sample
app := db.Appender(context.Background())
_, err := app.AppendHistogram(0, l, 200, h, nil)
require.Equal(t, storage.ErrNativeHistogramsDisabled, err)

// Attempt to add an OOO sample
_, err = app.AppendHistogram(0, l, 100, h, nil)
require.Equal(t, storage.ErrNativeHistogramsDisabled, err)

require.NoError(t, app.Commit())

q, err := db.Querier(math.MinInt, math.MaxInt64)
require.NoError(t, err)
act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
require.Equal(t, map[string][]chunks.Sample{}, act)
})
t.Run("Test OOO native histograms when both OOO and Native Histograms are enabled", func(t *testing.T) {
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 100
@ -9128,8 +9025,6 @@ func TestOOONativeHistogramsSettings(t *testing.T) {
require.NoError(t, db.Close())
}()

db.EnableNativeHistograms()

// Add in-order samples
app := db.Appender(context.Background())
_, err := app.AppendHistogram(0, l, 200, h, nil)
13 tsdb/head.go

@ -161,9 +161,6 @@ type HeadOptions struct {
OutOfOrderTimeWindow atomic.Int64
OutOfOrderCapMax atomic.Int64

// EnableNativeHistograms enables the ingestion of native histograms.
EnableNativeHistograms atomic.Bool

ChunkRange int64
// ChunkDirRoot is the parent directory of the chunks directory.
ChunkDirRoot string
@ -1050,16 +1047,6 @@ func (h *Head) SetOutOfOrderTimeWindow(oooTimeWindow int64, wbl *wlog.WL) {
h.opts.OutOfOrderTimeWindow.Store(oooTimeWindow)
}

// EnableNativeHistograms enables the native histogram feature.
func (h *Head) EnableNativeHistograms() {
h.opts.EnableNativeHistograms.Store(true)
}

// DisableNativeHistograms disables the native histogram feature.
func (h *Head) DisableNativeHistograms() {
h.opts.EnableNativeHistograms.Store(false)
}

// PostingsCardinalityStats returns highest cardinality stats by label and value names.
func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats {
cacheKey := statsByLabelName + ";" + strconv.Itoa(limit)
@ -797,10 +797,6 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels,
}

func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if !a.head.opts.EnableNativeHistograms.Load() {
return 0, storage.ErrNativeHistogramsDisabled
}

// Fail fast if OOO is disabled and the sample is out of bounds.
// Otherwise a full check will be done later to decide if the sample is in-order or out-of-order.
if a.oooTimeWindow == 0 && t < a.minValidTime {
@ -907,10 +903,6 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
}

func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if !a.head.opts.EnableNativeHistograms.Load() {
return 0, storage.ErrNativeHistogramsDisabled
}

if ct >= t {
return 0, storage.ErrCTNewerThanSample
}

@ -64,7 +64,6 @@ func newTestHeadDefaultOptions(chunkRange int64, oooEnabled bool) *HeadOptions {
opts.ChunkRange = chunkRange
opts.EnableExemplarStorage = true
opts.MaxExemplars.Store(config.DefaultExemplarsConfig.MaxExemplars)
opts.EnableNativeHistograms.Store(true)
if oooEnabled {
opts.OutOfOrderTimeWindow.Store(10 * time.Minute.Milliseconds())
}
@ -3199,7 +3198,6 @@ func TestOutOfOrderSamplesMetric(t *testing.T) {
for name, scenario := range sampleTypeScenarios {
t.Run(name, func(t *testing.T) {
options := DefaultOptions()
options.EnableNativeHistograms = true
testOutOfOrderSamplesMetric(t, scenario, options, storage.ErrOutOfOrderSample)
})
}
@ -3213,7 +3211,6 @@ func TestOutOfOrderSamplesMetricNativeHistogramOOODisabled(t *testing.T) {
t.Run(name, func(t *testing.T) {
options := DefaultOptions()
options.OutOfOrderTimeWindow = 0
options.EnableNativeHistograms = true
testOutOfOrderSamplesMetric(t, scenario, options, storage.ErrOutOfOrderSample)
})
}
@ -4071,7 +4068,6 @@ func testQueryOOOHeadDuringTruncate(t *testing.T, makeQuerier func(db *DB, minT,

dir := t.TempDir()
opts := DefaultOptions()
opts.EnableNativeHistograms = true
opts.OutOfOrderTimeWindow = maxT
opts.MinBlockDuration = maxT / 2 // So that head will compact up to 3000.

@ -5313,7 +5309,6 @@ func TestOOOHistogramCounterResetHeaders(t *testing.T) {
func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
dir := t.TempDir()
opts := DefaultOptions()
opts.EnableNativeHistograms = true
db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
t.Cleanup(func() {
@ -5564,7 +5559,6 @@ func testWBLReplay(t *testing.T, scenario sampleTypeScenario) {
opts.ChunkRange = 1000
opts.ChunkDirRoot = dir
opts.OutOfOrderTimeWindow.Store(30 * time.Minute.Milliseconds())
opts.EnableNativeHistograms.Store(true)

h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
require.NoError(t, err)
@ -5658,7 +5652,6 @@ func testOOOMmapReplay(t *testing.T, scenario sampleTypeScenario) {
opts.ChunkDirRoot = dir
opts.OutOfOrderCapMax.Store(30)
opts.OutOfOrderTimeWindow.Store(1000 * time.Minute.Milliseconds())
opts.EnableNativeHistograms.Store(true)

h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
require.NoError(t, err)
@ -5960,7 +5953,6 @@ func testOOOAppendWithNoSeries(t *testing.T, appendFunc func(appender storage.Ap
opts.ChunkDirRoot = dir
opts.OutOfOrderCapMax.Store(30)
opts.OutOfOrderTimeWindow.Store(120 * time.Minute.Milliseconds())
opts.EnableNativeHistograms.Store(true)

h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
require.NoError(t, err)
@ -6051,7 +6043,6 @@ func testHeadMinOOOTimeUpdate(t *testing.T, scenario sampleTypeScenario) {
opts := DefaultHeadOptions()
opts.ChunkDirRoot = dir
opts.OutOfOrderTimeWindow.Store(10 * time.Minute.Milliseconds())
opts.EnableNativeHistograms.Store(true)

h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
require.NoError(t, err)

@ -493,7 +493,6 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
opts := DefaultOptions()
opts.OutOfOrderCapMax = 5
opts.OutOfOrderTimeWindow = 120 * time.Minute.Milliseconds()
opts.EnableNativeHistograms = true

s1 := labels.FromStrings("l", "v1")
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
@ -903,7 +902,6 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
opts := DefaultOptions()
opts.OutOfOrderCapMax = 5
opts.OutOfOrderTimeWindow = 120 * time.Minute.Milliseconds()
opts.EnableNativeHistograms = true

s1 := labels.FromStrings("l", "v1")
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }

@ -3612,7 +3612,6 @@ func TestQueryWithDeletedHistograms(t *testing.T) {
require.NoError(t, db.Close())
}()

db.EnableNativeHistograms()
appender := db.Appender(context.Background())

var (
@ -3671,7 +3670,6 @@ func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) {
require.NoError(t, db.Close())
}()

db.EnableNativeHistograms()
appender := db.Appender(context.Background())

var (

@ -50,7 +50,6 @@ func NewWithError(outOfOrderTimeWindow ...int64) (*TestStorage, error) {
opts.MinBlockDuration = int64(24 * time.Hour / time.Millisecond)
opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond)
opts.RetentionDuration = 0
opts.EnableNativeHistograms = true

// Set OutOfOrderTimeWindow if provided, otherwise use default (0)
if len(outOfOrderTimeWindow) > 0 {

@ -453,7 +453,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
require.Equal(t, http.StatusOK, res.Code)
body, err := io.ReadAll(res.Body)
require.NoError(t, err)
p := textparse.NewProtobufParser(body, false, false, false, labels.NewSymbolTable())
p := textparse.NewProtobufParser(body, false, false, false, false, labels.NewSymbolTable())
var actVec promql.Vector
metricFamilies := 0
l := labels.Labels{}