Merge remote-tracking branch 'origin/main' into feature/start-time

Signed-off-by: Owen Williams <owen.williams@grafana.com>
This commit is contained in:
Owen Williams 2026-03-02 14:47:44 -05:00
commit c4deef472e
No known key found for this signature in database
GPG Key ID: 711C61A216D34A69
43 changed files with 579 additions and 156 deletions

View File

@ -16,7 +16,7 @@ jobs:
container:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
image: quay.io/prometheus/golang-builder:1.25-base
image: quay.io/prometheus/golang-builder:1.26-base
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
@ -34,7 +34,7 @@ jobs:
name: More Go tests
runs-on: ubuntu-latest
container:
image: quay.io/prometheus/golang-builder:1.25-base
image: quay.io/prometheus/golang-builder:1.26-base
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
@ -55,8 +55,6 @@ jobs:
env:
# Enforce the Go version.
GOTOOLCHAIN: local
# TODO: remove once 1.25 is the min version.
GOEXPERIMENT: synctest
container:
# The go version in this image should be N-1 wrt test_go.
image: quay.io/prometheus/golang-builder:1.25-base
@ -67,9 +65,6 @@ jobs:
- run: make build
# Don't run NPM build; don't run race-detector.
- run: make test GO_ONLY=1 test-flags=""
# TODO: remove once 1.25 is the min version.
# ensure we can build without the tag.
- run: GOEXPERIMENT="" make build
test_ui:
name: UI tests
@ -77,7 +72,7 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
container:
image: quay.io/prometheus/golang-builder:1.25-base
image: quay.io/prometheus/golang-builder:1.26-base
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
@ -104,7 +99,7 @@ jobs:
persist-credentials: false
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: 1.25.x
go-version: 1.26.x
- run: |
$TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
go test $TestTargets -vet=off -v
@ -116,7 +111,7 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
container:
image: quay.io/prometheus/golang-builder:1.25-base
image: quay.io/prometheus/golang-builder:1.26-base
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
@ -227,7 +222,7 @@ jobs:
name: Check generated parser
runs-on: ubuntu-latest
container:
image: quay.io/prometheus/golang-builder:1.25-base
image: quay.io/prometheus/golang-builder:1.26-base
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
@ -250,7 +245,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: 1.25.x
go-version: 1.26.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
@ -265,8 +260,7 @@ jobs:
- name: Lint with slicelabels
uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
with:
# goexperiment.synctest to ensure we don't miss files that depend on it.
args: --verbose --build-tags=slicelabels,goexperiment.synctest
args: --verbose --build-tags=slicelabels
version: ${{ steps.golangci-lint-version.outputs.version }}
- name: Lint with dedupelabels
uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0

View File

@ -19,7 +19,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: 1.25.x
go-version: 1.26.x
- name: Run Fuzzing
run: go test -fuzz=${{ matrix.fuzz_test }}$ -fuzztime=5m ./util/fuzzing
continue-on-error: true

View File

@ -129,6 +129,9 @@ linters:
# Disable this check for now since it introduces too many changes in our existing codebase.
# See https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#hdr-Analyzer_omitzero for more details.
- omitzero
# Disable newexpr check for now since it introduces too many changes in our existing codebase.
# To be re-enabled as a part of https://github.com/prometheus/prometheus/issues/18066.
- newexpr
perfsprint:
# Optimizes even if it requires an int or uint type cast.
int-conversion: true

View File

@ -1,7 +1,7 @@
go:
# Whenever the Go version is updated here,
# .github/workflows should also be updated.
version: 1.25
version: 1.26
repository:
path: github.com/prometheus/prometheus
build:

View File

@ -1,5 +1,56 @@
# Changelog
## 3.10.0 / 2026-02-24
Prometheus now offers a distroless Docker image variant alongside the default
busybox image. The distroless variant provides enhanced security with a minimal
base image, uses UID/GID 65532 (nonroot) instead of nobody, and removes the
VOLUME declaration. Both variants are available with `-busybox` and `-distroless`
tag suffixes (e.g., `prom/prometheus:latest-busybox`, `prom/prometheus:latest-distroless`).
The busybox image remains the default with no suffix for backwards compatibility
(e.g., `prom/prometheus:latest` points to the busybox variant).
For users migrating existing **named** volumes from the busybox image to the distroless variant, the ownership can be adjusted with:
```
docker run --rm -v prometheus-data:/prometheus alpine chown -R 65532:65532 /prometheus
```
Then, the container can be started with the old volume with:
```
docker run -v prometheus-data:/prometheus prom/prometheus:latest-distroless
```
Users migrating from bind mounts might need to adjust permissions too, depending on their setup.
- [CHANGE] Alerting: Add `alertmanager` dimension to following metrics: `prometheus_notifications_dropped_total`, `prometheus_notifications_queue_capacity`, `prometheus_notifications_queue_length`. #16355
- [CHANGE] UI: Hide expanded alert annotations by default, enabling more information density on the `/alerts` page. #17611
- [FEATURE] AWS SD: Add MSK Role. #17600
- [FEATURE] PromQL: Add `fill()` / `fill_left()` / `fill_right()` binop modifiers for specifying default values for missing series. #17644
- [FEATURE] Web: Add OpenAPI 3.2 specification for the HTTP API at `/api/v1/openapi.yaml`. #17825
- [FEATURE] Dockerfile: Add distroless image variant using UID/GID 65532 and no VOLUME declaration. Busybox image remains default. #17876
- [FEATURE] Web: Add on-demand wall time profiling under `<URL>/debug/pprof/fgprof`. #18027
- [ENHANCEMENT] PromQL: Add more detail to histogram quantile monotonicity info annotations. #15578
- [ENHANCEMENT] Alerting: Independent alertmanager sendloops. #16355
- [ENHANCEMENT] TSDB: Experimental support for early compaction of stale series in the memory with configurable threshold `stale_series_compaction_threshold` in the config file. #16929
- [ENHANCEMENT] Service Discovery: Service discoveries are now removable from the Prometheus binary through the Go build tag `remove_all_sd` and individual service discoveries can be re-added with the build tags `enable_<sd name>_sd`. Users can build a custom Prometheus with only the necessary SDs for a smaller binary size. #17736
- [ENHANCEMENT] Promtool: Support promql syntax features `promql-duration-expr` and `promql-extended-range-selectors`. #17926
- [PERF] PromQL: Avoid unnecessary label extraction in PromQL functions. #17676
- [PERF] PromQL: Improve performance of regex matchers like `.*-.*-.*`. #17707
- [PERF] OTLP: Add label caching for OTLP-to-Prometheus conversion to reduce allocations and improve latency. #17860
- [PERF] API: Compute `/api/v1/targets/relabel_steps` in a single pass instead of re-running relabeling for each prefix. #17969
- [PERF] tsdb: Optimize LabelValues intersection performance for matchers. #18069
- [BUGFIX] PromQL: Prevent query strings containing only UTF-8 continuation bytes from crashing Prometheus. #17735
- [BUGFIX] Web: Fix missing `X-Prometheus-Stopping` header for `/-/ready` endpoint in `NotReady` state. #17795
- [BUGFIX] PromQL: Fix PromQL `info()` function returning empty results when filtering by a label that exists on both the input metric and `target_info`. #17817
- [BUGFIX] TSDB: Fix a bug during exemplar buffer grow/shrink that could cause exemplars to be incorrectly discarded. #17863
- [BUGFIX] UI: Fix broken graph display after page reload, due to broken Y axis min encoding/decoding. #17869
- [BUGFIX] TSDB: Fix memory leaks in buffer pools by clearing reference fields (Labels, Histogram pointers, metadata strings) before returning buffers to pools. #17879
- [BUGFIX] PromQL: info function: fix series without identifying labels not being returned. #17898
- [BUGFIX] OTLP: Filter `__name__` from OTLP attributes to prevent duplicate labels. #17917
- [BUGFIX] TSDB: Fix division by zero when computing stale series ratio with empty head. #17952
- [BUGFIX] OTLP: Fix potential silent data loss for sum metrics. #17954
- [BUGFIX] PromQL: Fix smoothed interpolation across counter resets. #17988
- [BUGFIX] PromQL: Fix panic with `@` modifier on empty ranges. #18020
- [BUGFIX] PromQL: Fix `avg_over_time` for a single native histogram. #18058
## 3.9.1 / 2026-01-07
- [BUGFIX] Agent: fix crash shortly after startup from invalid type of object. #17802

View File

@ -22,6 +22,7 @@
/discovery/kubernetes @prometheus/default-maintainers @brancz
/discovery/stackit @prometheus/default-maintainers @jkroepke
/discovery/aws/ @prometheus/default-maintainers @matt-gp @sysadmind
/discovery/consul @prometheus/default-maintainers @mrvarmazyar
# Pending
# https://github.com/prometheus/prometheus/pull/15212#issuecomment-3575225179
# /discovery/aliyun @prometheus/default-maintainers @KeyOfSpectator

View File

@ -7,6 +7,7 @@ General maintainers:
* Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)
* Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie)
* György Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
* Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka)
Maintainers for specific parts of the codebase:
* `cmd`
@ -14,12 +15,13 @@ Maintainers for specific parts of the codebase:
* `discovery`
* `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
* `stackit`: Jan-Otto Kröpke (<mail@jkroepke.de> / @jkroepke)
* `consul`: Mohammad Varmazyar (<mrvarmazyar@gmail.com> / @mrvarmazyar)
* `documentation`
* `prometheus-mixin`: Matthias Loibl (<mail@matthiasloibl.com> / @metalmatze)
* `storage`
* `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Alex Greenbank (<alexgreenbank@yahoo.com> / @alexgreenbank)
* `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Alex Greenbank (<alexgreenbank@yahoo.com> / @alexgreenbank)
* `otlptranslator`: Arthur Silva Sens (<arthursens2005@gmail.com> / @ArthurSens), Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
* `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
* `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
* `web`
* `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
* `module`: Augustin Husson (<husson.augustin@gmail.com> / @nexucis)

View File

@ -13,6 +13,8 @@
# Needs to be defined before including Makefile.common to auto-generate targets
DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le riscv64 s390x
DOCKERFILE_ARCH_EXCLUSIONS ?= Dockerfile.distroless:riscv64
DOCKER_REGISTRY_ARCH_EXCLUSIONS ?= quay.io:riscv64
UI_PATH = web/ui
UI_NODE_MODULES_PATH = $(UI_PATH)/node_modules

View File

@ -109,6 +109,24 @@ endif
# Build variant:dockerfile pairs for shell iteration.
DOCKERFILE_VARIANTS_WITH_NAMES := $(foreach df,$(DOCKERFILE_VARIANTS),$(call dockerfile_variant,$(df)):$(df))
# Shell helper to check whether a dockerfile/arch pair is excluded.
define dockerfile_arch_is_excluded
case " $(DOCKERFILE_ARCH_EXCLUSIONS) " in \
*" $$dockerfile:$(1) "*) true ;; \
*) false ;; \
esac
endef
# Shell helper to check whether a registry/arch pair is excluded.
# Extracts registry from DOCKER_REPO (e.g., quay.io/prometheus -> quay.io)
define registry_arch_is_excluded
registry=$$(echo "$(DOCKER_REPO)" | cut -d'/' -f1); \
case " $(DOCKER_REGISTRY_ARCH_EXCLUSIONS) " in \
*" $$registry:$(1) "*) true ;; \
*) false ;; \
esac
endef
BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
@ -250,6 +268,10 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
@for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \
dockerfile=$${variant#*:}; \
variant_name=$${variant%%:*}; \
if $(call dockerfile_arch_is_excluded,$*); then \
echo "Skipping $$variant_name variant for linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
distroless_arch="$*"; \
if [ "$*" = "armv7" ]; then \
distroless_arch="arm"; \
@ -284,6 +306,14 @@ $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
@for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \
dockerfile=$${variant#*:}; \
variant_name=$${variant%%:*}; \
if $(call dockerfile_arch_is_excluded,$*); then \
echo "Skipping push for $$variant_name variant on linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
if $(call registry_arch_is_excluded,$*); then \
echo "Skipping push for $$variant_name variant on linux-$* to $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
echo "Pushing $$variant_name variant for linux-$*"; \
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \
@ -311,6 +341,14 @@ $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
@for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \
dockerfile=$${variant#*:}; \
variant_name=$${variant%%:*}; \
if $(call dockerfile_arch_is_excluded,$*); then \
echo "Skipping tag for $$variant_name variant on linux-$* (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
if $(call registry_arch_is_excluded,$*); then \
echo "Skipping tag for $$variant_name variant on linux-$* for $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
echo "Tagging $$variant_name variant for linux-$* as latest"; \
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest-$$variant_name"; \
@ -330,23 +368,87 @@ common-docker-manifest:
variant_name=$${variant%%:*}; \
if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
echo "Creating manifest for $$variant_name variant"; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name); \
refs=""; \
for arch in $(DOCKER_ARCHS); do \
if $(call dockerfile_arch_is_excluded,$$arch); then \
echo " Skipping $$arch for $$variant_name (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
if $(call registry_arch_is_excluded,$$arch); then \
echo " Skipping $$arch for $$variant_name on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \
done; \
if [ -z "$$refs" ]; then \
echo "Skipping manifest for $$variant_name variant (no supported architectures)"; \
continue; \
fi; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" $$refs; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \
fi; \
if [ "$$dockerfile" = "Dockerfile" ]; then \
echo "Creating default variant ($$variant_name) manifest"; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)); \
refs=""; \
for arch in $(DOCKER_ARCHS); do \
if $(call dockerfile_arch_is_excluded,$$arch); then \
echo " Skipping $$arch for default variant (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
if $(call registry_arch_is_excluded,$$arch); then \
echo " Skipping $$arch for default variant on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:$(SANITIZED_DOCKER_IMAGE_TAG)"; \
done; \
if [ -z "$$refs" ]; then \
echo "Skipping default variant manifest (no supported architectures)"; \
continue; \
fi; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $$refs; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"; \
fi; \
if [ "$(DOCKER_IMAGE_TAG)" = "latest" ]; then \
if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
echo "Creating manifest for $$variant_name variant version tag"; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name); \
refs=""; \
for arch in $(DOCKER_ARCHS); do \
if $(call dockerfile_arch_is_excluded,$$arch); then \
echo " Skipping $$arch for $$variant_name version tag (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
if $(call registry_arch_is_excluded,$$arch); then \
echo " Skipping $$arch for $$variant_name version tag on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \
done; \
if [ -z "$$refs" ]; then \
echo "Skipping version-tag manifest for $$variant_name variant (no supported architectures)"; \
continue; \
fi; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name" $$refs; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \
fi; \
if [ "$$dockerfile" = "Dockerfile" ]; then \
echo "Creating default variant version tag manifest"; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):v$(DOCKER_MAJOR_VERSION_TAG)); \
refs=""; \
for arch in $(DOCKER_ARCHS); do \
if $(call dockerfile_arch_is_excluded,$$arch); then \
echo " Skipping $$arch for default variant version tag (excluded by DOCKERFILE_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
if $(call registry_arch_is_excluded,$$arch); then \
echo " Skipping $$arch for default variant version tag on $(DOCKER_REPO) (excluded by DOCKER_REGISTRY_ARCH_EXCLUSIONS)"; \
continue; \
fi; \
refs="$$refs $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$$arch:v$(DOCKER_MAJOR_VERSION_TAG)"; \
done; \
if [ -z "$$refs" ]; then \
echo "Skipping default variant version-tag manifest (no supported architectures)"; \
continue; \
fi; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)" $$refs; \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)"; \
fi; \
fi; \

View File

@ -1 +1 @@
3.9.1
3.10.0

View File

@ -1599,7 +1599,7 @@ type reloader struct {
reloader func(*config.Config) error
}
func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logger, noStepSubqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
start := time.Now()
timingsLogger := logger
logger.Info("Loading configuration file", "filename", filename)
@ -1640,8 +1640,7 @@ func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logg
}
updateGoGC(conf, logger)
noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
noStepSubqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start))
return nil
}

View File

@ -1,6 +1,6 @@
module compliance
go 1.25.5
go 1.25.0
require github.com/prometheus/compliance/remotewrite v0.0.0-20260223092825-818283e1171e

View File

@ -2316,7 +2316,7 @@ var expectedErrors = []struct {
},
{
filename: "kubernetes_selectors_pod.bad.yml",
errMsg: "pod role supports only pod selectors",
errMsg: "pod role supports only pod, node selectors",
},
{
filename: "kubernetes_selectors_service.bad.yml",

View File

@ -6,3 +6,6 @@ scrape_configs:
- role: "node"
label: "foo=bar"
field: "metadata.status=Running"
- role: "service"
label: "baz=que"
field: "metadata.status=Running"

View File

@ -11,3 +11,8 @@ scrape_configs:
- role: "pod"
label: "foo in (bar,baz)"
field: "metadata.status=Running"
- role: pod
selectors:
- role: "node"
label: "foo=bar"
field: "metadata.status=Running"

View File

@ -194,7 +194,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
foundSelectorRoles := make(map[Role]struct{})
allowedSelectors := map[Role][]string{
RolePod: {string(RolePod)},
RolePod: {string(RolePod), string(RoleNode)},
RoleService: {string(RoleService)},
RoleEndpointSlice: {string(RolePod), string(RoleService), string(RoleEndpointSlice)},
RoleEndpoint: {string(RolePod), string(RoleService), string(RoleEndpoint)},

View File

@ -286,6 +286,19 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
return tg
}
// Filter out pods scheduled on nodes that are not in the node store, as
// these were filtered out by node selectors.
if p.withNodeMetadata {
_, exists, err := p.nodeInf.GetStore().GetByKey(pod.Spec.NodeName)
if err != nil {
p.logger.Error("failed to get node from store", "node", pod.Spec.NodeName, "err", err)
return tg
}
if !exists {
return tg
}
}
tg.Labels = podLabels(pod)
tg.Labels[namespaceLabel] = lv(pod.Namespace)
if p.withNodeMetadata {

View File

@ -627,3 +627,74 @@ func TestPodDiscoveryWithUpdatedNamespaceMetadata(t *testing.T) {
},
}.Run(t)
}
// TestPodDiscoveryWithNodeSelector verifies that, when node metadata is
// attached, pod discovery applies the node selector: pods scheduled on a
// matching node get full target labels, while pods on a filtered-out node
// produce an empty target group (no targets, no labels).
func TestPodDiscoveryWithNodeSelector(t *testing.T) {
t.Parallel()
// Two nodes: only "worker-node" matches the node-type=worker selector below.
workerNode := makeNode("worker-node", "10.0.0.1", "", map[string]string{"node-type": "worker"}, nil)
filteredNode := makeNode("filtered-node", "10.0.0.2", "", map[string]string{"node-type": "master"}, nil)
attachMetadata := AttachMetadataConfig{
Node: true, // necessary for node role selectors to work for pod role
}
n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata, workerNode, filteredNode)
// Restrict the node informer to worker nodes only.
n.selectors = roleSelector{
node: resourceSelector{
label: "node-type=worker",
},
}
// One pod on each node, so both the kept and the filtered path are exercised.
podOnWorker := makePods("default")
podOnWorker.Name = "pod-on-worker"
podOnWorker.UID = types.UID("worker-pod-123")
podOnWorker.Spec.NodeName = "worker-node"
podOnWorker.Status.PodIP = "192.168.1.1"
podOnFilteredNode := makePods("default")
podOnFilteredNode.Name = "pod-on-filtered-node"
podOnFilteredNode.UID = types.UID("filtered-pod-456")
podOnFilteredNode.Spec.NodeName = "filtered-node"
podOnFilteredNode.Status.PodIP = "192.168.1.2"
k8sDiscoveryTest{
discovery: n,
beforeRun: func() {
c.CoreV1().Pods("default").Create(context.Background(), podOnWorker, metav1.CreateOptions{})
c.CoreV1().Pods("default").Create(context.Background(), podOnFilteredNode, metav1.CreateOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
// Pod on the matching node: full pod + node metadata labels.
"pod/default/pod-on-worker": {
Targets: []model.LabelSet{
{
"__address__": "192.168.1.1:9000",
"__meta_kubernetes_pod_container_image": "testcontainer:latest",
"__meta_kubernetes_pod_container_name": "testcontainer",
"__meta_kubernetes_pod_container_port_name": "testport",
"__meta_kubernetes_pod_container_port_number": "9000",
"__meta_kubernetes_pod_container_port_protocol": "TCP",
"__meta_kubernetes_pod_container_init": "false",
"__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_pod_name": "pod-on-worker",
"__meta_kubernetes_pod_ip": "192.168.1.1",
"__meta_kubernetes_pod_ready": "true",
"__meta_kubernetes_pod_phase": "Running",
"__meta_kubernetes_pod_node_name": "worker-node",
"__meta_kubernetes_pod_host_ip": "2.3.4.5",
"__meta_kubernetes_pod_uid": "worker-pod-123",
"__meta_kubernetes_node_name": "worker-node",
"__meta_kubernetes_node_label_node_type": "worker",
"__meta_kubernetes_node_labelpresent_node_type": "true",
},
Source: "pod/default/pod-on-worker",
},
// Pod on the filtered node: its node is absent from the node store,
// so only an empty group with the source is emitted.
"pod/default/pod-on-filtered-node": {
Source: "pod/default/pod-on-filtered-node",
},
},
}.Run(t)
}

View File

@ -475,6 +475,7 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
for setName, v := range n {
m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v))
m.metrics.LastUpdated.WithLabelValues(setName).SetToCurrentTime()
}
return tSets

View File

@ -1595,3 +1595,30 @@ func TestConfigReloadAndShutdownRace(t *testing.T) {
cancel()
wgBg.Wait()
}
// TestGaugeLastUpdateTimestamp verifies that the manager sets the
// prometheus_sd_last_update_timestamp_seconds gauge for a config when an
// update is sent to the SD consumers, and that the recorded timestamp falls
// between the wall-clock instants taken just before and just after the sync.
func TestGaugeLastUpdateTimestamp(t *testing.T) {
ctx := t.Context()
reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager)
// Short update interval so the sync happens quickly in the test.
discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run()
c := map[string]Configs{
"prometheus": {
staticConfig("foo:9090"),
},
}
discoveryManager.ApplyConfig(c)
// Bracket the sync so the gauge value can be bounded on both sides.
before := time.Now()
<-discoveryManager.SyncCh()
after := time.Now()
// Gauge stores seconds; compare against UnixNano converted to seconds.
ts := client_testutil.ToFloat64(discoveryManager.metrics.LastUpdated.WithLabelValues("prometheus"))
require.GreaterOrEqual(t, ts, float64(before.UnixNano())/1e9, "last update timestamp should be >= time before sync")
require.LessOrEqual(t, ts, float64(after.UnixNano())/1e9, "last update timestamp should be <= time after sync")
}

View File

@ -26,6 +26,7 @@ type Metrics struct {
ReceivedUpdates prometheus.Counter
DelayedUpdates prometheus.Counter
SentUpdates prometheus.Counter
LastUpdated *prometheus.GaugeVec
}
func NewManagerMetrics(registerer prometheus.Registerer, sdManagerName string) (*Metrics, error) {
@ -72,12 +73,22 @@ func NewManagerMetrics(registerer prometheus.Registerer, sdManagerName string) (
},
)
m.LastUpdated = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "prometheus_sd_last_update_timestamp_seconds",
Help: "Timestamp of the last update sent to the SD consumers.",
ConstLabels: prometheus.Labels{"name": sdManagerName},
},
[]string{"config"},
)
metrics := []prometheus.Collector{
m.FailedConfigs,
m.DiscoveredTargets,
m.ReceivedUpdates,
m.DelayedUpdates,
m.SentUpdates,
m.LastUpdated,
}
for _, collector := range metrics {
@ -97,4 +108,5 @@ func (m *Metrics) Unregister(registerer prometheus.Registerer) {
registerer.Unregister(m.ReceivedUpdates)
registerer.Unregister(m.DelayedUpdates)
registerer.Unregister(m.SentUpdates)
registerer.Unregister(m.LastUpdated)
}

View File

@ -3746,3 +3746,6 @@ headers:
tls_config:
[ <tls_config> ]
```
If query logging and tracing are both enabled, a traceID and spanID will be injected
into the query log file for use in log/trace correlation.

View File

@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage
go 1.25.5
go 1.25.0
require (
github.com/alecthomas/kingpin/v2 v2.4.0
@ -64,7 +64,7 @@ require (
github.com/googleapis/gax-go/v2 v2.17.0 // indirect
github.com/gophercloud/gophercloud/v2 v2.10.0 // indirect
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 // indirect
github.com/hashicorp/consul/api v1.33.2 // indirect
github.com/hashicorp/consul/api v1.32.1 // indirect
github.com/hashicorp/go-version v1.8.0 // indirect
github.com/hashicorp/nomad/api v0.0.0-20260209224925-94b77491c895 // indirect
github.com/hetznercloud/hcloud-go/v2 v2.36.0 // indirect

View File

@ -184,8 +184,8 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7EAJ0BHIethd+J6LqxFNw5mSiI2bM=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/hashicorp/consul/api v1.33.2 h1:Q6mE0WZsUTJerlnl9TuXzqrtZ0cKdOCsxcZhj5mKbMs=
github.com/hashicorp/consul/api v1.33.2/go.mod h1:K3yoL/vnIBcQV/25NeMZVokRvPPERiqp2Udtr4xAfhs=
github.com/hashicorp/consul/api v1.32.1 h1:0+osr/3t/aZNAdJX558crU3PEjVrG4x6715aZHRgceE=
github.com/hashicorp/consul/api v1.32.1/go.mod h1:mXUWLnxftwTmDv4W3lzxYCPD199iNLLUyLfLGFJbtl4=
github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
@ -255,6 +255,8 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=

13
go.mod
View File

@ -1,6 +1,6 @@
module github.com/prometheus/prometheus
go 1.25.5
go 1.25.0
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0
@ -41,7 +41,7 @@ require (
github.com/google/uuid v1.6.0
github.com/gophercloud/gophercloud/v2 v2.10.0
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
github.com/hashicorp/consul/api v1.33.2
github.com/hashicorp/consul/api v1.32.1
github.com/hashicorp/nomad/api v0.0.0-20260220212019-daca79db0bd6
github.com/hetznercloud/hcloud-go/v2 v2.36.0
github.com/ionos-cloud/sdk-go/v6 v6.3.6
@ -57,8 +57,8 @@ require (
github.com/oklog/ulid/v2 v2.1.1
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0
github.com/ovh/go-ovh v1.9.0
github.com/pb33f/libopenapi v0.33.4
github.com/pb33f/libopenapi-validator v0.11.1
github.com/pb33f/libopenapi v0.34.0
github.com/pb33f/libopenapi-validator v0.13.0
github.com/prometheus/alertmanager v0.31.1
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562
@ -122,7 +122,8 @@ require (
github.com/go-openapi/swag/stringutils v0.25.4 // indirect
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
github.com/pb33f/jsonpath v0.7.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pb33f/jsonpath v0.8.1 // indirect
github.com/pb33f/ordered-map/v2 v2.3.0 // indirect
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
github.com/sirupsen/logrus v1.9.4 // indirect
@ -161,7 +162,7 @@ require (
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect

26
go.sum
View File

@ -156,8 +156,8 @@ github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@ -280,10 +280,10 @@ github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7E
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
github.com/hashicorp/consul/api v1.33.2 h1:Q6mE0WZsUTJerlnl9TuXzqrtZ0cKdOCsxcZhj5mKbMs=
github.com/hashicorp/consul/api v1.33.2/go.mod h1:K3yoL/vnIBcQV/25NeMZVokRvPPERiqp2Udtr4xAfhs=
github.com/hashicorp/consul/sdk v0.17.1 h1:LumAh8larSXmXw2wvw/lK5ZALkJ2wK8VRwWMLVV5M5c=
github.com/hashicorp/consul/sdk v0.17.1/go.mod h1:EngiixMhmw9T7wApycq6rDRFXXVUwjjf7HuLiGMH/Sw=
github.com/hashicorp/consul/api v1.32.1 h1:0+osr/3t/aZNAdJX558crU3PEjVrG4x6715aZHRgceE=
github.com/hashicorp/consul/api v1.32.1/go.mod h1:mXUWLnxftwTmDv4W3lzxYCPD199iNLLUyLfLGFJbtl4=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@ -412,6 +412,8 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@ -465,12 +467,12 @@ github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pb33f/jsonpath v0.7.1 h1:dEp6oIZuJbpDSyuHAl9m7GonoDW4M20BcD5vT0tPYRE=
github.com/pb33f/jsonpath v0.7.1/go.mod h1:zBV5LJW4OQOPatmQE2QdKpGQJvhDTlE5IEj6ASaRNTo=
github.com/pb33f/libopenapi v0.33.4 h1:Rgczgrg4VQKXW/NtSj/nApmtYKS+TVpLgTsG692JxmE=
github.com/pb33f/libopenapi v0.33.4/go.mod h1:e/dmd2Pf1nkjqkI0r7guFSyt9T5V0IIQKgs0L6B/3b0=
github.com/pb33f/libopenapi-validator v0.11.1 h1:lTW738oB3lwpS9poDzmI3jpTPZSb5W46vklZqtyf7+Q=
github.com/pb33f/libopenapi-validator v0.11.1/go.mod h1:7CfboslU/utKhiuQRuenriGYZ+HQLDOvARxjqRwd57w=
github.com/pb33f/jsonpath v0.8.1 h1:84C6QRyx6HcSm6PZnsMpcqYot3IsZ+m0n95+0NbBbvs=
github.com/pb33f/jsonpath v0.8.1/go.mod h1:zBV5LJW4OQOPatmQE2QdKpGQJvhDTlE5IEj6ASaRNTo=
github.com/pb33f/libopenapi v0.34.0 h1:jY8pf4yBHRObnNBrjuVDhVpgKjSUE8hLFpeoYtyQ/eo=
github.com/pb33f/libopenapi v0.34.0/go.mod h1:YOP20KzYe3mhE5301aQzJtzQ9MnvhABBGO7RMttA4V4=
github.com/pb33f/libopenapi-validator v0.13.0 h1:an3BxwklmLF4bxacudLV8Vysvw1krlAjpYoUfyJUgw8=
github.com/pb33f/libopenapi-validator v0.13.0/go.mod h1:YZQRDh+8xap/H0GM0cJsBrqqT+XLlMivA/qwqRLiidQ=
github.com/pb33f/ordered-map/v2 v2.3.0 h1:k2OhVEQkhTCQMhAicQ3Z6iInzoZNQ7L9MVomwKBZ5WQ=
github.com/pb33f/ordered-map/v2 v2.3.0/go.mod h1:oe5ue+6ZNhy7QN9cPZvPA23Hx0vMHnNVeMg4fGdCANw=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=

View File

@ -1,4 +1,4 @@
go 1.25.5
go 1.25.0
use (
.

View File

@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/internal/tools
go 1.25.5
go 1.25.0
require (
github.com/bufbuild/buf v1.65.0

View File

@ -700,7 +700,11 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
}
f = append(f, slog.Any("stats", stats.NewQueryStats(q.Stats())))
if span := trace.SpanFromContext(ctx); span != nil {
f = append(f, slog.Any("spanID", span.SpanContext().SpanID()))
spanCtx := span.SpanContext()
f = append(f,
slog.Any("spanID", spanCtx.SpanID()),
slog.Any("traceID", spanCtx.TraceID()),
)
}
if origin := ctx.Value(QueryOrigin{}); origin != nil {
for k, v := range origin.(map[string]any) {
@ -1961,9 +1965,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
// Matrix evaluation always returns the evaluation time,
// so this function needs special handling when given
// a vector selector.
arg := unwrapStepInvariantExpr(e.Args[0])
vs, ok := arg.(*parser.VectorSelector)
if ok {
if vs, ok := e.Args[0].(*parser.VectorSelector); ok {
return ev.rangeEvalTimestampFunctionOverVectorSelector(ctx, vs, call, e)
}
}
@ -4215,13 +4217,6 @@ func unwrapParenExpr(e *parser.Expr) {
}
}
func unwrapStepInvariantExpr(e parser.Expr) parser.Expr {
if p, ok := e.(*parser.StepInvariantExpr); ok {
return p.Expr
}
return e
}
// PreprocessExpr wraps all possible step invariant parts of the given expression with
// StepInvariantExpr. It also resolves the preprocessors, evaluates duration expressions
// into their numeric values and removes superfluous parenthesis on parameters to functions and aggregations.
@ -4280,15 +4275,24 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) (isStepInvaria
case *parser.Call:
_, ok := AtModifierUnsafeFunctions[n.Func.Name]
isStepInvariant := !ok
// A special case to allow timestamp() to be wrapped in a step invariant.
// timestamp() is considered AtModifierUnsafe, but it can be safe depending on its arguments.
// ie timestamp(metric @ 1) is step invariant, but timestamp(abs(metric @ 1)) is not.
isTimestampWithAllArgsStepInvariantSafe := n.Func.Name == "timestamp"
shouldWrap := make([]bool, len(n.Args))
for i := range n.Args {
unwrapParenExpr(&n.Args[i])
var argIsStepInvariant bool
argIsStepInvariant, shouldWrap[i] = preprocessExprHelper(n.Args[i], start, end)
isStepInvariant = isStepInvariant && argIsStepInvariant
_, argIsVectorSelector := n.Args[i].(*parser.VectorSelector)
if !argIsStepInvariant || !argIsVectorSelector {
isTimestampWithAllArgsStepInvariantSafe = false
}
}
if isStepInvariant {
if isStepInvariant || isTimestampWithAllArgsStepInvariantSafe {
// The function and all arguments are step invariant.
return true, true
}

View File

@ -3252,6 +3252,59 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
},
},
},
{
input: "timestamp(metric @ 10)",
expected: &parser.StepInvariantExpr{
Expr: &parser.Call{
Func: parser.MustGetFunction("timestamp"),
Args: parser.Expressions{
&parser.VectorSelector{
Name: "metric",
Timestamp: makeInt64Pointer(10000),
LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "metric"),
},
PosRange: posrange.PositionRange{
Start: 10,
End: 21,
},
},
},
PosRange: posrange.PositionRange{Start: 0, End: 22},
},
},
},
{
input: "timestamp(abs(metric @ 10))",
expected: &parser.Call{
Func: parser.MustGetFunction("timestamp"),
Args: parser.Expressions{
&parser.StepInvariantExpr{
Expr: &parser.Call{
Func: parser.MustGetFunction("abs"),
Args: parser.Expressions{
&parser.VectorSelector{
Name: "metric",
Timestamp: makeInt64Pointer(10000),
LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "metric"),
},
PosRange: posrange.PositionRange{
Start: 14,
End: 25,
},
},
},
PosRange: posrange.PositionRange{
Start: 10,
End: 26,
},
},
},
},
PosRange: posrange.PositionRange{Start: 0, End: 27},
},
},
}
for _, test := range testCases {

View File

@ -255,3 +255,56 @@ eval instant at 1111111s stdvar_over_time({__name__="up"}[1h:1m] @ 1111111)
eval instant at 1111111s mad_over_time({__name__="up"}[1h:1m] @ 1111111)
clear
# Additional tests specific to timestamp() and @ modifier usage.
load 10s
metric 0+1x10
metric_missing 0 _ 2 _ 4 _ 5 _ 6
# Return a vector where each sample is set to the metric value at T=11.
# Since T=11 falls within the [10s,20s) scrap window the sample value at T=10s is returned.
eval range from 0 to 60s step 10s metric @ 11
{__name__="metric"} 1 1 1 1 1 1 1
eval range from 0 to 60s step 10s abs(metric @ 11)
{} 1 1 1 1 1 1 1
# Return a vector where each sample's value is set to the timestamp of each sample in the given metric series
eval range from 0 to 60s step 10s timestamp(metric)
{} 0 10 20 30 40 50 60
# Return a vector where each sample's value is set to the timestamp for the metric's sample used at T=11s.
# The result is 10 since the metric at T=11s falls within the [10s,20s) scrape window.
# The result is the timestamp of the sample at T=10s
eval range from 0 to 60s step 10s timestamp(metric @ 11)
{} 10 10 10 10 10 10 10
# As above - illustrating the sample used at the upper end of the [10s,20s) scrape window.
eval range from 0 to 60s step 10s timestamp(metric @ 19)
{} 10 10 10 10 10 10 10
# As above - illustrating the transition to the next scrap window.
eval range from 0 to 60s step 10s timestamp(metric @ 20)
{} 20 20 20 20 20 20 20
eval range from 0 to 60s step 10s timestamp(metric_missing @ 0)
{} 0 0 0 0 0 0 0
# The timestamp of 0 is returned since the sample is missing from the [10s,20s) scrape window.
# As such, the previous sample from T=0s is returned.
eval range from 0 to 60s step 10s timestamp(metric_missing @ 10)
{} 0 0 0 0 0 0 0
eval range from 0 to 60s step 10s timestamp(metric_missing @ 20)
{} 20 20 20 20 20 20 20
# The timestamps for each step are returned since abs() returns a new vector with new [T,V] samples.
# Each sample in this vector has its value set to the absolute value of the sample value at T=10s, and its timestamp aligned to the step interval.
# This is unlike the above tests where timestamp() is operating on a vector with the original series samples.
eval range from 0 to 60s step 10s timestamp(abs(metric @ 11))
{} 0 10 20 30 40 50 60
eval range from 0 to 60s step 10s timestamp(abs(metric_missing @ 11))
{} 0 10 20 30 40 50 60
clear

View File

@ -8331,23 +8331,28 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
// Use lower SamplesPerChunk and OutOfOrderCapMax so we need fewer samples
// to fill chunks, reducing the overall test time significantly
// (important for slow CI like i386 which can be 60x+ slower).
opts.SamplesPerChunk = 15
opts.OutOfOrderCapMax = 5
db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
var (
ctx = t.Context()
series1 = labels.FromStrings("foo", "bar1")
allSamples []chunks.Sample
ctx = t.Context()
series1 = labels.FromStrings("foo", "bar1")
)
// Use step of 5 minutes to reduce sample count while preserving time ranges
// needed for compaction triggers. This reduces total samples from ~411 to ~83.
addSamples := func(fromMins, toMins int64) {
app := appenderFn(db, ctx)
for m := fromMins; m <= toMins; m++ {
for m := fromMins; m <= toMins; m += 5 {
ts := m * time.Minute.Milliseconds()
_, s, err := scenario.appendFunc(app, series1, ts, ts)
_, _, err := scenario.appendFunc(app, series1, ts, ts)
require.NoError(t, err)
allSamples = append(allSamples, s)
}
require.NoError(t, app.Commit())
}

View File

@ -2161,15 +2161,24 @@ func (h *Head) deleteSeriesByID(refs []chunks.HeadSeriesRef) {
)
for _, ref := range refs {
// Delete the reference from the series map.
// Copying getByID here to avoid locking and unlocking twice.
refShard := int(ref) & (h.series.size - 1)
h.series.locks[refShard].Lock()
// Copying getByID here to avoid locking and unlocking twice.
series := h.series.series[refShard][ref]
if series == nil {
h.series.locks[refShard].Unlock()
continue
}
delete(h.series.series[refShard], series.ref)
h.series.locks[refShard].Unlock()
// Delete the reference from the hash.
hash := series.lset.Hash()
hashShard := int(hash) & (h.series.size - 1)
h.series.locks[hashShard].Lock()
h.series.hashes[hashShard].del(hash, series.ref)
h.series.locks[hashShard].Unlock()
if value.IsStaleNaN(series.lastValue) ||
(series.lastHistogramValue != nil && value.IsStaleNaN(series.lastHistogramValue.Sum)) ||
@ -2177,9 +2186,6 @@ func (h *Head) deleteSeriesByID(refs []chunks.HeadSeriesRef) {
staleSeriesDeleted++
}
hash := series.lset.Hash()
hashShard := int(hash) & (h.series.size - 1)
chunksRemoved += len(series.mmappedChunks)
if series.headChunks != nil {
chunksRemoved += series.headChunks.len()
@ -2187,10 +2193,6 @@ func (h *Head) deleteSeriesByID(refs []chunks.HeadSeriesRef) {
deleted[storage.SeriesRef(series.ref)] = struct{}{}
series.lset.Range(func(l labels.Label) { affected[l] = struct{}{} })
h.series.hashes[hashShard].del(hash, series.ref)
delete(h.series.series[refShard], series.ref)
h.series.locks[refShard].Unlock()
}
h.metrics.seriesRemoved.Add(float64(len(deleted)))

View File

@ -7705,3 +7705,77 @@ func TestHeadAppender_STStorage_ChunkEncoding(t *testing.T) {
})
}
}
// TestWALReplayRaceWithStaleSeriesCompaction verifies that deleteSeriesByID correctly locks the
// hash shard (not only the ref shard) when deleting from the hashes map.
// The race only occurs when Prometheus restarts after having done a stale series compaction because
// deleteSeriesByID is not used otherwise.
func TestWALReplayRaceWithStaleSeriesCompaction(t *testing.T) {
opts := newTestHeadDefaultOptions(1000, false)
// A small stripe size ensures many series share hash shards, increasing
// the likelihood that deleteSeriesByID and getOrCreateWithOptionalID
// contend on the same shard during WAL replay.
opts.StripeSize = 32
head, _ := newTestHeadWithOptions(t, compression.None, opts)
require.NoError(t, head.Init(0))
appendSample := func(lbls labels.Labels, ts int64, val float64) {
app := head.Appender(context.Background())
_, err := app.Append(0, lbls, ts, val)
require.NoError(t, err)
require.NoError(t, app.Commit())
}
// Step 1: Create a batch of series and make them stale.
const numStaleSeries = 500
staleLbls := make([]labels.Labels, numStaleSeries)
for i := range numStaleSeries {
staleLbls[i] = labels.FromStrings("__name__", "stale_metric", "i", strconv.Itoa(i))
appendSample(staleLbls[i], 100, float64(i))
}
for _, lbl := range staleLbls {
appendSample(lbl, 200, math.Float64frombits(value.StaleNaN))
}
require.Equal(t, uint64(numStaleSeries), head.NumStaleSeries())
// Step 2: Truncate stale series. This removes them from the Head and
// writes tombstone records (with Mint=MinInt64, Maxt=MaxInt64) to the WAL.
staleRefs := make([]storage.SeriesRef, 0, numStaleSeries)
for i := range numStaleSeries {
ms := head.series.getByHash(staleLbls[i].Hash(), staleLbls[i])
require.NotNil(t, ms)
staleRefs = append(staleRefs, storage.SeriesRef(ms.ref))
}
require.NoError(t, head.truncateStaleSeries(staleRefs, 300))
require.Equal(t, uint64(0), head.NumStaleSeries())
require.Equal(t, uint64(0), head.NumSeries())
// Step 3: Add new series AFTER the truncation. In the WAL, these series
// records appear after the tombstone records. During replay, the main
// goroutine will create these series (via getOrCreateWithOptionalID, which
// accesses hashes[hashShard] under locks[hashShard]) concurrently with
// the walSubsetProcessor goroutines deleting the stale series (via
// deleteSeriesByID, which must also lock the correct hashShard).
const numNewSeries = 500
for i := range numNewSeries {
lbl := labels.FromStrings("__name__", "new_metric", "i", strconv.Itoa(i))
appendSample(lbl, 300, float64(i))
}
require.Equal(t, uint64(numNewSeries), head.NumSeries())
// Step 4: Close and re-open the Head to trigger WAL replay.
// With the buggy locking, the race detector should catch the data race
// between the main goroutine (creating series) and worker goroutines
// (deleting stale series) during replay.
require.NoError(t, head.Close())
wal, err := wlog.NewSize(nil, nil, filepath.Join(head.opts.ChunkDirRoot, "wal"), 32768, compression.None)
require.NoError(t, err)
head, err = NewHead(nil, nil, wal, nil, head.opts, nil)
require.NoError(t, err)
require.NoError(t, head.Init(0)) // Should not cause a race here.
require.Equal(t, uint64(0), head.NumStaleSeries())
require.Equal(t, uint64(numNewSeries), head.NumSeries())
require.NoError(t, head.Close())
}

View File

@ -1,29 +0,0 @@
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !goexperiment.synctest && !go1.25
package synctest
import (
"testing"
)
func Test(t *testing.T, _ func(t *testing.T)) {
t.Skip("goexperiment.synctest is not enabled")
}
func Wait() {
// It isn't meant to be called outside of Test().
panic("goexperiment.synctest is not enabled")
}

View File

@ -1,31 +0,0 @@
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build goexperiment.synctest && !go1.25
package synctest
import (
"testing"
"testing/synctest"
)
func Test(t *testing.T, f func(t *testing.T)) {
synctest.Run(func() {
f(t)
})
}
func Wait() {
synctest.Wait()
}

View File

@ -11,8 +11,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.25
package synctest
import (

View File

@ -1,7 +1,7 @@
{
"name": "@prometheus-io/mantine-ui",
"private": true,
"version": "0.309.1",
"version": "0.310.0",
"type": "module",
"scripts": {
"start": "vite",
@ -28,7 +28,7 @@
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.309.1",
"@prometheus-io/codemirror-promql": "0.310.0",
"@reduxjs/toolkit": "^2.11.2",
"@tabler/icons-react": "^3.36.1",
"@tanstack/react-query": "^5.90.20",

View File

@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/web/ui/mantine-ui/src/promql/tools
go 1.25.5
go 1.25.0
require (
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853

View File

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
"version": "0.309.1",
"version": "0.310.0",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.309.1",
"@prometheus-io/lezer-promql": "0.310.0",
"lru-cache": "^11.2.5"
},
"devDependencies": {

View File

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
"version": "0.309.1",
"version": "0.310.0",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",

View File

@ -1,12 +1,12 @@
{
"name": "prometheus-io",
"version": "0.309.1",
"version": "0.310.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "prometheus-io",
"version": "0.309.1",
"version": "0.310.0",
"workspaces": [
"mantine-ui",
"module/*"
@ -24,7 +24,7 @@
},
"mantine-ui": {
"name": "@prometheus-io/mantine-ui",
"version": "0.309.1",
"version": "0.310.0",
"dependencies": {
"@codemirror/autocomplete": "^6.20.0",
"@codemirror/language": "^6.12.1",
@ -42,7 +42,7 @@
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.309.1",
"@prometheus-io/codemirror-promql": "0.310.0",
"@reduxjs/toolkit": "^2.11.2",
"@tabler/icons-react": "^3.36.1",
"@tanstack/react-query": "^5.90.20",
@ -172,10 +172,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
"version": "0.309.1",
"version": "0.310.0",
"license": "Apache-2.0",
"dependencies": {
"@prometheus-io/lezer-promql": "0.309.1",
"@prometheus-io/lezer-promql": "0.310.0",
"lru-cache": "^11.2.5"
},
"devDependencies": {
@ -205,7 +205,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
"version": "0.309.1",
"version": "0.310.0",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.8.0",

View File

@ -1,7 +1,7 @@
{
"name": "prometheus-io",
"description": "Monorepo for the Prometheus UI",
"version": "0.309.1",
"version": "0.310.0",
"private": true,
"scripts": {
"build": "bash build_ui.sh --all",