From 0032ce06449781d0fc0bb5a4731e87e59aa3efae Mon Sep 17 00:00:00 2001
From: Patryk Prus
Date: Sun, 26 Feb 2023 21:05:27 -0500
Subject: [PATCH 001/231] Render background in images to play nicely with dark
mode
Signed-off-by: Patryk Prus
---
README.md | 2 +-
documentation/images/architecture.svg | 4 +++-
documentation/images/internal_architecture.svg | 4 +++-
3 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md
index 9becf71aa1..8b89bb01e5 100644
--- a/README.md
+++ b/README.md
@@ -34,7 +34,7 @@ The features that distinguish Prometheus from other metrics and monitoring syste
## Architecture overview
-
+
## Install
diff --git a/documentation/images/architecture.svg b/documentation/images/architecture.svg
index df93e13cb2..4e1e85995d 100644
--- a/documentation/images/architecture.svg
+++ b/documentation/images/architecture.svg
@@ -1,2 +1,4 @@
+
+
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/documentation/images/internal_architecture.svg b/documentation/images/internal_architecture.svg
index 5948186a7d..1242548ddb 100644
--- a/documentation/images/internal_architecture.svg
+++ b/documentation/images/internal_architecture.svg
@@ -1,2 +1,4 @@
+
+
-
\ No newline at end of file
+
\ No newline at end of file
From 504a16d135e28c8e4bb88ac9275a55382bc52959 Mon Sep 17 00:00:00 2001
From: SuperQ
Date: Thu, 9 Mar 2023 14:41:24 +0100
Subject: [PATCH 002/231] Update Go version
Update build/test to use Go 1.20. Note that go.mod only moves to
go 1.19: the go directive declares the minimum language version the
module supports, not the toolchain used to build and test it.
Signed-off-by: SuperQ
---
.github/workflows/ci.yml | 6 +++---
.github/workflows/codeql-analysis.yml | 2 +-
.promu.yml | 2 +-
go.mod | 2 +-
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d6d79b6fc3..ceb374c8c5 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,7 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
container:
- image: quay.io/prometheus/golang-builder:1.19-base
+ image: quay.io/prometheus/golang-builder:1.20-base
steps:
- uses: actions/checkout@v3
- uses: prometheus/promci@v0.0.2
@@ -31,7 +31,7 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
container:
- image: quay.io/prometheus/golang-builder:1.19-base
+ image: quay.io/prometheus/golang-builder:1.20-base
steps:
- uses: actions/checkout@v3
@@ -54,7 +54,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
- go-version: '>=1.19 <1.20'
+ go-version: '>=1.20 <1.21'
- run: |
$TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
go test $TestTargets -vet=off -v
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 01075f0c22..43c9778575 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -23,7 +23,7 @@ jobs:
uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
- go-version: '>=1.19 <1.20'
+ go-version: '>=1.20 <1.21'
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
diff --git a/.promu.yml b/.promu.yml
index 233295f852..ef69c35c8e 100644
--- a/.promu.yml
+++ b/.promu.yml
@@ -1,7 +1,7 @@
go:
# Whenever the Go version is updated here,
# .circle/config.yml should also be updated.
- version: 1.19
+ version: 1.20
repository:
path: github.com/prometheus/prometheus
build:
diff --git a/go.mod b/go.mod
index 76b6cb0625..02c4980aee 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus
-go 1.18
+go 1.19
require (
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
From c16b6a0185feea8b65911fe370808c823b90a93d Mon Sep 17 00:00:00 2001
From: Justin Lei
Date: Tue, 7 Mar 2023 12:21:55 -0800
Subject: [PATCH 003/231] Handle native histograms in remote read
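The new Histograms field on prompb.TimeSeries travels alongside the
existing Samples, so a sampled remote-read client now has to walk both
slices. A minimal sketch of the consuming side (the helper name is
illustrative; the count accessors are the getters gogo generates for
the count oneof):

	package main

	import (
		"fmt"

		"github.com/prometheus/prometheus/prompb"
	)

	// printResult walks a sampled remote-read response, handling both the
	// classic float samples and the native histograms added by this patch.
	func printResult(res *prompb.QueryResult) {
		for _, ts := range res.Timeseries {
			for _, s := range ts.Samples {
				fmt.Println(s.Timestamp, s.Value) // plain float samples
			}
			for _, h := range ts.Histograms {
				// Integer and float native histograms share this slice;
				// the count oneof tells them apart.
				fmt.Println(h.Timestamp, h.GetCountInt(), h.GetCountFloat())
			}
		}
	}

	func main() { printResult(&prompb.QueryResult{}) }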
Signed-off-by: Justin Lei
---
prompb/types.pb.go | 152 ++++++++++++++--------------
prompb/types.proto | 7 +-
promql/test.go | 31 ++++--
storage/remote/codec.go | 36 +++++--
storage/remote/read_handler_test.go | 73 +++++++++++--
storage/remote/read_test.go | 4 +-
6 files changed, 201 insertions(+), 102 deletions(-)
diff --git a/prompb/types.pb.go b/prompb/types.pb.go
index e78e48809a..125f868e97 100644
--- a/prompb/types.pb.go
+++ b/prompb/types.pb.go
@@ -134,21 +134,24 @@ func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) {
type Chunk_Encoding int32
const (
- Chunk_UNKNOWN Chunk_Encoding = 0
- Chunk_XOR Chunk_Encoding = 1
- Chunk_HISTOGRAM Chunk_Encoding = 2
+ Chunk_UNKNOWN Chunk_Encoding = 0
+ Chunk_XOR Chunk_Encoding = 1
+ Chunk_HISTOGRAM Chunk_Encoding = 2
+ Chunk_FLOAT_HISTOGRAM Chunk_Encoding = 3
)
var Chunk_Encoding_name = map[int32]string{
0: "UNKNOWN",
1: "XOR",
2: "HISTOGRAM",
+ 3: "FLOAT_HISTOGRAM",
}
var Chunk_Encoding_value = map[string]int32{
- "UNKNOWN": 0,
- "XOR": 1,
- "HISTOGRAM": 2,
+ "UNKNOWN": 0,
+ "XOR": 1,
+ "HISTOGRAM": 2,
+ "FLOAT_HISTOGRAM": 3,
}
func (x Chunk_Encoding) String() string {
@@ -1143,75 +1146,76 @@ func init() {
func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) }
var fileDescriptor_d938547f84707355 = []byte{
- // 1081 bytes of a gzipped FileDescriptorProto
+ // 1092 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46,
- 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0x0b, 0x27, 0x3f, 0xff, 0xa0, 0x71, 0x54, 0x02,
- 0x69, 0x85, 0xa2, 0x90, 0x91, 0xb4, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x5d, 0xf9, 0x80, 0x46, 0x12,
- 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0x78, 0x2a, 0x77, 0x15, 0x58, 0x7d,
- 0x8f, 0xde, 0xf5, 0x25, 0x7a, 0xdf, 0x07, 0x08, 0xd0, 0x9b, 0x3e, 0x41, 0x51, 0xf8, 0xaa, 0x8f,
- 0x51, 0xec, 0x90, 0x14, 0xa9, 0x38, 0x05, 0x9a, 0xde, 0xed, 0x7c, 0xf3, 0xcd, 0xec, 0xc7, 0xdd,
- 0x99, 0x59, 0x42, 0x43, 0xae, 0x63, 0x2e, 0x7a, 0x71, 0x12, 0xc9, 0x88, 0x40, 0x9c, 0x44, 0x01,
- 0x97, 0x4b, 0xbe, 0x12, 0xf7, 0xf7, 0x16, 0xd1, 0x22, 0x42, 0xf8, 0x40, 0xad, 0x52, 0x86, 0xfb,
- 0xb3, 0x0e, 0xed, 0x01, 0x97, 0x89, 0x37, 0x1b, 0x70, 0xc9, 0xe6, 0x4c, 0x32, 0xf2, 0x14, 0x2a,
- 0x2a, 0x87, 0xa3, 0x75, 0xb4, 0x6e, 0xfb, 0xc9, 0xa3, 0x5e, 0x91, 0xa3, 0xb7, 0xcd, 0xcc, 0xcc,
- 0xc9, 0x3a, 0xe6, 0x14, 0x43, 0xc8, 0xa7, 0x40, 0x02, 0xc4, 0xa6, 0x57, 0x2c, 0xf0, 0xfc, 0xf5,
- 0x34, 0x64, 0x01, 0x77, 0xf4, 0x8e, 0xd6, 0xb5, 0xa8, 0x9d, 0x7a, 0x4e, 0xd0, 0x31, 0x64, 0x01,
- 0x27, 0x04, 0x2a, 0x4b, 0xee, 0xc7, 0x4e, 0x05, 0xfd, 0xb8, 0x56, 0xd8, 0x2a, 0xf4, 0xa4, 0x53,
- 0x4d, 0x31, 0xb5, 0x76, 0xd7, 0x00, 0xc5, 0x4e, 0xa4, 0x01, 0xb5, 0x8b, 0xe1, 0x37, 0xc3, 0xd1,
- 0xb7, 0x43, 0x7b, 0x47, 0x19, 0xc7, 0xa3, 0x8b, 0xe1, 0xa4, 0x4f, 0x6d, 0x8d, 0x58, 0x50, 0x3d,
- 0x3d, 0xbc, 0x38, 0xed, 0xdb, 0x3a, 0x69, 0x81, 0x75, 0x76, 0x3e, 0x9e, 0x8c, 0x4e, 0xe9, 0xe1,
- 0xc0, 0x36, 0x08, 0x81, 0x36, 0x7a, 0x0a, 0xac, 0xa2, 0x42, 0xc7, 0x17, 0x83, 0xc1, 0x21, 0x7d,
- 0x69, 0x57, 0x49, 0x1d, 0x2a, 0xe7, 0xc3, 0x93, 0x91, 0x6d, 0x92, 0x26, 0xd4, 0xc7, 0x93, 0xc3,
- 0x49, 0x7f, 0xdc, 0x9f, 0xd8, 0x35, 0xf7, 0x19, 0x98, 0x63, 0x16, 0xc4, 0x3e, 0x27, 0x7b, 0x50,
- 0x7d, 0xcd, 0xfc, 0x55, 0x7a, 0x2c, 0x1a, 0x4d, 0x0d, 0xf2, 0x01, 0x58, 0xd2, 0x0b, 0xb8, 0x90,
- 0x2c, 0x88, 0xf1, 0x3b, 0x0d, 0x5a, 0x00, 0x6e, 0x04, 0xf5, 0xfe, 0x35, 0x0f, 0x62, 0x9f, 0x25,
- 0xe4, 0x00, 0x4c, 0x9f, 0x5d, 0x72, 0x5f, 0x38, 0x5a, 0xc7, 0xe8, 0x36, 0x9e, 0xec, 0x96, 0xcf,
- 0xf5, 0xb9, 0xf2, 0x1c, 0x55, 0xde, 0xfc, 0xf1, 0x70, 0x87, 0x66, 0xb4, 0x62, 0x43, 0xfd, 0x1f,
- 0x37, 0x34, 0xde, 0xde, 0xf0, 0xb7, 0x2a, 0x58, 0x67, 0x9e, 0x90, 0xd1, 0x22, 0x61, 0x01, 0x79,
- 0x00, 0xd6, 0x2c, 0x5a, 0x85, 0x72, 0xea, 0x85, 0x12, 0x65, 0x57, 0xce, 0x76, 0x68, 0x1d, 0xa1,
- 0xf3, 0x50, 0x92, 0x0f, 0xa1, 0x91, 0xba, 0xaf, 0xfc, 0x88, 0xc9, 0x74, 0x9b, 0xb3, 0x1d, 0x0a,
- 0x08, 0x9e, 0x28, 0x8c, 0xd8, 0x60, 0x88, 0x55, 0x80, 0xfb, 0x68, 0x54, 0x2d, 0xc9, 0x3d, 0x30,
- 0xc5, 0x6c, 0xc9, 0x03, 0x86, 0xb7, 0xb6, 0x4b, 0x33, 0x8b, 0x3c, 0x82, 0xf6, 0x8f, 0x3c, 0x89,
- 0xa6, 0x72, 0x99, 0x70, 0xb1, 0x8c, 0xfc, 0x39, 0xde, 0xa0, 0x46, 0x5b, 0x0a, 0x9d, 0xe4, 0x20,
- 0xf9, 0x28, 0xa3, 0x15, 0xba, 0x4c, 0xd4, 0xa5, 0xd1, 0xa6, 0xc2, 0x8f, 0x73, 0x6d, 0x9f, 0x80,
- 0x5d, 0xe2, 0xa5, 0x02, 0x6b, 0x28, 0x50, 0xa3, 0xed, 0x0d, 0x33, 0x15, 0x79, 0x0c, 0xed, 0x90,
- 0x2f, 0x98, 0xf4, 0x5e, 0xf3, 0xa9, 0x88, 0x59, 0x28, 0x9c, 0x3a, 0x9e, 0xf0, 0xbd, 0xf2, 0x09,
- 0x1f, 0xad, 0x66, 0xaf, 0xb8, 0x1c, 0xc7, 0x2c, 0xcc, 0x8e, 0xb9, 0x95, 0xc7, 0x28, 0x4c, 0x90,
- 0x8f, 0xe1, 0xce, 0x26, 0xc9, 0x9c, 0xfb, 0x92, 0x09, 0xc7, 0xea, 0x18, 0x5d, 0x42, 0x37, 0xb9,
- 0xbf, 0x46, 0x74, 0x8b, 0x88, 0xea, 0x84, 0x03, 0x1d, 0xa3, 0xab, 0x15, 0x44, 0x94, 0x26, 0x94,
- 0xac, 0x38, 0x12, 0x5e, 0x49, 0x56, 0xe3, 0xdf, 0xc8, 0xca, 0x63, 0x36, 0xb2, 0x36, 0x49, 0x32,
- 0x59, 0xcd, 0x54, 0x56, 0x0e, 0x17, 0xb2, 0x36, 0xc4, 0x4c, 0x56, 0x2b, 0x95, 0x95, 0xc3, 0x99,
- 0xac, 0xaf, 0x00, 0x12, 0x2e, 0xb8, 0x9c, 0x2e, 0xd5, 0xe9, 0xb7, 0xb1, 0xc7, 0x1f, 0x96, 0x25,
- 0x6d, 0xea, 0xa7, 0x47, 0x15, 0xef, 0xcc, 0x0b, 0x25, 0xb5, 0x92, 0x7c, 0xb9, 0x5d, 0x80, 0x77,
- 0xde, 0x2e, 0xc0, 0xcf, 0xc1, 0xda, 0x44, 0x6d, 0x77, 0x6a, 0x0d, 0x8c, 0x97, 0xfd, 0xb1, 0xad,
- 0x11, 0x13, 0xf4, 0xe1, 0xc8, 0xd6, 0x8b, 0x6e, 0x35, 0x8e, 0x6a, 0x50, 0x45, 0xcd, 0x47, 0x4d,
- 0x80, 0xe2, 0xda, 0xdd, 0x67, 0x00, 0xc5, 0xf9, 0xa8, 0xca, 0x8b, 0xae, 0xae, 0x04, 0x4f, 0x4b,
- 0x79, 0x97, 0x66, 0x96, 0xc2, 0x7d, 0x1e, 0x2e, 0xe4, 0x12, 0x2b, 0xb8, 0x45, 0x33, 0xcb, 0xfd,
- 0x4b, 0x03, 0x98, 0x78, 0x01, 0x1f, 0xf3, 0xc4, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x09, 0xd4, 0x04,
- 0xb6, 0xbe, 0x70, 0x74, 0x8c, 0x20, 0xe5, 0x88, 0x74, 0x2a, 0x64, 0x21, 0x39, 0x91, 0x7c, 0x01,
- 0x16, 0xcf, 0x1a, 0x5e, 0x38, 0x06, 0x46, 0xed, 0x95, 0xa3, 0xf2, 0x69, 0x90, 0xc5, 0x15, 0x64,
- 0xf2, 0x25, 0xc0, 0x32, 0x3f, 0x78, 0xe1, 0x54, 0x30, 0xf4, 0xee, 0x3b, 0xaf, 0x25, 0x8b, 0x2d,
- 0xd1, 0xdd, 0xc7, 0x50, 0xc5, 0x2f, 0x50, 0xd3, 0x13, 0x27, 0xae, 0x96, 0x4e, 0x4f, 0xb5, 0xde,
- 0x9e, 0x23, 0x56, 0x36, 0x47, 0xdc, 0xa7, 0x60, 0x3e, 0x4f, 0xbf, 0xf3, 0x7d, 0x0f, 0xc6, 0xfd,
- 0x49, 0x83, 0x26, 0xe2, 0x03, 0x26, 0x67, 0x4b, 0x9e, 0x90, 0xc7, 0x5b, 0x0f, 0xc6, 0x83, 0x5b,
- 0xf1, 0x19, 0xaf, 0x57, 0x7a, 0x28, 0x72, 0xa1, 0xfa, 0xbb, 0x84, 0x1a, 0x65, 0xa1, 0x5d, 0xa8,
- 0xe0, 0xd8, 0x37, 0x41, 0xef, 0xbf, 0x48, 0xeb, 0x68, 0xd8, 0x7f, 0x91, 0xd6, 0x11, 0x55, 0xa3,
- 0x5e, 0x01, 0xb4, 0x6f, 0x1b, 0xee, 0x2f, 0x9a, 0x2a, 0x3e, 0x36, 0x57, 0xb5, 0x27, 0xc8, 0xff,
- 0xa0, 0x26, 0x24, 0x8f, 0xa7, 0x81, 0x40, 0x5d, 0x06, 0x35, 0x95, 0x39, 0x10, 0x6a, 0xeb, 0xab,
- 0x55, 0x38, 0xcb, 0xb7, 0x56, 0x6b, 0xf2, 0x7f, 0xa8, 0x0b, 0xc9, 0x12, 0xa9, 0xd8, 0xe9, 0x50,
- 0xad, 0xa1, 0x3d, 0x10, 0xe4, 0x2e, 0x98, 0x3c, 0x9c, 0x4f, 0xf1, 0x52, 0x94, 0xa3, 0xca, 0xc3,
- 0xf9, 0x40, 0x90, 0xfb, 0x50, 0x5f, 0x24, 0xd1, 0x2a, 0xf6, 0xc2, 0x85, 0x53, 0xed, 0x18, 0x5d,
- 0x8b, 0x6e, 0x6c, 0xd2, 0x06, 0xfd, 0x72, 0x8d, 0x83, 0xad, 0x4e, 0xf5, 0xcb, 0xb5, 0xca, 0x9e,
- 0xb0, 0x70, 0xc1, 0x55, 0x92, 0x5a, 0x9a, 0x1d, 0xed, 0x81, 0x70, 0x7f, 0xd5, 0xa0, 0x7a, 0xbc,
- 0x5c, 0x85, 0xaf, 0xc8, 0x3e, 0x34, 0x02, 0x2f, 0x9c, 0xaa, 0x56, 0x2a, 0x34, 0x5b, 0x81, 0x17,
- 0xaa, 0x1a, 0x1e, 0x08, 0xf4, 0xb3, 0xeb, 0x8d, 0x3f, 0x7b, 0x6b, 0x02, 0x76, 0x9d, 0xf9, 0x7b,
- 0xd9, 0x25, 0x18, 0x78, 0x09, 0xf7, 0xcb, 0x97, 0x80, 0x1b, 0xf4, 0xfa, 0xe1, 0x2c, 0x9a, 0x7b,
- 0xe1, 0xa2, 0xb8, 0x01, 0xf5, 0x86, 0xe3, 0x57, 0x35, 0x29, 0xae, 0xdd, 0x03, 0xa8, 0xe7, 0xac,
- 0x5b, 0xcd, 0xfb, 0xdd, 0x48, 0x3d, 0xb1, 0x5b, 0xef, 0xaa, 0xee, 0xfe, 0x00, 0x2d, 0x4c, 0xce,
- 0xe7, 0xff, 0xb5, 0xcb, 0x0e, 0xc0, 0x9c, 0xa9, 0x0c, 0x79, 0x93, 0xed, 0xde, 0x12, 0x9e, 0x07,
- 0xa4, 0xb4, 0xa3, 0xbd, 0x37, 0x37, 0xfb, 0xda, 0xef, 0x37, 0xfb, 0xda, 0x9f, 0x37, 0xfb, 0xda,
- 0xf7, 0xa6, 0x62, 0xc7, 0x97, 0x97, 0x26, 0xfe, 0xcd, 0x7c, 0xf6, 0x77, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0x53, 0x09, 0xe5, 0x37, 0xfe, 0x08, 0x00, 0x00,
+ 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0x59, 0xa3, 0x71, 0x54, 0x02,
+ 0x69, 0x85, 0xa2, 0x90, 0x11, 0xb7, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x80, 0x5a, 0x12,
+ 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea,
+ 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0x08, 0xd0, 0x9b, 0xf6, 0x05, 0x8a, 0xc2, 0x57, 0x7d, 0x8c,
+ 0x62, 0x87, 0xa4, 0x48, 0xc5, 0x29, 0xd0, 0xf4, 0x6e, 0xe7, 0x9b, 0x6f, 0x76, 0x3e, 0xee, 0xce,
+ 0xcc, 0x12, 0x6a, 0x72, 0x15, 0x71, 0xd1, 0x89, 0xe2, 0x50, 0x86, 0x04, 0xa2, 0x38, 0xf4, 0xb9,
+ 0x9c, 0xf3, 0xa5, 0xd8, 0xdd, 0x99, 0x85, 0xb3, 0x10, 0xe1, 0x7d, 0xb5, 0x4a, 0x18, 0xee, 0xcf,
+ 0x3a, 0x34, 0x7b, 0x5c, 0xc6, 0xde, 0xa4, 0xc7, 0x25, 0x9b, 0x32, 0xc9, 0xc8, 0x53, 0x28, 0xa9,
+ 0x3d, 0x1c, 0xad, 0xa5, 0xb5, 0x9b, 0x07, 0x8f, 0x3b, 0xf9, 0x1e, 0x9d, 0x4d, 0x66, 0x6a, 0x8e,
+ 0x56, 0x11, 0xa7, 0x18, 0x42, 0x3e, 0x03, 0xe2, 0x23, 0x36, 0xbe, 0x66, 0xbe, 0xb7, 0x58, 0x8d,
+ 0x03, 0xe6, 0x73, 0x47, 0x6f, 0x69, 0x6d, 0x8b, 0xda, 0x89, 0xe7, 0x04, 0x1d, 0x7d, 0xe6, 0x73,
+ 0x42, 0xa0, 0x34, 0xe7, 0x8b, 0xc8, 0x29, 0xa1, 0x1f, 0xd7, 0x0a, 0x5b, 0x06, 0x9e, 0x74, 0xca,
+ 0x09, 0xa6, 0xd6, 0xee, 0x0a, 0x20, 0xcf, 0x44, 0x6a, 0x50, 0xb9, 0xec, 0x7f, 0xd3, 0x1f, 0x7c,
+ 0xdb, 0xb7, 0xb7, 0x94, 0x71, 0x3c, 0xb8, 0xec, 0x8f, 0xba, 0xd4, 0xd6, 0x88, 0x05, 0xe5, 0xd3,
+ 0xc3, 0xcb, 0xd3, 0xae, 0xad, 0x93, 0x06, 0x58, 0x67, 0xe7, 0xc3, 0xd1, 0xe0, 0x94, 0x1e, 0xf6,
+ 0x6c, 0x83, 0x10, 0x68, 0xa2, 0x27, 0xc7, 0x4a, 0x2a, 0x74, 0x78, 0xd9, 0xeb, 0x1d, 0xd2, 0x97,
+ 0x76, 0x99, 0x54, 0xa1, 0x74, 0xde, 0x3f, 0x19, 0xd8, 0x26, 0xa9, 0x43, 0x75, 0x38, 0x3a, 0x1c,
+ 0x75, 0x87, 0xdd, 0x91, 0x5d, 0x71, 0x9f, 0x81, 0x39, 0x64, 0x7e, 0xb4, 0xe0, 0x64, 0x07, 0xca,
+ 0xaf, 0xd9, 0x62, 0x99, 0x1c, 0x8b, 0x46, 0x13, 0x83, 0x7c, 0x08, 0x96, 0xf4, 0x7c, 0x2e, 0x24,
+ 0xf3, 0x23, 0xfc, 0x4e, 0x83, 0xe6, 0x80, 0x1b, 0x42, 0xb5, 0x7b, 0xc3, 0xfd, 0x68, 0xc1, 0x62,
+ 0xb2, 0x0f, 0xe6, 0x82, 0x5d, 0xf1, 0x85, 0x70, 0xb4, 0x96, 0xd1, 0xae, 0x1d, 0x6c, 0x17, 0xcf,
+ 0xf5, 0x42, 0x79, 0x8e, 0x4a, 0x6f, 0xfe, 0x78, 0xb4, 0x45, 0x53, 0x5a, 0x9e, 0x50, 0xff, 0xc7,
+ 0x84, 0xc6, 0xdb, 0x09, 0x7f, 0x2d, 0x83, 0x75, 0xe6, 0x09, 0x19, 0xce, 0x62, 0xe6, 0x93, 0x87,
+ 0x60, 0x4d, 0xc2, 0x65, 0x20, 0xc7, 0x5e, 0x20, 0x51, 0x76, 0xe9, 0x6c, 0x8b, 0x56, 0x11, 0x3a,
+ 0x0f, 0x24, 0xf9, 0x08, 0x6a, 0x89, 0xfb, 0x7a, 0x11, 0x32, 0x99, 0xa4, 0x39, 0xdb, 0xa2, 0x80,
+ 0xe0, 0x89, 0xc2, 0x88, 0x0d, 0x86, 0x58, 0xfa, 0x98, 0x47, 0xa3, 0x6a, 0x49, 0x1e, 0x80, 0x29,
+ 0x26, 0x73, 0xee, 0x33, 0xbc, 0xb5, 0x6d, 0x9a, 0x5a, 0xe4, 0x31, 0x34, 0x7f, 0xe4, 0x71, 0x38,
+ 0x96, 0xf3, 0x98, 0x8b, 0x79, 0xb8, 0x98, 0xe2, 0x0d, 0x6a, 0xb4, 0xa1, 0xd0, 0x51, 0x06, 0x92,
+ 0x8f, 0x53, 0x5a, 0xae, 0xcb, 0x44, 0x5d, 0x1a, 0xad, 0x2b, 0xfc, 0x38, 0xd3, 0xf6, 0x29, 0xd8,
+ 0x05, 0x5e, 0x22, 0xb0, 0x82, 0x02, 0x35, 0xda, 0x5c, 0x33, 0x13, 0x91, 0xc7, 0xd0, 0x0c, 0xf8,
+ 0x8c, 0x49, 0xef, 0x35, 0x1f, 0x8b, 0x88, 0x05, 0xc2, 0xa9, 0xe2, 0x09, 0x3f, 0x28, 0x9e, 0xf0,
+ 0xd1, 0x72, 0xf2, 0x8a, 0xcb, 0x61, 0xc4, 0x82, 0xf4, 0x98, 0x1b, 0x59, 0x8c, 0xc2, 0x04, 0xf9,
+ 0x04, 0xee, 0xad, 0x37, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0xf5, 0xde,
+ 0xcf, 0x11, 0xdd, 0x20, 0xa2, 0x3a, 0xe1, 0x40, 0xcb, 0x68, 0x6b, 0x39, 0x11, 0xa5, 0x09, 0x25,
+ 0x2b, 0x0a, 0x85, 0x57, 0x90, 0x55, 0xfb, 0x37, 0xb2, 0xb2, 0x98, 0xb5, 0xac, 0xf5, 0x26, 0xa9,
+ 0xac, 0x7a, 0x22, 0x2b, 0x83, 0x73, 0x59, 0x6b, 0x62, 0x2a, 0xab, 0x91, 0xc8, 0xca, 0xe0, 0x54,
+ 0xd6, 0xd7, 0x00, 0x31, 0x17, 0x5c, 0x8e, 0xe7, 0xea, 0xf4, 0x9b, 0xd8, 0xe3, 0x8f, 0x8a, 0x92,
+ 0xd6, 0xf5, 0xd3, 0xa1, 0x8a, 0x77, 0xe6, 0x05, 0x92, 0x5a, 0x71, 0xb6, 0xdc, 0x2c, 0xc0, 0x7b,
+ 0x6f, 0x17, 0xe0, 0x17, 0x60, 0xad, 0xa3, 0x36, 0x3b, 0xb5, 0x02, 0xc6, 0xcb, 0xee, 0xd0, 0xd6,
+ 0x88, 0x09, 0x7a, 0x7f, 0x60, 0xeb, 0x79, 0xb7, 0x1a, 0x47, 0x15, 0x28, 0xa3, 0xe6, 0xa3, 0x3a,
+ 0x40, 0x7e, 0xed, 0xee, 0x33, 0x80, 0xfc, 0x7c, 0x54, 0xe5, 0x85, 0xd7, 0xd7, 0x82, 0x27, 0xa5,
+ 0xbc, 0x4d, 0x53, 0x4b, 0xe1, 0x0b, 0x1e, 0xcc, 0xe4, 0x1c, 0x2b, 0xb8, 0x41, 0x53, 0xcb, 0xfd,
+ 0x4b, 0x03, 0x18, 0x79, 0x3e, 0x1f, 0xf2, 0xd8, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x00, 0x2a, 0x02,
+ 0x5b, 0x5f, 0x38, 0x3a, 0x46, 0x90, 0x62, 0x44, 0x32, 0x15, 0xd2, 0x90, 0x8c, 0x48, 0xbe, 0x04,
+ 0x8b, 0xa7, 0x0d, 0x2f, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, 0x34, 0x48, 0xe3, 0x72, 0x32,
+ 0xf9, 0x0a, 0x60, 0x9e, 0x1d, 0xbc, 0x70, 0x4a, 0x18, 0x7a, 0xff, 0x9d, 0xd7, 0x92, 0xc6, 0x16,
+ 0xe8, 0xee, 0x13, 0x28, 0xe3, 0x17, 0xa8, 0xe9, 0x89, 0x13, 0x57, 0x4b, 0xa6, 0xa7, 0x5a, 0x6f,
+ 0xce, 0x11, 0x2b, 0x9d, 0x23, 0xee, 0x53, 0x30, 0x2f, 0x92, 0xef, 0x7c, 0xdf, 0x83, 0x71, 0x7f,
+ 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xc9, 0xc6, 0x83, 0xf1, 0xf0, 0x4e,
+ 0x7c, 0xca, 0xeb, 0x14, 0x1e, 0x8a, 0x4c, 0xa8, 0xfe, 0x2e, 0xa1, 0x46, 0x51, 0x68, 0x1b, 0x4a,
+ 0x38, 0xf6, 0x4d, 0xd0, 0xbb, 0x2f, 0x92, 0x3a, 0xea, 0x77, 0x5f, 0x24, 0x75, 0x44, 0xd5, 0xa8,
+ 0x57, 0x00, 0xed, 0xda, 0x86, 0xfb, 0x8b, 0xa6, 0x8a, 0x8f, 0x4d, 0x55, 0xed, 0x09, 0xf2, 0x7f,
+ 0xa8, 0x08, 0xc9, 0xa3, 0xb1, 0x2f, 0x50, 0x97, 0x41, 0x4d, 0x65, 0xf6, 0x84, 0x4a, 0x7d, 0xbd,
+ 0x0c, 0x26, 0x59, 0x6a, 0xb5, 0x26, 0x1f, 0x40, 0x55, 0x48, 0x16, 0x4b, 0xc5, 0x4e, 0x86, 0x6a,
+ 0x05, 0xed, 0x9e, 0x20, 0xf7, 0xc1, 0xe4, 0xc1, 0x74, 0x8c, 0x97, 0xa2, 0x1c, 0x65, 0x1e, 0x4c,
+ 0x7b, 0x82, 0xec, 0x42, 0x75, 0x16, 0x87, 0xcb, 0xc8, 0x0b, 0x66, 0x4e, 0xb9, 0x65, 0xb4, 0x2d,
+ 0xba, 0xb6, 0x49, 0x13, 0xf4, 0xab, 0x15, 0x0e, 0xb6, 0x2a, 0xd5, 0xaf, 0x56, 0x6a, 0xf7, 0x98,
+ 0x05, 0x33, 0xae, 0x36, 0xa9, 0x24, 0xbb, 0xa3, 0xdd, 0x13, 0xee, 0xef, 0x1a, 0x94, 0x8f, 0xe7,
+ 0xcb, 0xe0, 0x15, 0xd9, 0x83, 0x9a, 0xef, 0x05, 0x63, 0xd5, 0x4a, 0xb9, 0x66, 0xcb, 0xf7, 0x02,
+ 0x55, 0xc3, 0x3d, 0x81, 0x7e, 0x76, 0xb3, 0xf6, 0xa7, 0x6f, 0x8d, 0xcf, 0x6e, 0x52, 0x7f, 0x27,
+ 0xbd, 0x04, 0x03, 0x2f, 0x61, 0xb7, 0x78, 0x09, 0x98, 0xa0, 0xd3, 0x0d, 0x26, 0xe1, 0xd4, 0x0b,
+ 0x66, 0xf9, 0x0d, 0xa8, 0x37, 0x1c, 0xbf, 0xaa, 0x4e, 0x71, 0xed, 0x3e, 0x87, 0x6a, 0xc6, 0xba,
+ 0xd3, 0xbc, 0xdf, 0x0d, 0xd4, 0x13, 0xbb, 0xf1, 0xae, 0xea, 0xe4, 0x7f, 0x70, 0xef, 0xe4, 0x62,
+ 0x70, 0x38, 0x1a, 0x17, 0x1e, 0x5b, 0xf7, 0x07, 0x68, 0x60, 0x46, 0x3e, 0xfd, 0xaf, 0xad, 0xb7,
+ 0x0f, 0xe6, 0x44, 0xed, 0x90, 0x75, 0xde, 0xf6, 0x9d, 0xaf, 0xc9, 0x02, 0x12, 0xda, 0xd1, 0xce,
+ 0x9b, 0xdb, 0x3d, 0xed, 0xb7, 0xdb, 0x3d, 0xed, 0xcf, 0xdb, 0x3d, 0xed, 0x7b, 0x53, 0xb1, 0xa3,
+ 0xab, 0x2b, 0x13, 0x7f, 0x71, 0x3e, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x5f, 0xf2, 0x4d,
+ 0x13, 0x09, 0x00, 0x00,
}
func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {
diff --git a/prompb/types.proto b/prompb/types.proto
index 57216b81d9..aa322515c3 100644
--- a/prompb/types.proto
+++ b/prompb/types.proto
@@ -169,9 +169,10 @@ message Chunk {
// We require this to match chunkenc.Encoding.
enum Encoding {
- UNKNOWN = 0;
- XOR = 1;
- HISTOGRAM = 2;
+ UNKNOWN = 0;
+ XOR = 1;
+ HISTOGRAM = 2;
+ FLOAT_HISTOGRAM = 3;
}
Encoding type = 3;
bytes data = 4;
diff --git a/promql/test.go b/promql/test.go
index 78cc1e9fbb..f47269aeca 100644
--- a/promql/test.go
+++ b/promql/test.go
@@ -33,6 +33,7 @@ import (
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
+ "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
)
@@ -302,14 +303,26 @@ func (cmd loadCmd) String() string {
func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) {
h := m.Hash()
+ metricName := m.Get("__name__")
+ isHistogram := strings.Contains(metricName, "_histogram_")
+
samples := make([]Point, 0, len(vals))
ts := testStartTime
for _, v := range vals {
if !v.Omitted {
- samples = append(samples, Point{
- T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond),
- V: v.Value,
- })
+ t := ts.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+
+ if isHistogram {
+ samples = append(samples, Point{
+ T: t,
+ H: tsdbutil.GenerateTestFloatHistogram(int(v.Value)),
+ })
+ } else {
+ samples = append(samples, Point{
+ T: t,
+ V: v.Value,
+ })
+ }
}
ts = ts.Add(cmd.gap)
}
@@ -323,8 +336,14 @@ func (cmd *loadCmd) append(a storage.Appender) error {
m := cmd.metrics[h]
for _, s := range smpls {
- if _, err := a.Append(0, m, s.T, s.V); err != nil {
- return err
+ if s.H == nil {
+ if _, err := a.Append(0, m, s.T, s.V); err != nil {
+ return err
+ }
+ } else {
+ if _, err := a.AppendHistogram(0, m, s.T, nil, s.H); err != nil {
+ return err
+ }
}
}
}
diff --git a/storage/remote/codec.go b/storage/remote/codec.go
index e3ef58c351..ba802bccb3 100644
--- a/storage/remote/codec.go
+++ b/storage/remote/codec.go
@@ -120,10 +120,13 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
for ss.Next() {
series := ss.At()
iter = series.Iterator(iter)
- samples := []prompb.Sample{}
- for iter.Next() == chunkenc.ValFloat {
- // TODO(beorn7): Add Histogram support.
+ var (
+ samples []prompb.Sample
+ histograms []prompb.Histogram
+ )
+
+ for valType := iter.Next(); valType != chunkenc.ValNone; valType = iter.Next() {
numSamples++
if sampleLimit > 0 && numSamples > sampleLimit {
return nil, ss.Warnings(), HTTPError{
@@ -131,19 +134,32 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
status: http.StatusBadRequest,
}
}
- ts, val := iter.At()
- samples = append(samples, prompb.Sample{
- Timestamp: ts,
- Value: val,
- })
+
+ switch valType {
+ case chunkenc.ValFloat:
+ ts, val := iter.At()
+ samples = append(samples, prompb.Sample{
+ Timestamp: ts,
+ Value: val,
+ })
+ case chunkenc.ValHistogram:
+ ts, h := iter.AtHistogram()
+ histograms = append(histograms, HistogramToHistogramProto(ts, h))
+ case chunkenc.ValFloatHistogram:
+ ts, fh := iter.AtFloatHistogram()
+ histograms = append(histograms, FloatHistogramToHistogramProto(ts, fh))
+ default:
+ return nil, ss.Warnings(), fmt.Errorf("unrecognized value type: %s", valType)
+ }
}
if err := iter.Err(); err != nil {
return nil, ss.Warnings(), err
}
resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
- Labels: labelsToLabelsProto(series.Labels(), nil),
- Samples: samples,
+ Labels: labelsToLabelsProto(series.Labels(), nil),
+ Samples: samples,
+ Histograms: histograms,
})
}
return resp, ss.Warnings(), ss.Err()
diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go
index b5b717455c..2ac9cc32b4 100644
--- a/storage/remote/read_handler_test.go
+++ b/storage/remote/read_handler_test.go
@@ -30,12 +30,14 @@ import (
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb/tsdbutil"
)
func TestSampledReadEndpoint(t *testing.T) {
suite, err := promql.NewTest(t, `
load 1m
test_metric1{foo="bar",baz="qux"} 1
+ test_histogram_metric1{foo="bar",baz="qux"} 1
`)
require.NoError(t, err)
@@ -60,10 +62,16 @@ func TestSampledReadEndpoint(t *testing.T) {
matcher2, err := labels.NewMatcher(labels.MatchEqual, "d", "e")
require.NoError(t, err)
- query, err := ToQuery(0, 1, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{Step: 0, Func: "avg"})
+ matcher3, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_histogram_metric1")
require.NoError(t, err)
- req := &prompb.ReadRequest{Queries: []*prompb.Query{query}}
+ query1, err := ToQuery(0, 1, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{Step: 0, Func: "avg"})
+ require.NoError(t, err)
+
+ query2, err := ToQuery(0, 1, []*labels.Matcher{matcher3, matcher2}, &storage.SelectHints{Step: 0, Func: "avg"})
+ require.NoError(t, err)
+
+ req := &prompb.ReadRequest{Queries: []*prompb.Query{query1, query2}}
data, err := proto.Marshal(req)
require.NoError(t, err)
@@ -90,7 +98,7 @@ func TestSampledReadEndpoint(t *testing.T) {
err = proto.Unmarshal(uncompressed, &resp)
require.NoError(t, err)
- require.Equal(t, 1, len(resp.Results), "Expected 1 result.")
+ require.Equal(t, 2, len(resp.Results), "Expected 2 results.")
require.Equal(t, &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
@@ -106,6 +114,23 @@ func TestSampledReadEndpoint(t *testing.T) {
},
},
}, resp.Results[0])
+
+ require.Equal(t, &prompb.QueryResult{
+ Timeseries: []*prompb.TimeSeries{
+ {
+ Labels: []prompb.Label{
+ {Name: "__name__", Value: "test_histogram_metric1"},
+ {Name: "b", Value: "c"},
+ {Name: "baz", Value: "qux"},
+ {Name: "d", Value: "e"},
+ {Name: "foo", Value: "bar"},
+ },
+ Histograms: []prompb.Histogram{
+ FloatHistogramToHistogramProto(0, tsdbutil.GenerateTestFloatHistogram(1)),
+ },
+ },
+ },
+ }, resp.Results[1])
}
func BenchmarkStreamReadEndpoint(b *testing.B) {
@@ -179,11 +204,13 @@ func TestStreamReadEndpoint(t *testing.T) {
// First with 120 samples. We expect 1 frame with 1 chunk.
// Second with 121 samples, We expect 1 frame with 2 chunks.
// Third with 241 samples. We expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit.
+ // Fourth with 120 histogram samples. We expect 1 frame with 1 chunk.
suite, err := promql.NewTest(t, `
load 1m
test_metric1{foo="bar1",baz="qux"} 0+100x119
- test_metric1{foo="bar2",baz="qux"} 0+100x120
- test_metric1{foo="bar3",baz="qux"} 0+100x240
+ test_metric1{foo="bar2",baz="qux"} 0+100x120
+ test_metric1{foo="bar3",baz="qux"} 0+100x240
+ test_histogram_metric1{foo="bar4",baz="qux"} 0+1x119
`)
require.NoError(t, err)
@@ -214,6 +241,9 @@ func TestStreamReadEndpoint(t *testing.T) {
matcher3, err := labels.NewMatcher(labels.MatchEqual, "foo", "bar1")
require.NoError(t, err)
+ matcher4, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_histogram_metric1")
+ require.NoError(t, err)
+
query1, err := ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{
Step: 1,
Func: "avg",
@@ -230,8 +260,16 @@ func TestStreamReadEndpoint(t *testing.T) {
})
require.NoError(t, err)
+ query3, err := ToQuery(0, 14400001, []*labels.Matcher{matcher4}, &storage.SelectHints{
+ Step: 1,
+ Func: "avg",
+ Start: 0,
+ End: 14400001,
+ })
+ require.NoError(t, err)
+
req := &prompb.ReadRequest{
- Queries: []*prompb.Query{query1, query2},
+ Queries: []*prompb.Query{query1, query2, query3},
AcceptedResponseTypes: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS},
}
data, err := proto.Marshal(req)
@@ -261,7 +299,7 @@ func TestStreamReadEndpoint(t *testing.T) {
results = append(results, res)
}
- require.Equal(t, 5, len(results), "Expected 5 results.")
+ require.Equal(t, 6, len(results), "Expected 6 results.")
require.Equal(t, []*prompb.ChunkedReadResponse{
{
@@ -378,5 +416,26 @@ func TestStreamReadEndpoint(t *testing.T) {
},
QueryIndex: 1,
},
+ {
+ ChunkedSeries: []*prompb.ChunkedSeries{
+ {
+ Labels: []prompb.Label{
+ {Name: "__name__", Value: "test_histogram_metric1"},
+ {Name: "b", Value: "c"},
+ {Name: "baz", Value: "qux"},
+ {Name: "d", Value: "e"},
+ {Name: "foo", Value: "bar4"},
+ },
+ Chunks: []prompb.Chunk{
+ {
+ Type: prompb.Chunk_FLOAT_HISTOGRAM,
+ MaxTimeMs: 7140000,
+ Data: []byte("..."), // binary FLOAT_HISTOGRAM chunk payload; garbled in this excerpt and truncated here
+ },
+ },
+ },
+ },
+ QueryIndex: 2,
+ },
}, results)
}
[remainder of this patch, including the storage/remote/read_test.go changes listed in the diffstat, lost to the same truncation]
From: Justin Lei
Date: Thu, 9 Mar 2023 11:03:30 -0800
Subject: [PATCH 004/231] Remove hacky promql.Test native histogram thing
Signed-off-by: Justin Lei
---
promql/test.go | 31 ++++++-----------------------
storage/remote/read_handler_test.go | 27 +++++++++++++++++--------
2 files changed, 25 insertions(+), 33 deletions(-)
diff --git a/promql/test.go b/promql/test.go
index f47269aeca..78cc1e9fbb 100644
--- a/promql/test.go
+++ b/promql/test.go
@@ -33,7 +33,6 @@ import (
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
- "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
)
@@ -303,26 +302,14 @@ func (cmd loadCmd) String() string {
func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) {
h := m.Hash()
- metricName := m.Get("__name__")
- isHistogram := strings.Contains(metricName, "_histogram_")
-
samples := make([]Point, 0, len(vals))
ts := testStartTime
for _, v := range vals {
if !v.Omitted {
- t := ts.UnixNano() / int64(time.Millisecond/time.Nanosecond)
-
- if isHistogram {
- samples = append(samples, Point{
- T: t,
- H: tsdbutil.GenerateTestFloatHistogram(int(v.Value)),
- })
- } else {
- samples = append(samples, Point{
- T: t,
- V: v.Value,
- })
- }
+ samples = append(samples, Point{
+ T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond),
+ V: v.Value,
+ })
}
ts = ts.Add(cmd.gap)
}
@@ -336,14 +323,8 @@ func (cmd *loadCmd) append(a storage.Appender) error {
m := cmd.metrics[h]
for _, s := range smpls {
- if s.H == nil {
- if _, err := a.Append(0, m, s.T, s.V); err != nil {
- return err
- }
- } else {
- if _, err := a.AppendHistogram(0, m, s.T, nil, s.H); err != nil {
- return err
- }
+ if _, err := a.Append(0, m, s.T, s.V); err != nil {
+ return err
}
}
}
diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go
index 2ac9cc32b4..261c28e215 100644
--- a/storage/remote/read_handler_test.go
+++ b/storage/remote/read_handler_test.go
@@ -15,11 +15,13 @@ package remote
import (
"bytes"
+ "context"
"errors"
"io"
"net/http"
"net/http/httptest"
"testing"
+ "time"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
@@ -37,12 +39,12 @@ func TestSampledReadEndpoint(t *testing.T) {
suite, err := promql.NewTest(t, `
load 1m
test_metric1{foo="bar",baz="qux"} 1
- test_histogram_metric1{foo="bar",baz="qux"} 1
`)
require.NoError(t, err)
-
defer suite.Close()
+ addNativeHistogramsToTestSuite(t, suite, 1)
+
err = suite.Run()
require.NoError(t, err)
@@ -123,10 +125,9 @@ func TestSampledReadEndpoint(t *testing.T) {
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
- {Name: "foo", Value: "bar"},
},
Histograms: []prompb.Histogram{
- FloatHistogramToHistogramProto(0, tsdbutil.GenerateTestFloatHistogram(1)),
+ FloatHistogramToHistogramProto(0, tsdbutil.GenerateTestFloatHistogram(0)),
},
},
},
@@ -139,7 +140,7 @@ func BenchmarkStreamReadEndpoint(b *testing.B) {
test_metric1{foo="bar1",baz="qux"} 0+100x119
test_metric1{foo="bar2",baz="qux"} 0+100x120
test_metric1{foo="bar3",baz="qux"} 0+100x240
-`)
+ `)
require.NoError(b, err)
defer suite.Close()
@@ -210,12 +211,12 @@ func TestStreamReadEndpoint(t *testing.T) {
test_metric1{foo="bar1",baz="qux"} 0+100x119
test_metric1{foo="bar2",baz="qux"} 0+100x120
test_metric1{foo="bar3",baz="qux"} 0+100x240
- test_histogram_metric1{foo="bar4",baz="qux"} 0+1x119
`)
require.NoError(t, err)
-
defer suite.Close()
+ addNativeHistogramsToTestSuite(t, suite, 120)
+
require.NoError(t, suite.Run())
api := NewReadHandler(nil, nil, suite.Storage(), func() config.Config {
@@ -424,7 +425,6 @@ func TestStreamReadEndpoint(t *testing.T) {
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
- {Name: "foo", Value: "bar4"},
},
Chunks: []prompb.Chunk{
{
@@ -439,3 +439,14 @@ func TestStreamReadEndpoint(t *testing.T) {
},
}, results)
}
+
+func addNativeHistogramsToTestSuite(t *testing.T, pqlTest *promql.Test, n int) {
+ lbls := labels.FromStrings("__name__", "test_histogram_metric1", "baz", "qux")
+
+ app := pqlTest.Storage().Appender(context.TODO())
+ for i, fh := range tsdbutil.GenerateTestFloatHistograms(n) {
+ _, err := app.AppendHistogram(0, lbls, int64(i)*int64(60*time.Second/time.Millisecond), nil, fh)
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+}
From c9d06f2826bef4606e4a8c4edea259f012079f10 Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Mon, 13 Mar 2023 11:15:45 +0530
Subject: [PATCH 005/231] tsdb: Replay m-map chunk only when required
M-map chunks replayed on startup are discarded if there
was no WAL and no snapshot loaded, because there are no
series created in the Head that they can map to. So only
load m-map chunks from disk if either a snapshot was
loaded or there is a WAL on disk.
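The replay gate in Head.Init, reduced to its essence (a simplified
excerpt of the change below, without the error handling):

	if snapshotLoaded || h.wal != nil {
		// Either the snapshot restored the series, or WAL replay will
		// recreate them, so the replayed m-map chunks have something
		// to attach to.
		mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.loadMmappedChunks(refSeries)
	}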
Signed-off-by: Ganesh Vernekar
---
tsdb/head.go | 44 ++++++++++++++++++++++++++++----------------
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/tsdb/head.go b/tsdb/head.go
index b28f5aca5e..f3e0a7f6c9 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -590,6 +590,7 @@ func (h *Head) Init(minValidTime int64) error {
snapIdx, snapOffset := -1, 0
refSeries := make(map[chunks.HeadSeriesRef]*memSeries)
+ snapshotLoaded := false
if h.opts.EnableMemorySnapshotOnShutdown {
level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot")
// If there are any WAL files, there should be at least one WAL file with an index that is current or newer
@@ -619,6 +620,7 @@ func (h *Head) Init(minValidTime int64) error {
var err error
snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot()
if err == nil {
+ snapshotLoaded = true
level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String())
}
if err != nil {
@@ -636,26 +638,36 @@ func (h *Head) Init(minValidTime int64) error {
}
mmapChunkReplayStart := time.Now()
- mmappedChunks, oooMmappedChunks, lastMmapRef, err := h.loadMmappedChunks(refSeries)
- if err != nil {
- // TODO(codesome): clear out all m-map chunks here for refSeries.
- level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err)
- if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok {
- h.metrics.mmapChunkCorruptionTotal.Inc()
- }
-
- // Discard snapshot data since we need to replay the WAL for the missed m-map chunks data.
- snapIdx, snapOffset = -1, 0
-
- // If this fails, data will be recovered from WAL.
- // Hence we wont lose any data (given WAL is not corrupt).
- mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.removeCorruptedMmappedChunks(err)
+ var (
+ mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
+ oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
+ lastMmapRef chunks.ChunkDiskMapperRef
+ err error
+ )
+ if snapshotLoaded || h.wal != nil {
+ // If snapshot was not loaded and if there is no WAL, then m-map chunks will be discarded
+ // anyway. So we only load m-map chunks when they won't be discarded.
+ mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.loadMmappedChunks(refSeries)
if err != nil {
- return err
+ // TODO(codesome): clear out all m-map chunks here for refSeries.
+ level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err)
+ if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok {
+ h.metrics.mmapChunkCorruptionTotal.Inc()
+ }
+
+ // Discard snapshot data since we need to replay the WAL for the missed m-map chunks data.
+ snapIdx, snapOffset = -1, 0
+
+ // If this fails, data will be recovered from WAL.
+ // Hence we wont lose any data (given WAL is not corrupt).
+ mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.removeCorruptedMmappedChunks(err)
+ if err != nil {
+ return err
+ }
}
+ level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(mmapChunkReplayStart).String())
}
- level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(mmapChunkReplayStart).String())
if h.wal == nil {
level.Info(h.logger).Log("msg", "WAL not found")
return nil
From 1c3f1216b303aabbaac8f3cf86e186398d717c37 Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Mon, 13 Mar 2023 12:23:57 +0530
Subject: [PATCH 006/231] tsdb: Test querying after missing wbl with snapshots
enabled
If the snapshot was enabled with some ooo m-map chunks on disk,
and the wbl was removed between restarts, then we should still be
able to query those ooo m-map chunks after a restart. This test
shows that we are currently not able to query them after a restart
in this situation.
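The failure mode, in outline:

	// 1. Ingest in-order samples, then OOO samples; most OOO samples get
	//    m-mapped, the rest stay in the in-memory OOO head chunk.
	// 2. Shut down (writing a chunk snapshot), then delete the wbl directory.
	// 3. Restart: the OOO m-map chunks are replayed from disk, the OOO head
	//    chunk is gone with the wbl, and queries over the OOO range must
	//    still return the m-mapped samples.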
Signed-off-by: Ganesh Vernekar
---
tsdb/db_test.go | 109 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 109 insertions(+)
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index cc65069e43..926af273da 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -4433,6 +4433,115 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
verifySamples(db.Blocks()[1], 250, 350)
}
+// TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL tests the scenario where the WBL goes
+// missing after a restart while snapshot was enabled, but the query still returns the right
+// data from the mmap chunks.
+func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) {
+ dir := t.TempDir()
+
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 10
+ opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
+ opts.EnableMemorySnapshotOnShutdown = true
+
+ db, err := Open(dir, nil, nil, opts, nil)
+ require.NoError(t, err)
+ db.DisableCompactions() // We want to manually call it.
+ t.Cleanup(func() {
+ require.NoError(t, db.Close())
+ })
+
+ series1 := labels.FromStrings("foo", "bar1")
+ series2 := labels.FromStrings("foo", "bar2")
+
+ addSamples := func(fromMins, toMins int64) {
+ app := db.Appender(context.Background())
+ for min := fromMins; min <= toMins; min++ {
+ ts := min * time.Minute.Milliseconds()
+ _, err := app.Append(0, series1, ts, float64(ts))
+ require.NoError(t, err)
+ _, err = app.Append(0, series2, ts, float64(2*ts))
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // Add in-order samples.
+ addSamples(250, 350)
+
+ // Add ooo samples that will result into a single block.
+ addSamples(90, 110) // The sample 110 will not be in m-map chunks.
+
+ // Checking that there are some ooo m-map chunks.
+ for _, lbls := range []labels.Labels{series1, series2} {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Equal(t, 2, len(ms.ooo.oooMmappedChunks))
+ require.NotNil(t, ms.ooo.oooHeadChunk)
+ }
+
+ // Restart DB.
+ require.NoError(t, db.Close())
+
+ // For some reason wbl goes missing.
+ require.NoError(t, os.RemoveAll(path.Join(dir, "wbl")))
+
+ db, err = Open(dir, nil, nil, opts, nil)
+ require.NoError(t, err)
+ db.DisableCompactions() // We want to manually call it.
+
+ // Check ooo m-map chunks again.
+ for _, lbls := range []labels.Labels{series1, series2} {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Equal(t, 2, len(ms.ooo.oooMmappedChunks))
+ require.Equal(t, 109*time.Minute.Milliseconds(), ms.ooo.oooMmappedChunks[1].maxTime)
+ require.Nil(t, ms.ooo.oooHeadChunk) // Because of missing wbl.
+ }
+
+ verifySamples := func(fromMins, toMins int64) {
+ series1Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
+ series2Samples := make([]tsdbutil.Sample, 0, toMins-fromMins+1)
+ for min := fromMins; min <= toMins; min++ {
+ ts := min * time.Minute.Milliseconds()
+ series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
+ series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
+ }
+ expRes := map[string][]tsdbutil.Sample{
+ series1.String(): series1Samples,
+ series2.String(): series2Samples,
+ }
+
+ q, err := db.Querier(context.Background(), fromMins*time.Minute.Milliseconds(), toMins*time.Minute.Milliseconds())
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ require.Equal(t, expRes, actRes)
+ }
+
+ // Checking for expected ooo data from mmap chunks.
+ verifySamples(90, 109)
+
+ // Compaction should also work fine.
+ require.Equal(t, len(db.Blocks()), 0)
+ require.NoError(t, db.CompactOOOHead())
+ require.Equal(t, len(db.Blocks()), 1) // One block from OOO data.
+ require.Equal(t, int64(0), db.Blocks()[0].MinTime())
+ require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime())
+
+ // Checking that ooo chunk is empty in Head.
+ for _, lbls := range []labels.Labels{series1, series2} {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Nil(t, ms.ooo)
+ }
+
+ verifySamples(90, 109)
+}
+
func Test_Querier_OOOQuery(t *testing.T) {
opts := DefaultOptions()
opts.OutOfOrderCapMax = 30
From 2af44f955871a18bd043f807441888aedd31dff7 Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Mon, 13 Mar 2023 12:27:46 +0530
Subject: [PATCH 007/231] tsdb: Update OOO min/max time properly after
replaying m-map chunks
Without this fix, if snapshots were enabled and the wbl went
missing between restarts, TSDB would not recognize that there are
ooo m-map chunks on disk, and we could not query them until those
chunks were compacted into blocks.
Signed-off-by: Ganesh Vernekar
---
tsdb/head.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/tsdb/head.go b/tsdb/head.go
index f3e0a7f6c9..41933d7a04 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -836,6 +836,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
numSamples: numSamples,
})
+ h.updateMinOOOMaxOOOTime(mint, maxt)
return nil
}
From dd94ebb87bbc14e103ccca0cb8283246d40c70fb Mon Sep 17 00:00:00 2001
From: Trevor Whitney
Date: Wed, 15 Feb 2023 05:49:51 -0700
Subject: [PATCH 008/231] promql: set CounterResetHint after rate and sum
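In effect, histogramRate (which backs rate and increase) now returns
its result with

	h.CounterResetHint = histogram.GaugeType

so a rate is explicitly not a counter anymore, and downstream code such
as the recording-rule appender (see the next patch) no longer needs to
force the hint itself.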
Signed-off-by: Trevor Whitney
---
model/histogram/float_histogram.go | 11 +++++++++++
promql/engine_test.go | 6 ++----
promql/functions.go | 3 +++
3 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go
index 256679a8c9..6b823f7d38 100644
--- a/model/histogram/float_histogram.go
+++ b/model/histogram/float_histogram.go
@@ -192,6 +192,8 @@ func (h *FloatHistogram) Scale(factor float64) *FloatHistogram {
//
// This method returns a pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
+ // TODO(trevorwhitney): If other.CounterResetHint != h.CounterResetHint then
+ // we should return some warning.
otherZeroCount := h.reconcileZeroBuckets(other)
h.ZeroCount += otherZeroCount
h.Count += other.Count
@@ -438,6 +440,15 @@ func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
// information can be read directly from there rather than be detected each time
// again.
func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
+ if h.CounterResetHint == CounterReset {
+ return true
+ }
+ if h.CounterResetHint == NotCounterReset {
+ return false
+ }
+ // In all other cases of CounterResetHint, we go on as we would otherwise.
+ // Even in the GaugeHistogram case, we pretend this is a counter histogram
+ // for consistency.
if h.Count < previous.Count {
return true
}
diff --git a/promql/engine_test.go b/promql/engine_test.go
index e2e209849b..d1c4570ea2 100644
--- a/promql/engine_test.go
+++ b/promql/engine_test.go
@@ -3155,8 +3155,7 @@ func TestNativeHistogramRate(t *testing.T) {
require.Len(t, vector, 1)
actualHistogram := vector[0].H
expectedHistogram := &histogram.FloatHistogram{
- // TODO(beorn7): This should be GaugeType. Change it once supported by PromQL.
- CounterResetHint: histogram.NotCounterReset,
+ CounterResetHint: histogram.GaugeType,
Schema: 1,
ZeroThreshold: 0.001,
ZeroCount: 1. / 15.,
@@ -3200,8 +3199,7 @@ func TestNativeFloatHistogramRate(t *testing.T) {
require.Len(t, vector, 1)
actualHistogram := vector[0].H
expectedHistogram := &histogram.FloatHistogram{
- // TODO(beorn7): This should be GaugeType. Change it once supported by PromQL.
- CounterResetHint: histogram.NotCounterReset,
+ CounterResetHint: histogram.GaugeType,
Schema: 1,
ZeroThreshold: 0.001,
ZeroCount: 1. / 15.,
diff --git a/promql/functions.go b/promql/functions.go
index c5922002b0..3da38ea0f3 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -187,6 +187,7 @@ func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram {
if curr == nil {
return nil // Range contains a mix of histograms and floats.
}
+ // TODO(trevorwhitney): Check if isCounter is consistent with curr.CounterResetHint.
if !isCounter {
continue
}
@@ -208,6 +209,8 @@ func histogramRate(points []Point, isCounter bool) *histogram.FloatHistogram {
prev = curr
}
}
+
+ h.CounterResetHint = histogram.GaugeType
return h.Compact(0)
}
From c3e0a83725c77a868310f13a003b324994e93bb7 Mon Sep 17 00:00:00 2001
From: Trevor Whitney
Date: Wed, 15 Feb 2023 05:59:02 -0700
Subject: [PATCH 009/231] rules: no longer force CounterResetHint to Gauge
Signed-off-by: Trevor Whitney
---
rules/manager.go | 4 ----
rules/manager_test.go | 2 --
2 files changed, 6 deletions(-)
diff --git a/rules/manager.go b/rules/manager.go
index 6f6ce2cfe4..f8dcf081fe 100644
--- a/rules/manager.go
+++ b/rules/manager.go
@@ -31,7 +31,6 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/model/timestamp"
@@ -671,9 +670,6 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
for _, s := range vector {
if s.H != nil {
- // We assume that all native histogram results are gauge histograms.
- // TODO(codesome): once PromQL can give the counter reset info, remove this assumption.
- s.H.CounterResetHint = histogram.GaugeType
_, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H)
} else {
_, err = app.Append(0, s.Metric, s.T, s.V)
diff --git a/rules/manager_test.go b/rules/manager_test.go
index d287c25ce0..aed289c5b9 100644
--- a/rules/manager_test.go
+++ b/rules/manager_test.go
@@ -30,7 +30,6 @@ import (
"go.uber.org/goleak"
"gopkg.in/yaml.v2"
- "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/model/timestamp"
@@ -1393,7 +1392,6 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
for _, h := range hists[1:] {
expHist = expHist.Add(h.ToFloat())
}
- expHist.CounterResetHint = histogram.GaugeType
it := s.Iterator(nil)
require.Equal(t, chunkenc.ValFloatHistogram, it.Next())
From b4e324f637281c2fc75893c9f33899697ec38e6a Mon Sep 17 00:00:00 2001
From: Trevor Whitney
Date: Wed, 8 Mar 2023 14:58:40 -0700
Subject: [PATCH 010/231] Handle valid cases of mismatched hints when adding
Signed-off-by: Trevor Whitney
---
model/histogram/float_histogram.go | 43 +++++++++++++++++++++++-------
1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go
index 6b823f7d38..e96b5682b7 100644
--- a/model/histogram/float_histogram.go
+++ b/model/histogram/float_histogram.go
@@ -192,8 +192,25 @@ func (h *FloatHistogram) Scale(factor float64) *FloatHistogram {
//
// This method returns a pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
- // TODO(trevorwhitney): If other.CounterResetHint != h.CounterResetHint then
- // we should return some warning.
+ if other.CounterResetHint != h.CounterResetHint {
+ // The outcome of adding an increment to a gauge histogram will always be a GaugeType
+ if other.CounterResetHint == GaugeType && h.CounterResetHint != GaugeType {
+ h.CounterResetHint = GaugeType
+ }
+
+ // This could be legitimate if the caller knows what they are doing, but the resulting hint
+ // must be UnknownCounterReset.
+ if other.CounterResetHint == UnknownCounterReset && h.CounterResetHint != GaugeType {
+ h.CounterResetHint = UnknownCounterReset
+ }
+
+ // TODO(trevorwhitney): this leaves CounterReset and NotCounterReset. If we have mismatch of
+ // these hints, that cannot be right, and we should raise a warning when possible.
+ // if other.CounterResetHint == CounterReset && h.CounterResetHint == NotCounterReset ||
+ // other.CounterResetHint == NotCounterReset && h.CounterResetHint == CounterReset {
+ // }
+ }
+
otherZeroCount := h.reconcileZeroBuckets(other)
h.ZeroCount += otherZeroCount
h.Count += other.Count
@@ -416,6 +433,10 @@ func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
// of observations, but NOT the sum of observations) is smaller in the receiving
// histogram compared to the previous histogram. Otherwise, it returns false.
//
+// This method will shortcut to true if a CounterReset is detected, and shortcut
+// to false if NotCounterReset is detected. Otherwise it will do the work to detect
+// a reset.
+//
// Special behavior in case the Schema or the ZeroThreshold are not the same in
// both histograms:
//
@@ -434,11 +455,6 @@ func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
// - Upon a decrease of the Schema, the buckets of the previous histogram are
// merged so that they match the new, lower-resolution schema (again without
// mutating the provided previous histogram).
-//
-// Note that this kind of reset detection is quite expensive. Ideally, resets
-// are detected at ingest time and stored in the TSDB, so that the reset
-// information can be read directly from there rather than be detected each time
-// again.
func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
if h.CounterResetHint == CounterReset {
return true
@@ -446,9 +462,16 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
if h.CounterResetHint == NotCounterReset {
return false
}
- // In all other cases of CounterResetHint, we go on as we would otherwise.
- // Even in the GaugeHistogram case, we pretend this is a counter histogram
- // for consistency.
+ // In all other cases of CounterResetHint (UnknownCounterReset and GaugeType),
+ // we go on as we would otherwise, for reasons explained below.
+ //
+ // If the CounterResetHint is UnknownCounterReset, we do not know yet if this histogram comes
+ // with a counter reset. Therefore, we have to do all the detailed work to find out if there
+ // is a counter reset or not.
+ // We do the same if the CounterResetHint is GaugeType, which should not happen, but PromQL still
+ // allows the user to apply functions to gauge histograms that are only meant for counter histograms.
+ // In this case, we treat the gauge histograms as counter histograms
+ // (and we plan to return a warning about it to the user).
if h.Count < previous.Count {
return true
}
From e3513d1dd222154bafcc355281d17109e0a62b01 Mon Sep 17 00:00:00 2001
From: Trevor Whitney
Date: Mon, 13 Mar 2023 14:31:49 -0600
Subject: [PATCH 011/231] Change nested ifs to a switch
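The switch below encodes the following outcome table for h.Add(other)
(rows are the receiver's hint, columns the operand's):

	// Resulting CounterResetHint of h after h.Add(other):
	//
	//   h \ other       | Gauge  Unknown  CounterReset  NotCounterReset
	//   ----------------+----------------------------------------------
	//   Gauge           | Gauge  Gauge    Gauge         Gauge
	//   Unknown         | Gauge  Unknown  Unknown       Unknown
	//   CounterReset    | Gauge  Unknown  CounterReset  Unknown*
	//   NotCounterReset | Gauge  Unknown  Unknown*      NotCounterReset
	//
	// * direct collision of CounterReset and NotCounterReset; conservatively
	//   mapped to UnknownCounterReset (a warning is planned once the
	//   plumbing for it exists).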
Signed-off-by: Trevor Whitney
---
model/histogram/float_histogram.go | 45 +++++++++++++++++-------------
1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go
index e96b5682b7..cd73083bbd 100644
--- a/model/histogram/float_histogram.go
+++ b/model/histogram/float_histogram.go
@@ -192,23 +192,28 @@ func (h *FloatHistogram) Scale(factor float64) *FloatHistogram {
//
// This method returns a pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
- if other.CounterResetHint != h.CounterResetHint {
- // The outcome of adding an increment to a gauge histogram will always be a GaugeType
- if other.CounterResetHint == GaugeType && h.CounterResetHint != GaugeType {
- h.CounterResetHint = GaugeType
- }
-
- // This could be legitimate if the caller knows what they are doing, but the resulting hint
- // must be UnknownCounterReset.
- if other.CounterResetHint == UnknownCounterReset && h.CounterResetHint != GaugeType {
- h.CounterResetHint = UnknownCounterReset
- }
-
- // TODO(trevorwhitney): this leaves CounterReset and NotCounterReset. If we have mismatch of
- // these hints, that cannot be right, and we should raise a warning when possible.
- // if other.CounterResetHint == CounterReset && h.CounterResetHint == NotCounterReset ||
- // other.CounterResetHint == NotCounterReset && h.CounterResetHint == CounterReset {
- // }
+ switch {
+ case other.CounterResetHint == h.CounterResetHint:
+ // Adding apples to apples, all good. No need to change anything.
+ case h.CounterResetHint == GaugeType:
+ // Adding something else to a gauge. That's probably OK. Outcome is a gauge.
+ // Nothing to do since the receiver is already marked as gauge.
+ case other.CounterResetHint == GaugeType:
+ // Similar to before, but this time the receiver is "something else" and we have to change it to gauge.
+ h.CounterResetHint = GaugeType
+ case h.CounterResetHint == UnknownCounterReset:
+ // With the receiver's CounterResetHint being "unknown", this could still be legitimate
+ // if the caller knows what they are doing. Outcome is then again "unknown".
+ // No need to do anything since the receiver's CounterResetHint is already "unknown".
+ case other.CounterResetHint == UnknownCounterReset:
+ // Similar to before, but now we have to set the receiver's CounterResetHint to "unknown".
+ h.CounterResetHint = UnknownCounterReset
+ default:
+ // All other cases shouldn't actually happen.
+ // They are a direct collision of CounterReset and NotCounterReset.
+ // Conservatively set the CounterResetHint to "unknown" and issue a warning.
+ h.CounterResetHint = UnknownCounterReset
+ // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
}
otherZeroCount := h.reconcileZeroBuckets(other)
@@ -433,9 +438,9 @@ func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
// of observations, but NOT the sum of observations) is smaller in the receiving
// histogram compared to the previous histogram. Otherwise, it returns false.
//
-// This method will shortcut to true if a CounterReset is detected, and shortcut
-// to false if NotCounterReset is detected. Otherwise it will do the work to detect
-// a reset.
+// This method will shortcut to true if a CounterReset is detected, and shortcut
+// to false if NotCounterReset is detected. Otherwise it will do the work to detect
+// a reset.
//
// Special behavior in case the Schema or the ZeroThreshold are not the same in
// both histograms:
From 0c0c2af7f5566f1e7002f26ea51549afb793d323 Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Sun, 19 Feb 2023 23:04:51 +0530
Subject: [PATCH 012/231] Do not re-encode head chunk in ChunkQuerier
Signed-off-by: Ganesh Vernekar
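The crux of the change: reading the bytes of the open head chunk races with
concurrent appends, so ChunkWithCopy snapshots those bytes first. A minimal
sketch of that snapshot pattern (illustrative, not the patch code; with Go
1.20 the manual copy in the diff's TODO could become bytes.Clone):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	head := []byte{0x01, 0x02, 0x03} // stand-in for the open head chunk's byte stream
	snapshot := bytes.Clone(head)    // deep copy: later appends cannot affect readers of snapshot

	head = append(head, 0x04)             // the appender keeps writing
	fmt.Println(len(snapshot), len(head)) // 3 4
}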
---
tsdb/db_test.go | 73 +++++++++++++++++++++++++++++++++++++++++++++++
tsdb/head_read.go | 51 ++++++++++++++++++++++++++-------
tsdb/querier.go | 46 +++++++++++++++--------------
tsdb/test.txt | 1 +
4 files changed, 139 insertions(+), 32 deletions(-)
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 9e5623bea7..26da7122ad 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -6334,3 +6334,76 @@ func compareSeries(t require.TestingT, expected, actual map[string][]tsdbutil.Sa
}
}
}
+
+// TestChunkQuerierReadWriteRace looks for any possible race between appending
+// samples and reading chunks: the head chunk that is being appended to can be
+// read in parallel, and we should be able to make a copy of the chunk without
+// worrying about the concurrent write.
+func TestChunkQuerierReadWriteRace(t *testing.T) {
+ db := openTestDB(t, nil, nil)
+ defer func() {
+ require.NoError(t, db.Close())
+ }()
+
+ lbls := labels.FromStrings("foo", "bar")
+
+ writer := func() error {
+ <-time.After(5 * time.Millisecond) // Initial pause while readers start.
+ ts := 0
+ for i := 0; i < 500; i++ {
+ app := db.Appender(context.Background())
+ for j := 0; j < 10; j++ {
+ ts++
+ _, err := app.Append(0, lbls, int64(ts), float64(ts*100))
+ if err != nil {
+ return err
+ }
+ }
+ err := app.Commit()
+ if err != nil {
+ return err
+ }
+ <-time.After(time.Millisecond)
+ }
+ return nil
+ }
+
+ reader := func() {
+ querier, err := db.ChunkQuerier(context.Background(), math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ defer func(q storage.ChunkQuerier) {
+ require.NoError(t, q.Close())
+ }(querier)
+ ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ for ss.Next() {
+ cs := ss.At()
+ it := cs.Iterator(nil)
+ for it.Next() {
+ m := it.At()
+ b := m.Chunk.Bytes()
+ bb := make([]byte, len(b))
+ copy(bb, b) // This copying of chunk bytes detects any race.
+ }
+ }
+ require.NoError(t, ss.Err())
+ }
+
+ ch := make(chan struct{})
+ var writerErr error
+ go func() {
+ defer close(ch)
+ writerErr = writer()
+ }()
+
+Outer:
+ for {
+ reader()
+ select {
+ case <-ch:
+ break Outer
+ default:
+ }
+ }
+
+ require.NoError(t, writerErr)
+}
diff --git a/tsdb/head_read.go b/tsdb/head_read.go
index efcafcf6c5..b54e53aa07 100644
--- a/tsdb/head_read.go
+++ b/tsdb/head_read.go
@@ -274,22 +274,36 @@ func (h *headChunkReader) Close() error {
// Chunk returns the chunk for the reference number.
func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
+ chk, _, err := h.chunk(meta, false)
+ return chk, err
+}
+
+// ChunkWithCopy returns the chunk for the reference number.
+// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk.
+func (h *headChunkReader) ChunkWithCopy(meta chunks.Meta) (chunkenc.Chunk, int64, error) {
+ return h.chunk(meta, true)
+}
+
+// chunk returns the chunk for the reference number.
+// If copyLastChunk is true, it makes a copy of the head chunk before returning it.
+// It also returns the max time of the chunk.
+func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.Chunk, int64, error) {
sid, cid := chunks.HeadChunkRef(meta.Ref).Unpack()
s := h.head.series.getByID(sid)
// This means that the series has been garbage collected.
if s == nil {
- return nil, storage.ErrNotFound
+ return nil, 0, storage.ErrNotFound
}
s.Lock()
- c, garbageCollect, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool)
+ c, headChunk, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool)
if err != nil {
s.Unlock()
- return nil, err
+ return nil, 0, err
}
defer func() {
- if garbageCollect {
+ if !headChunk {
// Set this to nil so that Go GC can collect it after it has been used.
c.chunk = nil
h.head.memChunkPool.Put(c)
@@ -299,22 +313,36 @@ func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) {
// This means that the chunk is outside the specified range.
if !c.OverlapsClosedInterval(h.mint, h.maxt) {
s.Unlock()
- return nil, storage.ErrNotFound
+ return nil, 0, storage.ErrNotFound
+ }
+
+ chk, maxTime := c.chunk, c.maxTime
+ if headChunk && copyLastChunk {
+ // The caller may ask to copy the head chunk in order to take the
+ // bytes of the chunk without causing the race between read and append.
+ b := s.headChunk.chunk.Bytes()
+ newB := make([]byte, len(b))
+ copy(newB, b) // TODO(codesome): Use bytes.Clone() when we upgrade to Go 1.20.
+ // TODO(codesome): Put back in the pool (non-trivial).
+ chk, err = h.head.opts.ChunkPool.Get(s.headChunk.chunk.Encoding(), newB)
+ if err != nil {
+ return nil, 0, err
+ }
}
s.Unlock()
return &safeChunk{
- Chunk: c.chunk,
+ Chunk: chk,
s: s,
cid: cid,
isoState: h.isoState,
- }, nil
+ }, maxTime, nil
}
// chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk.
-// If garbageCollect is true, it means that the returned *memChunk
+// If headChunk is false, it means that the returned *memChunk
// (and not the chunkenc.Chunk inside it) can be garbage collected after its usage.
-func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, garbageCollect bool, err error) {
+func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, headChunk bool, err error) {
// ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are
// incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index.
// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
@@ -323,11 +351,12 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
if ix < 0 || ix > len(s.mmappedChunks) {
return nil, false, storage.ErrNotFound
}
+
if ix == len(s.mmappedChunks) {
if s.headChunk == nil {
return nil, false, errors.New("invalid head chunk")
}
- return s.headChunk, false, nil
+ return s.headChunk, true, nil
}
chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref)
if err != nil {
@@ -340,7 +369,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi
mc.chunk = chk
mc.minTime = s.mmappedChunks[ix].minTime
mc.maxTime = s.mmappedChunks[ix].maxTime
- return mc, true, nil
+ return mc, false, nil
}
// oooMergedChunk returns the requested chunk based on the given chunks.Meta
diff --git a/tsdb/querier.go b/tsdb/querier.go
index 061d5b3941..b4513218e9 100644
--- a/tsdb/querier.go
+++ b/tsdb/querier.go
@@ -584,7 +584,11 @@ func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr Chunk
p.currChkMeta = chunks.Meta{}
}
-func (p *populateWithDelGenericSeriesIterator) next() bool {
+// If copyHeadChunk is true, then the head chunk (i.e. the in-memory chunk of the TSDB)
+// is deep copied to avoid a race between appending samples and reading the chunk bytes.
+// However, if the deletion intervals overlap with the head chunk, then the head chunk is
+// not copied irrespective of copyHeadChunk because it will be re-encoded later anyway.
+func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool {
if p.err != nil || p.i >= len(p.chks)-1 {
return false
}
@@ -592,12 +596,6 @@ func (p *populateWithDelGenericSeriesIterator) next() bool {
p.i++
p.currChkMeta = p.chks[p.i]
- p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta)
- if p.err != nil {
- p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currChkMeta.Ref, p.blockID.String())
- return false
- }
-
p.bufIter.Intervals = p.bufIter.Intervals[:0]
for _, interval := range p.intervals {
if p.currChkMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) {
@@ -605,22 +603,28 @@ func (p *populateWithDelGenericSeriesIterator) next() bool {
}
}
- // Re-encode head chunks that are still open (being appended to) or
- // outside the compacted MaxTime range.
- // The chunk.Bytes() method is not safe for open chunks hence the re-encoding.
- // This happens when snapshotting the head block or just fetching chunks from TSDB.
- //
- // TODO(codesome): think how to avoid the typecasting to verify when it is head block.
- _, isSafeChunk := p.currChkMeta.Chunk.(*safeChunk)
- if len(p.bufIter.Intervals) == 0 && !(isSafeChunk && p.currChkMeta.MaxTime == math.MaxInt64) {
- // If there is no overlap with deletion intervals AND it's NOT
- // an "open" head chunk, we can take chunk as it is.
+ hcr, ok := p.chunks.(*headChunkReader)
+ if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 {
+ // ChunkWithCopy will copy the head chunk.
+ var maxt int64
+ p.currChkMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currChkMeta)
+ // For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
+ p.currChkMeta.MaxTime = maxt
+ } else {
+ p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta)
+ }
+ if p.err != nil {
+ p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currChkMeta.Ref, p.blockID.String())
+ return false
+ }
+
+ if len(p.bufIter.Intervals) == 0 {
+ // If there is no overlap with deletion intervals, we can take chunk as it is.
p.currDelIter = nil
return true
}
- // We don't want the full chunk, or it's potentially still opened, take
- // just a part of it.
+ // We don't want the full chunk, take just a part of it.
p.bufIter.Iter = p.currChkMeta.Chunk.Iterator(p.bufIter.Iter)
p.currDelIter = &p.bufIter
return true
@@ -677,7 +681,7 @@ func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType {
}
}
- for p.next() {
+ for p.next(false) {
if p.currDelIter != nil {
p.curr = p.currDelIter
} else {
@@ -742,7 +746,7 @@ func (p *populateWithDelChunkSeriesIterator) reset(blockID ulid.ULID, cr ChunkRe
}
func (p *populateWithDelChunkSeriesIterator) Next() bool {
- if !p.next() {
+ if !p.next(true) {
return false
}
p.curr = p.currChkMeta
diff --git a/tsdb/test.txt b/tsdb/test.txt
index e69de29bb2..a66a6fb729 100644
--- a/tsdb/test.txt
+++ b/tsdb/test.txt
@@ -0,0 +1 @@
+make: Nothing to be done for `test'.
From 45b025898faeec98569b5900c5adc556f9adcbcf Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Mon, 20 Feb 2023 13:58:06 +0530
Subject: [PATCH 013/231] Add BenchmarkHeadChunkQuerier and
BenchmarkHeadQuerier
Signed-off-by: Ganesh Vernekar
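The two benchmarks can be run in isolation with an invocation along these
lines (illustrative):

go test ./tsdb -run '^$' -bench 'BenchmarkHead(Chunk)?Querier' -benchmem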
---
tsdb/querier_test.go | 90 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 90 insertions(+)
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go
index e6e9f143f1..ae9ddad015 100644
--- a/tsdb/querier_test.go
+++ b/tsdb/querier_test.go
@@ -2246,3 +2246,93 @@ func TestBlockBaseSeriesSet(t *testing.T) {
require.NoError(t, bcs.Err())
}
}
+
+func BenchmarkHeadChunkQuerier(b *testing.B) {
+ db := openTestDB(b, nil, nil)
+ defer func() {
+ require.NoError(b, db.Close())
+ }()
+
+ // 3h of data.
+ numTimeseries := 100
+ app := db.Appender(context.Background())
+ for i := 0; i < 120*6; i++ {
+ for j := 0; j < numTimeseries; j++ {
+ lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", j))
+ if i%10 == 0 {
+ require.NoError(b, app.Commit())
+ app = db.Appender(context.Background())
+ }
+ _, err := app.Append(0, lbls, int64(i*15)*time.Second.Milliseconds(), float64(i*100))
+ require.NoError(b, err)
+ }
+ }
+ require.NoError(b, app.Commit())
+
+ querier, err := db.ChunkQuerier(context.Background(), math.MinInt64, math.MaxInt64)
+ require.NoError(b, err)
+ defer func(q storage.ChunkQuerier) {
+ require.NoError(b, q.Close())
+ }(querier)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ total := 0
+ for ss.Next() {
+ cs := ss.At()
+ it := cs.Iterator(nil)
+ for it.Next() {
+ m := it.At()
+ total += m.Chunk.NumSamples()
+ }
+ }
+ _ = total
+ require.NoError(b, ss.Err())
+ }
+}
+
+func BenchmarkHeadQuerier(b *testing.B) {
+ db := openTestDB(b, nil, nil)
+ defer func() {
+ require.NoError(b, db.Close())
+ }()
+
+ // 3h of data.
+ numTimeseries := 100
+ app := db.Appender(context.Background())
+ for i := 0; i < 120*6; i++ {
+ for j := 0; j < numTimeseries; j++ {
+ lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", j))
+ if i%10 == 0 {
+ require.NoError(b, app.Commit())
+ app = db.Appender(context.Background())
+ }
+ _, err := app.Append(0, lbls, int64(i*15)*time.Second.Milliseconds(), float64(i*100))
+ require.NoError(b, err)
+ }
+ }
+ require.NoError(b, app.Commit())
+
+ querier, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
+ require.NoError(b, err)
+ defer func(q storage.Querier) {
+ require.NoError(b, q.Close())
+ }(querier)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ total := int64(0)
+ for ss.Next() {
+ cs := ss.At()
+ it := cs.Iterator(nil)
+ for it.Next() != chunkenc.ValNone {
+ ts, _ := it.At()
+ total += ts
+ }
+ }
+ _ = total
+ require.NoError(b, ss.Err())
+ }
+}
From 0a3f203c636c5de3a32b3ef35350b041de5afcb0 Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Tue, 21 Feb 2023 14:02:59 +0530
Subject: [PATCH 014/231] Update tests to not assume the chunk implementation
Signed-off-by: Ganesh Vernekar
---
tsdb/db_test.go | 26 ++++++++++++++++++++++----
tsdb/querier_test.go | 14 +++++++++++++-
2 files changed, 35 insertions(+), 5 deletions(-)
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 26da7122ad..04db4560c2 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -130,7 +130,25 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[str
return result
}
-// queryChunks runs a matcher query against the querier and fully expands its data.
+// queryAndExpandChunks runs a matcher query against the querier and fully expands its data into samples.
+func queryAndExpandChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Matcher) map[string][][]tsdbutil.Sample {
+ s := queryChunks(t, q, matchers...)
+
+ res := make(map[string][][]tsdbutil.Sample)
+ for k, v := range s {
+ var samples [][]tsdbutil.Sample
+ for _, chk := range v {
+ sam, err := storage.ExpandSamples(chk.Chunk.Iterator(nil), nil)
+ require.NoError(t, err)
+ samples = append(samples, sam)
+ }
+ res[k] = samples
+ }
+
+ return res
+}
+
+// queryChunks runs a matcher query against the querier and expands its data.
func queryChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Matcher) map[string][]chunks.Meta {
ss := q.Select(false, nil, matchers...)
defer func() {
@@ -2367,7 +2385,7 @@ func TestDBReadOnly(t *testing.T) {
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
expBlocks []*Block
expSeries map[string][]tsdbutil.Sample
- expChunks map[string][]chunks.Meta
+ expChunks map[string][][]tsdbutil.Sample
expDBHash []byte
matchAll = labels.MustNewMatcher(labels.MatchEqual, "", "")
err error
@@ -2418,7 +2436,7 @@ func TestDBReadOnly(t *testing.T) {
expSeries = query(t, q, matchAll)
cq, err := dbWritable.ChunkQuerier(context.TODO(), math.MinInt64, math.MaxInt64)
require.NoError(t, err)
- expChunks = queryChunks(t, cq, matchAll)
+ expChunks = queryAndExpandChunks(t, cq, matchAll)
require.NoError(t, dbWritable.Close()) // Close here to allow getting the dir hash for windows.
expDBHash = testutil.DirHash(t, dbWritable.Dir())
@@ -2452,7 +2470,7 @@ func TestDBReadOnly(t *testing.T) {
t.Run("chunk querier", func(t *testing.T) {
cq, err := dbReadOnly.ChunkQuerier(context.TODO(), math.MinInt64, math.MaxInt64)
require.NoError(t, err)
- readOnlySeries := queryChunks(t, cq, matchAll)
+ readOnlySeries := queryAndExpandChunks(t, cq, matchAll)
readOnlyDBHash := testutil.DirHash(t, dbDir)
require.Equal(t, len(expChunks), len(readOnlySeries), "total series mismatch")
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go
index ae9ddad015..cf9867a4f3 100644
--- a/tsdb/querier_test.go
+++ b/tsdb/querier_test.go
@@ -235,7 +235,19 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
chksRes, errRes := storage.ExpandChunks(sres.Iterator(nil))
rmChunkRefs(chksRes)
require.Equal(t, errExp, errRes)
- require.Equal(t, chksExp, chksRes)
+
+ require.Equal(t, len(chksExp), len(chksRes))
+ var exp, act [][]tsdbutil.Sample
+ for i := range chksExp {
+ samples, err := storage.ExpandSamples(chksExp[i].Chunk.Iterator(nil), nil)
+ require.NoError(t, err)
+ exp = append(exp, samples)
+ samples, err = storage.ExpandSamples(chksRes[i].Chunk.Iterator(nil), nil)
+ require.NoError(t, err)
+ act = append(act, samples)
+ }
+
+ require.Equal(t, exp, act)
}
require.NoError(t, res.Err())
})
From d01c51fad09784aa53bdf5b16fc39bbd335d58e2 Mon Sep 17 00:00:00 2001
From: g3offrey <11151445+g3offrey@users.noreply.github.com>
Date: Wed, 15 Mar 2023 15:57:15 +0100
Subject: [PATCH 015/231] docs: update ansible installation link
Signed-off-by: g3offrey <11151445+g3offrey@users.noreply.github.com>
---
docs/installation.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/installation.md b/docs/installation.md
index 592d67b28e..05df14a46e 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -87,7 +87,7 @@ the following third-party contributions:
### Ansible
-* [Cloud Alchemy/ansible-prometheus](https://github.com/cloudalchemy/ansible-prometheus)
+* [prometheus-community/ansible](https://github.com/prometheus-community/ansible)
### Chef
From beb7d3b80f479aafa452828ff7d62ec78389a774 Mon Sep 17 00:00:00 2001
From: Oleg Zaytsev
Date: Thu, 16 Mar 2023 09:36:19 +0100
Subject: [PATCH 016/231] remote.Client: store urlString
During remote write, we call url.String() twice:
- to add the Endpoint() to the span
- to actually know where we should send the request
This value does not change over time, and it's not really that
lightweight to calculate. I wrote this simple benchmark:
func BenchmarkURLString(b *testing.B) {
u, err := url.Parse("https://remote.write.com/api/v1")
require.NoError(b, err)
b.Run("string", func(b *testing.B) {
count := 0
for i := 0; i < b.N; i++ {
count += len(u.String())
}
})
}
And the results are ~200ns/op, 80B/op, 3 allocs/op.
Yes, we're going to hit the network here, which costs vastly more than
this, but still, on agents that send 500 requests per second, that is
1500 wasteful allocations per second.
Signed-off-by: Oleg Zaytsev
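The fix is to compute the string once at construction time. A minimal sketch
of the caching pattern (illustrative, not the actual Client code):

package main

import (
	"fmt"
	"net/url"
)

type client struct {
	urlString string // cached once; url.URL.String() is not free to call per request
}

func newClient(raw string) (*client, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return nil, err
	}
	return &client{urlString: u.String()}, nil
}

func main() {
	c, err := newClient("https://remote.write.com/api/v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.urlString) // reused for every request and for Endpoint()
}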
---
storage/remote/client.go | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/storage/remote/client.go b/storage/remote/client.go
index 92666cd1d1..1625c9918d 100644
--- a/storage/remote/client.go
+++ b/storage/remote/client.go
@@ -80,7 +80,7 @@ func init() {
// Client allows reading and writing from/to a remote HTTP endpoint.
type Client struct {
remoteName string // Used to differentiate clients in metrics.
- url *config_util.URL
+ urlString string // url.String()
Client *http.Client
timeout time.Duration
@@ -122,7 +122,7 @@ func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) {
return &Client{
remoteName: name,
- url: conf.URL,
+ urlString: conf.URL.String(),
Client: httpClient,
timeout: time.Duration(conf.Timeout),
readQueries: remoteReadQueries.WithLabelValues(name, conf.URL.String()),
@@ -154,7 +154,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
return &Client{
remoteName: name,
- url: conf.URL,
+ urlString: conf.URL.String(),
Client: httpClient,
retryOnRateLimit: conf.RetryOnRateLimit,
timeout: time.Duration(conf.Timeout),
@@ -187,7 +187,7 @@ type RecoverableError struct {
// Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled
// and encoded bytes from codec.go.
func (c *Client) Store(ctx context.Context, req []byte) error {
- httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(req))
+ httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(req))
if err != nil {
// Errors from NewRequest are from unparsable URLs, so are not
// recoverable.
@@ -255,7 +255,7 @@ func (c Client) Name() string {
// Endpoint is the remote read or write endpoint.
func (c Client) Endpoint() string {
- return c.url.String()
+ return c.urlString
}
// Read reads from a remote endpoint.
@@ -276,7 +276,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
}
compressed := snappy.Encode(nil, data)
- httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(compressed))
+ httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(compressed))
if err != nil {
return nil, fmt.Errorf("unable to create request: %w", err)
}
@@ -310,7 +310,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
}
if httpResp.StatusCode/100 != 2 {
- return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.url.String(), httpResp.Status, strings.TrimSpace(string(compressed)))
+ return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed)))
}
uncompressed, err := snappy.Decode(nil, compressed)
From 8f6d5dcd4516ec9ef99b079a6f8b786b91534995 Mon Sep 17 00:00:00 2001
From: Abhijit Mukherjee
Date: Thu, 16 Mar 2023 15:53:47 +0530
Subject: [PATCH 017/231] Fix: getting rid of EncOOOXOR chunk encoding (#12111)
Signed-off-by: mabhi
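The bookkeeping that moves into ChunkDiskMapper is plain bit masking on the
on-disk encoding byte: the top bit marks an out-of-order chunk, the low 7 bits
keep the real encoding. A standalone sketch of the same arithmetic
(illustrative only):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

const outOfOrderMask = uint8(0b10000000) // same mask value as in the patch

func main() {
	enc := uint8(chunkenc.EncXOR)
	masked := enc | outOfOrderMask // what writeChunk stores for an OOO chunk

	fmt.Println(masked&outOfOrderMask != 0)                                   // true: recognized as out-of-order
	fmt.Println(chunkenc.Encoding(masked&^outOfOrderMask) == chunkenc.EncXOR) // true: original encoding restored
}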
---
tsdb/chunkenc/chunk.go | 19 +++--------
tsdb/chunkenc/xor.go | 9 -----
tsdb/chunks/chunk_write_queue.go | 5 +--
tsdb/chunks/chunk_write_queue_test.go | 10 +++---
tsdb/chunks/head_chunks.go | 49 +++++++++++++++++++++------
tsdb/chunks/head_chunks_test.go | 25 ++++++++------
tsdb/head.go | 3 +-
tsdb/head_append.go | 5 ++-
tsdb/head_test.go | 4 +--
9 files changed, 70 insertions(+), 59 deletions(-)
diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go
index b7d240123b..c550cbc78e 100644
--- a/tsdb/chunkenc/chunk.go
+++ b/tsdb/chunkenc/chunk.go
@@ -47,20 +47,9 @@ func (e Encoding) String() string {
return ""
}
-// Chunk encodings for out-of-order chunks.
-// These encodings must be only used by the Head block for its internal bookkeeping.
-const (
- OutOfOrderMask = 0b10000000
- EncOOOXOR = EncXOR | OutOfOrderMask
-)
-
-func IsOutOfOrderChunk(e Encoding) bool {
- return (e & OutOfOrderMask) != 0
-}
-
// IsValidEncoding returns true for supported encodings.
func IsValidEncoding(e Encoding) bool {
- return e == EncXOR || e == EncOOOXOR || e == EncHistogram || e == EncFloatHistogram
+ return e == EncXOR || e == EncHistogram || e == EncFloatHistogram
}
// Chunk holds a sequence of sample pairs that can be iterated over and appended to.
@@ -262,7 +251,7 @@ func NewPool() Pool {
func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {
switch e {
- case EncXOR, EncOOOXOR:
+ case EncXOR:
c := p.xor.Get().(*XORChunk)
c.b.stream = b
c.b.count = 0
@@ -283,7 +272,7 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {
func (p *pool) Put(c Chunk) error {
switch c.Encoding() {
- case EncXOR, EncOOOXOR:
+ case EncXOR:
xc, ok := c.(*XORChunk)
// This may happen often with wrapped chunks. Nothing we can really do about
// it but returning an error would cause a lot of allocations again. Thus,
@@ -327,7 +316,7 @@ func (p *pool) Put(c Chunk) error {
// bytes.
func FromData(e Encoding, d []byte) (Chunk, error) {
switch e {
- case EncXOR, EncOOOXOR:
+ case EncXOR:
return &XORChunk{b: bstream{count: 0, stream: d}}, nil
case EncHistogram:
return &HistogramChunk{b: bstream{count: 0, stream: d}}, nil
diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go
index 62e90cbaae..2fa2f613cb 100644
--- a/tsdb/chunkenc/xor.go
+++ b/tsdb/chunkenc/xor.go
@@ -506,12 +506,3 @@ func xorRead(br *bstreamReader, value *float64, leading, trailing *uint8) error
*value = math.Float64frombits(vbits)
return nil
}
-
-// OOOXORChunk holds a XORChunk and overrides the Encoding() method.
-type OOOXORChunk struct {
- *XORChunk
-}
-
-func (c *OOOXORChunk) Encoding() Encoding {
- return EncOOOXOR
-}
diff --git a/tsdb/chunks/chunk_write_queue.go b/tsdb/chunks/chunk_write_queue.go
index ab34eb06c7..6d2dc743b0 100644
--- a/tsdb/chunks/chunk_write_queue.go
+++ b/tsdb/chunks/chunk_write_queue.go
@@ -42,6 +42,7 @@ type chunkWriteJob struct {
maxt int64
chk chunkenc.Chunk
ref ChunkDiskMapperRef
+ isOOO bool
callback func(error)
}
@@ -76,7 +77,7 @@ type chunkWriteQueue struct {
}
// writeChunkF is a function which writes chunks, it is dynamic to allow mocking in tests.
-type writeChunkF func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool) error
+type writeChunkF func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool, bool) error
func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChunkF) *chunkWriteQueue {
counters := prometheus.NewCounterVec(
@@ -133,7 +134,7 @@ func (c *chunkWriteQueue) start() {
}
func (c *chunkWriteQueue) processJob(job chunkWriteJob) {
- err := c.writeChunk(job.seriesRef, job.mint, job.maxt, job.chk, job.ref, job.cutFile)
+ err := c.writeChunk(job.seriesRef, job.mint, job.maxt, job.chk, job.ref, job.isOOO, job.cutFile)
if job.callback != nil {
job.callback(err)
}
diff --git a/tsdb/chunks/chunk_write_queue_test.go b/tsdb/chunks/chunk_write_queue_test.go
index a55896a6d6..c908d47f5b 100644
--- a/tsdb/chunks/chunk_write_queue_test.go
+++ b/tsdb/chunks/chunk_write_queue_test.go
@@ -31,7 +31,7 @@ func TestChunkWriteQueue_GettingChunkFromQueue(t *testing.T) {
blockWriterWg.Add(1)
// blockingChunkWriter blocks until blockWriterWg is done.
- blockingChunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _ bool) error {
+ blockingChunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _, _ bool) error {
blockWriterWg.Wait()
return nil
}
@@ -63,7 +63,7 @@ func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) {
gotCutFile bool
)
- blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, cutFile bool) error {
+ blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error {
gotSeriesRef = seriesRef
gotMint = mint
gotMaxt = maxt
@@ -101,7 +101,7 @@ func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) {
unblockChunkWriterCh := make(chan struct{}, sizeLimit)
// blockingChunkWriter blocks until the unblockChunkWriterCh channel returns a value.
- blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, cutFile bool) error {
+ blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) error {
<-unblockChunkWriterCh
return nil
}
@@ -184,7 +184,7 @@ func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) {
func TestChunkWriteQueue_HandlerErrorViaCallback(t *testing.T) {
testError := errors.New("test error")
- chunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _ bool) error {
+ chunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _, _ bool) error {
return testError
}
@@ -212,7 +212,7 @@ func BenchmarkChunkWriteQueue_addJob(b *testing.B) {
for _, concurrentWrites := range []int{1, 10, 100, 1000} {
b.Run(fmt.Sprintf("%d concurrent writes", concurrentWrites), func(b *testing.B) {
issueReadSignal := make(chan struct{})
- q := newChunkWriteQueue(nil, 1000, func(ref HeadSeriesRef, i, i2 int64, chunk chunkenc.Chunk, ref2 ChunkDiskMapperRef, b bool) error {
+ q := newChunkWriteQueue(nil, 1000, func(ref HeadSeriesRef, i, i2 int64, chunk chunkenc.Chunk, ref2 ChunkDiskMapperRef, ooo, b bool) error {
if withReads {
select {
case issueReadSignal <- struct{}{}:
diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go
index a0bd735b8b..a7ff90475e 100644
--- a/tsdb/chunks/head_chunks.go
+++ b/tsdb/chunks/head_chunks.go
@@ -273,6 +273,26 @@ func NewChunkDiskMapper(reg prometheus.Registerer, dir string, pool chunkenc.Poo
return m, m.openMMapFiles()
}
+// Chunk encodings for out-of-order chunks.
+// These encodings must be only used by the Head block for its internal bookkeeping.
+const (
+ OutOfOrderMask = uint8(0b10000000)
+)
+
+func (cdm *ChunkDiskMapper) ApplyOutOfOrderMask(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
+ enc := uint8(sourceEncoding) | OutOfOrderMask
+ return chunkenc.Encoding(enc)
+}
+
+func (cdm *ChunkDiskMapper) IsOutOfOrderChunk(e chunkenc.Encoding) bool {
+ return (uint8(e) & OutOfOrderMask) != 0
+}
+
+func (cdm *ChunkDiskMapper) RemoveMasks(sourceEncoding chunkenc.Encoding) chunkenc.Encoding {
+ restored := uint8(sourceEncoding) & (^OutOfOrderMask)
+ return chunkenc.Encoding(restored)
+}
+
// openMMapFiles opens all files within dir for mmapping.
func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) {
cdm.mmappedChunkFiles = map[int]*mmappedChunkFile{}
@@ -403,17 +423,17 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro
// WriteChunk writes the chunk to the disk.
// The returned chunk ref is the reference from where the chunk encoding starts for the chunk.
-func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
+func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, isOOO bool, callback func(err error)) (chkRef ChunkDiskMapperRef) {
// cdm.evtlPosMtx must be held to serialize the calls to cdm.evtlPos.getNextChunkRef() and the writing of the chunk (either with or without queue).
cdm.evtlPosMtx.Lock()
defer cdm.evtlPosMtx.Unlock()
ref, cutFile := cdm.evtlPos.getNextChunkRef(chk)
if cdm.writeQueue != nil {
- return cdm.writeChunkViaQueue(ref, cutFile, seriesRef, mint, maxt, chk, callback)
+ return cdm.writeChunkViaQueue(ref, isOOO, cutFile, seriesRef, mint, maxt, chk, callback)
}
- err := cdm.writeChunk(seriesRef, mint, maxt, chk, ref, cutFile)
+ err := cdm.writeChunk(seriesRef, mint, maxt, chk, ref, isOOO, cutFile)
if callback != nil {
callback(err)
}
@@ -421,7 +441,7 @@ func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64
return ref
}
-func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, cutFile bool, seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
+func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, isOOO, cutFile bool, seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
var err error
if callback != nil {
defer func() {
@@ -438,13 +458,14 @@ func (cdm *ChunkDiskMapper) writeChunkViaQueue(ref ChunkDiskMapperRef, cutFile b
maxt: maxt,
chk: chk,
ref: ref,
+ isOOO: isOOO,
callback: callback,
})
return ref
}
-func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, cutFile bool) (err error) {
+func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, isOOO, cutFile bool) (err error) {
cdm.writePathMtx.Lock()
defer cdm.writePathMtx.Unlock()
@@ -476,7 +497,11 @@ func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64
bytesWritten += MintMaxtSize
binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(maxt))
bytesWritten += MintMaxtSize
- cdm.byteBuf[bytesWritten] = byte(chk.Encoding())
+ enc := chk.Encoding()
+ if isOOO {
+ enc = cdm.ApplyOutOfOrderMask(enc)
+ }
+ cdm.byteBuf[bytesWritten] = byte(enc)
bytesWritten += ChunkEncodingSize
n := binary.PutUvarint(cdm.byteBuf[bytesWritten:], uint64(len(chk.Bytes())))
bytesWritten += n
@@ -696,7 +721,9 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error
// Encoding.
chkEnc := mmapFile.byteSlice.Range(chkStart, chkStart+ChunkEncodingSize)[0]
-
+ sourceChkEnc := chunkenc.Encoding(chkEnc)
+ // Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding.
+ chkEnc = byte(cdm.RemoveMasks(sourceChkEnc))
// Data length.
// With the minimum chunk length this should never cause us reading
// over the end of the slice.
@@ -762,7 +789,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error
// and runs the provided function with information about each chunk. It returns on the first error encountered.
// NOTE: This method needs to be called at least once after creating ChunkDiskMapper
// to set the maxt of all the file.
-func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error) (err error) {
+func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error) (err error) {
cdm.writePathMtx.Lock()
defer cdm.writePathMtx.Unlock()
@@ -860,8 +887,10 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu
if maxt > mmapFile.maxt {
mmapFile.maxt = maxt
}
-
- if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc); err != nil {
+ isOOO := cdm.IsOutOfOrderChunk(chkEnc)
+ // Extract the encoding from the byte. ChunkDiskMapper uses only the last 7 bits for the encoding.
+ chkEnc = cdm.RemoveMasks(chkEnc)
+ if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc, isOOO); err != nil {
if cerr, ok := err.(*CorruptionErr); ok {
cerr.Dir = cdm.dir.Name()
cerr.FileIndex = segID
diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go
index 0b5bc460d2..ac89ae3e59 100644
--- a/tsdb/chunks/head_chunks_test.go
+++ b/tsdb/chunks/head_chunks_test.go
@@ -98,7 +98,11 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
bytesWritten += MintMaxtSize
binary.BigEndian.PutUint64(buf[bytesWritten:], uint64(maxt))
bytesWritten += MintMaxtSize
- buf[bytesWritten] = byte(chunk.Encoding())
+ enc := chunk.Encoding()
+ if isOOO {
+ enc = hrw.ApplyOutOfOrderMask(enc)
+ }
+ buf[bytesWritten] = byte(enc)
bytesWritten += ChunkEncodingSize
n := binary.PutUvarint(buf[bytesWritten:], uint64(len(chunk.Bytes())))
bytesWritten += n
@@ -149,7 +153,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
hrw = createChunkDiskMapper(t, dir)
idx := 0
- require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error {
+ require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error {
t.Helper()
expData := expectedData[idx]
@@ -158,7 +162,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
require.Equal(t, expData.mint, mint)
require.Equal(t, expData.maxt, maxt)
require.Equal(t, expData.numSamples, numSamples)
- require.Equal(t, expData.isOOO, chunkenc.IsOutOfOrderChunk(encoding))
+ require.Equal(t, expData.isOOO, isOOO)
actChunk, err := hrw.Chunk(expData.chunkRef)
require.NoError(t, err)
@@ -188,7 +192,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
mint, maxt := timeRange+1, timeRange+step-1
var err error
awaitCb := make(chan struct{})
- hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), func(cbErr error) {
+ hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), false, func(cbErr error) {
err = cbErr
close(awaitCb)
})
@@ -282,7 +286,7 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
step := 100
mint, maxt := timeRange+1, timeRange+step-1
- hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), func(err error) {
+ hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), false, func(err error) {
close(awaitCb)
require.NoError(t, err)
})
@@ -363,7 +367,7 @@ func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
// Write a chunks to iterate on it later.
var err error
awaitCb := make(chan struct{})
- hrw.WriteChunk(1, 0, 1000, randomChunk(t), func(cbErr error) {
+ hrw.WriteChunk(1, 0, 1000, randomChunk(t), false, func(cbErr error) {
err = cbErr
close(awaitCb)
})
@@ -377,7 +381,7 @@ func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
hrw = createChunkDiskMapper(t, dir)
// Forcefully failing IterateAllChunks.
- require.Error(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding) error {
+ require.Error(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding, _ bool) error {
return errors.New("random error")
}))
@@ -396,7 +400,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
mint, maxt := timeRange+1, timeRange+step-1
var err error
awaitCb := make(chan struct{})
- hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), func(cbErr error) {
+ hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), false, func(cbErr error) {
err = cbErr
close(awaitCb)
})
@@ -489,7 +493,7 @@ func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper {
hrw, err := NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), DefaultWriteBufferSize, writeQueueSize)
require.NoError(t, err)
require.False(t, hrw.fileMaxtSet)
- require.NoError(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding) error {
+ require.NoError(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding, _ bool) error {
return nil
}))
require.True(t, hrw.fileMaxtSet)
@@ -517,9 +521,8 @@ func createChunk(t *testing.T, idx int, hrw *ChunkDiskMapper) (seriesRef HeadSer
awaitCb := make(chan struct{})
if rand.Intn(2) == 0 {
isOOO = true
- chunk = &chunkenc.OOOXORChunk{XORChunk: chunk.(*chunkenc.XORChunk)}
}
- chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, func(cbErr error) {
+ chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, isOOO, func(cbErr error) {
require.NoError(t, err)
close(awaitCb)
})
diff --git a/tsdb/head.go b/tsdb/head.go
index b28f5aca5e..b5239bdf84 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -784,10 +784,9 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries)
mmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{}
oooMmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{}
var lastRef, secondLastRef chunks.ChunkDiskMapperRef
- if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error {
+ if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error {
secondLastRef = lastRef
lastRef = chunkRef
- isOOO := chunkenc.IsOutOfOrderChunk(encoding)
if !isOOO && maxt < h.minValidTime.Load() {
return nil
}
diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index 8a622fafe5..e3beaae17b 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -1453,8 +1453,7 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap
return 0
}
xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
- oooXor := &chunkenc.OOOXORChunk{XORChunk: xor}
- chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, oooXor, handleChunkWriteError)
+ chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, xor, true, handleChunkWriteError)
s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
ref: chunkRef,
numSamples: uint16(xor.NumSamples()),
@@ -1471,7 +1470,7 @@ func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper
return
}
- chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk, handleChunkWriteError)
+ chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk, false, handleChunkWriteError)
s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{
ref: chunkRef,
numSamples: uint16(s.headChunk.chunk.NumSamples()),
diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index b5afed64b3..1a0558cce3 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -69,7 +69,7 @@ func newTestHead(t testing.TB, chunkRange int64, compressWAL, oooEnabled bool) (
h, err := NewHead(nil, nil, wal, nil, opts, nil)
require.NoError(t, err)
- require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(_ chunks.HeadSeriesRef, _ chunks.ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding) error {
+ require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(_ chunks.HeadSeriesRef, _ chunks.ChunkDiskMapperRef, _, _ int64, _ uint16, _ chunkenc.Encoding, _ bool) error {
return nil
}))
@@ -4177,7 +4177,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) {
uc := newUnsupportedChunk()
// Make this chunk not overlap with the previous and the next
- h.chunkDiskMapper.WriteChunk(chunks.HeadSeriesRef(seriesRef), 500, 600, uc, func(err error) { require.NoError(t, err) })
+ h.chunkDiskMapper.WriteChunk(chunks.HeadSeriesRef(seriesRef), 500, 600, uc, false, func(err error) { require.NoError(t, err) })
app = h.Appender(ctx)
for i := 700; i < 1200; i++ {
From 71c57a129282e0c0c6d15ef91b0d8404fc4cf375 Mon Sep 17 00:00:00 2001
From: beorn7
Date: Thu, 16 Mar 2023 13:55:57 +0100
Subject: [PATCH 018/231] docs: Clarify that range selectors use a closed
interval
Signed-off-by: beorn7
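An illustrative example of the boundary behavior: evaluating
`http_requests_total[5m]` at 12:00:00 selects samples with timestamps in
[11:55:00, 12:00:00], so a sample stamped exactly 11:55:00 and one stamped
exactly 12:00:00 are both included.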
---
docs/querying/basics.md | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/docs/querying/basics.md b/docs/querying/basics.md
index bc4478f628..9eb95c66eb 100644
--- a/docs/querying/basics.md
+++ b/docs/querying/basics.md
@@ -157,9 +157,11 @@ syntax](https://github.com/google/re2/wiki/Syntax).
Range vector literals work like instant vector literals, except that they
select a range of samples back from the current instant. Syntactically, a [time
-duration](#time-durations) is appended in square brackets (`[]`) at the end of a
-vector selector to specify how far back in time values should be fetched for
-each resulting range vector element.
+duration](#time-durations) is appended in square brackets (`[]`) at the end of
+a vector selector to specify how far back in time values should be fetched for
+each resulting range vector element. The range is a closed interval,
+i.e. samples with timestamps coinciding with either boundary of the range are
+still included in the selection.
In this example, we select all the values we have recorded within the last 5
minutes for all time series that have the metric name `http_requests_total` and
From 3c4ab7a0691f94899c8215f2cb0202c6db45a7af Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Thu, 16 Mar 2023 13:25:55 +0000
Subject: [PATCH 019/231] labels: add test for Builder.Range
Including mutating the Builder being Ranged over.
Signed-off-by: Bryan Boreham
---
model/labels/labels_test.go | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go
index 4832be3375..588a84b984 100644
--- a/model/labels/labels_test.go
+++ b/model/labels/labels_test.go
@@ -529,6 +529,11 @@ func TestBuilder(t *testing.T) {
base: FromStrings("aaa", "111"),
want: FromStrings("aaa", "111"),
},
+ {
+ base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"),
+ set: []Label{{"aaa", "444"}, {"bbb", "555"}, {"ccc", "666"}},
+ want: FromStrings("aaa", "444", "bbb", "555", "ccc", "666"),
+ },
{
base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"),
del: []string{"bbb"},
@@ -591,7 +596,15 @@ func TestBuilder(t *testing.T) {
b.Keep(tcase.keep...)
}
b.Del(tcase.del...)
- require.Equal(t, tcase.want, b.Labels(tcase.base))
+ require.Equal(t, tcase.want, b.Labels(EmptyLabels()))
+
+ // Check what happens when we call Range and mutate the builder.
+ b.Range(func(l Label) {
+ if l.Name == "aaa" || l.Name == "bbb" {
+ b.Del(l.Name)
+ }
+ })
+ require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels(tcase.base).Bytes(nil))
})
}
}
From 3743d87c56a610fbcdd7d49e6f85e2e873d37bc5 Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Thu, 16 Mar 2023 13:28:13 +0000
Subject: [PATCH 020/231] labels: cope with mutating Builder during Range call
Although we had a different slice, the underlying memory was the same so
any changes meant we could skip some values.
Signed-off-by: Bryan Boreham
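The aliasing hazard can be reproduced in isolation (illustrative sketch, not
Builder code): two slice headers sharing a backing array let an in-place
append overwrite elements that are still being iterated, which is exactly how
values could be skipped:

package main

import "fmt"

func main() {
	orig := []string{"aaa", "bbb", "ccc"}
	view := orig[:2] // distinct slice header, same backing array

	view = append(view, "ddd") // capacity suffices, so this writes orig[2] in place
	fmt.Println(orig[2])       // "ddd": an iteration over orig now misses "ccc"

	// Copying into separate storage first (the fix uses a stack-backed array)
	// breaks the aliasing.
	var stack [4]string
	safe := append(stack[:0], orig...)
	orig[0] = "mutated"
	fmt.Println(safe[0]) // still "aaa"
}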
---
model/labels/labels.go | 7 +++++--
model/labels/labels_string.go | 7 +++++--
2 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/model/labels/labels.go b/model/labels/labels.go
index 5e06e3b8da..6de001c3ce 100644
--- a/model/labels/labels.go
+++ b/model/labels/labels.go
@@ -545,9 +545,12 @@ func (b *Builder) Get(n string) string {
}
// Range calls f on each label in the Builder.
-// If f calls Set or Del on b then this may affect what callbacks subsequently happen.
func (b *Builder) Range(f func(l Label)) {
- origAdd, origDel := b.add, b.del
+ // Stack-based arrays to avoid heap allocation in most cases.
+ var addStack [1024]Label
+ var delStack [1024]string
+ // Take a copy of add and del, so they are unaffected by calls to Set() or Del().
+ origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...)
b.base.Range(func(l Label) {
if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) {
f(l)
diff --git a/model/labels/labels_string.go b/model/labels/labels_string.go
index 6fe14bedc2..98db29d254 100644
--- a/model/labels/labels_string.go
+++ b/model/labels/labels_string.go
@@ -599,9 +599,12 @@ func (b *Builder) Get(n string) string {
}
// Range calls f on each label in the Builder.
-// If f calls Set or Del on b then this may affect what callbacks subsequently happen.
func (b *Builder) Range(f func(l Label)) {
- origAdd, origDel := b.add, b.del
+ // Stack-based arrays to avoid heap allocation in most cases.
+ var addStack [1024]Label
+ var delStack [1024]string
+ // Take a copy of add and del, so they are unaffected by calls to Set() or Del().
+ origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...)
b.base.Range(func(l Label) {
if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) {
f(l)
From 2c6168be5f96a232b5c87e18b59920ba611891a0 Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Thu, 16 Mar 2023 20:21:40 +0100
Subject: [PATCH 023/231] Release 2.43.0-rc.1
Signed-off-by: Julien Pivotto
---
CHANGELOG.md | 4 ++++
VERSION | 2 +-
web/ui/module/codemirror-promql/package.json | 4 ++--
web/ui/module/lezer-promql/package.json | 2 +-
web/ui/package-lock.json | 14 +++++++-------
web/ui/react-app/package.json | 4 ++--
6 files changed, 17 insertions(+), 13 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cccbae7dd5..bf725d9665 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
# Changelog
+## 2.43.0-rc.1 / 2023-03-16
+
+* [BUGFIX] Fixed a bug where changes in the Builder's Range function could lead to skipped labels during iteration (#12145)
+
## 2.43.0-rc.0 / 2023-03-09
We are working on some performance improvements in Prometheus, which are only
diff --git a/VERSION b/VERSION
index 1a38590b27..a55e12bb97 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.43.0-rc.0
+2.43.0-rc.1
diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json
index 027029c15a..8d7a73ec70 100644
--- a/web/ui/module/codemirror-promql/package.json
+++ b/web/ui/module/codemirror-promql/package.json
@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
- "version": "0.43.0-rc.0",
+ "version": "0.43.0-rc.1",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
- "@prometheus-io/lezer-promql": "0.43.0-rc.0",
+ "@prometheus-io/lezer-promql": "0.43.0-rc.1",
"lru-cache": "^6.0.0"
},
"devDependencies": {
diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json
index b3f5ccceeb..0fb3bda0e8 100644
--- a/web/ui/module/lezer-promql/package.json
+++ b/web/ui/module/lezer-promql/package.json
@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
- "version": "0.43.0-rc.0",
+ "version": "0.43.0-rc.1",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",
diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json
index 6ff46f054e..736c438fd7 100644
--- a/web/ui/package-lock.json
+++ b/web/ui/package-lock.json
@@ -28,10 +28,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
- "version": "0.43.0-rc.0",
+ "version": "0.43.0-rc.1",
"license": "Apache-2.0",
"dependencies": {
- "@prometheus-io/lezer-promql": "0.43.0-rc.0",
+ "@prometheus-io/lezer-promql": "0.43.0-rc.1",
"lru-cache": "^6.0.0"
},
"devDependencies": {
@@ -61,7 +61,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
- "version": "0.43.0-rc.0",
+ "version": "0.43.0-rc.1",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.2.2",
@@ -20763,7 +20763,7 @@
},
"react-app": {
"name": "@prometheus-io/app",
- "version": "0.43.0-rc.0",
+ "version": "0.43.0-rc.1",
"dependencies": {
"@codemirror/autocomplete": "^6.4.0",
"@codemirror/commands": "^6.2.0",
@@ -20781,7 +20781,7 @@
"@lezer/lr": "^1.3.1",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
- "@prometheus-io/codemirror-promql": "0.43.0-rc.0",
+ "@prometheus-io/codemirror-promql": "0.43.0-rc.1",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^7.2.0",
@@ -23417,7 +23417,7 @@
"@lezer/lr": "^1.3.1",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
- "@prometheus-io/codemirror-promql": "0.43.0-rc.0",
+ "@prometheus-io/codemirror-promql": "0.43.0-rc.1",
"@testing-library/react-hooks": "^7.0.2",
"@types/enzyme": "^3.10.12",
"@types/flot": "0.0.32",
@@ -23468,7 +23468,7 @@
"@lezer/common": "^1.0.2",
"@lezer/highlight": "^1.1.3",
"@lezer/lr": "^1.3.1",
- "@prometheus-io/lezer-promql": "0.43.0-rc.0",
+ "@prometheus-io/lezer-promql": "0.43.0-rc.1",
"@types/lru-cache": "^5.1.1",
"isomorphic-fetch": "^3.0.0",
"lru-cache": "^6.0.0",
diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json
index 30ebd4922e..20f5819bb9 100644
--- a/web/ui/react-app/package.json
+++ b/web/ui/react-app/package.json
@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/app",
- "version": "0.43.0-rc.0",
+ "version": "0.43.0-rc.1",
"private": true,
"dependencies": {
"@codemirror/autocomplete": "^6.4.0",
@@ -19,7 +19,7 @@
"@lezer/common": "^1.0.2",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
- "@prometheus-io/codemirror-promql": "0.43.0-rc.0",
+ "@prometheus-io/codemirror-promql": "0.43.0-rc.1",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^7.2.0",
From 331a7dfd213baae7e798bb3fabde2ad0e6a00114 Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Thu, 16 Mar 2023 20:29:51 +0100
Subject: [PATCH 024/231] Replace '+' with '_' in docker image tag for semver
compatibility
This change introduces a new variable, SANITIZED_DOCKER_IMAGE_TAG, which
replaces any '+' characters in the original DOCKER_IMAGE_TAG with '_'
characters. Since '+' is not a valid character in a Docker image tag, this
keeps tags derived from semver versions that carry build metadata usable.
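For illustration only (not part of the patch), a minimal Go sketch of the
same substitution the Makefile performs with $(subst +,_,$(DOCKER_IMAGE_TAG));
the tag value is a hypothetical example:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        tag := "v2.43.0+stringlabels"                  // '+' is semver build metadata
        fmt.Println(strings.ReplaceAll(tag, "+", "_")) // v2.43.0_stringlabels
    }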
Signed-off-by: Julien Pivotto
---
Makefile.common | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/Makefile.common b/Makefile.common
index 6d8007c951..221941db9c 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,_,$(DOCKER_IMAGE_TAG))
+
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
@@ -205,7 +207,7 @@ common-tarball: promu
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
- docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
@@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
- docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
.PHONY: promu
promu: $(PROMU)
From 2fde2fb37d257f085d7abd08c1de6eda82383409 Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Tue, 7 Mar 2023 17:10:15 +0000
Subject: [PATCH 025/231] scrape: add Target.LabelsRange
This allows users of a Target to iterate labels without allocating heap memory.
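As a usage sketch (the helper below is made up for illustration and assumes
the scrape and model/labels packages), this counts a target's public labels
without the labels.Labels copy that Labels() would allocate:

    // publicLabelCount iterates in place instead of materializing a copy.
    func publicLabelCount(t *scrape.Target) int {
        n := 0
        t.LabelsRange(func(labels.Label) { n++ })
        return n
    }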
Signed-off-by: Bryan Boreham
---
scrape/target.go | 9 +++++++++
scrape/target_test.go | 11 +++++++++++
2 files changed, 20 insertions(+)
diff --git a/scrape/target.go b/scrape/target.go
index ae952b420a..59f6e2873e 100644
--- a/scrape/target.go
+++ b/scrape/target.go
@@ -181,6 +181,15 @@ func (t *Target) Labels() labels.Labels {
return b.Labels()
}
+// LabelsRange calls f on each public label of the target.
+func (t *Target) LabelsRange(f func(l labels.Label)) {
+ t.labels.Range(func(l labels.Label) {
+ if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) {
+ f(l)
+ }
+ })
+}
+
// DiscoveredLabels returns a copy of the target's labels before any processing.
func (t *Target) DiscoveredLabels() labels.Labels {
t.mtx.Lock()
diff --git a/scrape/target_test.go b/scrape/target_test.go
index 991195f5b8..4937359ed7 100644
--- a/scrape/target_test.go
+++ b/scrape/target_test.go
@@ -43,6 +43,17 @@ func TestTargetLabels(t *testing.T) {
want := labels.FromStrings(model.JobLabel, "some_job", "foo", "bar")
got := target.Labels()
require.Equal(t, want, got)
+ i := 0
+ target.LabelsRange(func(l labels.Label) {
+ switch i {
+ case 0:
+ require.Equal(t, labels.Label{Name: "foo", Value: "bar"}, l)
+ case 1:
+ require.Equal(t, labels.Label{Name: model.JobLabel, Value: "some_job"}, l)
+ }
+ i++
+ })
+ require.Equal(t, 2, i)
}
func TestTargetOffset(t *testing.T) {
From 0dfa1e73f8c248fa19282b56b5c8fdf58b44caf4 Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Tue, 7 Mar 2023 17:11:24 +0000
Subject: [PATCH 026/231] scrape: use LabelsRange instead of Labels, for
performance
Includes a rewrite of `resolveConflictingExposedLabels` to use
`labels.Builder.Get`, which simplifies it considerably.
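For context, a hedged sketch of why a single labels.Builder.Get lookup is
enough: the builder answers for both the base label set and labels already
staged with Set, so the old trio of Has checks collapses into one query
(label names below are illustrative):

    lb := labels.NewBuilder(labels.FromStrings("job", "a"))
    lb.Set("exported_job", "b")
    _ = lb.Get("job")          // "a": visible from the base label set
    _ = lb.Get("exported_job") // "b": visible from the staged Set
    _ = lb.Get("missing")      // "": name is free, safe to claim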
Signed-off-by: Bryan Boreham
---
scrape/scrape.go | 37 ++++++++++++-------------------------
1 file changed, 12 insertions(+), 25 deletions(-)
diff --git a/scrape/scrape.go b/scrape/scrape.go
index 3fce6f9dd4..5c71a0110d 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -500,7 +500,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
}
targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures)))
for _, t := range targets {
- if !t.Labels().IsEmpty() {
+ // Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage.
+ nonEmpty := false
+ t.LabelsRange(func(l labels.Label) { nonEmpty = true })
+ if nonEmpty {
all = append(all, t)
} else if !t.DiscoveredLabels().IsEmpty() {
sp.droppedTargets = append(sp.droppedTargets, t)
@@ -666,17 +669,16 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels {
lb := labels.NewBuilder(lset)
- targetLabels := target.Labels()
if honor {
- targetLabels.Range(func(l labels.Label) {
+ target.LabelsRange(func(l labels.Label) {
if !lset.Has(l.Name) {
lb.Set(l.Name, l.Value)
}
})
} else {
var conflictingExposedLabels []labels.Label
- targetLabels.Range(func(l labels.Label) {
+ target.LabelsRange(func(l labels.Label) {
existingValue := lset.Get(l.Name)
if existingValue != "" {
conflictingExposedLabels = append(conflictingExposedLabels, labels.Label{Name: l.Name, Value: existingValue})
@@ -686,7 +688,7 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
})
if len(conflictingExposedLabels) > 0 {
- resolveConflictingExposedLabels(lb, lset, targetLabels, conflictingExposedLabels)
+ resolveConflictingExposedLabels(lb, conflictingExposedLabels)
}
}
@@ -699,42 +701,27 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
return res
}
-func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLabels labels.Labels, conflictingExposedLabels []labels.Label) {
+func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) {
sort.SliceStable(conflictingExposedLabels, func(i, j int) bool {
return len(conflictingExposedLabels[i].Name) < len(conflictingExposedLabels[j].Name)
})
- for i, l := range conflictingExposedLabels {
+ for _, l := range conflictingExposedLabels {
newName := l.Name
for {
newName = model.ExportedLabelPrefix + newName
- if !exposedLabels.Has(newName) &&
- !targetLabels.Has(newName) &&
- !labelSliceHas(conflictingExposedLabels[:i], newName) {
- conflictingExposedLabels[i].Name = newName
+ if lb.Get(newName) == "" {
+ lb.Set(newName, l.Value)
break
}
}
}
-
- for _, l := range conflictingExposedLabels {
- lb.Set(l.Name, l.Value)
- }
-}
-
-func labelSliceHas(lbls []labels.Label, name string) bool {
- for _, l := range lbls {
- if l.Name == name {
- return true
- }
- }
- return false
}
func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels {
lb := labels.NewBuilder(lset)
- target.Labels().Range(func(l labels.Label) {
+ target.LabelsRange(func(l labels.Label) {
lb.Set(model.ExportedLabelPrefix+l.Name, lset.Get(l.Name))
lb.Set(l.Name, l.Value)
})
From 0c09c3feb021a45a87bd99dfc44ccadfa14deb21 Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Tue, 7 Mar 2023 17:17:49 +0000
Subject: [PATCH 027/231] scrape sync: avoid copy of labels for dropped targets
Since the Target object was just created in this function, nobody else
has a reference to it and there is no concern about it being modified
concurrently, so we don't need to copy the value.
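For context, a simplified sketch (not the verbatim implementation) of the
accessor being bypassed; the lock-and-copy protects callers from concurrent
relabeling, which a just-constructed Target cannot yet experience:

    func (t *Target) DiscoveredLabels() labels.Labels {
        t.mtx.Lock()
        defer t.mtx.Unlock()
        return t.discoveredLabels.Copy() // callers never observe later mutations
    }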
Signed-off-by: Bryan Boreham
---
scrape/scrape.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scrape/scrape.go b/scrape/scrape.go
index 5c71a0110d..01c66ca81d 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -505,7 +505,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
t.LabelsRange(func(l labels.Label) { nonEmpty = true })
if nonEmpty {
all = append(all, t)
- } else if !t.DiscoveredLabels().IsEmpty() {
+ } else if !t.discoveredLabels.IsEmpty() {
sp.droppedTargets = append(sp.droppedTargets, t)
}
}
From 1cc28ce9ca52453d6fc73a398158c4c135f90a47 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ren=C3=A9=20Scheibe?=
Date: Sat, 18 Mar 2023 20:11:35 +0100
Subject: [PATCH 028/231] chore: Fix documentation on signal to shut down
instance
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: René Scheibe
---
docs/getting_started.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/getting_started.md b/docs/getting_started.md
index 11d8d0fb82..e89ac705ee 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -264,4 +264,4 @@ process ID.
While Prometheus does have recovery mechanisms in the case that there is an
abrupt process failure, it is recommended to use the `SIGTERM` signal to cleanly
shut down a Prometheus instance. If you're running on Linux this can be performed
-by using `kill -s SIGHUP <pid>`, replacing `<pid>` with your Prometheus process ID.
+by using `kill -s SIGTERM <pid>`, replacing `<pid>` with your Prometheus process ID.
From 1b7d973f143d27d83cd9186f6ceec33cbe3796f0 Mon Sep 17 00:00:00 2001
From: Ganesh Vernekar
Date: Tue, 21 Mar 2023 15:15:36 +0530
Subject: [PATCH 029/231] tsdb: Fix a comment in tsdb/head_read.go
Signed-off-by: Ganesh Vernekar
---
tsdb/head_read.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tsdb/head_read.go b/tsdb/head_read.go
index b54e53aa07..9c40bcd7a8 100644
--- a/tsdb/head_read.go
+++ b/tsdb/head_read.go
@@ -340,7 +340,7 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.
}
// chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk.
-// If headChunk is true, it means that the returned *memChunk
+// If headChunk is false, it means that the returned *memChunk
// (and not the chunkenc.Chunk inside it) can be garbage collected after its usage.
func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, headChunk bool, err error) {
// ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are
From f14665b9e3a6a30286a83c36d7f746259e5699aa Mon Sep 17 00:00:00 2001
From: SuperQ
Date: Mon, 20 Mar 2023 14:14:39 +0100
Subject: [PATCH 030/231] Fix docker tag sanitizer
Use a `-` instead of `_` so that the Docker tag also passes semver checks.
Signed-off-by: SuperQ
---
Makefile.common | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile.common b/Makefile.common
index 221941db9c..b111d25620 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -91,7 +91,7 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
-SANITIZED_DOCKER_IMAGE_TAG := $(subst +,_,$(DOCKER_IMAGE_TAG))
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
From 1070c9b06c3d9b6a627d284e7a475bb811833643 Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Tue, 21 Mar 2023 11:30:28 +0100
Subject: [PATCH 031/231] Release 2.43.0
Signed-off-by: Julien Pivotto
---
CHANGELOG.md | 12 ++++--------
VERSION | 2 +-
web/ui/module/codemirror-promql/package.json | 4 ++--
web/ui/module/lezer-promql/package.json | 2 +-
web/ui/package-lock.json | 14 +++++++-------
web/ui/react-app/package.json | 4 ++--
6 files changed, 17 insertions(+), 21 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bf725d9665..38f3c995a8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,10 +1,6 @@
# Changelog
-## 2.43.0-rc.1 / 2023-03-16
-
-* [BUGFIX] Fixed a bug where changes in the Builder's Range function could lead to skipped labels during iteration (#12145)
-
-## 2.43.0-rc.0 / 2023-03-09
+## 2.43.0 / 2023-03-21
We are working on some performance improvements in Prometheus, which are only
built into Prometheus when compiling it using the Go tag `stringlabels`
@@ -12,9 +8,9 @@ built into Prometheus when compiling it using the Go tag `stringlabels`
structure for labels that uses a single string to hold all the label/values,
resulting in a smaller heap size and some speedups in most cases. We would like
to encourage users who are interested in these improvements to help us measure
-the gains on their production architecture. Building Prometheus from source
-with the `stringlabels` Go tag and providing feedback on its effectiveness in
-their specific use cases would be incredibly helpful to us. #10991
+the gains on their production architecture. We are providing release artefacts
+2.43.0+stringlabels and docker images tagged v2.43.0-stringlabels with those
+improvements for testing. #10991
* [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487
* [FEATURE] Scrape: Add `scrape_config_files` to include scrape configs from different files. #12019
diff --git a/VERSION b/VERSION
index a55e12bb97..5b9cd9afd5 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.43.0-rc.1
+2.43.0
diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json
index 8d7a73ec70..f20a634683 100644
--- a/web/ui/module/codemirror-promql/package.json
+++ b/web/ui/module/codemirror-promql/package.json
@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
- "version": "0.43.0-rc.1",
+ "version": "0.43.0",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
- "@prometheus-io/lezer-promql": "0.43.0-rc.1",
+ "@prometheus-io/lezer-promql": "0.43.0",
"lru-cache": "^6.0.0"
},
"devDependencies": {
diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json
index 0fb3bda0e8..411251e56c 100644
--- a/web/ui/module/lezer-promql/package.json
+++ b/web/ui/module/lezer-promql/package.json
@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
- "version": "0.43.0-rc.1",
+ "version": "0.43.0",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",
diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json
index 736c438fd7..2e334cf9e4 100644
--- a/web/ui/package-lock.json
+++ b/web/ui/package-lock.json
@@ -28,10 +28,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
- "version": "0.43.0-rc.1",
+ "version": "0.43.0",
"license": "Apache-2.0",
"dependencies": {
- "@prometheus-io/lezer-promql": "0.43.0-rc.1",
+ "@prometheus-io/lezer-promql": "0.43.0",
"lru-cache": "^6.0.0"
},
"devDependencies": {
@@ -61,7 +61,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
- "version": "0.43.0-rc.1",
+ "version": "0.43.0",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.2.2",
@@ -20763,7 +20763,7 @@
},
"react-app": {
"name": "@prometheus-io/app",
- "version": "0.43.0-rc.1",
+ "version": "0.43.0",
"dependencies": {
"@codemirror/autocomplete": "^6.4.0",
"@codemirror/commands": "^6.2.0",
@@ -20781,7 +20781,7 @@
"@lezer/lr": "^1.3.1",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
- "@prometheus-io/codemirror-promql": "0.43.0-rc.1",
+ "@prometheus-io/codemirror-promql": "0.43.0",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^7.2.0",
@@ -23417,7 +23417,7 @@
"@lezer/lr": "^1.3.1",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
- "@prometheus-io/codemirror-promql": "0.43.0-rc.1",
+ "@prometheus-io/codemirror-promql": "0.43.0",
"@testing-library/react-hooks": "^7.0.2",
"@types/enzyme": "^3.10.12",
"@types/flot": "0.0.32",
@@ -23468,7 +23468,7 @@
"@lezer/common": "^1.0.2",
"@lezer/highlight": "^1.1.3",
"@lezer/lr": "^1.3.1",
- "@prometheus-io/lezer-promql": "0.43.0-rc.1",
+ "@prometheus-io/lezer-promql": "0.43.0",
"@types/lru-cache": "^5.1.1",
"isomorphic-fetch": "^3.0.0",
"lru-cache": "^6.0.0",
diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json
index 20f5819bb9..c8b115582a 100644
--- a/web/ui/react-app/package.json
+++ b/web/ui/react-app/package.json
@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/app",
- "version": "0.43.0-rc.1",
+ "version": "0.43.0",
"private": true,
"dependencies": {
"@codemirror/autocomplete": "^6.4.0",
@@ -19,7 +19,7 @@
"@lezer/common": "^1.0.2",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
- "@prometheus-io/codemirror-promql": "0.43.0-rc.1",
+ "@prometheus-io/codemirror-promql": "0.43.0",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^7.2.0",
From 67f48f47c7d2e3cb91b06adf190a275ed89b5081 Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Tue, 21 Mar 2023 13:50:48 +0100
Subject: [PATCH 032/231] Address review comments
Signed-off-by: Julien Pivotto
---
CHANGELOG.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 38f3c995a8..13bdde0316 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,7 +9,7 @@ structure for labels that uses a single string to hold all the label/values,
resulting in a smaller heap size and some speedups in most cases. We would like
to encourage users who are interested in these improvements to help us measure
the gains on their production architecture. We are providing release artefacts
-2.43.0+stringlabels and docker images tagged v2.43.0-stringlabels with those
+`2.43.0+stringlabels` and Docker images tagged `v2.43.0-stringlabels` with those
improvements for testing. #10991
* [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487
From ca0abf26c526377e06a0024c0a1f410b601e6b0f Mon Sep 17 00:00:00 2001
From: Vernon Miller <96601789+aldernero@users.noreply.github.com>
Date: Tue, 21 Mar 2023 08:03:43 -0600
Subject: [PATCH 033/231] Adds an affirmative log message for successful WAL
repair (#12135)
* Adds an affirmative log message for successful WAL repair
Signed-off-by: Vernon Miller
Signed-off-by: Vernon Miller <96601789+aldernero@users.noreply.github.com>
Co-authored-by: Ganesh Vernekar
---
tsdb/agent/db.go | 1 +
tsdb/db.go | 2 ++
2 files changed, 3 insertions(+)
diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go
index da74fe4c9d..a17e0d1b98 100644
--- a/tsdb/agent/db.go
+++ b/tsdb/agent/db.go
@@ -303,6 +303,7 @@ func Open(l log.Logger, reg prometheus.Registerer, rs *remote.Storage, dir strin
if err := w.Repair(err); err != nil {
return nil, errors.Wrap(err, "repair corrupted WAL")
}
+ level.Info(db.logger).Log("msg", "successfully repaired WAL")
}
go db.run()
diff --git a/tsdb/db.go b/tsdb/db.go
index 561867025b..659251c3ca 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -828,11 +828,13 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
if err := wbl.Repair(initErr); err != nil {
return nil, errors.Wrap(err, "repair corrupted OOO WAL")
}
+ level.Info(db.logger).Log("msg", "Successfully repaired OOO WAL")
} else {
level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr)
if err := wal.Repair(initErr); err != nil {
return nil, errors.Wrap(err, "repair corrupted WAL")
}
+ level.Info(db.logger).Log("msg", "Successfully repaired WAL")
}
}
From ae220724d4594a3d6c882e1d7e85a8e737948b8f Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Tue, 21 Mar 2023 17:27:21 +0100
Subject: [PATCH 034/231] Docs: use boolean instead of bool
Using `boolean` makes the type consistent and clickable on
https://prometheus.io/docs/prometheus/latest/configuration/configuration/
Signed-off-by: Julien Pivotto
---
docs/configuration/configuration.md | 98 ++++++++++++++---------------
docs/configuration/https.md | 2 +-
2 files changed, 50 insertions(+), 50 deletions(-)
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 415ad4a279..f27f8256a5 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -205,7 +205,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# Configures the scrape request's TLS settings.
tls_config:
@@ -218,7 +218,7 @@ tls_config:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -447,7 +447,7 @@ tls_config:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -535,7 +535,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -544,7 +544,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -646,7 +646,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -655,7 +655,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -733,7 +733,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -742,7 +742,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -791,7 +791,7 @@ host:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -849,7 +849,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
```
@@ -966,7 +966,7 @@ host:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -1026,7 +1026,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
```
@@ -1173,7 +1173,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -1182,7 +1182,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -1448,7 +1448,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -1457,7 +1457,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
```
See [this example Prometheus configuration file](/documentation/examples/prometheus-puppetdb.yml)
@@ -1665,7 +1665,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -1674,7 +1674,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -1759,7 +1759,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -1768,7 +1768,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -1842,7 +1842,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -1851,7 +1851,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -2067,7 +2067,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -2076,7 +2076,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -2153,7 +2153,7 @@ server:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -2193,7 +2193,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
```
The [relabeling phase](#relabel_config) is the preferred and more powerful way
@@ -2280,7 +2280,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -2289,7 +2289,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -2361,7 +2361,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -2370,7 +2370,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -2456,7 +2456,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration for connecting to marathon servers
tls_config:
@@ -2469,7 +2469,7 @@ tls_config:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -2567,7 +2567,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -2576,7 +2576,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -2753,7 +2753,7 @@ tls_config:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -2762,7 +2762,7 @@ tls_config:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# Refresh interval to re-read the app instance list.
[ refresh_interval: <duration> | default = 30s ]
@@ -2869,7 +2869,7 @@ tags_filter:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# Optional proxy URL.
[ proxy_url: <string> ]
@@ -2878,7 +2878,7 @@ tags_filter:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -2954,7 +2954,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -2963,7 +2963,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -3036,7 +3036,7 @@ oauth2:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: <boolean> | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@@ -3045,7 +3045,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]
# TLS configuration.
tls_config:
@@ -3238,7 +3238,7 @@ tls_config:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <bool> | default: false ]
+[ proxy_from_environment: