Mirror of https://github.com/prometheus/prometheus.git, synced 2025-10-31 00:11:23 +01:00
klauspost/compress is a high-quality drop-in replacement for the common Go compression libraries. Since Prometheus sends out a lot of HTTP requests that often return compressed output, using improved compression libraries helps to save CPU and memory resources. On a test Prometheus server I was able to see CPU usage drop from 31 to 30 cores.

Benchmark results:

name                                 old time/op    new time/op    delta
TargetScraperGzip/metrics=1-8        69.4µs ± 4%    69.2µs ± 3%      ~     (p=0.122 n=50+50)
TargetScraperGzip/metrics=100-8      84.3µs ± 2%    80.9µs ± 2%    -4.02%  (p=0.000 n=48+46)
TargetScraperGzip/metrics=1000-8      296µs ± 1%     274µs ±14%    -7.35%  (p=0.000 n=47+45)
TargetScraperGzip/metrics=10000-8    2.06ms ± 1%    1.66ms ± 2%   -19.34%  (p=0.000 n=47+45)
TargetScraperGzip/metrics=100000-8   20.9ms ± 2%    17.5ms ± 3%   -16.50%  (p=0.000 n=49+50)

name                                 old alloc/op   new alloc/op   delta
TargetScraperGzip/metrics=1-8        6.06kB ± 0%    6.07kB ± 0%    +0.24%  (p=0.000 n=48+48)
TargetScraperGzip/metrics=100-8      7.04kB ± 0%    6.89kB ± 0%    -2.17%  (p=0.000 n=49+50)
TargetScraperGzip/metrics=1000-8     9.02kB ± 0%    8.35kB ± 1%    -7.49%  (p=0.000 n=50+50)
TargetScraperGzip/metrics=10000-8    18.1kB ± 1%    16.1kB ± 2%   -10.87%  (p=0.000 n=47+47)
TargetScraperGzip/metrics=100000-8   1.21MB ± 0%    1.01MB ± 2%   -16.69%  (p=0.000 n=36+50)

name                                 old allocs/op  new allocs/op  delta
TargetScraperGzip/metrics=1-8          71.0 ± 0%      72.0 ± 0%    +1.41%  (p=0.000 n=50+50)
TargetScraperGzip/metrics=100-8        81.0 ± 0%      76.0 ± 0%    -6.17%  (p=0.000 n=50+50)
TargetScraperGzip/metrics=1000-8       92.0 ± 0%      83.0 ± 0%    -9.78%  (p=0.000 n=50+50)
TargetScraperGzip/metrics=10000-8      93.0 ± 0%      91.0 ± 0%    -2.15%  (p=0.000 n=50+50)
TargetScraperGzip/metrics=100000-8      111 ± 0%       135 ± 1%   +21.89%  (p=0.000 n=40+50)

Signed-off-by: Łukasz Mierzwa <l.mierzwa@gmail.com>
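As an illustrative sketch (not part of the commit itself): the "drop-in" property means klauspost/compress exposes the same API as the standard-library packages, so switching is typically just an import-path change. A minimal, self-contained example assuming only the klauspost/compress/gzip package:

package main

import (
	"bytes"
	"fmt"

	// The standard-library equivalent would be "compress/gzip"; the API is the
	// same, so only the import path changes.
	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	// Same constructor signature as compress/gzip.NewWriter.
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("metric_name 1\n")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("compressed to %d bytes\n", buf.Len())
}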
94 lines · 2.8 KiB · Go
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package httputil

import (
	"io"
	"net/http"
	"strings"

	"github.com/klauspost/compress/gzip"
	"github.com/klauspost/compress/zlib"
)

const (
	acceptEncodingHeader  = "Accept-Encoding"
	contentEncodingHeader = "Content-Encoding"
	gzipEncoding          = "gzip"
	deflateEncoding       = "deflate"
)

// Wrapper around http.Handler which adds suitable response compression based
// on the client's Accept-Encoding headers.
type compressedResponseWriter struct {
	http.ResponseWriter
	writer io.Writer
}

// Writes HTTP response content data.
func (c *compressedResponseWriter) Write(p []byte) (int, error) {
	return c.writer.Write(p)
}

// Closes the compressedResponseWriter and ensures to flush all data before.
func (c *compressedResponseWriter) Close() {
	if zlibWriter, ok := c.writer.(*zlib.Writer); ok {
		zlibWriter.Flush()
	}
	if gzipWriter, ok := c.writer.(*gzip.Writer); ok {
		gzipWriter.Flush()
	}
	if closer, ok := c.writer.(io.Closer); ok {
		defer closer.Close()
	}
}

// Constructs a new compressedResponseWriter based on client request headers.
func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter {
	encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",")
	for _, encoding := range encodings {
		switch strings.TrimSpace(encoding) {
		case gzipEncoding:
			writer.Header().Set(contentEncodingHeader, gzipEncoding)
			return &compressedResponseWriter{
				ResponseWriter: writer,
				writer:         gzip.NewWriter(writer),
			}
		case deflateEncoding:
			writer.Header().Set(contentEncodingHeader, deflateEncoding)
			return &compressedResponseWriter{
				ResponseWriter: writer,
				writer:         zlib.NewWriter(writer),
			}
		}
	}
	return &compressedResponseWriter{
		ResponseWriter: writer,
		writer:         writer,
	}
}

// CompressionHandler is a wrapper around http.Handler which adds suitable
// response compression based on the client's Accept-Encoding headers.
type CompressionHandler struct {
	Handler http.Handler
}

// ServeHTTP adds compression to the original http.Handler's ServeHTTP() method.
func (c CompressionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) {
	compWriter := newCompressedResponseWriter(writer, req)
	c.Handler.ServeHTTP(compWriter, req)
	compWriter.Close()
}
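A minimal usage sketch (not part of the file above; the import path github.com/prometheus/prometheus/util/httputil is assumed from the package name) showing CompressionHandler negotiating gzip from the client's Accept-Encoding header:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"

	"github.com/klauspost/compress/gzip"

	// Assumed import path for the httputil package shown above.
	"github.com/prometheus/prometheus/util/httputil"
)

func main() {
	// Wrap a plain handler so responses are compressed when the client asks for it.
	handler := httputil.CompressionHandler{
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			io.WriteString(w, "up 1\n")
		}),
	}

	srv := httptest.NewServer(handler)
	defer srv.Close()

	req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
	// Setting Accept-Encoding explicitly also stops the HTTP transport from
	// transparently decompressing the response for us.
	req.Header.Set("Accept-Encoding", "gzip")

	resp, err := http.DefaultTransport.RoundTrip(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println("Content-Encoding:", resp.Header.Get("Content-Encoding")) // "gzip"

	// The body is gzip-compressed, so decode it before reading.
	zr, err := gzip.NewReader(resp.Body)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(zr)
	fmt.Printf("decompressed body: %q\n", string(body))
}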