From 8224ddec23598152d7506b7b39f5235a77b5e036 Mon Sep 17 00:00:00 2001
From: Marek Slabicki
Date: Sat, 11 Apr 2020 08:22:18 +0000
Subject: [PATCH] Capitalizing first letter of all log lines (#7043)

Signed-off-by: Marek Slabicki
---
 cmd/prometheus/main.go | 4 +--
 discovery/manager.go | 4 +--
 .../remote_storage_adapter/graphite/client.go | 2 +-
 .../remote_storage_adapter/influxdb/client.go | 2 +-
 .../remote_storage_adapter/opentsdb/client.go | 2 +-
 promql/engine.go | 4 +--
 rules/manager.go | 8 +++---
 scrape/scrape.go | 8 +++---
 storage/remote/queue_manager.go | 4 +--
 storage/remote/write.go | 2 +-
 tsdb/compact.go | 2 +-
 tsdb/db.go | 16 +++++------
 tsdb/head.go | 12 ++++----
 tsdb/repair.go | 4 +--
 tsdb/wal.go | 4 +--
 tsdb/wal/live_reader.go | 2 +-
 tsdb/wal/wal.go | 8 +++---
 tsdb/wal/watcher.go | 28 +++++++++----------
 web/api/v1/api.go | 2 +-
 web/federate.go | 2 +-
 web/web.go | 2 +-
 21 files changed, 61 insertions(+), 61 deletions(-)

diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 13b5b26911..bb19554e7f 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -295,7 +295,7 @@ func main() {
     if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 {
       cfg.tsdb.RetentionDuration = defaultRetentionDuration
-      level.Info(logger).Log("msg", "no time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)
+      level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)
     }
     // Check for overflows. This limits our max retention to 100y.
@@ -305,7 +305,7 @@ func main() {
         panic(err)
       }
       cfg.tsdb.RetentionDuration = y
-      level.Warn(logger).Log("msg", "time retention value is too high. Limiting to: "+y.String())
+      level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String())
     }
   }
diff --git a/discovery/manager.go b/discovery/manager.go
index 49bcbf86b7..66c0057a35 100644
--- a/discovery/manager.go
+++ b/discovery/manager.go
@@ -239,7 +239,7 @@ func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targ
     case tgs, ok := <-updates:
       receivedUpdates.WithLabelValues(m.name).Inc()
       if !ok {
-        level.Debug(m.logger).Log("msg", "discoverer channel closed", "provider", p.name)
+        level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
         return
       }
@@ -271,7 +271,7 @@ func (m *Manager) sender() {
       case m.syncCh <- m.allGroups():
       default:
         delayedUpdates.WithLabelValues(m.name).Inc()
-        level.Debug(m.logger).Log("msg", "discovery receiver's channel was full so will retry the next cycle")
+        level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
         select {
         case m.triggerSend <- struct{}{}:
         default:
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go
index 9735729130..34cfb61bfb 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go
@@ -93,7 +93,7 @@ func (c *Client) Write(samples model.Samples) error {
     t := float64(s.Timestamp.UnixNano()) / 1e9
     v := float64(s.Value)
     if math.IsNaN(v) || math.IsInf(v, 0) {
-      level.Debug(c.logger).Log("msg", "cannot send value to Graphite, skipping sample", "value", v, "sample", s)
+      level.Debug(c.logger).Log("msg", "Cannot send value to Graphite, skipping sample", "value", v, "sample", s)
       continue
     }
     fmt.Fprintf(&buf, "%s %f %f\n", k, v, t)
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go
index 96020571e4..327b2ac75f 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go
@@ -84,7 +84,7 @@ func (c *Client) Write(samples model.Samples) error {
   for _, s := range samples {
     v := float64(s.Value)
     if math.IsNaN(v) || math.IsInf(v, 0) {
-      level.Debug(c.logger).Log("msg", "cannot send to InfluxDB, skipping sample", "value", v, "sample", s)
+      level.Debug(c.logger).Log("msg", "Cannot send to InfluxDB, skipping sample", "value", v, "sample", s)
       c.ignoredSamples.Inc()
       continue
     }
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
index 5e50eb4ac6..eb69df96dd 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
@@ -79,7 +79,7 @@ func (c *Client) Write(samples model.Samples) error {
   for _, s := range samples {
     v := float64(s.Value)
     if math.IsNaN(v) || math.IsInf(v, 0) {
-      level.Debug(c.logger).Log("msg", "cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s)
+      level.Debug(c.logger).Log("msg", "Cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s)
       continue
     }
     metric := TagValue(s.Metric[model.MetricNameLabel])
diff --git a/promql/engine.go b/promql/engine.go
index bf55e60326..cd492b2df9 100644
--- a/promql/engine.go
+++ b/promql/engine.go
@@ -308,7 +308,7 @@ func NewEngine(opts EngineOpts) *Engine {
   if opts.LookbackDelta == 0 {
     opts.LookbackDelta = defaultLookbackDelta
     if l := opts.Logger; l != nil {
-      level.Debug(l).Log("msg", "lookback delta is zero, setting to default value", "value", defaultLookbackDelta)
+      level.Debug(l).Log("msg", "Lookback delta is zero, setting to default value", "value", defaultLookbackDelta)
     }
   }
@@ -345,7 +345,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) {
     // not make reload fail; only log a warning.
     err := ng.queryLogger.Close()
     if err != nil {
-      level.Warn(ng.logger).Log("msg", "error while closing the previous query log file", "err", err)
+      level.Warn(ng.logger).Log("msg", "Error while closing the previous query log file", "err", err)
     }
   }
diff --git a/rules/manager.go b/rules/manager.go
index eca688c052..e480e3be15 100644
--- a/rules/manager.go
+++ b/rules/manager.go
@@ -599,7 +599,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
     seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
     defer func() {
       if err := app.Commit(); err != nil {
-        level.Warn(g.logger).Log("msg", "rule sample appending failed", "err", err)
+        level.Warn(g.logger).Log("msg", "Rule sample appending failed", "err", err)
         return
       }
       g.seriesInPreviousEval[i] = seriesReturned
@@ -637,7 +637,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
       // Do not count these in logging, as this is expected if series
       // is exposed from a different rule.
       default:
-        level.Warn(g.logger).Log("msg", "adding stale sample failed", "sample", metric, "err", err)
+        level.Warn(g.logger).Log("msg", "Adding stale sample failed", "sample", metric, "err", err)
       }
     }
   }
@@ -660,11 +660,11 @@ func (g *Group) cleanupStaleSeries(ts time.Time) {
     // Do not count these in logging, as this is expected if series
     // is exposed from a different rule.
     default:
-      level.Warn(g.logger).Log("msg", "adding stale sample for previous configuration failed", "sample", s, "err", err)
+      level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err)
     }
   }
   if err := app.Commit(); err != nil {
-    level.Warn(g.logger).Log("msg", "stale sample appending for previous configuration failed", "err", err)
+    level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err)
   } else {
     g.staleSeries = nil
   }
diff --git a/scrape/scrape.go b/scrape/scrape.go
index 80b7541088..f63711e3fa 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -969,11 +969,11 @@ mainLoop:
     // we still call sl.append to trigger stale markers.
     total, added, seriesAdded, appErr := sl.append(b, contentType, start)
     if appErr != nil {
-      level.Debug(sl.l).Log("msg", "append failed", "err", appErr)
+      level.Debug(sl.l).Log("msg", "Append failed", "err", appErr)
       // The append failed, probably due to a parse error or sample limit.
       // Call sl.append again with an empty scrape to trigger stale markers.
       if _, _, _, err := sl.append([]byte{}, "", start); err != nil {
-        level.Warn(sl.l).Log("msg", "append failed", "err", err)
+        level.Warn(sl.l).Log("msg", "Append failed", "err", err)
       }
     }
@@ -984,7 +984,7 @@ mainLoop:
     }
     if err := sl.report(start, time.Since(start), total, added, seriesAdded, scrapeErr); err != nil {
-      level.Warn(sl.l).Log("msg", "appending scrape report failed", "err", err)
+      level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err)
     }
     last = start
@@ -1172,7 +1172,7 @@ loop:
     sampleAdded, err = sl.checkAddError(nil, met, tp, err, &sampleLimitErr, appErrs)
     if err != nil {
       if err != storage.ErrNotFound {
-        level.Debug(sl.l).Log("msg", "unexpected error", "series", string(met), "err", err)
+        level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)
       }
       break loop
     }
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index 9324062172..c363237740 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -318,7 +318,7 @@ outer:
       t.droppedSamplesTotal.Inc()
       t.samplesDropped.incr(1)
       if _, ok := t.droppedSeries[s.Ref]; !ok {
-        level.Info(t.logger).Log("msg", "dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref)
+        level.Info(t.logger).Log("msg", "Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref)
       }
       t.seriesMtx.Unlock()
       continue
@@ -881,7 +881,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
       return err
     }
     s.qm.retriedSamplesTotal.Add(float64(len(samples)))
-    level.Debug(s.qm.logger).Log("msg", "failed to send batch, retrying", "err", err)
+    level.Debug(s.qm.logger).Log("msg", "Failed to send batch, retrying", "err", err)
     time.Sleep(time.Duration(backoff))
     backoff = backoff * 2
diff --git a/storage/remote/write.go b/storage/remote/write.go
index 5d77ec751d..af918b5028 100644
--- a/storage/remote/write.go
+++ b/storage/remote/write.go
@@ -107,7 +107,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
   // external labels change.
   externalLabelUnchanged := externalLabelHash == rws.externalLabelHash
   if configHash == rws.configHash && externalLabelUnchanged {
-    level.Debug(rws.logger).Log("msg", "remote write config has not changed, no need to restart QueueManagers")
+    level.Debug(rws.logger).Log("msg", "Remote write config has not changed, no need to restart QueueManagers")
     return nil
   }
diff --git a/tsdb/compact.go b/tsdb/compact.go
index 0fe69ba453..9bb04920bf 100644
--- a/tsdb/compact.go
+++ b/tsdb/compact.go
@@ -674,7 +674,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
     if i > 0 && b.Meta().MinTime < globalMaxt {
       c.metrics.overlappingBlocks.Inc()
       overlapping = true
-      level.Warn(c.logger).Log("msg", "found overlapping blocks during compaction", "ulid", meta.ULID)
+      level.Warn(c.logger).Log("msg", "Found overlapping blocks during compaction", "ulid", meta.ULID)
     }
     if b.Meta().MaxTime > globalMaxt {
       globalMaxt = b.Meta().MaxTime
diff --git a/tsdb/db.go b/tsdb/db.go
index 97df4b3c49..9ab1124d61 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -433,7 +433,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
   if len(corrupted) > 0 {
     for _, b := range loadable {
       if err := b.Close(); err != nil {
-        level.Warn(db.logger).Log("msg", "closing a block", err)
+        level.Warn(db.logger).Log("msg", "Closing a block", err)
       }
     }
     return nil, errors.Errorf("unexpected corrupted block:%v", corrupted)
   }
@@ -452,7 +452,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
     blockMetas = append(blockMetas, b.Meta())
   }
   if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
-    level.Warn(db.logger).Log("msg", "overlapping blocks found during opening", "detail", overlaps.String())
+    level.Warn(db.logger).Log("msg", "Overlapping blocks found during opening", "detail", overlaps.String())
   }
   // Close all previously open readers and add the new ones to the cache.
@@ -612,7 +612,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
   if initErr := db.head.Init(minValidTime); initErr != nil {
     db.head.metrics.walCorruptionsTotal.Inc()
-    level.Warn(db.logger).Log("msg", "encountered WAL read error, attempting repair", "err", initErr)
+    level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr)
     if err := wlog.Repair(initErr); err != nil {
       return nil, errors.Wrap(err, "repair corrupted WAL")
     }
@@ -908,7 +908,7 @@ func (db *DB) reload() (err error) {
     blockMetas = append(blockMetas, b.Meta())
   }
   if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
-    level.Warn(db.logger).Log("msg", "overlapping blocks found during reload", "detail", overlaps.String())
+    level.Warn(db.logger).Log("msg", "Overlapping blocks found during reload", "detail", overlaps.String())
   }
   for _, b := range oldBlocks {
@@ -1041,7 +1041,7 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
   for ulid, block := range blocks {
     if block != nil {
       if err := block.Close(); err != nil {
-        level.Warn(db.logger).Log("msg", "closing block failed", "err", err)
+        level.Warn(db.logger).Log("msg", "Closing block failed", "err", err)
       }
     }
     if err := os.RemoveAll(filepath.Join(db.dir, ulid.String())); err != nil {
@@ -1220,7 +1220,7 @@ func (db *DB) DisableCompactions() {
   defer db.autoCompactMtx.Unlock()
   db.autoCompact = false
-  level.Info(db.logger).Log("msg", "compactions disabled")
+  level.Info(db.logger).Log("msg", "Compactions disabled")
 }
 // EnableCompactions enables auto compactions.
@@ -1229,7 +1229,7 @@ func (db *DB) EnableCompactions() {
   defer db.autoCompactMtx.Unlock()
   db.autoCompact = true
-  level.Info(db.logger).Log("msg", "compactions enabled")
+  level.Info(db.logger).Log("msg", "Compactions enabled")
 }
 // Snapshot writes the current data to the directory. If withHead is set to true it
@@ -1249,7 +1249,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error {
   defer db.mtx.RUnlock()
   for _, b := range db.blocks {
-    level.Info(db.logger).Log("msg", "snapshotting block", "block", b)
+    level.Info(db.logger).Log("msg", "Snapshotting block", "block", b)
     if err := b.Snapshot(dir); err != nil {
       return errors.Wrapf(err, "error snapshotting block: %s", b.Dir())
diff --git a/tsdb/head.go b/tsdb/head.go
index c0abe150e5..fa5946e487 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -555,7 +555,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64) (err error) {
   }
   if unknownRefs > 0 {
-    level.Warn(h.logger).Log("msg", "unknown series references", "count", unknownRefs)
+    level.Warn(h.logger).Log("msg", "Unknown series references", "count", unknownRefs)
   }
   return nil
 }
@@ -572,7 +572,7 @@ func (h *Head) Init(minValidTime int64) error {
     return nil
   }
-  level.Info(h.logger).Log("msg", "replaying WAL, this may take awhile")
+  level.Info(h.logger).Log("msg", "Replaying WAL, this may take awhile")
   start := time.Now()
   // Backfill the checkpoint first if it exists.
   dir, startFrom, err := wal.LastCheckpoint(h.wal.Dir())
@@ -587,7 +587,7 @@ func (h *Head) Init(minValidTime int64) error {
   }
   defer func() {
     if err := sr.Close(); err != nil {
-      level.Warn(h.logger).Log("msg", "error while closing the wal segments reader", "err", err)
+      level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err)
     }
   }()
@@ -616,7 +616,7 @@ func (h *Head) Init(minValidTime int64) error {
     sr := wal.NewSegmentBufReader(s)
     err = h.loadWAL(wal.NewReader(sr), multiRef)
     if err := sr.Close(); err != nil {
-      level.Warn(h.logger).Log("msg", "error while closing the wal segments reader", "err", err)
+      level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err)
     }
     if err != nil {
       return err
     }
@@ -659,7 +659,7 @@ func (h *Head) Truncate(mint int64) (err error) {
   start := time.Now()
   h.gc()
-  level.Info(h.logger).Log("msg", "head GC completed", "duration", time.Since(start))
+  level.Info(h.logger).Log("msg", "Head GC completed", "duration", time.Since(start))
   h.metrics.gcDuration.Observe(time.Since(start).Seconds())
   if h.wal == nil {
@@ -1399,7 +1399,7 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
   for p.Next() {
     s := h.head.series.getByID(p.At())
     if s == nil {
-      level.Debug(h.head.logger).Log("msg", "looked up series not found")
+      level.Debug(h.head.logger).Log("msg", "Looked up series not found")
     } else {
       series = append(series, s)
     }
diff --git a/tsdb/repair.go b/tsdb/repair.go
index c9179d5b55..efc81967e9 100644
--- a/tsdb/repair.go
+++ b/tsdb/repair.go
@@ -57,7 +57,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error {
     }
     if meta.Version == metaVersion1 {
       level.Info(logger).Log(
-        "msg", "found healthy block",
+        "msg", "Found healthy block",
         "mint", meta.MinTime,
         "maxt", meta.MaxTime,
         "ulid", meta.ULID,
@@ -65,7 +65,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error {
       )
       continue
     }
     level.Info(logger).Log(
-      "msg", "fixing broken block",
+      "msg", "Fixing broken block",
       "mint", meta.MinTime,
       "maxt", meta.MaxTime,
       "ulid", meta.ULID,
diff --git a/tsdb/wal.go b/tsdb/wal.go
index 41232bd8f7..1e503a1481 100644
--- a/tsdb/wal.go
+++ b/tsdb/wal.go
@@ -204,7 +204,7 @@ func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration,
       w.files = append(w.files, newSegmentFile(f))
       continue
     }
-    level.Warn(logger).Log("msg", "invalid segment file detected, truncating WAL", "err", err, "file", fn)
+    level.Warn(logger).Log("msg", "Invalid segment file detected, truncating WAL", "err", err, "file", fn)
     for _, fn := range fns[i:] {
       if err := os.Remove(fn); err != nil {
@@ -1233,7 +1233,7 @@ func MigrateWAL(logger log.Logger, dir string) (err error) {
   if exists, err := deprecatedWALExists(logger, dir); err != nil || !exists {
     return err
   }
-  level.Info(logger).Log("msg", "migrating WAL format")
+  level.Info(logger).Log("msg", "Migrating WAL format")
   tmpdir := dir + ".tmp"
   if err := os.RemoveAll(tmpdir); err != nil {
diff --git a/tsdb/wal/live_reader.go b/tsdb/wal/live_reader.go
index 7124f6408e..54c4a584e2 100644
--- a/tsdb/wal/live_reader.go
+++ b/tsdb/wal/live_reader.go
@@ -297,7 +297,7 @@ func (r *LiveReader) readRecord() ([]byte, int, error) {
       return nil, 0, fmt.Errorf("record would overflow current page: %d > %d", r.readIndex+recordHeaderSize+length, pageSize)
     }
     r.metrics.readerCorruptionErrors.WithLabelValues("record_span_page").Inc()
-    level.Warn(r.logger).Log("msg", "record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize)
+    level.Warn(r.logger).Log("msg", "Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize)
   }
   if recordHeaderSize+length > pageSize {
     return nil, 0, fmt.Errorf("record length greater than a single page: %d > %d", recordHeaderSize+length, pageSize)
diff --git a/tsdb/wal/wal.go b/tsdb/wal/wal.go
index e5f58c0345..83cbb61b8d 100644
--- a/tsdb/wal/wal.go
+++ b/tsdb/wal/wal.go
@@ -123,7 +123,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) {
   // If it was torn mid-record, a full read (which the caller should do anyway
   // to ensure integrity) will detect it as a corruption by the end.
   if d := stat.Size() % pageSize; d != 0 {
-    level.Warn(logger).Log("msg", "last page of the wal is torn, filling it with zeros", "segment", segName)
+    level.Warn(logger).Log("msg", "Last page of the wal is torn, filling it with zeros", "segment", segName)
     if _, err := f.Write(make([]byte, pageSize-d)); err != nil {
       f.Close()
       return nil, errors.Wrap(err, "zero-pad torn page")
     }
@@ -351,7 +351,7 @@ func (w *WAL) Repair(origErr error) error {
   if cerr.Segment < 0 {
     return errors.New("corruption error does not specify position")
   }
-  level.Warn(w.logger).Log("msg", "starting corruption repair",
+  level.Warn(w.logger).Log("msg", "Starting corruption repair",
     "segment", cerr.Segment, "offset", cerr.Offset)
   // All segments behind the corruption can no longer be used.
@@ -359,7 +359,7 @@ func (w *WAL) Repair(origErr error) error {
   if err != nil {
     return errors.Wrap(err, "list segments")
   }
-  level.Warn(w.logger).Log("msg", "deleting all segments newer than corrupted segment", "segment", cerr.Segment)
+  level.Warn(w.logger).Log("msg", "Deleting all segments newer than corrupted segment", "segment", cerr.Segment)
   for _, s := range segs {
     if w.segment.i == s.index {
@@ -381,7 +381,7 @@ func (w *WAL) Repair(origErr error) error {
   // Regardless of the corruption offset, no record reaches into the previous segment.
   // So we can safely repair the WAL by removing the segment and re-inserting all
   // its records up to the corruption.
-  level.Warn(w.logger).Log("msg", "rewrite corrupted segment", "segment", cerr.Segment)
+  level.Warn(w.logger).Log("msg", "Rewrite corrupted segment", "segment", cerr.Segment)
   fn := SegmentName(w.dir, cerr.Segment)
   tmpfn := fn + ".repair"
diff --git a/tsdb/wal/watcher.go b/tsdb/wal/watcher.go
index 7e1a891453..abd7bdc77e 100644
--- a/tsdb/wal/watcher.go
+++ b/tsdb/wal/watcher.go
@@ -168,7 +168,7 @@ func (w *Watcher) setMetrics() {
 // Start the Watcher.
 func (w *Watcher) Start() {
   w.setMetrics()
-  level.Info(w.logger).Log("msg", "starting WAL watcher", "queue", w.name)
+  level.Info(w.logger).Log("msg", "Starting WAL watcher", "queue", w.name)
   go w.loop()
 }
@@ -220,7 +220,7 @@ func (w *Watcher) Run() error {
   // Run will be called again if there was a failure to read the WAL.
   w.sendSamples = false
-  level.Info(w.logger).Log("msg", "replaying WAL", "queue", w.name)
+  level.Info(w.logger).Log("msg", "Replaying WAL", "queue", w.name)
   // Backfill from the checkpoint first if it exists.
   lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir)
@@ -240,10 +240,10 @@ func (w *Watcher) Run() error {
     return err
   }
-  level.Debug(w.logger).Log("msg", "tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
+  level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
   for !isClosed(w.quit) {
     w.currentSegmentMetric.Set(float64(currentSegment))
-    level.Debug(w.logger).Log("msg", "processing segment", "currentSegment", currentSegment)
+    level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment)
     // On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment.
     // On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
@@ -369,7 +369,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
         <-gcSem
       }()
       if err := w.garbageCollectSeries(segmentNum); err != nil {
-        level.Warn(w.logger).Log("msg", "error process checkpoint", "err", err)
+        level.Warn(w.logger).Log("msg", "Error process checkpoint", "err", err)
       }
     }()
     default:
@@ -392,9 +392,9 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
     // Ignore errors reading to end of segment whilst replaying the WAL.
     if !tail {
       if err != nil && err != io.EOF {
-        level.Warn(w.logger).Log("msg", "ignoring error reading to end of segment, may have dropped data", "err", err)
+        level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
       } else if reader.Offset() != size {
-        level.Warn(w.logger).Log("msg", "expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
+        level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
       }
       return nil
     }
@@ -412,9 +412,9 @@ func (w *Watcher) watch(segmentNum int, tail bool) error {
     // Ignore all errors reading to end of segment whilst replaying the WAL.
     if !tail {
       if err != nil && err != io.EOF {
-        level.Warn(w.logger).Log("msg", "ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
+        level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
       } else if reader.Offset() != size {
-        level.Warn(w.logger).Log("msg", "expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
+        level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
       }
       return nil
     }
@@ -444,11 +444,11 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error {
   }
   if index >= segmentNum {
-    level.Debug(w.logger).Log("msg", "current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir)
+    level.Debug(w.logger).Log("msg", "Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir)
     return nil
   }
-  level.Debug(w.logger).Log("msg", "new checkpoint detected", "new", dir, "currentSegment", segmentNum)
+  level.Debug(w.logger).Log("msg", "New checkpoint detected", "new", dir, "currentSegment", segmentNum)
   if err = w.readCheckpoint(dir); err != nil {
     return errors.Wrap(err, "readCheckpoint")
   }
@@ -495,7 +495,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
       if !w.sendSamples {
         w.sendSamples = true
         duration := time.Since(w.startTime)
-        level.Info(w.logger).Log("msg", "done replaying WAL", "duration", duration)
+        level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration)
       }
       send = append(send, s)
     }
@@ -541,7 +541,7 @@ func recordType(rt record.Type) string {
 // Read all the series records from a Checkpoint directory.
 func (w *Watcher) readCheckpoint(checkpointDir string) error {
-  level.Debug(w.logger).Log("msg", "reading checkpoint", "dir", checkpointDir)
+  level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir)
   index, err := checkpointNum(checkpointDir)
   if err != nil {
     return errors.Wrap(err, "checkpointNum")
   }
@@ -574,7 +574,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string) error {
     }
   }
-  level.Debug(w.logger).Log("msg", "read series references from checkpoint", "checkpoint", checkpointDir)
+  level.Debug(w.logger).Log("msg", "Read series references from checkpoint", "checkpoint", checkpointDir)
   return nil
 }
diff --git a/web/api/v1/api.go b/web/api/v1/api.go
index 7188e08ff4..97790365c5 100644
--- a/web/api/v1/api.go
+++ b/web/api/v1/api.go
@@ -1265,7 +1265,7 @@ func (api *API) remoteReadQuery(ctx context.Context, query *prompb.Query, extern
   }
   defer func() {
     if err := querier.Close(); err != nil {
-      level.Warn(api.logger).Log("msg", "error on querier close", "err", err.Error())
+      level.Warn(api.logger).Log("msg", "Error on querier close", "err", err.Error())
     }
   }()
diff --git a/web/federate.go b/web/federate.go
index c735bce152..ebed25dc40 100644
--- a/web/federate.go
+++ b/web/federate.go
@@ -91,7 +91,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
   for _, mset := range matcherSets {
     s, wrns, err := q.Select(false, hints, mset...)
     if wrns != nil {
-      level.Debug(h.logger).Log("msg", "federation select returned warnings", "warnings", wrns)
+      level.Debug(h.logger).Log("msg", "Federation select returned warnings", "warnings", wrns)
       federationWarnings.Add(float64(len(wrns)))
     }
     if err != nil {
diff --git a/web/web.go b/web/web.go
index c2df87ed5c..fadd119c3b 100644
--- a/web/web.go
+++ b/web/web.go
@@ -556,7 +556,7 @@ func (h *Handler) Run(ctx context.Context) error {
   apiPath := "/api"
   if h.options.RoutePrefix != "/" {
     apiPath = h.options.RoutePrefix + apiPath
-    level.Info(h.logger).Log("msg", "router prefix", "prefix", h.options.RoutePrefix)
+    level.Info(h.logger).Log("msg", "Router prefix", "prefix", h.options.RoutePrefix)
   }
   av1 := route.New().
     WithInstrumentation(h.metrics.instrumentHandlerWithPrefix("/api/v1")).