diff --git a/.golangci.yml b/.golangci.yml
index 8cb3265f4f..05a23b53b2 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -128,8 +128,6 @@ linters:
         # Disable this check for now since it introduces too many changes in our existing codebase.
         # See https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#hdr-Analyzer_omitzero for more details.
         - omitzero
-        # Disable waitgroup check until we really move to Go 1.25.
-        - waitgroup
     perfsprint:
       # Optimizes even if it requires an int or uint type cast.
       int-conversion: true
diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index 1aaf87bc42..f43da0e1d0 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -159,17 +159,14 @@ func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (u
             batch := lbls[:l]
             lbls = lbls[l:]

-            wg.Add(1)
-            go func() {
-                defer wg.Done()
-
+            wg.Go(func() {
                 n, err := b.ingestScrapesShard(batch, 100, int64(timeDelta*i))
                 if err != nil {
                     // exitWithError(err)
                     fmt.Println(" err", err)
                 }
                 total.Add(n)
-            }()
+            })
         }
         wg.Wait()
     }
diff --git a/discovery/manager_test.go b/discovery/manager_test.go
index 162730d9aa..8a49005100 100644
--- a/discovery/manager_test.go
+++ b/discovery/manager_test.go
@@ -1562,11 +1562,9 @@ func TestConfigReloadAndShutdownRace(t *testing.T) {
     discoveryManager.updatert = 100 * time.Millisecond

     var wgDiscovery sync.WaitGroup
-    wgDiscovery.Add(1)
-    go func() {
+    wgDiscovery.Go(func() {
         discoveryManager.Run()
-        wgDiscovery.Done()
-    }()
+    })

     time.Sleep(time.Millisecond * 200)
     var wgBg sync.WaitGroup
@@ -1588,11 +1586,9 @@ func TestConfigReloadAndShutdownRace(t *testing.T) {
     discoveryManager.ApplyConfig(c)

     delete(c, "prometheus")
-    wgBg.Add(1)
-    go func() {
+    wgBg.Go(func() {
         discoveryManager.ApplyConfig(c)
-        wgBg.Done()
-    }()
+    })

     mgrCancel()
     wgDiscovery.Wait()
diff --git a/promql/engine_test.go b/promql/engine_test.go
index f911419c62..5dfffd7cc7 100644
--- a/promql/engine_test.go
+++ b/promql/engine_test.go
@@ -94,11 +94,9 @@ func TestQueryConcurrency(t *testing.T) {
     var wg sync.WaitGroup
     for range maxConcurrency {
         q := engine.NewTestQuery(f)
-        wg.Add(1)
-        go func() {
+        wg.Go(func() {
             q.Exec(ctx)
-            wg.Done()
-        }()
+        })
         select {
         case <-processing:
             // Expected.
@@ -108,11 +106,9 @@
     }

     q := engine.NewTestQuery(f)
-    wg.Add(1)
-    go func() {
+    wg.Go(func() {
         q.Exec(ctx)
-        wg.Done()
-    }()
+    })

     select {
     case <-processing:
diff --git a/rules/manager_test.go b/rules/manager_test.go
index 27930fc4c7..19c815e50c 100644
--- a/rules/manager_test.go
+++ b/rules/manager_test.go
@@ -2473,11 +2473,9 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
     // Evaluate groups concurrently (like they normally do).
     var wg sync.WaitGroup
     for _, group := range groups {
-        wg.Add(1)
-        go func() {
+        wg.Go(func() {
             group.Eval(ctx, time.Now())
-            wg.Done()
-        }()
+        })
     }

     wg.Wait()
diff --git a/tsdb/chunks/chunk_write_queue.go b/tsdb/chunks/chunk_write_queue.go
index 1a046ea00a..a87c2602cd 100644
--- a/tsdb/chunks/chunk_write_queue.go
+++ b/tsdb/chunks/chunk_write_queue.go
@@ -111,10 +111,7 @@ func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChu
 }

 func (c *chunkWriteQueue) start() {
-    c.workerWg.Add(1)
-    go func() {
-        defer c.workerWg.Done()
-
+    c.workerWg.Go(func() {
         for {
             job, ok := c.jobs.pop()
             if !ok {
@@ -123,7 +120,7 @@

             c.processJob(job)
         }
-    }()
+    })

     c.isRunningMtx.Lock()
     c.isRunning = true
diff --git a/tsdb/chunks/queue_test.go b/tsdb/chunks/queue_test.go
index 377a8181ff..2e3fff59a8 100644
--- a/tsdb/chunks/queue_test.go
+++ b/tsdb/chunks/queue_test.go
@@ -269,34 +269,26 @@ func TestQueuePushPopManyGoroutines(t *testing.T) {

     readersWG := sync.WaitGroup{}
     for range readGoroutines {
-        readersWG.Add(1)
-
-        go func() {
-            defer readersWG.Done()
-
+        readersWG.Go(func() {
             for j, ok := queue.pop(); ok; j, ok = queue.pop() {
                 refsMx.Lock()
                 refs[j.seriesRef] = true
                 refsMx.Unlock()
             }
-        }()
+        })
     }

     id := atomic.Uint64{}

     writersWG := sync.WaitGroup{}
     for range writeGoroutines {
-        writersWG.Add(1)
-
-        go func() {
-            defer writersWG.Done()
-
+        writersWG.Go(func() {
             for range writes {
                 ref := id.Inc()
                 require.True(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(ref)}))
             }
-        }()
+        })
     }

     // Wait until all writes are done.
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index afe15a5f31..44a0921eec 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -1717,10 +1717,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {

             var wg sync.WaitGroup

-            wg.Add(1)
-            go func() {
-                defer wg.Done()
-
+            wg.Go(func() {
                 // Ingest sparse histograms.
                 for _, ah := range allSparseSeries {
                     var (
@@ -1743,7 +1740,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
                 sparseULIDs, err = compactor.Write(sparseHead.opts.ChunkDirRoot, sparseHead, mint, maxt, nil)
                 require.NoError(t, err)
                 require.Len(t, sparseULIDs, 1)
-            }()
+            })

             wg.Add(1)
             go func(c testcase) {
diff --git a/tsdb/head_append_v2_test.go b/tsdb/head_append_v2_test.go
index 082d756e60..61b2eecf4e 100644
--- a/tsdb/head_append_v2_test.go
+++ b/tsdb/head_append_v2_test.go
@@ -1334,13 +1334,11 @@ func TestDataMissingOnQueryDuringCompaction_AppenderV2(t *testing.T) {
     require.NoError(t, err)

     var wg sync.WaitGroup
-    wg.Add(1)
-    go func() {
-        defer wg.Done()
+    wg.Go(func() {
         // Compacting head while the querier spans the compaction time.
         require.NoError(t, db.Compact(ctx))
         require.NotEmpty(t, db.Blocks())
-    }()
+    })

     // Give enough time for compaction to finish.
     // We expect it to be blocked until querier is closed.
diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index 7b8ae0ecbd..142fbc18e7 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -3259,12 +3259,10 @@ func testHeadSeriesChunkRace(t *testing.T) {
     defer q.Close()

     var wg sync.WaitGroup
-    wg.Add(1)
-    go func() {
-        defer wg.Done()
+    wg.Go(func() {
         h.updateMinMaxTime(20, 25)
         h.gc()
-    }()
+    })
     ss := q.Select(context.Background(), false, nil, matcher)
     for ss.Next() {
     }
@@ -3748,13 +3746,11 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) {
     s := ss.At()

     var wg sync.WaitGroup
-    wg.Add(1)
-    go func() {
-        defer wg.Done()
+    wg.Go(func() {
         // Compacting head while the querier spans the compaction time.
         require.NoError(t, db.Compact(ctx))
         require.NotEmpty(t, db.Blocks())
-    }()
+    })

     // Give enough time for compaction to finish.
     // We expect it to be blocked until querier is closed.
@@ -3812,13 +3808,11 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) {
     require.NoError(t, err)

     var wg sync.WaitGroup
-    wg.Add(1)
-    go func() {
-        defer wg.Done()
+    wg.Go(func() {
         // Compacting head while the querier spans the compaction time.
         require.NoError(t, db.Compact(ctx))
         require.NotEmpty(t, db.Blocks())
-    }()
+    })

     // Give enough time for compaction to finish.
     // We expect it to be blocked until querier is closed.
diff --git a/tsdb/isolation_test.go b/tsdb/isolation_test.go
index f2671024e8..2b2e1a6487 100644
--- a/tsdb/isolation_test.go
+++ b/tsdb/isolation_test.go
@@ -88,10 +88,7 @@ func BenchmarkIsolation(b *testing.B) {
     start := make(chan struct{})

     for range goroutines {
-        wg.Add(1)
-
-        go func() {
-            defer wg.Done()
+        wg.Go(func() {
             <-start

             for b.Loop() {
@@ -99,7 +96,7 @@

             iso.closeAppend(appendID)
         }
-        }()
+        })
     }

     b.ResetTimer()
@@ -118,10 +115,7 @@ func BenchmarkIsolationWithState(b *testing.B) {
     start := make(chan struct{})

     for range goroutines {
-        wg.Add(1)
-
-        go func() {
-            defer wg.Done()
+        wg.Go(func() {
             <-start

             for b.Loop() {
@@ -129,7 +123,7 @@

             iso.closeAppend(appendID)
         }
-        }()
+        })
     }

     readers := goroutines / 100
@@ -138,17 +132,14 @@
     }

     for g := 0; g < readers; g++ {
-        wg.Add(1)
-
-        go func() {
-            defer wg.Done()
+        wg.Go(func() {
             <-start

             for b.Loop() {
                 s := iso.State(math.MinInt64, math.MaxInt64)
                 s.Close()
             }
-        }()
+        })
     }

     b.ResetTimer()
diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go
index 32912c5a94..deb950b55a 100644
--- a/util/treecache/treecache.go
+++ b/util/treecache/treecache.go
@@ -265,8 +265,7 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
         }
     }

-    tc.wg.Add(1)
-    go func() {
+    tc.wg.Go(func() {
         numWatchers.Inc()
         // Pass up zookeeper events, until the node is deleted.
         select {
@@ -277,8 +276,7 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
         case <-node.done:
         }
         numWatchers.Dec()
-        tc.wg.Done()
-    }()
+    })

     return nil
 }
diff --git a/web/web_test.go b/web/web_test.go
index cbcf15ffdc..ff486beee0 100644
--- a/web/web_test.go
+++ b/web/web_test.go
@@ -509,14 +509,12 @@ func TestHandleMultipleQuitRequests(t *testing.T) {
     start := make(chan struct{})
     var wg sync.WaitGroup
     for range 3 {
-        wg.Add(1)
-        go func() {
-            defer wg.Done()
+        wg.Go(func() {
             <-start
             resp, err := http.Post(baseURL+"/-/quit", "", strings.NewReader(""))
             require.NoError(t, err)
             require.Equal(t, http.StatusOK, resp.StatusCode)
-        }()
+        })
     }
     close(start)
     wg.Wait()
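Every hunk above applies the same mechanical rewrite: manual `wg.Add(1)` / `defer wg.Done()` bookkeeping around a goroutine is replaced by `sync.WaitGroup.Go`, which was added in Go 1.25, and the `waitgroup` modernize check that flags the old pattern is re-enabled in `.golangci.yml` by dropping it from the disable list. For reference, a minimal sketch of the equivalence; this example is not taken from the Prometheus codebase:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Old pattern, as removed throughout this diff:
	//
	//   wg.Add(1)
	//   go func() {
	//       defer wg.Done()
	//       fmt.Println("worker")
	//   }()

	// New pattern (Go 1.25+): wg.Go calls Add(1), runs the function in a
	// new goroutine, and calls Done when the function returns.
	for i := range 3 {
		wg.Go(func() {
			fmt.Println("worker", i) // illustrative work only
		})
	}

	wg.Wait()
}
```

Note that `wg.Go` only accepts a `func()` with no arguments, which is why goroutines taking parameters, such as `go func(c testcase)` in `tsdb/compact_test.go`, keep the explicit `wg.Add(1)` form.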