chore(lint): enable wg.Go

Since our minimum supported Go version is now Go 1.25, we can use wg.Go.

Signed-off-by: Julien Pivotto <291750+roidelapluie@users.noreply.github.com>
Julien Pivotto 2026-02-17 11:59:11 +01:00
parent 1e2529f8d6
commit 7d0a39ac93
13 changed files with 38 additions and 88 deletions

View file

@@ -128,8 +128,6 @@ linters:
        # Disable this check for now since it introduces too many changes in our existing codebase.
        # See https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#hdr-Analyzer_omitzero for more details.
        - omitzero
-       # Disable waitgroup check until we really move to Go 1.25.
-       - waitgroup
    perfsprint:
      # Optimizes even if it requires an int or uint type cast.
      int-conversion: true

View file

@@ -159,17 +159,14 @@ func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (u
 		batch := lbls[:l]
 		lbls = lbls[l:]
 
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-
+		wg.Go(func() {
 			n, err := b.ingestScrapesShard(batch, 100, int64(timeDelta*i))
 			if err != nil {
 				// exitWithError(err)
 				fmt.Println(" err", err)
 			}
 			total.Add(n)
-		}()
+		})
 	}
 	wg.Wait()
 }

View file

@@ -1562,11 +1562,9 @@ func TestConfigReloadAndShutdownRace(t *testing.T) {
 	discoveryManager.updatert = 100 * time.Millisecond
 
 	var wgDiscovery sync.WaitGroup
-	wgDiscovery.Add(1)
-	go func() {
+	wgDiscovery.Go(func() {
 		discoveryManager.Run()
-		wgDiscovery.Done()
-	}()
+	})
 
 	time.Sleep(time.Millisecond * 200)
 	var wgBg sync.WaitGroup
@@ -1588,11 +1586,9 @@ func TestConfigReloadAndShutdownRace(t *testing.T) {
 	discoveryManager.ApplyConfig(c)
 	delete(c, "prometheus")
 
-	wgBg.Add(1)
-	go func() {
+	wgBg.Go(func() {
 		discoveryManager.ApplyConfig(c)
-		wgBg.Done()
-	}()
+	})
 
 	mgrCancel()
 	wgDiscovery.Wait()

View file

@@ -94,11 +94,9 @@ func TestQueryConcurrency(t *testing.T) {
 	var wg sync.WaitGroup
 	for range maxConcurrency {
 		q := engine.NewTestQuery(f)
-		wg.Add(1)
-		go func() {
+		wg.Go(func() {
 			q.Exec(ctx)
-			wg.Done()
-		}()
+		})
 
 		select {
 		case <-processing:
 			// Expected.
@@ -108,11 +106,9 @@ func TestQueryConcurrency(t *testing.T) {
 	}
 
 	q := engine.NewTestQuery(f)
-	wg.Add(1)
-	go func() {
+	wg.Go(func() {
 		q.Exec(ctx)
-		wg.Done()
-	}()
+	})
 
 	select {
 	case <-processing:

View file

@@ -2473,11 +2473,9 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
 	// Evaluate groups concurrently (like they normally do).
 	var wg sync.WaitGroup
 	for _, group := range groups {
-		wg.Add(1)
-		go func() {
+		wg.Go(func() {
 			group.Eval(ctx, time.Now())
-			wg.Done()
-		}()
+		})
 	}
 
 	wg.Wait()

View file

@@ -111,10 +111,7 @@ func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChu
 }
 
 func (c *chunkWriteQueue) start() {
-	c.workerWg.Add(1)
-	go func() {
-		defer c.workerWg.Done()
-
+	c.workerWg.Go(func() {
 		for {
 			job, ok := c.jobs.pop()
 			if !ok {
@@ -123,7 +120,7 @@ func (c *chunkWriteQueue) start() {
 			c.processJob(job)
 		}
-	}()
+	})
 
 	c.isRunningMtx.Lock()
 	c.isRunning = true

View file

@@ -269,34 +269,26 @@ func TestQueuePushPopManyGoroutines(t *testing.T) {
 	readersWG := sync.WaitGroup{}
 	for range readGoroutines {
-		readersWG.Add(1)
-		go func() {
-			defer readersWG.Done()
-
+		readersWG.Go(func() {
 			for j, ok := queue.pop(); ok; j, ok = queue.pop() {
 				refsMx.Lock()
 				refs[j.seriesRef] = true
 				refsMx.Unlock()
 			}
-		}()
+		})
 	}
 
 	id := atomic.Uint64{}
 	writersWG := sync.WaitGroup{}
 	for range writeGoroutines {
-		writersWG.Add(1)
-		go func() {
-			defer writersWG.Done()
-
+		writersWG.Go(func() {
 			for range writes {
 				ref := id.Inc()
 				require.True(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(ref)}))
 			}
-		}()
+		})
 	}
 
 	// Wait until all writes are done.

View file

@@ -1717,10 +1717,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
 
 			var wg sync.WaitGroup
-			wg.Add(1)
-			go func() {
-				defer wg.Done()
-
+			wg.Go(func() {
 				// Ingest sparse histograms.
 				for _, ah := range allSparseSeries {
 					var (
@@ -1743,7 +1740,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
 				sparseULIDs, err = compactor.Write(sparseHead.opts.ChunkDirRoot, sparseHead, mint, maxt, nil)
 				require.NoError(t, err)
 				require.Len(t, sparseULIDs, 1)
-			}()
+			})
 
 			wg.Add(1)
 			go func(c testcase) {

View file

@@ -1334,13 +1334,11 @@ func TestDataMissingOnQueryDuringCompaction_AppenderV2(t *testing.T) {
 	require.NoError(t, err)
 
 	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
+	wg.Go(func() {
 		// Compacting head while the querier spans the compaction time.
 		require.NoError(t, db.Compact(ctx))
 		require.NotEmpty(t, db.Blocks())
-	}()
+	})
 
 	// Give enough time for compaction to finish.
 	// We expect it to be blocked until querier is closed.

View file

@@ -3259,12 +3259,10 @@ func testHeadSeriesChunkRace(t *testing.T) {
 	defer q.Close()
 
 	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
+	wg.Go(func() {
 		h.updateMinMaxTime(20, 25)
 		h.gc()
-	}()
+	})
 	ss := q.Select(context.Background(), false, nil, matcher)
 	for ss.Next() {
 	}
@@ -3748,13 +3746,11 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) {
 	s := ss.At()
 
 	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
+	wg.Go(func() {
 		// Compacting head while the querier spans the compaction time.
 		require.NoError(t, db.Compact(ctx))
 		require.NotEmpty(t, db.Blocks())
-	}()
+	})
 
 	// Give enough time for compaction to finish.
 	// We expect it to be blocked until querier is closed.
@@ -3812,13 +3808,11 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) {
 	require.NoError(t, err)
 
 	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
+	wg.Go(func() {
 		// Compacting head while the querier spans the compaction time.
 		require.NoError(t, db.Compact(ctx))
 		require.NotEmpty(t, db.Blocks())
-	}()
+	})
 
 	// Give enough time for compaction to finish.
 	// We expect it to be blocked until querier is closed.

View file

@@ -88,10 +88,7 @@ func BenchmarkIsolation(b *testing.B) {
 	start := make(chan struct{})
 	for range goroutines {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-
+		wg.Go(func() {
 			<-start
 			for b.Loop() {
@@ -99,7 +96,7 @@ func BenchmarkIsolation(b *testing.B) {
 				iso.closeAppend(appendID)
 			}
-		}()
+		})
 	}
 
 	b.ResetTimer()
@@ -118,10 +115,7 @@ func BenchmarkIsolationWithState(b *testing.B) {
 	start := make(chan struct{})
 	for range goroutines {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-
+		wg.Go(func() {
 			<-start
 			for b.Loop() {
@@ -129,7 +123,7 @@ func BenchmarkIsolationWithState(b *testing.B) {
 				iso.closeAppend(appendID)
 			}
-		}()
+		})
 	}
 
 	readers := goroutines / 100
@@ -138,17 +132,14 @@ func BenchmarkIsolationWithState(b *testing.B) {
 	}
 
 	for g := 0; g < readers; g++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-
+		wg.Go(func() {
 			<-start
 			for b.Loop() {
 				s := iso.State(math.MinInt64, math.MaxInt64)
 				s.Close()
 			}
-		}()
+		})
 	}
 
 	b.ResetTimer()

View file

@@ -265,8 +265,7 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
 		}
 	}
 
-	tc.wg.Add(1)
-	go func() {
+	tc.wg.Go(func() {
 		numWatchers.Inc()
 		// Pass up zookeeper events, until the node is deleted.
 		select {
@@ -277,8 +276,7 @@ func (tc *ZookeeperTreeCache) recursiveNodeUpdate(path string, node *zookeeperTr
 		case <-node.done:
 		}
 		numWatchers.Dec()
-		tc.wg.Done()
-	}()
+	})
 
 	return nil
 }

View file

@@ -509,14 +509,12 @@ func TestHandleMultipleQuitRequests(t *testing.T) {
 	start := make(chan struct{})
 	var wg sync.WaitGroup
 	for range 3 {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
+		wg.Go(func() {
 			<-start
 			resp, err := http.Post(baseURL+"/-/quit", "", strings.NewReader(""))
 			require.NoError(t, err)
 			require.Equal(t, http.StatusOK, resp.StatusCode)
-		}()
+		})
 	}
 	close(start)
 	wg.Wait()