From c7bc56cf6c8f9c92e98beddca26ed9b47f8a5ac9 Mon Sep 17 00:00:00 2001
From: Devarsh
Date: Tue, 13 Jan 2026 22:37:27 +0530
Subject: [PATCH] Add scrape commit and total duration metrics (#17665)

* Add scrape commit and total duration metrics

Signed-off-by: Devarsh

* update metric based on the review

Signed-off-by: Devarsh

* conditionally record scrape duration

Signed-off-by: Devarsh

* Fix formatting in scrape.go

Signed-off-by: Devarsh

---------

Signed-off-by: Devarsh
---
 scrape/metrics.go | 12 ++++++++++++
 scrape/scrape.go  |  5 +++++
 2 files changed, 17 insertions(+)

diff --git a/scrape/metrics.go b/scrape/metrics.go
index 4662a9fd9e..34f1e28dba 100644
--- a/scrape/metrics.go
+++ b/scrape/metrics.go
@@ -56,6 +56,7 @@ type scrapeMetrics struct {
 	targetScrapeExemplarOutOfOrder         prometheus.Counter
 	targetScrapePoolExceededLabelLimits    prometheus.Counter
 	targetScrapeNativeHistogramBucketLimit prometheus.Counter
+	targetScrapeDuration                   prometheus.Histogram
 }
 
 func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
@@ -252,6 +253,15 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
 			Help: "Total number of exemplar rejected due to not being out of the expected order.",
 		},
 	)
+	sm.targetScrapeDuration = prometheus.NewHistogram(
+		prometheus.HistogramOpts{
+			Name:                            "prometheus_target_scrape_duration_seconds",
+			Help:                            "Total duration of the scrape from start to commit completion in seconds.",
+			NativeHistogramBucketFactor:     1.1,
+			NativeHistogramMaxBucketNumber:  100,
+			NativeHistogramMinResetDuration: 1 * time.Hour,
+		},
+	)
 
 	for _, collector := range []prometheus.Collector{
 		// Used by Manager.
@@ -284,6 +294,7 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
 		sm.targetScrapeExemplarOutOfOrder,
 		sm.targetScrapePoolExceededLabelLimits,
 		sm.targetScrapeNativeHistogramBucketLimit,
+		sm.targetScrapeDuration,
 	} {
 		err := reg.Register(collector)
 		if err != nil {
@@ -324,6 +335,7 @@ func (sm *scrapeMetrics) Unregister() {
 	sm.reg.Unregister(sm.targetScrapeExemplarOutOfOrder)
 	sm.reg.Unregister(sm.targetScrapePoolExceededLabelLimits)
 	sm.reg.Unregister(sm.targetScrapeNativeHistogramBucketLimit)
+	sm.reg.Unregister(sm.targetScrapeDuration)
 }
 
 type TargetsGatherer interface {
diff --git a/scrape/scrape.go b/scrape/scrape.go
index 1a99155d09..58df858b3d 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -1335,6 +1335,11 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
 		return
 	}
 	err = app.Commit()
+	if sl.reportExtraMetrics {
+		totalDuration := time.Since(start)
+		// Record total scrape duration metric.
+		sl.metrics.targetScrapeDuration.Observe(totalDuration.Seconds())
+	}
 	if err != nil {
 		sl.l.Error("Scrape commit failed", "err", err)
 	}
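
Note (not part of the patch): below is a minimal, self-contained Go sketch of the pattern the diff introduces, namely a native histogram configured with the same NativeHistogram* options and an observation guarded by a flag. The local reportExtraMetrics variable and the simulated sleep are stand-ins for sl.reportExtraMetrics and the real scrape/append/commit work; only the metric name, help text, and histogram options are taken from the patch.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// Same options as the targetScrapeDuration histogram added in metrics.go.
	scrapeDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "prometheus_target_scrape_duration_seconds",
		Help:                            "Total duration of the scrape from start to commit completion in seconds.",
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: 1 * time.Hour,
	})
	if err := reg.Register(scrapeDuration); err != nil {
		panic(err)
	}

	// Mirrors the guarded observation in scrapeAndReport: time the work and
	// observe the elapsed seconds only when extra metrics are enabled.
	reportExtraMetrics := true        // stand-in for sl.reportExtraMetrics
	start := time.Now()               // stand-in for the scrape start timestamp
	time.Sleep(10 * time.Millisecond) // stand-in for scrape, append and commit
	if reportExtraMetrics {
		scrapeDuration.Observe(time.Since(start).Seconds())
	}

	// Gather and print the recorded sample count (expected: 1).
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), "samples:", mf.GetMetric()[0].GetHistogram().GetSampleCount())
	}
}

The guard keeps the new histogram behind the same reportExtraMetrics switch that already controls the optional extra scrape-report series, so the duration is only recorded for configurations that opt in.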