diff --git a/include/haproxy/thread.h b/include/haproxy/thread.h
index ad0ae1aa7..c030c5e0f 100644
--- a/include/haproxy/thread.h
+++ b/include/haproxy/thread.h
@@ -360,7 +360,15 @@ static inline unsigned long thread_isolated()
 	} while (0)
 
 #define _lock_wait(_LK_, lbl, expr) do {				\
+		uint64_t lock_start = 0;				\
+		extern uint64_t now_mono_time(void);			\
+		if (_LK_ != _LK_UN) {					\
+			if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) \
+				lock_start = now_mono_time();		\
+		}							\
 		(void)(expr);						\
+		if (_LK_ != _LK_UN && unlikely(lock_start))		\
+			th_ctx->lock_wait_total += now_mono_time() - lock_start; \
 		if (lbl != OTHER_LOCK)					\
 			_lock_wait_common(_LK_, lbl);			\
 	} while (0)
diff --git a/include/haproxy/tinfo-t.h b/include/haproxy/tinfo-t.h
index 9e379ae5b..78084c497 100644
--- a/include/haproxy/tinfo-t.h
+++ b/include/haproxy/tinfo-t.h
@@ -161,13 +161,16 @@ struct thread_ctx {
 
 	uint32_t sched_wake_date;           /* current task/tasklet's wake date in 32-bit ns or 0 if not supported */
 	uint64_t sched_call_date;           /* current task/tasklet's call date in ns */
+	uint64_t lock_wait_total;           /* total time in ns spent waiting for a lock (task prof) */
 	uint64_t prev_mono_time;            /* previous system wide monotonic time (leaving poll) */
 	uint64_t curr_mono_time;            /* latest system wide monotonic time (leaving poll) */
 	ulong lock_history;                 /* history of used locks, see thread.h for more details */
 
-	// third cache line here on 64 bits: accessed mostly using atomic ops
+	/* around 56 unused bytes here */
+
+	// fourth cache line here on 64 bits: accessed mostly using atomic ops
 	ALWAYS_ALIGN(64);
 	struct mt_list shared_tasklet_list; /* Tasklet to be run, woken up by other threads */
 	unsigned int rqueue_ticks;          /* Insertion counter for the run queue */
 
 
diff --git a/src/task.c b/src/task.c
index 72be355f0..fdd2775ba 100644
--- a/src/task.c
+++ b/src/task.c
@@ -569,6 +569,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 		process = t->process;
 		t->calls++;
+		th_ctx->lock_wait_total = 0;
 
 		th_ctx->sched_wake_date = t->wake_date;
 		if (th_ctx->sched_wake_date || (t->state & TASK_F_WANTS_TIME)) {
 			/* take the most accurate clock we have, either
@@ -678,8 +679,11 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 			__ha_barrier_store();
 
 			/* stats are only registered for non-zero wake dates */
-			if (unlikely(th_ctx->sched_wake_date))
+			if (unlikely(th_ctx->sched_wake_date)) {
 				HA_ATOMIC_ADD(&profile_entry->cpu_time, (uint32_t)(now_mono_time() - th_ctx->sched_call_date));
+				if (th_ctx->lock_wait_total)
+					HA_ATOMIC_ADD(&profile_entry->lkw_time, th_ctx->lock_wait_total);
+			}
 		}
 		th_ctx->current_queue = -1;
 		th_ctx->sched_wake_date = TICK_ETERNITY;