From e8e5091510f667feeb36f68b8d2f4d9e8cf00dc0 Mon Sep 17 00:00:00 2001
From: Willy Tarreau
Date: Sat, 20 Feb 2021 11:38:56 +0100
Subject: [PATCH] MINOR: dynbuf: make the buffer wait queue per thread

The buffer wait queue used to be global historically but this does not
make any sense anymore given that the most common use case is to have
thread-local pools. Thus there's no point waking up waiters of other
threads after releasing an entry, as they won't benefit from it.

Let's move the queue head to the thread_info structure and use
ti->buffer_wq from now on.
---
 include/haproxy/channel.h |  2 +-
 include/haproxy/dynbuf.h  |  5 ++---
 include/haproxy/tinfo-t.h |  2 ++
 src/check.c               |  2 +-
 src/dynbuf.c              | 10 ++++++----
 src/flt_spoe.c            |  2 +-
 src/mux_fcgi.c            |  2 +-
 src/mux_h1.c              |  2 +-
 src/mux_h2.c              |  2 +-
 src/stream.c              |  2 +-
 10 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/include/haproxy/channel.h b/include/haproxy/channel.h
index de51d0467..812808c0e 100644
--- a/include/haproxy/channel.h
+++ b/include/haproxy/channel.h
@@ -852,7 +852,7 @@ static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *
 		return 1;
 
 	if (!MT_LIST_ADDED(&wait->list))
-		MT_LIST_ADDQ(&buffer_wq, &wait->list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &wait->list);
 
 	return 0;
 }
diff --git a/include/haproxy/dynbuf.h b/include/haproxy/dynbuf.h
index 2a7aa4768..c26ad21f6 100644
--- a/include/haproxy/dynbuf.h
+++ b/include/haproxy/dynbuf.h
@@ -35,7 +35,6 @@
 #include
 
 extern struct pool_head *pool_head_buffer;
-extern struct mt_list buffer_wq;
 
 int init_buffer();
 void buffer_dump(FILE *o, struct buffer *b, int from, int to);
@@ -192,13 +191,13 @@ static inline struct buffer *b_alloc_margin(struct buffer *buf, int margin)
  * passing a buffer to oneself in case of failed allocations (e.g. need two
  * buffers, get one, fail, release it and wake up self again). In case of
  * normal buffer release where it is expected that the caller is not waiting
- * for a buffer, NULL is fine.
+ * for a buffer, NULL is fine. It will wake waiters on the current thread only.
  */
 void __offer_buffer(void *from, unsigned int threshold);
 
 static inline void offer_buffers(void *from, unsigned int threshold)
 {
-	if (!MT_LIST_ISEMPTY(&buffer_wq))
+	if (!MT_LIST_ISEMPTY(&ti->buffer_wq))
 		__offer_buffer(from, threshold);
 }
 
diff --git a/include/haproxy/tinfo-t.h b/include/haproxy/tinfo-t.h
index f3badbd88..6dc045851 100644
--- a/include/haproxy/tinfo-t.h
+++ b/include/haproxy/tinfo-t.h
@@ -45,6 +45,8 @@ struct thread_info {
 #ifdef CONFIG_HAP_LOCAL_POOLS
 	struct list pool_lru_head;        /* oldest objects   */
 #endif
+	struct mt_list buffer_wq;         /* buffer waiters   */
+
 	/* pad to cache line (64B) */
 	char __pad[0];                    /* unused except to check remaining room */
 	char __end[0] __attribute__((aligned(64)));
diff --git a/src/check.c b/src/check.c
index a09c67a7d..ff4effa17 100644
--- a/src/check.c
+++ b/src/check.c
@@ -1019,7 +1019,7 @@ struct buffer *check_get_buf(struct check *check, struct buffer *bptr)
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		check->buf_wait.target = check;
 		check->buf_wait.wakeup_cb = check_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &check->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &check->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/dynbuf.c b/src/dynbuf.c
index ad2cde926..395fa8a97 100644
--- a/src/dynbuf.c
+++ b/src/dynbuf.c
@@ -22,18 +22,20 @@
 
 struct pool_head *pool_head_buffer;
 
-/* list of objects waiting for at least one buffer */
-struct mt_list buffer_wq = LIST_HEAD_INIT(buffer_wq);
-
 /* perform minimal intializations, report 0 in case of error, 1 if OK. */
 int init_buffer()
 {
 	void *buffer;
+	int thr;
 
 	pool_head_buffer = create_pool("buffer", global.tune.bufsize, MEM_F_SHARED|MEM_F_EXACT);
 	if (!pool_head_buffer)
 		return 0;
 
+	for (thr = 0; thr < MAX_THREADS; thr++)
+		MT_LIST_INIT(&ha_thread_info[thr].buffer_wq);
+
+
 	/* The reserved buffer is what we leave behind us. Thus we always need
 	 * at least one extra buffer in minavail otherwise we'll end up waking
 	 * up tasks with no memory available, causing a lot of useless wakeups.
@@ -112,7 +114,7 @@ void __offer_buffer(void *from, unsigned int threshold)
 	 */
 	avail = pool_head_buffer->allocated - pool_head_buffer->used - global.tune.reserved_bufs / 2;
 
-	mt_list_for_each_entry_safe(wait, &buffer_wq, list, elt1, elt2) {
+	mt_list_for_each_entry_safe(wait, &ti->buffer_wq, list, elt1, elt2) {
 		if (avail <= threshold)
 			break;
 
diff --git a/src/flt_spoe.c b/src/flt_spoe.c
index b9471d917..27a15b6c1 100644
--- a/src/flt_spoe.c
+++ b/src/flt_spoe.c
@@ -2828,7 +2828,7 @@ spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait)
 	if (b_alloc_margin(buf, global.tune.reserved_bufs))
 		return 1;
 
-	MT_LIST_ADDQ(&buffer_wq, &buffer_wait->list);
+	MT_LIST_ADDQ(&ti->buffer_wq, &buffer_wait->list);
 	return 0;
 }
 
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
index a2c1c86c6..2ff7aa909 100644
--- a/src/mux_fcgi.c
+++ b/src/mux_fcgi.c
@@ -608,7 +608,7 @@ static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		fconn->buf_wait.target = fconn;
 		fconn->buf_wait.wakeup_cb = fcgi_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &fconn->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &fconn->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/mux_h1.c b/src/mux_h1.c
index 63339351f..077f4ea2d 100644
--- a/src/mux_h1.c
+++ b/src/mux_h1.c
@@ -452,7 +452,7 @@ static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		h1c->buf_wait.target = h1c;
 		h1c->buf_wait.wakeup_cb = h1_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &h1c->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 21c14b873..cfa5f8c4d 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -810,7 +810,7 @@ static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		h2c->buf_wait.target = h2c;
 		h2c->buf_wait.wakeup_cb = h2_buf_available;
-		MT_LIST_ADDQ(&buffer_wq, &h2c->buf_wait.list);
+		MT_LIST_ADDQ(&ti->buffer_wq, &h2c->buf_wait.list);
 	}
 	return buf;
 }
diff --git a/src/stream.c b/src/stream.c
index 9394ac0a7..d74721162 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -773,7 +773,7 @@ static int stream_alloc_work_buffer(struct stream *s)
 	if (b_alloc_margin(&s->res.buf, 0))
 		return 1;
 
-	MT_LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
+	MT_LIST_ADDQ(&ti->buffer_wq, &s->buffer_wait.list);
 	return 0;
 }
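
Note on the resulting usage pattern: the hunks above all converge on the same
caller-side idiom, which the sketch below summarizes. It is an illustrative
composite, not code from the patch: the foo_* names are hypothetical stand-ins
for the check/mux/stream callers, the include list is indicative, and the
wakeup_cb signature is assumed from the callers shown above; ti->buffer_wq,
b_alloc_margin(), MT_LIST_ADDED() and MT_LIST_ADDQ() are taken straight from
the diff.

/* Hypothetical composite of the callers converted by this patch; only
 * meaningful inside the HAProxy source tree, include list is indicative.
 */
#include <haproxy/api.h>
#include <haproxy/dynbuf.h>

struct foo_ctx {
	struct buffer buf;           /* buffer being allocated            */
	struct buffer_wait buf_wait; /* entry queued into ti->buffer_wq   */
};

/* wakeup_cb run by __offer_buffer() when a buffer is released on the same
 * thread; a real caller would allocate its buffer and wake its task here.
 */
static int foo_buf_available(void *target)
{
	return 1;
}

static struct buffer *foo_get_buf(struct foo_ctx *ctx, struct buffer *bptr)
{
	struct buffer *buf = NULL;

	if (likely(!MT_LIST_ADDED(&ctx->buf_wait.list)) &&
	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
		/* allocation failed: subscribe to the current thread's wait
		 * queue; only buffers released on this thread can wake us.
		 */
		ctx->buf_wait.target = ctx;
		ctx->buf_wait.wakeup_cb = foo_buf_available;
		MT_LIST_ADDQ(&ti->buffer_wq, &ctx->buf_wait.list);
	}
	return buf;
}

Since offer_buffers() now tests ti->buffer_wq and __offer_buffer() walks that
same per-thread list, a waiter registered this way is only woken by buffers
freed on its own thread, which is the behaviour change described in the commit
message.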