mirror of
https://github.com/haproxy/haproxy.git
synced 2026-04-15 21:59:41 -04:00
MAJOR: ring: drop the now unneeded lock
It was only used to protect the list which is now an mt_list so it doesn't provide any required protection anymore. It obviously also used to provide strict ordering between the writer and the reader when the writer started to update the messages, but that's now covered by the ordered tail updates and updates to the readers count to protect the area. The message rate on small thread counts (up to 12) saw a boost of roughly 5%, while on large counts it lost about 2% due to some contention now becoming visible elsewhere. Typical measures are 6.13M -> 6.61M at 3C6T, and 1.88M -> 1.92M at 24C48T on the EPYC.
This commit is contained in:
parent
cb482f92c4
commit
9e99cfbeb6
3 changed files with 0 additions and 20 deletions
|
|
@ -122,7 +122,6 @@ struct ring_storage {
|
|||
struct ring {
|
||||
struct ring_storage *storage; // the mapped part
|
||||
struct mt_list waiters; // list of waiters, for now, CLI "show event"
|
||||
__decl_thread(HA_RWLOCK_T lock);
|
||||
int readers_count;
|
||||
uint flags; // RING_FL_*
|
||||
};
|
||||
|
|
|
|||
|
|
@ -45,7 +45,6 @@ struct show_ring_ctx {
|
|||
*/
|
||||
void ring_init(struct ring *ring, void *area, size_t size, int reset)
|
||||
{
|
||||
HA_RWLOCK_INIT(&ring->lock);
|
||||
MT_LIST_INIT(&ring->waiters);
|
||||
ring->readers_count = 0;
|
||||
ring->flags = 0;
|
||||
|
|
@ -342,10 +341,8 @@ ssize_t ring_write(struct ring *ring, size_t maxlen, const struct ist pfx[], siz
|
|||
struct mt_list *elt1, elt2;
|
||||
struct appctx *appctx;
|
||||
|
||||
HA_RWLOCK_RDLOCK(RING_LOCK, &ring->lock);
|
||||
mt_list_for_each_entry_safe(appctx, &ring->waiters, wait_entry, elt1, elt2)
|
||||
appctx_wakeup(appctx);
|
||||
HA_RWLOCK_RDUNLOCK(RING_LOCK, &ring->lock);
|
||||
}
|
||||
|
||||
leave:
|
||||
|
|
@ -379,7 +376,6 @@ void ring_detach_appctx(struct ring *ring, struct appctx *appctx, size_t ofs)
|
|||
if (!ring)
|
||||
return;
|
||||
|
||||
HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
|
||||
HA_ATOMIC_DEC(&ring->readers_count);
|
||||
|
||||
if (ofs != ~0) {
|
||||
|
|
@ -396,7 +392,6 @@ void ring_detach_appctx(struct ring *ring, struct appctx *appctx, size_t ofs)
|
|||
} while ((readers > RING_MAX_READERS ||
|
||||
!_HA_ATOMIC_CAS(area + ofs, &readers, readers - 1)) && __ha_cpu_relax());
|
||||
}
|
||||
HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
|
||||
}
|
||||
|
||||
/* Tries to attach CLI handler <appctx> as a new reader on ring <ring>. This is
|
||||
|
|
@ -583,9 +578,7 @@ int cli_io_handler_show_ring(struct appctx *appctx)
|
|||
if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
|
||||
return 1;
|
||||
|
||||
HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
|
||||
MT_LIST_DELETE(&appctx->wait_entry);
|
||||
HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
|
||||
|
||||
ret = ring_dispatch_messages(ring, appctx, &ctx->ofs, &last_ofs, ctx->flags, applet_append_line);
|
||||
|
||||
|
|
@ -595,10 +588,8 @@ int cli_io_handler_show_ring(struct appctx *appctx)
|
|||
*/
|
||||
if (!sc_oc(sc)->output && !(sc->flags & SC_FL_SHUT_DONE)) {
|
||||
/* let's be woken up once new data arrive */
|
||||
HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
|
||||
MT_LIST_APPEND(&ring->waiters, &appctx->wait_entry);
|
||||
ofs = ring_tail(ring);
|
||||
HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
|
||||
if (ofs != last_ofs) {
|
||||
/* more data was added into the ring between the
|
||||
* unlock and the lock, and the writer might not
|
||||
|
|
|
|||
10
src/sink.c
10
src/sink.c
|
|
@ -382,18 +382,14 @@ static void sink_forward_io_handler(struct appctx *appctx)
|
|||
goto close;
|
||||
}
|
||||
|
||||
HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
|
||||
MT_LIST_DELETE(&appctx->wait_entry);
|
||||
HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
|
||||
|
||||
ret = ring_dispatch_messages(ring, appctx, &sft->ofs, &last_ofs, 0, applet_append_line);
|
||||
|
||||
if (ret) {
|
||||
/* let's be woken up once new data arrive */
|
||||
HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
|
||||
MT_LIST_APPEND(&ring->waiters, &appctx->wait_entry);
|
||||
ofs = ring_tail(ring);
|
||||
HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
|
||||
if (ofs != last_ofs) {
|
||||
/* more data was added into the ring between the
|
||||
* unlock and the lock, and the writer might not
|
||||
|
|
@ -451,17 +447,13 @@ static void sink_forward_oc_io_handler(struct appctx *appctx)
|
|||
goto close;
|
||||
}
|
||||
|
||||
HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
|
||||
MT_LIST_DELETE(&appctx->wait_entry);
|
||||
HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
|
||||
|
||||
ret = ring_dispatch_messages(ring, appctx, &sft->ofs, &last_ofs, 0, syslog_applet_append_event);
|
||||
if (ret) {
|
||||
/* let's be woken up once new data arrive */
|
||||
HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
|
||||
MT_LIST_APPEND(&ring->waiters, &appctx->wait_entry);
|
||||
ofs = ring_tail(ring);
|
||||
HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
|
||||
if (ofs != last_ofs) {
|
||||
/* more data was added into the ring between the
|
||||
* unlock and the lock, and the writer might not
|
||||
|
|
@ -491,9 +483,7 @@ void __sink_forward_session_deinit(struct sink_forward_target *sft)
|
|||
if (!sink)
|
||||
return;
|
||||
|
||||
HA_RWLOCK_WRLOCK(RING_LOCK, &sink->ctx.ring->lock);
|
||||
MT_LIST_DELETE(&sft->appctx->wait_entry);
|
||||
HA_RWLOCK_WRUNLOCK(RING_LOCK, &sink->ctx.ring->lock);
|
||||
|
||||
sft->appctx = NULL;
|
||||
task_wakeup(sink->forward_task, TASK_WOKEN_MSG);
|
||||
|
|
|
|||
Loading…
Reference in a new issue