/*
 * Ring buffer management
 *
 * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <haproxy/api.h>
#include <haproxy/applet.h>
#include <haproxy/buf.h>
#include <haproxy/cfgparse.h>
#include <haproxy/cli.h>
#include <haproxy/ring.h>
#include <haproxy/sc_strm.h>
#include <haproxy/stconn.h>
#include <haproxy/thread.h>
#include <haproxy/vecpair.h>

/* context used to dump the contents of a ring via "show events" or "show errors" */
struct show_ring_ctx {
	struct ring *ring;  /* ring to be dumped */
	size_t ofs;         /* storage offset to restart from; ~0=oldest */
	uint flags;         /* set of RING_WF_* */
};

/* Initialize a pre-allocated ring with the buffer area of size <size>.
 * Makes the storage point to the indicated area and adjusts the declared
 * ring size according to the position of the area in the storage. If <reset>
 * is non-zero, the storage area is reset, otherwise it's left intact (except
 * for the area origin pointer which is updated so that the area can come from
 * an mmap()).
 */
void ring_init(struct ring *ring, void *area, size_t size, int reset)
{
	MT_LIST_INIT(&ring->waiters);
	ring->readers_count = 0;
	ring->flags = 0;
	ring->storage = area;
	ring->pending = 0;
	ring->waking = 0;
	memset(&ring->queue, 0, sizeof(ring->queue));

	if (reset) {
		ring->storage->size = size - sizeof(*ring->storage);
		ring->storage->rsvd = sizeof(*ring->storage);
		ring->storage->head = 0;
		ring->storage->tail = 0;

		/* write the initial RC byte */
		*ring->storage->area = 0;
		ring->storage->tail = 1;
	}
}

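/* Hypothetical usage sketch (not from the original file): initializing a
 * ring over a caller-provided area, e.g. before mapping one over a file.
 * <area> must be large enough for a struct ring_storage plus data and be
 * suitably aligned for it; the buffer and sizes below are illustrative.
 *
 *     static char area[16384] __attribute__((aligned(64)));
 *     struct ring r;
 *
 *     ring_init(&r, area, sizeof(area), 1);
 *     // usable payload space is sizeof(area) - sizeof(struct ring_storage)
 */
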
/* Creates a ring and its storage area at address <area> for <size> bytes.
 * If <area> is null, then it's allocated to the requested size. The ring
 * storage struct is part of the area so the usable area is slightly reduced.
 * However the storage is immediately adjacent to the struct so that the ring
 * remains consistent on-disk. ring_free() will ignore such ring storages and
 * will only release the ring part, so the caller is responsible for releasing
 * them. If <reset> is non-zero, the storage area is reset, otherwise it's left
 * intact.
 */
struct ring *ring_make_from_area(void *area, size_t size, int reset)
{
	struct ring *ring = NULL;
	uint flags = 0;

	if (size < sizeof(*ring->storage) + 2)
		return NULL;

	ring = ha_aligned_alloc_typed(1, typeof(*ring));
	if (!ring)
		goto fail;

	if (!area)
		area = ha_aligned_alloc(__alignof__(*ring->storage), size);
	else
		flags |= RING_FL_MAPPED;

	if (!area)
		goto fail;

	ring_init(ring, area, size, reset);
	ring->flags |= flags;
	return ring;

 fail:
	ha_aligned_free(ring);
	return NULL;
}

/* Creates and returns a ring buffer of size <size> bytes. Returns NULL on
 * allocation failure. The size is the area size, not the usable size.
 */
struct ring *ring_new(size_t size)
{
	return ring_make_from_area(NULL, size, 1);
}

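/* Hypothetical end-to-end sketch (not from the original file): the usual
 * lifecycle combines ring_new(), ring_write() and ring_free(). The sizes
 * and message below are illustrative only.
 *
 *     struct ist msg[1] = { IST("hello world") };
 *     struct ring *r = ring_new(16384);
 *
 *     if (r) {
 *             ring_write(r, 1024, NULL, 0, msg, 1);  // no prefix parts
 *             ring_free(r);
 *     }
 */
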
/* Resizes existing ring <ring> to <size> which must be larger, without losing
 * its contents. The new size must be at least as large as the previous one or
 * no change will be performed. The pointer to the ring is returned on success,
 * or NULL on allocation failure. This will lock the ring for writes. The size
 * is the allocated area size, and includes the ring_storage header.
 */
struct ring *ring_resize(struct ring *ring, size_t size)
{
	struct ring_storage *old, *new;

	if (size <= ring_data(ring) + sizeof(*ring->storage))
		return ring;

	old = ring->storage;
	new = ha_aligned_alloc(__alignof__(*ring->storage), size);
	if (!new)
		return NULL;

	thread_isolate();

	/* recheck the ring's size, it may have changed during the malloc */
	if (size > ring_data(ring) + sizeof(*ring->storage)) {
		/* copy old contents */
		struct ist v1, v2;
		size_t len;

		vp_ring_to_data(&v1, &v2, old->area, old->size, old->head, old->tail);
		len = vp_size(v1, v2);
		vp_peek_ofs(v1, v2, 0, new->area, len);
		new->size = size - sizeof(*ring->storage);
		new->rsvd = sizeof(*ring->storage);
		new->head = 0;
		new->tail = len;
		new = HA_ATOMIC_XCHG(&ring->storage, new);
	}

	thread_release();

	/* free the unused one */
	ha_aligned_free(new);
	return ring;
}

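/* Design note on ring_resize(): after the HA_ATOMIC_XCHG() above, <new>
 * holds whichever storage block ended up unused: the old one when the swap
 * happened, or the freshly allocated one when the recheck under isolation
 * showed the size was no longer larger, so the single ha_aligned_free()
 * covers both paths.
 */
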
/* destroys and frees ring <ring> */
void ring_free(struct ring *ring)
{
	if (!ring)
		return;

	/* make sure it was not allocated by ring_make_from_area */
	if (!(ring->flags & RING_FL_MAPPED))
		ha_aligned_free(ring->storage);
	ha_aligned_free(ring);
}

/* Tries to send <npfx> parts from <pfx> followed by <nmsg> parts from <msg>
 * to ring <ring>. The message is sent atomically. It may be truncated to
 * <maxlen> bytes if <maxlen> is non-null. There is no distinction between the
 * two lists, it's just a convenience to help the caller prepend some prefixes
 * when necessary. It takes the ring's write lock to make sure no other thread
 * will touch the buffer during the update. Returns the number of bytes sent,
 * or <=0 on failure.
 */
ssize_t ring_write(struct ring *ring, size_t maxlen, const struct ist pfx[], size_t npfx, const struct ist msg[], size_t nmsg)
{
	struct ring_wait_cell **ring_queue_ptr = DISGUISE(&ring->queue[ti->ring_queue].ptr);
	struct ring_wait_cell cell, *next_cell, *curr_cell;
	size_t *tail_ptr = &ring->storage->tail;
	size_t head_ofs, tail_ofs, new_tail_ofs;
	size_t ring_size;
	char *ring_area;
	struct ist v1, v2;
	size_t msglen = 0;
	size_t lenlen;
	size_t needed;
	uint64_t dellen;
	int dellenlen;
	uint8_t *lock_ptr;
	uint8_t readers;
	ssize_t sent = 0;
	int i;

	/* we have to find some room to add our message (the buffer is
	 * never empty and at least contains the previous counter) and
	 * to update both the buffer contents and heads at the same
	 * time (it's doable using atomic ops but not worth the
	 * trouble, let's just lock). For this we first need to know
	 * the total message's length. We cannot measure it while
	 * copying due to the varint encoding of the length.
	 */
	for (i = 0; i < npfx; i++)
		msglen += pfx[i].len;
	for (i = 0; i < nmsg; i++)
		msglen += msg[i].len;

	if (msglen > maxlen)
		msglen = maxlen;

	lenlen = varint_bytes(msglen);

	/* We need:
	 * - lenlen bytes for the size encoding
	 * - msglen for the message
	 * - one byte for the new marker
	 *
	 * Note that we'll also reserve one extra byte to make sure we never
	 * leave a full buffer (the vec-to-ring conversion cannot be done if
	 * both areas are of size 0).
	 */
	needed = lenlen + msglen + 1;

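	/* Worked example (illustrative): a 300-byte message needs a 2-byte
	 * varint for its length with this encoding (values below 240 fit in
	 * one byte), so needed = 2 + 300 + 1 = 303, and the checks below
	 * compare 303 + 1 against the ring size.
	 */
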
	/* these ones do not change under us (only resize affects them and it
	 * must be done under thread isolation).
	 */
	ring_area = ring->storage->area;
	ring_size = ring->storage->size;

	if (needed + 1 > ring_size)
		goto leave;

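	/* Queueing strategy (from the waiting-queue design notes): waiting
	 * threads are kept away from the work area so as not to disturb the
	 * current writer. Each thread publishes a preinitialized cell at the
	 * queue's head; the last arrived becomes the leader and learns the
	 * amount of previously accumulated bytes so that once it owns the
	 * tail it can release room for the whole queue at once, cascading
	 * contention so that multiple parts work in parallel.
	 */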
	cell.to_send_self = needed;
	cell.needed_tot = 0; // only when non-zero the cell is considered ready.
	cell.maxlen = msglen;
	cell.pfx = pfx;
	cell.npfx = npfx;
	cell.msg = msg;
	cell.nmsg = nmsg;

	/* insert our cell into the queue before the previous one. We may have
	 * to wait a bit if the queue's leader is attempting an election to win
	 * the tail, hence the busy value (should be rare enough).
	 */
	next_cell = HA_ATOMIC_XCHG(ring_queue_ptr, &cell);

	/* let's add the cumulated size of pending messages to ours */
	cell.next = next_cell;
	if (next_cell) {
		size_t next_needed;

		while ((next_needed = HA_ATOMIC_LOAD(&next_cell->needed_tot)) == 0)
			__ha_cpu_relax_for_read();
		needed += next_needed;
	}

	/* now <needed> will represent the size to store *all* messages. The
	 * atomic store may unlock a subsequent thread waiting for this one.
	 */
	HA_ATOMIC_STORE(&cell.needed_tot, needed);

	/* OK now we're the queue leader, it's our job to try to get ownership
	 * of the tail. If we succeeded above, we don't even enter the loop. If
	 * we failed, we set ourselves at the top of the queue, waiting for the
	 * tail to be unlocked again. We stop doing that if another thread
	 * comes in and becomes the leader in turn.
	 */

	/* Wait for another thread to take the lead or for the tail to
	 * be available again. It's critical to be read-only in this
	 * loop so as not to lose time synchronizing cache lines. Also,
	 * we must detect a new leader ASAP so that the fewest possible
	 * threads check the tail.
	 */
	tail_ofs = 0;
	while (1) {
#if defined(__x86_64__)
		/* read using a CAS on x86, as it will keep the cache line
		 * in exclusive state for a few more cycles that will allow
		 * us to release the queue without waiting after the loop.
		 */
		curr_cell = &cell;
		HA_ATOMIC_CAS(ring_queue_ptr, &curr_cell, curr_cell);
#else
		curr_cell = HA_ATOMIC_LOAD(ring_queue_ptr);
#endif
		/* give up if another thread took the leadership of the queue */
		if (curr_cell != &cell)
			goto wait_for_flush;

		/* OK the queue is locked, let's attempt to get the tail lock:
		 * we'll atomically OR the lock bit onto the pointer and check
		 * if someone else had it already, otherwise we own it.
		 */
#if defined(__ARM_FEATURE_ATOMICS)
		/* ARMv8.1-a has a true atomic OR and doesn't need the preliminary read */
		tail_ofs = HA_ATOMIC_FETCH_OR(tail_ptr, RING_TAIL_LOCK);
		if (!(tail_ofs & RING_TAIL_LOCK))
			break;
#else
		if (HA_ATOMIC_CAS(tail_ptr, &tail_ofs, tail_ofs | RING_TAIL_LOCK))
			break;
		tail_ofs &= ~RING_TAIL_LOCK;
#endif
		__ha_cpu_relax();
	}

	/* Here we own the tail. We can go on if we're still the leader,
	 * which we'll confirm by trying to reset the queue. If we're
	 * still the leader, we're done.
	 */
	if (!HA_ATOMIC_CAS(ring_queue_ptr, &curr_cell, NULL)) {
		/* oops, no, let's give it back to another thread and wait.
		 * This does not happen often enough to warrant more complex
		 * approaches (tried already).
		 */
		HA_ATOMIC_STORE(tail_ptr, tail_ofs);
		goto wait_for_flush;
	}

	head_ofs = HA_ATOMIC_LOAD(&ring->storage->head);

	/* this is the byte before tail, it contains the users count */
	lock_ptr = (uint8_t*)ring_area + (tail_ofs > 0 ? tail_ofs - 1 : ring_size - 1);

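	/* Layout reminder: seen from the head, each record is stored as one
	 * readers-count byte, a varint-encoded length, then the payload. The
	 * byte just before the tail is the count for the next record to come,
	 * and doubles as the writers' lock byte below.
	 */
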
	/* Take the lock on the area. We're guaranteed to be the only writer
	 * here.
	 */
	readers = HA_ATOMIC_XCHG(lock_ptr, RING_WRITING_SIZE);

	vp_ring_to_data(&v1, &v2, ring_area, ring_size, head_ofs, tail_ofs);

	while (vp_size(v1, v2) + needed + 1 + 1 > ring_size) {
		/* we need to delete the oldest message (from the end),
		 * and we have to stop if there's a reader stuck there.
		 * Unless there's corruption in the buffer it's guaranteed
		 * that we have enough data to find 1 counter byte, a
		 * varint-encoded length (1 byte min) and the message
		 * payload (0 bytes min).
		 */
		if (*_vp_head(v1, v2))
			break;
		dellenlen = vp_peek_varint_ofs(v1, v2, 1, &dellen);
		if (!dellenlen)
			break;
		BUG_ON_HOT(vp_size(v1, v2) < 1 + dellenlen + dellen);
		vp_skip(&v1, &v2, 1 + dellenlen + dellen);
	}

	/* now let's update the buffer with the new tail if our message will fit */
	new_tail_ofs = tail_ofs;
	if (vp_size(v1, v2) + needed + 1 + 1 <= ring_size) {
		vp_data_to_ring(v1, v2, ring_area, ring_size, &head_ofs, &tail_ofs);

		/* update the new space in the buffer */
		HA_ATOMIC_STORE(&ring->storage->head, head_ofs);

		/* calculate next tail pointer */
		new_tail_ofs += needed;
		if (new_tail_ofs >= ring_size)
			new_tail_ofs -= ring_size;

		/* reset next read counter before releasing writers */
		HA_ATOMIC_STORE(ring_area + (new_tail_ofs > 0 ? new_tail_ofs - 1 : ring_size - 1), 0);
	}
	else {
		/* release readers right now, before writing the tail, so as
		 * not to expose the readers count byte to another writer.
		 */
		HA_ATOMIC_STORE(lock_ptr, readers);
	}

	/* and release other writers */
	HA_ATOMIC_STORE(tail_ptr, new_tail_ofs);

	vp_ring_to_room(&v1, &v2, ring_area, ring_size, (new_tail_ofs > 0 ? new_tail_ofs - 1 : ring_size - 1), tail_ofs);

	if (likely(tail_ofs != new_tail_ofs)) {
		/* the list stops on a NULL */
		for (curr_cell = &cell; curr_cell; curr_cell = HA_ATOMIC_LOAD(&curr_cell->next)) {
			maxlen = curr_cell->maxlen;
			pfx = curr_cell->pfx;
			npfx = curr_cell->npfx;
			msg = curr_cell->msg;
			nmsg = curr_cell->nmsg;

			/* let's write the message size */
			vp_put_varint(&v1, &v2, maxlen);

			/* then write the messages */
			msglen = 0;
			for (i = 0; i < npfx; i++) {
				size_t len = pfx[i].len;

				if (len + msglen > maxlen)
					len = maxlen - msglen;
				if (len)
					vp_putblk(&v1, &v2, pfx[i].ptr, len);
				msglen += len;
			}

			for (i = 0; i < nmsg; i++) {
				size_t len = msg[i].len;

				if (len + msglen > maxlen)
					len = maxlen - msglen;
				if (len)
					vp_putblk(&v1, &v2, msg[i].ptr, len);
				msglen += len;
			}

			/* for all but the last message we need to write the
			 * readers count byte.
			 */
			if (curr_cell->next)
				vp_putchr(&v1, &v2, 0);
		}

2024-02-26 05:03:03 -05:00
|
|
|
|
2024-03-15 11:10:55 -04:00
|
|
|
		/* now release */
		for (curr_cell = &cell; curr_cell; curr_cell = next_cell) {
			next_cell = HA_ATOMIC_LOAD(&curr_cell->next);
			_HA_ATOMIC_STORE(&curr_cell->next, curr_cell);
		}
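		/* the store of each cell's own address into its ->next above is
		 * the release signal that waiters parked in wait_for_flush below
		 * spin on.
		 */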

		/* unlock the message area */
		HA_ATOMIC_STORE(lock_ptr, readers);
	} else {
		/* messages were dropped, notify about this and release them */
		for (curr_cell = &cell; curr_cell; curr_cell = next_cell) {
			next_cell = HA_ATOMIC_LOAD(&curr_cell->next);
			HA_ATOMIC_STORE(&curr_cell->to_send_self, 0);
			_HA_ATOMIC_STORE(&curr_cell->next, curr_cell);
		}
	}

	/* we must not write the trailing read counter, it was already done,
	 * plus we could ruin the one of the next writer. And the front was
	 * unlocked either at the top if the ring was full, or just above if it
	 * could be properly filled.
	 */
	sent = cell.to_send_self;

	/* notify potential readers: <pending> records that new messages were
	 * committed, and <waking> makes sure only one thread walks the waiters
	 * list at a time; the loop re-checks <pending> so that a message
	 * committed while readers were being woken up is not missed.
	 */
	if (sent && HA_ATOMIC_LOAD(&ring->readers_count)) {
		HA_ATOMIC_INC(&ring->pending);
		while (HA_ATOMIC_LOAD(&ring->pending) && HA_ATOMIC_XCHG(&ring->waking, 1) == 0) {
			struct mt_list back;
			struct appctx *appctx;

			HA_ATOMIC_STORE(&ring->pending, 0);
			MT_LIST_FOR_EACH_ENTRY_LOCKED(appctx, &ring->waiters, wait_entry, back)
				appctx_wakeup(appctx);
			HA_ATOMIC_STORE(&ring->waking, 0);
		}
	}

 leave:
	return sent;

 wait_for_flush:
	/* if we arrive here, it means we found another leader */

	/* The leader will write our own pointer in the cell's next to
	 * mark it as released. Let's wait for this.
	 */
	do {
		next_cell = HA_ATOMIC_LOAD(&cell.next);
	} while (next_cell != &cell && __ha_cpu_relax());

	/* OK our message was queued. Retrieving the sent size in the ring cell
	 * allows another leader thread to zero it if it finally couldn't send
	 * it (should only happen when using too small ring buffers to store
	 * all competing threads' messages at once).
	 */
	return HA_ATOMIC_LOAD(&cell.to_send_self);
}
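
/* Example use of the writer path above (illustrative sketch, not part of
 * this file's build; assumes a ring <r> created elsewhere and the
 * ring_write() prototype from haproxy/ring.h). A prefix and a message are
 * emitted as one atomic record:
 *
 *	struct ist pfx = ist("dgram: ");
 *	struct ist msg = ist("hello");
 *	ssize_t sent = ring_write(r, ring_max_payload(r), &pfx, 1, &msg, 1);
 *	// sent > 0: bytes queued for this record; 0: the record was dropped
 */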

/* Tries to attach a new reader (e.g. an appctx) on ring <ring>. This is
 * meant to be used by low level appctx code such as CLI or ring forwarding.
 * For higher level functions, please see the relevant parts in appctx or CLI.
 * It returns non-zero on success or zero on failure if too many users are
 * already attached. On success, the caller MUST call ring_detach_appctx()
 * to detach itself, even if it was never woken up.
 */
int ring_attach(struct ring *ring)
{
	int users = ring->readers_count;

	do {
		if (users >= RING_MAX_READERS)
			return 0;
	} while (!_HA_ATOMIC_CAS(&ring->readers_count, &users, users + 1));
	return 1;
}
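
/* Example pairing of attach/detach (illustrative sketch; <r> and <appctx>
 * are assumed to exist, and the offset must remain ~0 until the reader has
 * registered a position):
 *
 *	size_t ofs = ~0;
 *
 *	if (!ring_attach(r))
 *		return 0; // too many readers already
 *	// ... consume using ring_dispatch_messages(r, ..., &ofs, ...) ...
 *	ring_detach_appctx(r, appctx, ofs);
 */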

/* detach an appctx from a ring. The appctx is expected to be waiting at offset
 * <ofs> relative to the beginning of the storage, or ~0 if not waiting yet.
 * Nothing is done if <ring> is NULL.
 */
void ring_detach_appctx(struct ring *ring, struct appctx *appctx, size_t ofs)
{
	if (!ring)
		return;

	HA_ATOMIC_DEC(&ring->readers_count);
	if (ofs != ~0) {
		/* reader was still attached */
		uint8_t *area = (uint8_t *)ring_area(ring);
		uint8_t readers;

		BUG_ON(ofs >= ring_size(ring));
		MT_LIST_DELETE(&appctx->wait_entry);

		/* dec readers count; a value above RING_MAX_READERS means a
		 * writer currently holds this slot locked, so wait for it to
		 * be released before applying the decrement.
		 */
		do {
			readers = _HA_ATOMIC_LOAD(area + ofs);
		} while ((readers > RING_MAX_READERS ||
		          !_HA_ATOMIC_CAS(area + ofs, &readers, readers - 1)) && __ha_cpu_relax());
	}
}

/* Tries to attach CLI handler <appctx> as a new reader on ring <ring>. This is
 * meant to be used when registering a CLI function to dump a buffer, so it
 * returns zero on success, or non-zero on failure with a message in the appctx
 * CLI context. It automatically sets the io_handler and io_release callbacks if
 * they were not set. The <flags> take a combination of RING_WF_*.
 */
int ring_attach_cli(struct ring *ring, struct appctx *appctx, uint flags)
{
	struct show_ring_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));

	if (!ring_attach(ring))
		return cli_err(appctx,
		               "Sorry, too many watchers (" TOSTR(RING_MAX_READERS) ") on this ring buffer. "
		               "What could it have that's so interesting to attract so many watchers ?");

	if (!appctx->cli_ctx.io_handler)
		appctx->cli_ctx.io_handler = cli_io_handler_show_ring;
	if (!appctx->cli_ctx.io_release)
		appctx->cli_ctx.io_release = cli_io_release_show_ring;

	memset(ctx, 0, sizeof(*ctx));
	ctx->ring  = ring;
	ctx->ofs   = ~0; // start from the oldest event
	ctx->flags = flags;
	return 0;
}
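
/* Example CLI registration built on the helper above (illustrative sketch;
 * <my_ring> is a hypothetical ring created elsewhere, and the callback
 * follows the cli_kw parse signature used in this code base):
 *
 *	static int cli_parse_show_myring(char **args, char *payload,
 *	                                 struct appctx *appctx, void *private)
 *	{
 *		return ring_attach_cli(my_ring, appctx, 0);
 *	}
 */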

/* parses as many messages as possible from ring <ring>, starting at the offset
 * stored at *ofs_ptr, with RING_WF_* flags in <flags>, and passes them to
 * the message handler <msg_handler>. If <last_ofs_ptr> is not NULL, the last
 * known tail offset is copied there so that the caller may detect that new
 * data have arrived since we left the function. Returns 0 if it needs to
 * pause, 1 once finished.
 *
 * If <processed> is not NULL, it will be set to the number of messages
 * processed by the function (even when the function returns 0).
 */
int ring_dispatch_messages(struct ring *ring, void *ctx, size_t *ofs_ptr, size_t *last_ofs_ptr, uint flags,
                           ssize_t (*msg_handler)(void *ctx, struct ist v1, struct ist v2, size_t ofs, size_t len, char delim),
                           char delim,
                           size_t *processed)
{
	size_t head_ofs, tail_ofs, prev_ofs;
	size_t ring_size;
	uint8_t *ring_area;
	struct ist v1, v2;
	uint64_t msg_len;
	size_t len, cnt;
	size_t msg_count = 0;
	ssize_t copied;
	uint8_t readers;
	int ret;

	ring_area = (uint8_t *)ring->storage->area;
	ring_size = ring->storage->size;

	/* explanation for the initialization below: it would be better to do
	 * this in the parsing function but this would occasionally result in
	 * dropped events because we'd take a reference on the oldest message
	 * and keep it while being scheduled. Thus instead let's take it the
	 * first time we enter here so that we have a chance to pass many
	 * existing messages before grabbing a reference to a location. This
	 * value cannot be produced after initialization. The first offset
	 * needs to be taken under isolation as it must not move while we're
	 * trying to catch it.
	 */
	if (unlikely(*ofs_ptr == ~0)) {
		thread_isolate();

		head_ofs = HA_ATOMIC_LOAD(&ring->storage->head);
		tail_ofs = ring_tail(ring);

		if (flags & RING_WF_SEEK_NEW) {
			/* going to the end means looking at tail-1 */
			head_ofs = tail_ofs + ring_size - 1;
			if (head_ofs >= ring_size)
				head_ofs -= ring_size;
		}

		/* reserve our slot here (inc readers count) */
		do {
			readers = _HA_ATOMIC_LOAD(ring_area + head_ofs);
		} while ((readers > RING_MAX_READERS ||
		          !_HA_ATOMIC_CAS(ring_area + head_ofs, &readers, readers + 1)) && __ha_cpu_relax());

		thread_release();

		/* store this precious offset in our context, and we're done */
		*ofs_ptr = head_ofs;
	}

	/* we have the guarantee we can restart from our own head */
	head_ofs = *ofs_ptr;
	BUG_ON(head_ofs >= ring_size);

	/* the tail will continue to move but we're getting a safe value
	 * here that will continue to work.
	 */
	tail_ofs = ring_tail(ring);

	/* we keep track of where we were and we don't release it before
	 * we've protected the next place.
	 */
	prev_ofs = head_ofs;

	/* in this loop, head_ofs always points to the counter byte that precedes
	 * the message so that we can take our reference there if we have to
	 * stop before the end (ret=0). The reference is relative to the ring's
	 * origin, while pos is relative to the ring's head.
	 */
	ret = 1;
	vp_ring_to_data(&v1, &v2, (char *)ring_area, ring_size, head_ofs, tail_ofs);

	while (1) {
		if (vp_size(v1, v2) <= 1) {
			/* no more data */
			break;
		}

		readers = _HA_ATOMIC_LOAD(_vp_addr(v1, v2, 0));
		if (readers > RING_MAX_READERS) {
			/* we just met a writer which hasn't finished */
			break;
		}

		cnt = 1;
		len = vp_peek_varint_ofs(v1, v2, cnt, &msg_len);
		if (!len)
			break;
		cnt += len;

		BUG_ON(msg_len + cnt + 1 > vp_size(v1, v2));

		copied = msg_handler(ctx, v1, v2, cnt, msg_len, delim);
		if (copied == -2) {
			/* too large a message to ever fit, let's skip it */
			goto skip;
		}
		else if (copied == -1) {
			/* output full */
			ret = 0;
			break;
		}
 skip:
		msg_count += 1;
		vp_skip(&v1, &v2, cnt + msg_len);
	}

	vp_data_to_ring(v1, v2, (char *)ring_area, ring_size, &head_ofs, &tail_ofs);

	if (head_ofs != prev_ofs) {
		/* inc readers count on new place */
		do {
			readers = _HA_ATOMIC_LOAD(ring_area + head_ofs);
		} while ((readers > RING_MAX_READERS ||
		          !_HA_ATOMIC_CAS(ring_area + head_ofs, &readers, readers + 1)) && __ha_cpu_relax());

		/* dec readers count on old place */
		do {
			readers = _HA_ATOMIC_LOAD(ring_area + prev_ofs);
		} while ((readers > RING_MAX_READERS ||
		          !_HA_ATOMIC_CAS(ring_area + prev_ofs, &readers, readers - 1)) && __ha_cpu_relax());
	}

	if (last_ofs_ptr)
		*last_ofs_ptr = tail_ofs;
	*ofs_ptr = head_ofs;
	if (processed)
		*processed = msg_count;
	return ret;
}
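
/* Example message handler for ring_dispatch_messages() (illustrative sketch;
 * <total> is a hypothetical byte counter passed as <ctx>). Per the loop
 * above, returning -1 means "output full, please pause", -2 means "skip this
 * message", and any other value means the message was consumed:
 *
 *	static ssize_t count_bytes(void *ctx, struct ist v1, struct ist v2,
 *	                           size_t ofs, size_t len, char delim)
 *	{
 *		size_t *total = ctx;
 *
 *		*total += len;
 *		return len;
 *	}
 */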

/* This function dumps all events from the ring pointed to by the appctx's
 * show_ring_ctx into the appctx's output buffer, seeking from the offset
 * stored in the context (~0 to start from the oldest known event). It looks
 * at the context's flags for boolean options: RING_WF_WAIT_MODE means it must
 * wait for new data or any key to be pressed, RING_WF_SEEK_NEW means it must
 * seek directly to the end to wait for new contents. It returns 0 if the
 * output buffer is full or if events are missing, in which case it needs to
 * be called again, otherwise non-zero. It is meant to be used with
 * cli_io_release_show_ring() to clean up.
 */
int cli_io_handler_show_ring(struct appctx *appctx)
{
	struct show_ring_ctx *ctx = appctx->svcctx;
	struct ring *ring = ctx->ring;
	size_t last_ofs;
	size_t ofs;
	int ret;

	MT_LIST_DELETE(&appctx->wait_entry);

	ret = ring_dispatch_messages(ring, appctx, &ctx->ofs, &last_ofs, ctx->flags, applet_append_line,
	                             (ctx->flags & RING_WF_END_ZERO) ? 0 : '\n', NULL);

	if (ret && (ctx->flags & RING_WF_WAIT_MODE)) {
		/* we've drained everything and are configured to wait for more
		 * data or an event (keypress, close)
		 */
		if (!b_data(&appctx->inbuf) && !se_fl_test(appctx->sedesc, SE_FL_SHW)) {
			/* let's be woken up once new data arrive */
			MT_LIST_APPEND(&ring->waiters, &appctx->wait_entry);
			ofs = ring_tail(ring);
			if (ofs != last_ofs) {
				/* more data was added into the ring between the
				 * unlock and the lock, and the writer might not
				 * have seen us. We need to reschedule a read.
				 */
				applet_have_more_data(appctx);
			} else
				applet_have_no_more_data(appctx);
			ret = 0;
		}
		/* always drain all the request */
		b_reset(&appctx->inbuf);
		applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
	}

	applet_will_consume(appctx);
	applet_expect_no_data(appctx);
	return ret;
}

/* must be called after cli_io_handler_show_ring() above */
void cli_io_release_show_ring(struct appctx *appctx)
{
	struct show_ring_ctx *ctx = appctx->svcctx;
	struct ring *ring = ctx->ring;
	size_t ofs = ctx->ofs;

	ring_detach_appctx(ring, appctx, ofs);
}

/* Returns the MAXIMUM payload len that could theoretically fit into the ring
 * based on ring buffer size.
 *
 * Computation logic relies on implementation details from 'ring-t.h'.
 */
size_t ring_max_payload(const struct ring *ring)
{
	size_t max;

	/* initial max = bufsize - 1 (initial RC) - 1 (payload RC) */
	max = ring_size(ring) - 1 - 1;

	/* subtract payload VI (varint-encoded size) */
	max -= varint_bytes(max);
	return max;
}
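
/* Worked example (assuming haproxy's varint encoding, where lengths from
 * 2288 to 264431 take 3 bytes): a 16384-byte ring leaves 16382 bytes after
 * the two reader-count bytes, and the length varint consumes 3 of them, so
 * this function reports 16379 bytes of usable payload.
 */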

/* config parser for global "tune.ring.queues", accepts a number from 0 to RING_WAIT_QUEUES */
static int cfg_parse_tune_ring_queues(char **args, int section_type, struct proxy *curpx,
                                      const struct proxy *defpx, const char *file, int line,
                                      char **err)
{
	int queues;

	if (too_many_args(1, args, err, NULL))
		return -1;

	queues = atoi(args[1]);
	if (queues < 0 || queues > RING_WAIT_QUEUES) {
		memprintf(err, "'%s' expects a number between 0 and %d but got '%s'.", args[0], RING_WAIT_QUEUES, args[1]);
		return -1;
	}

	global.tune.ring_queues = queues;
	return 0;
}
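
/* Example global section using this keyword (illustrative only; the value
 * must not exceed the build-time RING_WAIT_QUEUES limit):
 *
 *	global
 *		tune.ring.queues 4
 */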

/* config keyword parsers */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.ring.queues", cfg_parse_tune_ring_queues },
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */