/*
 * Ring buffer management
 *
 * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <haproxy/api.h>
#include <haproxy/applet.h>
#include <haproxy/buf.h>
#include <haproxy/cli.h>
#include <haproxy/ring.h>
#include <haproxy/sc_strm.h>
#include <haproxy/stconn.h>
#include <haproxy/thread.h>

/* context used to dump the contents of a ring via "show events" or "show errors" */
struct show_ring_ctx {
	struct ring *ring;  /* ring to be dumped */
	size_t ofs;         /* storage offset to restart from; ~0=oldest */
	uint flags;         /* set of RING_WF_* */
};

/* Initialize a pre-allocated ring <ring> with the buffer area <area>
 * of size <size>.
 */
void ring_init(struct ring *ring, void *area, size_t size)
{
	HA_RWLOCK_INIT(&ring->lock);
	LIST_INIT(&ring->waiters);
	ring->readers_count = 0;
	ring->buf = b_make(area, size, 0, 0);
	/* write the initial RC byte */
	b_putchr(&ring->buf, 0);
}
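
/* Illustrative sketch, not part of the API: right after ring_init() the
 * storage already holds one byte, the initial read-counter (RC) byte, so
 * an "empty" ring reports b_data() == 1. The static area is an assumption.
 */
static inline void ring_example_init(void)
{
	static char area[1024];
	struct ring r;

	ring_init(&r, area, sizeof(area));
	BUG_ON(b_data(&r.buf) != 1); /* only the initial RC byte is present */
}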

/* Creates and returns a ring buffer of size <size> bytes. Returns NULL on
 * allocation failure.
 */
struct ring *ring_new(size_t size)
{
	struct ring *ring = NULL;
	void *area = NULL;

	if (size < 2)
		goto fail;

	ring = malloc(sizeof(*ring));
	if (!ring)
		goto fail;

	area = malloc(size);
	if (!area)
		goto fail;

	ring_init(ring, area, size);
	return ring;
 fail:
	free(area);
	free(ring);
	return NULL;
}
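
/* Usage sketch (illustrative; the message contents are assumptions): a
 * producer typically creates a ring once, emits whole messages with
 * ring_write(), and releases the ring at shutdown. Passing ~0 as <maxlen>
 * means "do not truncate".
 */
static inline void ring_example_producer(void)
{
	struct ring *ring = ring_new(16384);
	struct ist pfx = ist("[worker] ");
	struct ist msg = ist("service started");

	if (!ring)
		return;
	/* stored atomically as one record: "[worker] service started" */
	ring_write(ring, ~0, &pfx, 1, &msg, 1);
	ring_free(ring);
}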

/* Creates a unified ring + storage area at address <area> for <size> bytes.
 * If <area> is null, then it's allocated of the requested size. The ring
 * struct is part of the area so the usable area is slightly reduced. However
 * the ring storage is immediately adjacent to the struct. ring_free() will
 * ignore such rings, so the caller is responsible for releasing them.
 */
struct ring *ring_make_from_area(void *area, size_t size)
{
	struct ring *ring = NULL;

	if (size < sizeof(*ring))
		return NULL;

	if (!area)
		area = malloc(size);
	if (!area)
		return NULL;

	ring = area;
	area += sizeof(*ring);
	ring_init(ring, area, size - sizeof(*ring));
	return ring;
}
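
/* Memory layout of a unified ring (sketch):
 *
 *   area --> +---------------------+
 *            | struct ring         |  sizeof(struct ring) bytes
 *            +---------------------+
 *            | storage (ring->buf) |  size - sizeof(struct ring) bytes
 *            +---------------------+
 *
 * Illustrative only: such a ring may live in caller-provided memory, e.g.
 * a static array as assumed below (whose alignment is taken for granted
 * here), which is why ring_free() must skip it.
 */
static inline struct ring *ring_example_unified(void)
{
	static char area[4096];

	return ring_make_from_area(area, sizeof(area));
}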

/* Casts a unified ring + storage area starting at <area> back to a ring,
 * without reinitializing the data buffer, so existing contents are kept.
 * Only the waiters list and the lock are reinitialized.
 */
struct ring *ring_cast_from_area(void *area)
{
	struct ring *ring = NULL;

	ring = area;
	ring->buf.area = area + sizeof(*ring);

	HA_RWLOCK_INIT(&ring->lock);
	LIST_INIT(&ring->waiters);
	ring->readers_count = 0;

	return ring;
}
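
/* Illustrative sketch: re-attaching to a ring that a previous process left
 * in persistent memory <shared_area> (e.g. a shared mapping obtained by the
 * caller); the messages written before the restart remain readable.
 */
static inline struct ring *ring_example_reattach(void *shared_area)
{
	return shared_area ? ring_cast_from_area(shared_area) : NULL;
}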

/* Resizes an existing ring <ring> to <size>, without losing its contents.
 * The new size must be at least as large as the previous one, otherwise no
 * change is performed. The pointer to the ring is returned on success, or
 * NULL on allocation failure. This will lock the ring for writes.
 */
struct ring *ring_resize(struct ring *ring, size_t size)
{
	void *area;

	if (b_size(&ring->buf) >= size)
		return ring;

	area = malloc(size);
	if (!area)
		return NULL;

	HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);

	/* recheck the buffer's size, it may have changed during the malloc */
	if (b_size(&ring->buf) < size) {
		/* copy old contents */
		b_getblk(&ring->buf, area, ring->buf.data, 0);
		area = HA_ATOMIC_XCHG(&ring->buf.area, area);
		ring->buf.size = size;
	}

	HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);

	free(area);
	return ring;
}
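
/* Design note on ring_resize(): the atomic exchange swaps the storage
 * pointers, so <area> ends up holding whichever block is no longer needed
 * (the old storage on success, or the fresh allocation if another thread
 * already grew the ring first), and the final free() releases it.
 */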
/* destroys and frees ring <ring> */
void ring_free(struct ring *ring)
{
	if (!ring)
		return;

	/* make sure it was not allocated by ring_make_from_area */
	if (ring->buf.area == (void *)ring + sizeof(*ring))
		return;

	free(ring->buf.area);
	free(ring);
}

/* Tries to send <npfx> parts from <pfx> followed by <nmsg> parts from <msg>
 * to ring <ring>. The message is sent atomically. It may be truncated to
 * <maxlen> bytes if <maxlen> is non-null. There is no distinction between the
 * two lists, it's just a convenience to help the caller prepend some prefixes
 * when necessary. It takes the ring's write lock to make sure no other thread
 * will touch the buffer during the update. Returns the number of bytes sent,
 * or <= 0 on failure.
 */
ssize_t ring_write(struct ring *ring, size_t maxlen, const struct ist pfx[], size_t npfx, const struct ist msg[], size_t nmsg)
{
	struct buffer *buf = &ring->buf;
	struct appctx *appctx;
	size_t totlen = 0;
	size_t lenlen;
	uint64_t dellen;
	int dellenlen;
	ssize_t sent = 0;
	int i;

	/* we have to find some room to add our message (the buffer is
	 * never empty and at least contains the previous counter) and
	 * to update both the buffer contents and heads at the same
	 * time (it's doable using atomic ops but not worth the
	 * trouble, let's just lock). For this we first need to know
	 * the total message's length. We cannot measure it while
	 * copying due to the varint encoding of the length.
	 */
	for (i = 0; i < npfx; i++)
		totlen += pfx[i].len;
	for (i = 0; i < nmsg; i++)
		totlen += msg[i].len;

	if (totlen > maxlen)
		totlen = maxlen;

	lenlen = varint_bytes(totlen);

	HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
	if (lenlen + totlen + 1 + 1 > b_size(buf))
		goto done_buf;

	while (b_room(buf) < lenlen + totlen + 1) {
		/* we need to delete the oldest message (at the buffer's
		 * head), and we have to stop if there's a reader stuck there.
		 * Unless there's corruption in the buffer it's guaranteed
		 * that we have enough data to find 1 counter byte, a
		 * varint-encoded length (1 byte min) and the message
		 * payload (0 bytes min).
		 */
		if (*b_head(buf))
			goto done_buf;
		dellenlen = b_peek_varint(buf, 1, &dellen);
		if (!dellenlen)
			goto done_buf;
		BUG_ON(b_data(buf) < 1 + dellenlen + dellen);

		b_del(buf, 1 + dellenlen + dellen);
	}

	/* OK now we do have room */
	__b_put_varint(buf, totlen);

	totlen = 0;
	for (i = 0; i < npfx; i++) {
		size_t len = pfx[i].len;

		if (len + totlen > maxlen)
			len = maxlen - totlen;
		if (len)
			__b_putblk(buf, pfx[i].ptr, len);
		totlen += len;
	}

	for (i = 0; i < nmsg; i++) {
		size_t len = msg[i].len;

		if (len + totlen > maxlen)
			len = maxlen - totlen;
		if (len)
			__b_putblk(buf, msg[i].ptr, len);
		totlen += len;
	}

	*b_tail(buf) = 0; buf->data++; // new read counter
	sent = lenlen + totlen + 1;

	/* notify potential readers */
	list_for_each_entry(appctx, &ring->waiters, wait_entry)
		appctx_wakeup(appctx);

 done_buf:
	HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);
	return sent;
}
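
/* On-storage record layout produced by ring_write() above (sketch):
 *
 *   [RC][len:varint][payload ...][RC][len:varint][payload ...][RC]
 *
 * Each one-byte read counter (RC) counts the readers currently parked in
 * front of the record that follows it, which is what forbids ring_write()
 * from deleting that record to make room. An illustrative reader walk,
 * assumed to run under the read lock (it mirrors the dumping loop in
 * cli_io_handler_show_ring() below):
 */
static inline size_t ring_example_walk(struct ring *ring, size_t ofs)
{
	struct buffer *buf = &ring->buf;
	uint64_t msg_len;
	size_t vlen;

	/* <ofs> points to the RC byte preceding the next unread message */
	while (ofs + 1 < b_data(buf)) {
		vlen = b_peek_varint(buf, ofs + 1, &msg_len);
		if (!vlen)
			break;
		/* payload spans [ofs + 1 + vlen, ofs + 1 + vlen + msg_len) */
		ofs += 1 + vlen + msg_len;
	}
	return ofs; /* offset of the trailing RC byte */
}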

/* Tries to attach a new reader on ring <ring>. This is meant to be used by
 * low level appctx code such as CLI or ring forwarding. For higher level
 * functions, please see the relevant parts in appctx or CLI. It returns
 * non-zero on success or zero on failure if too many users are already
 * attached. On success, the caller MUST call ring_detach_appctx() to detach
 * itself, even if it was never woken up.
 */
int ring_attach(struct ring *ring)
{
	int users = ring->readers_count;

	do {
		if (users >= 255)
			return 0;
	} while (!_HA_ATOMIC_CAS(&ring->readers_count, &users, users + 1));
	return 1;
}
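
/* Note: the 255-reader cap above presumably matches the one-byte read
 * counters stored in the ring's storage; a single byte could not account
 * for more parked readers.
 */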

/* detach an appctx from a ring. The appctx is expected to be waiting at
 * offset <ofs> relative to the beginning of the storage, or ~0 if not
 * waiting yet. Nothing is done if <ring> is NULL.
 */
void ring_detach_appctx(struct ring *ring, struct appctx *appctx, size_t ofs)
{
	if (!ring)
		return;

	HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
	if (ofs != ~0) {
		/* reader was still attached */
		if (ofs < b_head_ofs(&ring->buf))
			ofs += b_size(&ring->buf) - b_head_ofs(&ring->buf);
		else
			ofs -= b_head_ofs(&ring->buf);

		BUG_ON(ofs >= b_size(&ring->buf));
		LIST_DEL_INIT(&appctx->wait_entry);
		HA_ATOMIC_DEC(b_peek(&ring->buf, ofs));
	}
	HA_ATOMIC_DEC(&ring->readers_count);
	HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);
}
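
/* Worked example for the wrap-around conversion above (illustrative
 * numbers): with b_size() == 100 and b_head_ofs() == 70, a reader parked
 * at storage offset 10 sits 100 - 70 + 10 == 40 bytes past the head,
 * while one parked at storage offset 85 sits 85 - 70 == 15 bytes past it.
 */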

/* Tries to attach CLI handler <appctx> as a new reader on ring <ring>. This is
 * meant to be used when registering a CLI function to dump a buffer, so it
 * returns zero on success, or non-zero on failure with a message in the appctx
 * CLI context. It automatically sets the io_handler and io_release callbacks if
 * they were not set. The <flags> take a combination of RING_WF_*.
 */
int ring_attach_cli(struct ring *ring, struct appctx *appctx, uint flags)
{
	struct show_ring_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));

	if (!ring_attach(ring))
		return cli_err(appctx,
		               "Sorry, too many watchers (255) on this ring buffer. "
		               "What could it contain that is so interesting to attract so many watchers?");

	if (!appctx->io_handler)
		appctx->io_handler = cli_io_handler_show_ring;
	if (!appctx->io_release)
		appctx->io_release = cli_io_release_show_ring;

	memset(ctx, 0, sizeof(*ctx));
	ctx->ring  = ring;
	ctx->ofs   = ~0; // start from the oldest event
	ctx->flags = flags;
	return 0;
}
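
/* Illustrative parse-time usage (hypothetical helper, not an existing CLI
 * entry point): a "show" command handler resolves its ring and attaches
 * the CLI appctx to it, optionally requesting wait mode or new events only.
 */
static inline int ring_example_parse_show(struct appctx *appctx, struct ring *ring,
                                          int wait, int new_only)
{
	uint flags = 0;

	if (wait)
		flags |= RING_WF_WAIT_MODE;
	if (new_only)
		flags |= RING_WF_SEEK_NEW;
	return ring_attach_cli(ring, appctx, flags);
}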
/* This function dumps all events from the ring <ctx->ring> into the appctx's
 * output buffer, and takes from <ctx->ofs> the seek offset into the buffer's
 * history (~0 to start from the oldest known event, or from the end when
 * RING_WF_SEEK_NEW is set in <ctx->flags>). RING_WF_WAIT_MODE in <ctx->flags>
 * means it must wait for new data or for any key to be pressed. It returns 0
 * if the output buffer is full and it needs to be called again, otherwise
 * non-zero. It is meant to be used with cli_io_release_show_ring() to clean
 * up.
 */
int cli_io_handler_show_ring(struct appctx *appctx)
{
	struct show_ring_ctx *ctx = appctx->svcctx;
	struct stconn *sc = appctx_sc(appctx);
	struct ring *ring = ctx->ring;
	struct buffer *buf = &ring->buf;
	size_t ofs;
	size_t last_ofs;
	uint64_t msg_len;
	size_t len, cnt;
	int ret;

	/* FIXME: don't watch the other side! */
	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
		return 1;

	HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
	LIST_DEL_INIT(&appctx->wait_entry);
	HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);

	HA_RWLOCK_RDLOCK(LOGSRV_LOCK, &ring->lock);

	/* explanation for the initialization below: it would be better to do
	 * this in the parsing function but this would occasionally result in
	 * dropped events because we'd take a reference on the oldest message
	 * and keep it while being scheduled. Thus instead let's take it the
	 * first time we enter here so that we have a chance to pass many
	 * existing messages before grabbing a reference to a location. This
	 * value cannot be produced after initialization.
	 */
	if (unlikely(ctx->ofs == ~0)) {
		/* going to the end means looking at tail-1 */
		ctx->ofs = b_peek_ofs(buf, (ctx->flags & RING_WF_SEEK_NEW) ? b_data(buf) - 1 : 0);
		HA_ATOMIC_INC(b_orig(buf) + ctx->ofs);
	}

	/* we were already there, adjust the offset to be relative to
	 * the buffer's head and remove us from the counter.
	 */
	ofs = ctx->ofs - b_head_ofs(buf);
	if (ctx->ofs < b_head_ofs(buf))
		ofs += b_size(buf);

	BUG_ON(ofs >= buf->size);
	HA_ATOMIC_DEC(b_peek(buf, ofs));

	/* in this loop, ofs always points to the counter byte that precedes
	 * the message so that we can take our reference there if we have to
	 * stop before the end (ret=0).
	 */
	ret = 1;
	while (ofs + 1 < b_data(buf)) {
		cnt = 1;
		len = b_peek_varint(buf, ofs + cnt, &msg_len);
		if (!len)
			break;
		cnt += len;
		BUG_ON(msg_len + ofs + cnt + 1 > b_data(buf));

		if (unlikely(msg_len + 1 > b_size(&trash))) {
			/* too large a message to ever fit, let's skip it */
			ofs += cnt + msg_len;
			continue;
		}

		chunk_reset(&trash);
		len = b_getblk(buf, trash.area, msg_len, ofs + cnt);
		trash.data += len;
		trash.area[trash.data++] = '\n';

		if (applet_putchk(appctx, &trash) == -1) {
			ret = 0;
			break;
		}
		ofs += cnt + msg_len;
	}

	HA_ATOMIC_INC(b_peek(buf, ofs));
	last_ofs = b_tail_ofs(buf);
	ctx->ofs = b_peek_ofs(buf, ofs);
	HA_RWLOCK_RDUNLOCK(LOGSRV_LOCK, &ring->lock);

	if (ret && (ctx->flags & RING_WF_WAIT_MODE)) {
		/* we've drained everything and are configured to wait for more
		 * data or an event (keypress, close)
		 */
		if (!sc_oc(sc)->output && !(sc->flags & SC_FL_SHUT_DONE)) {
			/* let's be woken up once new data arrive */
			HA_RWLOCK_WRLOCK(LOGSRV_LOCK, &ring->lock);
			LIST_APPEND(&ring->waiters, &appctx->wait_entry);
			ofs = b_tail_ofs(&ring->buf);
			HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);
			if (ofs != last_ofs) {
				/* more data was added into the ring between the
				 * unlock and the lock, and the writer might not
				 * have seen us. We need to reschedule a read.
				 */
				applet_have_more_data(appctx);
			} else
				applet_have_no_more_data(appctx);
			ret = 0;
		}
		/* always drain the whole request */
		co_skip(sc_oc(sc), sc_oc(sc)->output);
	}

	applet_expect_no_data(appctx);
	return ret;
}

/* must be called after cli_io_handler_show_ring() above */
void cli_io_release_show_ring(struct appctx *appctx)
{
	struct show_ring_ctx *ctx = appctx->svcctx;
	struct ring *ring = ctx->ring;
	size_t ofs = ctx->ofs;

	ring_detach_appctx(ring, appctx, ofs);
}

/* Returns the MAXIMUM payload len that could theoretically fit into the ring
 * based on ring buffer size.
 *
 * Computation logic relies on implementation details from 'ring-t.h'.
 */
size_t ring_max_payload(const struct ring *ring)
{
	size_t max;

	/* initial max = bufsize - 1 (initial RC) - 1 (payload RC) */
	max = b_size(&ring->buf) - 1 - 1;

	/* subtract the payload VI (varint-encoded size) */
	max -= varint_bytes(max);
	return max;
}
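
/* Worked example (illustrative): with a 16384-byte buffer, the two RC
 * bytes leave 16382 bytes; encoding a 16382-byte length takes 3 varint
 * bytes (1 byte covers values up to 239, 2 bytes up to 2287), so
 * ring_max_payload() returns 16379.
 */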

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */