2012-08-24 13:22:53 -04:00
|
|
|
/*
 * Buffer management functions.
 *
 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
|
|
|
|
|
|
2012-11-22 12:01:40 -05:00
|
|
|
#include <ctype.h>
|
2012-08-24 13:22:53 -04:00
|
|
|
#include <stdio.h>
|
|
|
|
|
#include <string.h>
|
|
|
|
|
|
|
|
|
|
#include <common/config.h>
|
|
|
|
|
#include <common/buffer.h>
|
2012-10-12 17:49:43 -04:00
|
|
|
#include <common/memory.h>
|
2012-08-24 13:22:53 -04:00
|
|
|
|
|
|
|
|
#include <types/global.h>
|
|
|
|
|
|
2012-10-12 17:49:43 -04:00
|
|
|
/* Pool from which all struct buffer objects are allocated. Each entry holds
 * the struct buffer header immediately followed by the data storage area
 * (sized in init_buffer() from global.tune.bufsize).
 */
struct pool_head *pool2_buffer;

/* These buffers are used to always have a valid pointer to an empty buffer in
 * channels. The first buffer is set once a buffer is empty. The second one is
 * set when a buffer is desired but no more are available. It helps knowing
 * what channel wants a buffer. They can reliably be exchanged, the split
 * between the two is only an optimization.
 */
struct buffer buf_empty = { .p = buf_empty.data };
struct buffer buf_wanted = { .p = buf_wanted.data };
|
2012-10-12 17:49:43 -04:00
|
|
|
|
|
|
|
|
/* perform minimal intializations, report 0 in case of error, 1 if OK. */
|
|
|
|
|
int init_buffer()
|
|
|
|
|
{
|
MAJOR: session: only wake up as many sessions as available buffers permit
We've already experimented with three wake up algorithms when releasing
buffers : the first naive one used to wake up far too many sessions,
causing many of them not to get any buffer. The second approach which
was still in use prior to this patch consisted in waking up either 1
or 2 sessions depending on the number of FDs we had released. And this
was still inaccurate. The third one tried to cover the accuracy issues
of the second and took into consideration the number of FDs the sessions
would be willing to use, but most of the time we ended up waking up too
many of them for nothing, or deadlocking by lack of buffers.
This patch completely removes the need to allocate two buffers at once.
Instead it splits allocations into critical and non-critical ones and
implements a reserve in the pool for this. The deadlock situation happens
when all buffers are be allocated for requests pending in a maxconn-limited
server queue, because then there's no more way to allocate buffers for
responses, and these responses are critical to release the servers's
connection in order to release the pending requests. In fact maxconn on
a server creates a dependence between sessions and particularly between
oldest session's responses and latest session's requests. Thus, it is
mandatory to get a free buffer for a response in order to release a
server connection which will permit to release a request buffer.
Since we definitely have non-symmetrical buffers, we need to implement
this logic in the buffer allocation mechanism. What this commit does is
implement a reserve of buffers which can only be allocated for responses
and that will never be allocated for requests. This is made possible by
the requester indicating how much margin it wants to leave after the
allocation succeeds. Thus it is a cooperative allocation mechanism : the
requester (process_session() in general) prefers not to get a buffer in
order to respect other's need for response buffers. The session management
code always knows if a buffer will be used for requests or responses, so
that is not difficult :
- either there's an applet on the initiator side and we really need
the request buffer (since currently the applet is called in the
context of the session)
- or we have a connection and we really need the response buffer (in
order to support building and sending an error message back)
This reserve ensures that we don't take all allocatable buffers for
requests waiting in a queue. The downside is that all the extra buffers
are really allocated to ensure they can be allocated. But with small
values it is not an issue.
With this change, we don't observe any more deadlocks even when running
with maxconn 1 on a server under severely constrained memory conditions.
The code becomes a bit tricky, it relies on the scheduler's run queue to
estimate how many sessions are already expected to run so that it doesn't
wake up everyone with too few resources. A better solution would probably
consist in having two queues, one for urgent requests and one for normal
requests. A failed allocation for a session dealing with an error, a
connection event, or the need for a response (or request when there's an
applet on the left) would go to the urgent request queue, while other
requests would go to the other queue. Urgent requests would be served
from 1 entry in the pool, while the regular ones would be served only
according to the reserve. Despite not yet having this, it works
remarkably well.
This mechanism is quite efficient, we don't perform too many wake up calls
anymore. For 1 million sessions elapsed during massive memory contention,
we observe about 4.5M calls to process_session() compared to 4.0M without
memory constraints. Previously we used to observe up to 16M calls, which
rougly means 12M failures.
During a test run under high memory constraints (limit enforced to 27 MB
instead of the 58 MB normally needed), performance used to drop by 53% prior
to this patch. Now with this patch instead it *increases* by about 1.5%.
The best effect of this change is that by limiting the memory usage to about
2/3 to 3/4 of what is needed by default, it's possible to increase performance
by up to about 18% mainly due to the fact that pools are reused more often
and remain hot in the CPU cache (observed on regular HTTP traffic with 20k
objects, buffers.limit = maxconn/10, buffers.reserve = limit/2).
Below is an example of scenario which used to cause a deadlock previously :
- connection is received
- two buffers are allocated in process_session() then released
- one is allocated when receiving an HTTP request
- the second buffer is allocated then released in process_session()
for request parsing then connection establishment.
- poll() says we can send, so the request buffer is sent and released
- process session gets notified that the connection is now established
and allocates two buffers then releases them
- all other sessions do the same till one cannot get the request buffer
without hitting the margin
- and now the server responds. stream_interface allocates the response
buffer and manages to get it since it's higher priority being for a
response.
- but process_session() cannot allocate the request buffer anymore
=> We could end up with all buffers used by responses so that none may
be allocated for a request in process_session().
When the applet processing leaves the session context, the test will have
to be changed so that we always allocate a response buffer regardless of
the left side (eg: H2->H1 gateway). A final improvement would consists in
being able to only retry the failed I/O operation without waking up a
task, but to date all experiments to achieve this have proven not to be
reliable enough.
2014-11-26 19:11:56 -05:00
|
|
|
void *buffer;
|
|
|
|
|
|
2016-01-24 20:23:25 -05:00
|
|
|
pool2_buffer = create_pool("buffer", sizeof (struct buffer) + global.tune.bufsize, MEM_F_SHARED|MEM_F_EXACT);
|
MAJOR: session: only wake up as many sessions as available buffers permit
We've already experimented with three wake up algorithms when releasing
buffers : the first naive one used to wake up far too many sessions,
causing many of them not to get any buffer. The second approach which
was still in use prior to this patch consisted in waking up either 1
or 2 sessions depending on the number of FDs we had released. And this
was still inaccurate. The third one tried to cover the accuracy issues
of the second and took into consideration the number of FDs the sessions
would be willing to use, but most of the time we ended up waking up too
many of them for nothing, or deadlocking by lack of buffers.
This patch completely removes the need to allocate two buffers at once.
Instead it splits allocations into critical and non-critical ones and
implements a reserve in the pool for this. The deadlock situation happens
when all buffers are be allocated for requests pending in a maxconn-limited
server queue, because then there's no more way to allocate buffers for
responses, and these responses are critical to release the servers's
connection in order to release the pending requests. In fact maxconn on
a server creates a dependence between sessions and particularly between
oldest session's responses and latest session's requests. Thus, it is
mandatory to get a free buffer for a response in order to release a
server connection which will permit to release a request buffer.
Since we definitely have non-symmetrical buffers, we need to implement
this logic in the buffer allocation mechanism. What this commit does is
implement a reserve of buffers which can only be allocated for responses
and that will never be allocated for requests. This is made possible by
the requester indicating how much margin it wants to leave after the
allocation succeeds. Thus it is a cooperative allocation mechanism : the
requester (process_session() in general) prefers not to get a buffer in
order to respect other's need for response buffers. The session management
code always knows if a buffer will be used for requests or responses, so
that is not difficult :
- either there's an applet on the initiator side and we really need
the request buffer (since currently the applet is called in the
context of the session)
- or we have a connection and we really need the response buffer (in
order to support building and sending an error message back)
This reserve ensures that we don't take all allocatable buffers for
requests waiting in a queue. The downside is that all the extra buffers
are really allocated to ensure they can be allocated. But with small
values it is not an issue.
With this change, we don't observe any more deadlocks even when running
with maxconn 1 on a server under severely constrained memory conditions.
The code becomes a bit tricky, it relies on the scheduler's run queue to
estimate how many sessions are already expected to run so that it doesn't
wake up everyone with too few resources. A better solution would probably
consist in having two queues, one for urgent requests and one for normal
requests. A failed allocation for a session dealing with an error, a
connection event, or the need for a response (or request when there's an
applet on the left) would go to the urgent request queue, while other
requests would go to the other queue. Urgent requests would be served
from 1 entry in the pool, while the regular ones would be served only
according to the reserve. Despite not yet having this, it works
remarkably well.
This mechanism is quite efficient, we don't perform too many wake up calls
anymore. For 1 million sessions elapsed during massive memory contention,
we observe about 4.5M calls to process_session() compared to 4.0M without
memory constraints. Previously we used to observe up to 16M calls, which
rougly means 12M failures.
During a test run under high memory constraints (limit enforced to 27 MB
instead of the 58 MB normally needed), performance used to drop by 53% prior
to this patch. Now with this patch instead it *increases* by about 1.5%.
The best effect of this change is that by limiting the memory usage to about
2/3 to 3/4 of what is needed by default, it's possible to increase performance
by up to about 18% mainly due to the fact that pools are reused more often
and remain hot in the CPU cache (observed on regular HTTP traffic with 20k
objects, buffers.limit = maxconn/10, buffers.reserve = limit/2).
Below is an example of scenario which used to cause a deadlock previously :
- connection is received
- two buffers are allocated in process_session() then released
- one is allocated when receiving an HTTP request
- the second buffer is allocated then released in process_session()
for request parsing then connection establishment.
- poll() says we can send, so the request buffer is sent and released
- process session gets notified that the connection is now established
and allocates two buffers then releases them
- all other sessions do the same till one cannot get the request buffer
without hitting the margin
- and now the server responds. stream_interface allocates the response
buffer and manages to get it since it's higher priority being for a
response.
- but process_session() cannot allocate the request buffer anymore
=> We could end up with all buffers used by responses so that none may
be allocated for a request in process_session().
When the applet processing leaves the session context, the test will have
to be changed so that we always allocate a response buffer regardless of
the left side (eg: H2->H1 gateway). A final improvement would consists in
being able to only retry the failed I/O operation without waking up a
task, but to date all experiments to achieve this have proven not to be
reliable enough.
2014-11-26 19:11:56 -05:00
|
|
|
if (!pool2_buffer)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
/* The reserved buffer is what we leave behind us. Thus we always need
|
|
|
|
|
* at least one extra buffer in minavail otherwise we'll end up waking
|
|
|
|
|
* up tasks with no memory available, causing a lot of useless wakeups.
|
|
|
|
|
* That means that we always want to have at least 3 buffers available
|
|
|
|
|
* (2 for current session, one for next session that might be needed to
|
|
|
|
|
* release a server connection).
|
|
|
|
|
*/
|
|
|
|
|
pool2_buffer->minavail = MAX(global.tune.reserved_bufs, 3);
|
2014-12-23 16:52:37 -05:00
|
|
|
if (global.tune.buf_limit)
|
|
|
|
|
pool2_buffer->limit = global.tune.buf_limit;
|
MAJOR: session: only wake up as many sessions as available buffers permit
We've already experimented with three wake up algorithms when releasing
buffers : the first naive one used to wake up far too many sessions,
causing many of them not to get any buffer. The second approach which
was still in use prior to this patch consisted in waking up either 1
or 2 sessions depending on the number of FDs we had released. And this
was still inaccurate. The third one tried to cover the accuracy issues
of the second and took into consideration the number of FDs the sessions
would be willing to use, but most of the time we ended up waking up too
many of them for nothing, or deadlocking by lack of buffers.
This patch completely removes the need to allocate two buffers at once.
Instead it splits allocations into critical and non-critical ones and
implements a reserve in the pool for this. The deadlock situation happens
when all buffers are be allocated for requests pending in a maxconn-limited
server queue, because then there's no more way to allocate buffers for
responses, and these responses are critical to release the servers's
connection in order to release the pending requests. In fact maxconn on
a server creates a dependence between sessions and particularly between
oldest session's responses and latest session's requests. Thus, it is
mandatory to get a free buffer for a response in order to release a
server connection which will permit to release a request buffer.
Since we definitely have non-symmetrical buffers, we need to implement
this logic in the buffer allocation mechanism. What this commit does is
implement a reserve of buffers which can only be allocated for responses
and that will never be allocated for requests. This is made possible by
the requester indicating how much margin it wants to leave after the
allocation succeeds. Thus it is a cooperative allocation mechanism : the
requester (process_session() in general) prefers not to get a buffer in
order to respect other's need for response buffers. The session management
code always knows if a buffer will be used for requests or responses, so
that is not difficult :
- either there's an applet on the initiator side and we really need
the request buffer (since currently the applet is called in the
context of the session)
- or we have a connection and we really need the response buffer (in
order to support building and sending an error message back)
This reserve ensures that we don't take all allocatable buffers for
requests waiting in a queue. The downside is that all the extra buffers
are really allocated to ensure they can be allocated. But with small
values it is not an issue.
With this change, we don't observe any more deadlocks even when running
with maxconn 1 on a server under severely constrained memory conditions.
The code becomes a bit tricky, it relies on the scheduler's run queue to
estimate how many sessions are already expected to run so that it doesn't
wake up everyone with too few resources. A better solution would probably
consist in having two queues, one for urgent requests and one for normal
requests. A failed allocation for a session dealing with an error, a
connection event, or the need for a response (or request when there's an
applet on the left) would go to the urgent request queue, while other
requests would go to the other queue. Urgent requests would be served
from 1 entry in the pool, while the regular ones would be served only
according to the reserve. Despite not yet having this, it works
remarkably well.
This mechanism is quite efficient, we don't perform too many wake up calls
anymore. For 1 million sessions elapsed during massive memory contention,
we observe about 4.5M calls to process_session() compared to 4.0M without
memory constraints. Previously we used to observe up to 16M calls, which
rougly means 12M failures.
During a test run under high memory constraints (limit enforced to 27 MB
instead of the 58 MB normally needed), performance used to drop by 53% prior
to this patch. Now with this patch instead it *increases* by about 1.5%.
The best effect of this change is that by limiting the memory usage to about
2/3 to 3/4 of what is needed by default, it's possible to increase performance
by up to about 18% mainly due to the fact that pools are reused more often
and remain hot in the CPU cache (observed on regular HTTP traffic with 20k
objects, buffers.limit = maxconn/10, buffers.reserve = limit/2).
Below is an example of scenario which used to cause a deadlock previously :
- connection is received
- two buffers are allocated in process_session() then released
- one is allocated when receiving an HTTP request
- the second buffer is allocated then released in process_session()
for request parsing then connection establishment.
- poll() says we can send, so the request buffer is sent and released
- process session gets notified that the connection is now established
and allocates two buffers then releases them
- all other sessions do the same till one cannot get the request buffer
without hitting the margin
- and now the server responds. stream_interface allocates the response
buffer and manages to get it since it's higher priority being for a
response.
- but process_session() cannot allocate the request buffer anymore
=> We could end up with all buffers used by responses so that none may
be allocated for a request in process_session().
When the applet processing leaves the session context, the test will have
to be changed so that we always allocate a response buffer regardless of
the left side (eg: H2->H1 gateway). A final improvement would consists in
being able to only retry the failed I/O operation without waking up a
task, but to date all experiments to achieve this have proven not to be
reliable enough.
2014-11-26 19:11:56 -05:00
|
|
|
|
|
|
|
|
buffer = pool_refill_alloc(pool2_buffer, pool2_buffer->minavail - 1);
|
|
|
|
|
if (!buffer)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
pool_free2(pool2_buffer, buffer);
|
|
|
|
|
return 1;
|
2012-10-12 17:49:43 -04:00
|
|
|
}
|
|
|
|
|
|
2012-08-27 16:08:00 -04:00
|
|
|
/* This function writes the string <str> at position <pos> which must be in
 * buffer <b>, and moves <end> just after the end of <str>. <b>'s parameters
 * <l> and <r> are updated to be valid after the shift. The shift value
 * (positive or negative) is returned. If there's no space left, the move is
 * not done. The function does not adjust ->o because it does not make sense to
 * use it on data scheduled to be sent. For the same reason, it does not make
 * sense to call this function on unparsed data, so <orig> is not updated. The
 * string length is taken from parameter <len>. If <len> is null, the <str>
 * pointer is allowed to be null.
 */
int buffer_replace2(struct buffer *b, char *pos, char *end, const char *str, int len)
{
	int delta;

	/* net change in buffer occupancy : positive grows, negative shrinks */
	delta = len - (end - pos);

	if (bi_end(b) + delta > b->data + b->size)
		return 0;  /* no space left */

	/* when input data wraps, bi_end() points before bo_ptr(); refuse the
	 * shift if it would run into pending output data.
	 */
	if (buffer_not_empty(b) &&
	    bi_end(b) + delta > bo_ptr(b) &&
	    bo_ptr(b) >= bi_end(b))
		return 0;  /* no space left before wrapping data */

	/* first, protect the end of the buffer */
	memmove(end + delta, end, bi_end(b) - end);

	/* now, copy str over pos */
	if (len)
		memcpy(pos, str, len);

	b->i += delta;

	/* realign an emptied buffer so the next fill starts at the origin */
	if (buffer_empty(b))
		b->p = b->data;

	return delta;
}
|
|
|
|
|
|
|
|
|
|
/*
 * Inserts <str> followed by "\r\n" at position <pos> in buffer <b>. The <len>
 * argument informs about the length of string <str> so that we don't have to
 * measure it. It does not include the "\r\n". If <str> is NULL, then the buffer
 * is only opened for len+2 bytes but nothing is copied in. It may be useful in
 * some circumstances. The send limit is *not* adjusted. Same comments as above
 * for the valid use cases.
 *
 * The number of bytes added is returned on success. 0 is returned on failure.
 */
int buffer_insert_line2(struct buffer *b, char *pos, const char *str, int len)
{
	int delta;

	/* room needed : the string plus the trailing CRLF */
	delta = len + 2;

	if (bi_end(b) + delta >= b->data + b->size)
		return 0;  /* no space left */

	/* when the buffer is full, bi_end() wraps back to b->data and the
	 * check above passes by accident; this second test rejects the case
	 * where the shifted input would collide with wrapped output data.
	 */
	if (buffer_not_empty(b) &&
	    bi_end(b) + delta > bo_ptr(b) &&
	    bo_ptr(b) >= bi_end(b))
		return 0;  /* no space left before wrapping data */

	/* first, protect the end of the buffer */
	memmove(pos + delta, pos, bi_end(b) - pos);

	/* now, copy str over pos */
	if (len && str) {
		memcpy(pos, str, len);
		pos[len] = '\r';
		pos[len + 1] = '\n';
	}

	b->i += delta;
	return delta;
}
|
|
|
|
|
|
2015-07-02 06:50:23 -04:00
|
|
|
/* This function realigns a possibly wrapping buffer so that the input part is
 * contiguous and starts at the beginning of the buffer and the output part
 * ends at the end of the buffer. This provides the best conditions since it
 * allows the largest inputs to be processed at once and ensures that once the
 * output data leaves, the whole buffer is available at once.
 *
 * NOTE(review): relies on the external scratch area <swap_buffer>, assumed to
 * be at least buf->size bytes — declared elsewhere, confirm before reuse.
 */
void buffer_slow_realign(struct buffer *buf)
{
	int block1 = buf->o;
	int block2 = 0;

	/* process output data in two steps to cover wrapping */
	if (block1 > buf->p - buf->data) {
		/* output wraps : block2 is the part at the storage start */
		block2 = buf->p - buf->data;
		block1 -= block2;
	}
	/* stage output at the *end* of the scratch area, in order */
	memcpy(swap_buffer + buf->size - buf->o, bo_ptr(buf), block1);
	memcpy(swap_buffer + buf->size - block2, buf->data, block2);

	/* process input data in two steps to cover wrapping */
	block1 = buf->i;
	block2 = 0;

	if (block1 > buf->data + buf->size - buf->p) {
		/* input wraps : block2 is the part at the storage start */
		block1 = buf->data + buf->size - buf->p;
		block2 = buf->i - block1;
	}
	/* stage input at the *start* of the scratch area, in order */
	memcpy(swap_buffer, bi_ptr(buf), block1);
	memcpy(swap_buffer + block1, buf->data, block2);

	/* reinject changes into the buffer : input at the front, output
	 * flush-right against the end of the storage area.
	 */
	memcpy(buf->data, swap_buffer, buf->i);
	memcpy(buf->data + buf->size - buf->o, swap_buffer + buf->size - buf->o, buf->o);

	buf->p = buf->data;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Realigns a possibly non-contiguous buffer by bouncing bytes from source to
 * destination. It does not use any intermediate buffer and does the move in
 * place, though it will be slower than a simple memmove() on contiguous data,
 * so it's desirable to use it only on non-contiguous buffers. No pointers are
 * changed, the caller is responsible for that.
 *
 * The algorithm rotates the used bytes forward by <advance> positions (with
 * wrap-around) by following permutation cycles : each inner loop carries one
 * displaced byte to its destination until a cycle closes or the destination
 * falls in the free area, then <from> moves on to start the next cycle.
 */
void buffer_bounce_realign(struct buffer *buf)
{
	int advance, to_move;
	char *from, *to;

	/* distance each byte must travel so output ends at the storage end */
	from = bo_ptr(buf);
	advance = buf->data + buf->size - from;
	if (!advance)
		return;

	/* total number of used bytes still to relocate */
	to_move = buffer_len(buf);
	while (to_move) {
		char last, save;

		last = *from;
		to = from + advance;
		if (to >= buf->data + buf->size)
			to -= buf->size;

		while (1) {
			save = *to;
			*to = last;
			last = save;
			to_move--;
			if (!to_move)
				break;

			/* check if we went back home after rotating a number of bytes */
			if (to == from)
				break;

			/* if we ended up in the empty area, let's walk to next place. The
			 * empty area is either between buf->r and from or before from or
			 * after buf->r.
			 */
			if (from > bi_end(buf)) {
				if (to >= bi_end(buf) && to < from)
					break;
			} else if (from < bi_end(buf)) {
				if (to < from || to >= bi_end(buf))
					break;
			}

			/* we have overwritten a byte of the original set, let's move it */
			to += advance;
			if (to >= buf->data + buf->size)
				to -= buf->size;
		}

		from++;
		if (from >= buf->data + buf->size)
			from -= buf->size;
	}
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Dumps part or all of a buffer.
|
|
|
|
|
*/
|
|
|
|
|
void buffer_dump(FILE *o, struct buffer *b, int from, int to)
|
|
|
|
|
{
|
|
|
|
|
fprintf(o, "Dumping buffer %p\n", b);
|
2012-11-22 12:01:40 -05:00
|
|
|
fprintf(o, " data=%p o=%d i=%d p=%p\n"
|
|
|
|
|
" relative: p=0x%04x\n",
|
|
|
|
|
b->data, b->o, b->i, b->p, (unsigned int)(b->p - b->data));
|
2012-08-24 13:22:53 -04:00
|
|
|
|
|
|
|
|
fprintf(o, "Dumping contents from byte %d to byte %d\n", from, to);
|
2012-11-22 12:01:40 -05:00
|
|
|
fprintf(o, " 0 1 2 3 4 5 6 7 8 9 a b c d e f\n");
|
|
|
|
|
/* dump hexa */
|
|
|
|
|
while (from < to) {
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
fprintf(o, " %04x: ", from);
|
|
|
|
|
for (i = 0; ((from + i) < to) && (i < 16) ; i++) {
|
|
|
|
|
fprintf(o, "%02x ", (unsigned char)b->data[from + i]);
|
|
|
|
|
if (((from + i) & 15) == 7)
|
|
|
|
|
fprintf(o, "- ");
|
|
|
|
|
}
|
2013-11-13 21:15:20 -05:00
|
|
|
if (to - from < 16) {
|
2013-11-20 21:21:22 -05:00
|
|
|
int j = 0;
|
|
|
|
|
|
2013-11-13 21:15:20 -05:00
|
|
|
for (j = 0; j < from + 16 - to; j++)
|
|
|
|
|
fprintf(o, " ");
|
2013-11-20 21:21:22 -05:00
|
|
|
if (j > 8)
|
|
|
|
|
fprintf(o, " ");
|
2013-11-13 21:15:20 -05:00
|
|
|
}
|
2012-11-22 12:01:40 -05:00
|
|
|
fprintf(o, " ");
|
|
|
|
|
for (i = 0; (from + i < to) && (i < 16) ; i++) {
|
2012-11-25 18:57:40 -05:00
|
|
|
fprintf(o, "%c", isprint((int)b->data[from + i]) ? b->data[from + i] : '.') ;
|
2012-11-22 12:01:40 -05:00
|
|
|
if ((((from + i) & 15) == 15) && ((from + i) != to-1))
|
|
|
|
|
fprintf(o, "\n");
|
|
|
|
|
}
|
|
|
|
|
from += i;
|
2012-08-24 13:22:53 -04:00
|
|
|
}
|
|
|
|
|
fprintf(o, "\n--\n");
|
2012-11-22 12:01:40 -05:00
|
|
|
fflush(o);
|
2012-08-24 13:22:53 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Local variables:
|
|
|
|
|
* c-indent-level: 8
|
|
|
|
|
* c-basic-offset: 8
|
|
|
|
|
* End:
|
|
|
|
|
*/
|