/*
 * Stream management functions.
 *
 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
# include <stdlib.h>
2010-06-01 11:45:26 -04:00
# include <unistd.h>
2006-06-29 12:54:54 -04:00
2020-06-04 16:29:18 -04:00
# include <import/ebistree.h>
2020-06-04 13:11:43 -04:00
# include <haproxy/acl.h>
2020-06-04 04:15:32 -04:00
# include <haproxy/action.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/activity.h>
# include <haproxy/api.h>
2020-06-04 13:42:41 -04:00
# include <haproxy/applet.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/arg.h>
2020-06-04 16:50:02 -04:00
# include <haproxy/backend.h>
2020-06-04 05:18:28 -04:00
# include <haproxy/capture.h>
2020-06-04 18:00:29 -04:00
# include <haproxy/cfgparse.h>
2020-06-04 15:07:02 -04:00
# include <haproxy/channel.h>
2020-06-04 12:21:56 -04:00
# include <haproxy/check.h>
2020-06-04 14:19:54 -04:00
# include <haproxy/cli.h>
2020-06-04 12:02:10 -04:00
# include <haproxy/connection.h>
2022-03-25 11:43:49 -04:00
# include <haproxy/conn_stream.h>
# include <haproxy/cs_utils.h>
2020-06-03 12:23:19 -04:00
# include <haproxy/dict.h>
2020-06-02 05:28:02 -04:00
# include <haproxy/dynbuf.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/fd.h>
2020-06-04 15:29:29 -04:00
# include <haproxy/filters.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/freq_ctr.h>
2020-06-04 05:23:07 -04:00
# include <haproxy/frontend.h>
2020-06-04 11:05:57 -04:00
# include <haproxy/global.h>
2020-06-04 03:20:54 -04:00
# include <haproxy/hlua.h>
2020-06-04 15:21:03 -04:00
# include <haproxy/http_ana.h>
2020-06-04 05:40:28 -04:00
# include <haproxy/http_rules.h>
2020-06-03 02:44:35 -04:00
# include <haproxy/htx.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/istbuf.h>
2020-06-04 16:01:04 -04:00
# include <haproxy/log.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/pipe.h>
2020-06-02 03:38:52 -04:00
# include <haproxy/pool.h>
2020-06-04 16:29:18 -04:00
# include <haproxy/proxy.h>
2020-06-04 16:59:39 -04:00
# include <haproxy/queue.h>
2020-06-04 17:20:13 -04:00
# include <haproxy/server.h>
2021-02-12 13:42:55 -05:00
# include <haproxy/resolvers.h>
2020-12-10 07:43:57 -05:00
# include <haproxy/sample.h>
2020-06-04 12:58:52 -04:00
# include <haproxy/session.h>
2020-06-04 13:58:55 -04:00
# include <haproxy/stats-t.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/stick_table.h>
2020-06-04 17:46:14 -04:00
# include <haproxy/stream.h>
2020-06-04 11:25:40 -04:00
# include <haproxy/task.h>
2020-06-04 11:42:48 -04:00
# include <haproxy/tcp_rules.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/thread.h>
2020-06-04 17:46:14 -04:00
# include <haproxy/trace.h>
2020-06-04 10:25:31 -04:00
# include <haproxy/vars.h>
2006-06-25 20:48:02 -04:00
2018-11-26 05:58:30 -05:00
DECLARE_POOL ( pool_head_stream , " stream " , sizeof ( struct stream ) ) ;
2020-02-28 09:13:33 -05:00
DECLARE_POOL ( pool_head_uniqueid , " uniqueid " , UNIQUEID_LEN ) ;
2018-11-26 05:58:30 -05:00
2021-02-24 05:29:51 -05:00
/* incremented by each "show sess" to fix a delimiter between streams */
unsigned stream_epoch = 0 ;
2006-06-25 20:48:02 -04:00
2015-09-27 13:29:33 -04:00
/* List of all use-service keywords. */
static struct list service_keywords = LIST_HEAD_INIT ( service_keywords ) ;
2017-08-28 11:18:36 -04:00
2019-11-05 10:18:10 -05:00
/* trace source and events */
static void strm_trace ( enum trace_level level , uint64_t mask ,
const struct trace_source * src ,
const struct ist where , const struct ist func ,
const void * a1 , const void * a2 , const void * a3 , const void * a4 ) ;
/* The event representation is split like this :
* strm - stream
2022-05-17 13:07:51 -04:00
* cs - stream connector
2019-11-05 10:18:10 -05:00
* http - http analyzis
* tcp - tcp analyzis
*
* STRM_EV_ * macros are defined in < proto / stream . h >
*/
static const struct trace_event strm_trace_events [ ] = {
{ . mask = STRM_EV_STRM_NEW , . name = " strm_new " , . desc = " new stream " } ,
{ . mask = STRM_EV_STRM_FREE , . name = " strm_free " , . desc = " release stream " } ,
{ . mask = STRM_EV_STRM_ERR , . name = " strm_err " , . desc = " error during stream processing " } ,
{ . mask = STRM_EV_STRM_ANA , . name = " strm_ana " , . desc = " stream analyzers " } ,
{ . mask = STRM_EV_STRM_PROC , . name = " strm_proc " , . desc = " stream processing " } ,
2022-05-17 13:07:51 -04:00
{ . mask = STRM_EV_CS_ST , . name = " cs_state " , . desc = " processing connector states " } ,
2019-11-05 10:18:10 -05:00
{ . mask = STRM_EV_HTTP_ANA , . name = " http_ana " , . desc = " HTTP analyzers " } ,
{ . mask = STRM_EV_HTTP_ERR , . name = " http_err " , . desc = " error during HTTP analyzis " } ,
{ . mask = STRM_EV_TCP_ANA , . name = " tcp_ana " , . desc = " TCP analyzers " } ,
{ . mask = STRM_EV_TCP_ERR , . name = " tcp_err " , . desc = " error during TCP analyzis " } ,
2022-03-08 09:47:02 -05:00
{ . mask = STRM_EV_FLT_ANA , . name = " flt_ana " , . desc = " Filter analyzers " } ,
{ . mask = STRM_EV_FLT_ERR , . name = " flt_err " , . desc = " error during filter analyzis " } ,
2019-11-05 10:18:10 -05:00
{ }
} ;
static const struct name_desc strm_trace_lockon_args [ 4 ] = {
/* arg1 */ { /* already used by the stream */ } ,
/* arg2 */ { } ,
/* arg3 */ { } ,
/* arg4 */ { }
} ;
static const struct name_desc strm_trace_decoding [ ] = {
# define STRM_VERB_CLEAN 1
{ . name = " clean " , . desc = " only user-friendly stuff, generally suitable for level \" user \" " } ,
# define STRM_VERB_MINIMAL 2
2022-05-17 13:07:51 -04:00
{ . name = " minimal " , . desc = " report info on streams and connectors " } ,
2019-11-05 10:18:10 -05:00
# define STRM_VERB_SIMPLE 3
{ . name = " simple " , . desc = " add info on request and response channels " } ,
# define STRM_VERB_ADVANCED 4
{ . name = " advanced " , . desc = " add info on channel's buffer for data and developer levels only " } ,
# define STRM_VERB_COMPLETE 5
{ . name = " complete " , . desc = " add info on channel's buffer " } ,
{ /* end */ }
} ;
struct trace_source trace_strm = {
. name = IST ( " stream " ) ,
. desc = " Applicative stream " ,
. arg_def = TRC_ARG1_STRM , // TRACE()'s first argument is always a stream
. default_cb = strm_trace ,
. known_events = strm_trace_events ,
. lockon_args = strm_trace_lockon_args ,
. decoding = strm_trace_decoding ,
. report_events = ~ 0 , // report everything by default
} ;
# define TRACE_SOURCE &trace_strm
INITCALL1 ( STG_REGISTER , trace_register_source , TRACE_SOURCE ) ;
/* the stream traces always expect that arg1, if non-null, is of a stream (from
* which we can derive everything ) , that arg2 , if non - null , is an http
* transaction , that arg3 , if non - null , is an http message .
*/
static void strm_trace ( enum trace_level level , uint64_t mask , const struct trace_source * src ,
const struct ist where , const struct ist func ,
const void * a1 , const void * a2 , const void * a3 , const void * a4 )
{
const struct stream * s = a1 ;
const struct http_txn * txn = a2 ;
const struct http_msg * msg = a3 ;
struct task * task ;
const struct channel * req , * res ;
struct htx * htx ;
if ( ! s | | src - > verbosity < STRM_VERB_CLEAN )
return ;
task = s - > task ;
req = & s - > req ;
res = & s - > res ;
htx = ( msg ? htxbuf ( & msg - > chn - > buf ) : NULL ) ;
/* General info about the stream (htx/tcp, id...) */
chunk_appendf ( & trace_buf , " : [%u,%s] " ,
s - > uniq_id , ( ( s - > flags & SF_HTX ) ? " HTX " : " TCP " ) ) ;
2020-03-05 14:19:02 -05:00
if ( isttest ( s - > unique_id ) ) {
chunk_appendf ( & trace_buf , " id= " ) ;
b_putist ( & trace_buf , s - > unique_id ) ;
}
2019-11-05 10:18:10 -05:00
2022-05-17 13:07:51 -04:00
/* Front and back stream connector state */
2022-03-31 03:16:34 -04:00
chunk_appendf ( & trace_buf , " CS=(%s,%s) " ,
2022-05-17 13:40:40 -04:00
cs_state_str ( s - > scf - > state ) , cs_state_str ( s - > scb - > state ) ) ;
2019-11-05 10:18:10 -05:00
/* If txn is defined, HTTP req/rep states */
if ( txn )
chunk_appendf ( & trace_buf , " HTTP=(%s,%s) " ,
h1_msg_state_str ( txn - > req . msg_state ) , h1_msg_state_str ( txn - > rsp . msg_state ) ) ;
if ( msg )
chunk_appendf ( & trace_buf , " %s " , ( ( msg - > chn - > flags & CF_ISRESP ) ? " RESPONSE " : " REQUEST " ) ) ;
if ( src - > verbosity = = STRM_VERB_CLEAN )
return ;
/* If msg defined, display status-line if possible (verbosity > MINIMAL) */
if ( src - > verbosity > STRM_VERB_MINIMAL & & htx & & htx_nbblks ( htx ) ) {
const struct htx_blk * blk = htx_get_head_blk ( htx ) ;
const struct htx_sl * sl = htx_get_blk_ptr ( htx , blk ) ;
enum htx_blk_type type = htx_get_blk_type ( blk ) ;
if ( type = = HTX_BLK_REQ_SL | | type = = HTX_BLK_RES_SL )
chunk_appendf ( & trace_buf , " - \" %.*s %.*s %.*s \" " ,
HTX_SL_P1_LEN ( sl ) , HTX_SL_P1_PTR ( sl ) ,
HTX_SL_P2_LEN ( sl ) , HTX_SL_P2_PTR ( sl ) ,
HTX_SL_P3_LEN ( sl ) , HTX_SL_P3_PTR ( sl ) ) ;
}
/* If txn defined info about HTTP msgs, otherwise info about SI. */
if ( txn ) {
2022-03-30 13:39:30 -04:00
chunk_appendf ( & trace_buf , " - t=%p s=(%p,0x%08x,0x%x) txn.flags=0x%08x, http.flags=(0x%08x,0x%08x) status=%d " ,
task , s , s - > flags , s - > conn_err_type , txn - > flags , txn - > req . flags , txn - > rsp . flags , txn - > status ) ;
2019-11-05 10:18:10 -05:00
}
else {
2022-05-17 13:40:40 -04:00
chunk_appendf ( & trace_buf , " - t=%p s=(%p,0x%08x,0x%x) scf=(%p,%d,0x%08x) scb=(%p,%d,0x%08x) retries=%d " ,
2022-04-04 05:08:42 -04:00
task , s , s - > flags , s - > conn_err_type ,
2022-05-17 13:40:40 -04:00
s - > scf , s - > scf - > state , s - > scf - > flags ,
s - > scb , s - > scb - > state , s - > scb - > flags ,
2022-04-04 05:08:42 -04:00
s - > conn_retries ) ;
2019-11-05 10:18:10 -05:00
}
if ( src - > verbosity = = STRM_VERB_MINIMAL )
return ;
/* If txn defined, don't display all channel info */
if ( src - > verbosity = = STRM_VERB_SIMPLE | | txn ) {
chunk_appendf ( & trace_buf , " req=(%p .fl=0x%08x .exp(r,w,a)=(%u,%u,%u)) " ,
req , req - > flags , req - > rex , req - > wex , req - > analyse_exp ) ;
chunk_appendf ( & trace_buf , " res=(%p .fl=0x%08x .exp(r,w,a)=(%u,%u,%u)) " ,
res , res - > flags , res - > rex , res - > wex , res - > analyse_exp ) ;
}
else {
chunk_appendf ( & trace_buf , " req=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u) " ,
req , req - > flags , req - > analysers , req - > rex , req - > wex , req - > analyse_exp ,
BUILD/MINOR: trace: fix use of long type in a few printf format strings
Building on a 32-bit platform produces these warnings in trace code:
src/stream.c: In function 'strm_trace':
src/stream.c:226:29: warning: format '%lu' expects argument of type 'long unsigned int', but argument 9 has type 'size_t {aka const unsigned int}' [-Wformat=]
chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u)",
^
src/stream.c:229:29: warning: format '%lu' expects argument of type 'long unsigned int', but argument 9 has type 'size_t {aka const unsigned int}' [-Wformat=]
chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u)",
^
src/mux_fcgi.c: In function 'fcgi_trace':
src/mux_fcgi.c:443:29: warning: format '%lu' expects argument of type 'long unsigned int', but argument 3 has type 'size_t {aka const unsigned int}' [-Wformat=]
chunk_appendf(&trace_buf, " - VAL=%lu", *val);
^
src/mux_h1.c: In function 'h1_trace':
src/mux_h1.c:290:29: warning: format '%lu' expects argument of type 'long unsigned int', but argument 3 has type 'size_t {aka const unsigned int}' [-Wformat=]
chunk_appendf(&trace_buf, " - VAL=%lu", *val);
^
Let's just cast the type to long. This should be backported to 2.1.
2019-11-27 09:41:31 -05:00
( long ) req - > output , req - > total , req - > to_forward ) ;
2019-11-05 10:18:10 -05:00
chunk_appendf ( & trace_buf , " res=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u) " ,
res , res - > flags , res - > analysers , res - > rex , res - > wex , res - > analyse_exp ,
BUILD/MINOR: trace: fix use of long type in a few printf format strings
Building on a 32-bit platform produces these warnings in trace code:
src/stream.c: In function 'strm_trace':
src/stream.c:226:29: warning: format '%lu' expects argument of type 'long unsigned int', but argument 9 has type 'size_t {aka const unsigned int}' [-Wformat=]
chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u)",
^
src/stream.c:229:29: warning: format '%lu' expects argument of type 'long unsigned int', but argument 9 has type 'size_t {aka const unsigned int}' [-Wformat=]
chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .ana=0x%08x .exp(r,w,a)=(%u,%u,%u) .o=%lu .tot=%llu .to_fwd=%u)",
^
src/mux_fcgi.c: In function 'fcgi_trace':
src/mux_fcgi.c:443:29: warning: format '%lu' expects argument of type 'long unsigned int', but argument 3 has type 'size_t {aka const unsigned int}' [-Wformat=]
chunk_appendf(&trace_buf, " - VAL=%lu", *val);
^
src/mux_h1.c: In function 'h1_trace':
src/mux_h1.c:290:29: warning: format '%lu' expects argument of type 'long unsigned int', but argument 3 has type 'size_t {aka const unsigned int}' [-Wformat=]
chunk_appendf(&trace_buf, " - VAL=%lu", *val);
^
Let's just cast the type to long. This should be backported to 2.1.
2019-11-27 09:41:31 -05:00
( long ) res - > output , res - > total , res - > to_forward ) ;
2019-11-05 10:18:10 -05:00
}
if ( src - > verbosity = = STRM_VERB_SIMPLE | |
( src - > verbosity = = STRM_VERB_ADVANCED & & src - > level < TRACE_LEVEL_DATA ) )
return ;
/* channels' buffer info */
if ( s - > flags & SF_HTX ) {
struct htx * rqhtx = htxbuf ( & req - > buf ) ;
struct htx * rphtx = htxbuf ( & res - > buf ) ;
chunk_appendf ( & trace_buf , " htx=(%u/%u#%u, %u/%u#%u) " ,
rqhtx - > data , rqhtx - > size , htx_nbblks ( rqhtx ) ,
rphtx - > data , rphtx - > size , htx_nbblks ( rphtx ) ) ;
}
else {
chunk_appendf ( & trace_buf , " buf=(%u@%p+%u/%u, %u@%p+%u/%u) " ,
( unsigned int ) b_data ( & req - > buf ) , b_orig ( & req - > buf ) ,
( unsigned int ) b_head_ofs ( & req - > buf ) , ( unsigned int ) b_size ( & req - > buf ) ,
2022-03-08 09:48:55 -05:00
( unsigned int ) b_data ( & res - > buf ) , b_orig ( & res - > buf ) ,
( unsigned int ) b_head_ofs ( & res - > buf ) , ( unsigned int ) b_size ( & res - > buf ) ) ;
2019-11-05 10:18:10 -05:00
}
/* If msg defined, display htx info if defined (level > USER) */
if ( src - > level > TRACE_LEVEL_USER & & htx & & htx_nbblks ( htx ) ) {
int full = 0 ;
/* Full htx info (level > STATE && verbosity > SIMPLE) */
if ( src - > level > TRACE_LEVEL_STATE ) {
if ( src - > verbosity = = STRM_VERB_COMPLETE )
full = 1 ;
}
chunk_memcat ( & trace_buf , " \n \t " , 2 ) ;
htx_dump ( & trace_buf , htx , full ) ;
}
}
2022-05-17 13:07:51 -04:00
/* Upgrade an existing stream for stream connector <cs>. Return < 0 on error. This
2021-12-20 09:34:16 -05:00
* is only valid right after a TCP to H1 upgrade . The stream should be
* " reativated " by removing SF_IGNORE flag . And the right mode must be set . On
BUG/MEDIUM: stream: Xfer the input buffer to a fully created stream
The input buffer passed as argument to create a new stream must not be
transferred when the request channel is initialized because the channel
flags are not set at this stage. In addition, the API is a bit confusing
regarding the buffer owner when an error occurred. The caller remains the
owner, but reading the code it is not obvious.
So, first of all, to avoid any ambiguities, comments are added on the
calling chain to make it clear. The buffer owner is the caller if any error
occurred. And the ownership is transferred to the stream on success.
Then, to make things simple, the ownership is transferred at the end of
stream_new(), in case of success. And the input buffer is updated to point
on BUF_NULL. Thus, in all cases, if the caller try to release it calling
b_free() on it, it is not a problem. Of course, it remains the caller
responsibility to release it on error.
The patch fixes a bug introduced by the commit 26256f86e ("MINOR: stream:
Pass an optional input buffer when a stream is created"). No backport is
needed.
2020-12-04 10:47:41 -05:00
* success , < input > buffer is transferred to the stream and thus points to
* BUF_NULL . On error , it is unchanged and it is the caller responsibility to
MINOR: stream: Add a function to validate TCP to H1 upgrades
TCP to H1 upgrades are buggy for now. When such upgrade is performed, a
crash is experienced. The bug is the result of the recent H1 mux
refactoring, and more specifically because of the commit c4bfa59f1 ("MAJOR:
mux-h1: Create the client stream as later as possible"). Indeed, now the H1
mux is responsible to create the frontend conn-stream once the request
headers are fully received. Thus the TCP to H1 upgrade is a problem because
the frontend conn-stream already exists.
To fix the bug, we must keep this conn-stream and the associate stream and
use it in the H1 mux. To do so, the upgrade will be performed in two
steps. First, the mux is upgraded from mux-pt to mux-h1. Then, the mux-h1
performs the stream upgrade, once the request headers are fully received and
parsed. To do so, stream_upgrade_from_cs() must be used. This function set
the SF_HTX flags to switch the stream to HTX mode, it removes the SF_IGNORE
flags and eventually it fills the request channel with some input data.
This patch is required to fix the TCP to H1 upgrades and is intimately
linked with the next commits.
2021-01-21 11:36:12 -05:00
* release it ( this never happens for now ) .
*/
2022-05-17 13:07:51 -04:00
int stream_upgrade_from_cs ( struct stconn * cs , struct buffer * input )
MINOR: stream: Add a function to validate TCP to H1 upgrades
TCP to H1 upgrades are buggy for now. When such upgrade is performed, a
crash is experienced. The bug is the result of the recent H1 mux
refactoring, and more specifically because of the commit c4bfa59f1 ("MAJOR:
mux-h1: Create the client stream as later as possible"). Indeed, now the H1
mux is responsible to create the frontend conn-stream once the request
headers are fully received. Thus the TCP to H1 upgrade is a problem because
the frontend conn-stream already exists.
To fix the bug, we must keep this conn-stream and the associate stream and
use it in the H1 mux. To do so, the upgrade will be performed in two
steps. First, the mux is upgraded from mux-pt to mux-h1. Then, the mux-h1
performs the stream upgrade, once the request headers are fully received and
parsed. To do so, stream_upgrade_from_cs() must be used. This function set
the SF_HTX flags to switch the stream to HTX mode, it removes the SF_IGNORE
flags and eventually it fills the request channel with some input data.
This patch is required to fix the TCP to H1 upgrades and is intimately
linked with the next commits.
2021-01-21 11:36:12 -05:00
{
2022-05-18 10:10:52 -04:00
struct stream * s = __sc_strm ( cs ) ;
2022-05-18 11:51:19 -04:00
const struct mux_ops * mux = sc_mux_ops ( cs ) ;
2021-12-17 11:28:35 -05:00
2022-02-28 03:09:05 -05:00
if ( mux ) {
2021-12-20 09:34:16 -05:00
if ( mux - > flags & MX_FL_HTX )
s - > flags | = SF_HTX ;
}
MINOR: stream: Add a function to validate TCP to H1 upgrades
TCP to H1 upgrades are buggy for now. When such upgrade is performed, a
crash is experienced. The bug is the result of the recent H1 mux
refactoring, and more specifically because of the commit c4bfa59f1 ("MAJOR:
mux-h1: Create the client stream as later as possible"). Indeed, now the H1
mux is responsible to create the frontend conn-stream once the request
headers are fully received. Thus the TCP to H1 upgrade is a problem because
the frontend conn-stream already exists.
To fix the bug, we must keep this conn-stream and the associate stream and
use it in the H1 mux. To do so, the upgrade will be performed in two
steps. First, the mux is upgraded from mux-pt to mux-h1. Then, the mux-h1
performs the stream upgrade, once the request headers are fully received and
parsed. To do so, stream_upgrade_from_cs() must be used. This function set
the SF_HTX flags to switch the stream to HTX mode, it removes the SF_IGNORE
flags and eventually it fills the request channel with some input data.
This patch is required to fix the TCP to H1 upgrades and is intimately
linked with the next commits.
2021-01-21 11:36:12 -05:00
if ( ! b_is_null ( input ) ) {
/* Xfer the input buffer to the request channel. <input> will
* than point to BUF_NULL . From this point , it is the stream
* responsibility to release it .
*/
s - > req . buf = * input ;
* input = BUF_NULL ;
s - > req . total = ( IS_HTX_STRM ( s ) ? htxbuf ( & s - > req . buf ) - > data : b_data ( & s - > req . buf ) ) ;
s - > req . flags | = ( s - > req . total ? CF_READ_PARTIAL : 0 ) ;
}
s - > flags & = ~ SF_IGNORE ;
task_wakeup ( s - > task , TASK_WOKEN_INIT ) ;
return 0 ;
}
2018-11-06 09:50:21 -05:00
/* Callback used to wake up a stream when an input buffer is available. The
2022-05-17 13:07:51 -04:00
* stream < s > ' s stream connectors are checked for a failed buffer allocation
2022-05-17 11:04:55 -04:00
* as indicated by the presence of the SE_FL_RXBLK_ROOM flag and the lack of a
2018-11-06 09:50:21 -05:00
* buffer , and and input buffer is assigned there ( at most one ) . The function
* returns 1 and wakes the stream up if a buffer was taken , otherwise zero .
* It ' s designed to be called from __offer_buffer ( ) .
*/
int stream_buf_available ( void * arg )
{
struct stream * s = arg ;
2022-05-17 13:40:40 -04:00
if ( ! s - > req . buf . size & & ! s - > req . pipe & & sc_ep_test ( s - > scf , SE_FL_RXBLK_BUFF ) & &
2021-03-22 09:44:31 -04:00
b_alloc ( & s - > req . buf ) )
2022-05-25 01:48:07 -04:00
sc_have_buff ( s - > scf ) ;
2022-05-17 13:40:40 -04:00
else if ( ! s - > res . buf . size & & ! s - > res . pipe & & sc_ep_test ( s - > scb , SE_FL_RXBLK_BUFF ) & &
2021-03-22 09:44:31 -04:00
b_alloc ( & s - > res . buf ) )
2022-05-25 01:48:07 -04:00
sc_have_buff ( s - > scb ) ;
2018-11-06 09:50:21 -05:00
else
return 0 ;
task_wakeup ( s - > task , TASK_WOKEN_RES ) ;
return 1 ;
}
2015-04-04 12:50:31 -04:00
/* This function is called from the session handler which detects the end of
2015-04-08 12:26:29 -04:00
* handshake , in order to complete initialization of a valid stream . It must be
2018-11-15 12:14:14 -05:00
* called with a completely initialized session . It returns the pointer to
2015-04-08 12:26:29 -04:00
* the newly created stream , or NULL in case of fatal error . The client - facing
2017-08-28 10:22:54 -04:00
* end point is assigned to < origin > , which must be valid . The stream ' s task
* is configured with a nice value inherited from the listener ' s nice if any .
* The task ' s context is set to the new stream , and its function is set to
BUG/MEDIUM: stream: Xfer the input buffer to a fully created stream
The input buffer passed as argument to create a new stream must not be
transferred when the request channel is initialized because the channel
flags are not set at this stage. In addition, the API is a bit confusing
regarding the buffer owner when an error occurred. The caller remains the
owner, but reading the code it is not obvious.
So, first of all, to avoid any ambiguities, comments are added on the
calling chain to make it clear. The buffer owner is the caller if any error
occurred. And the ownership is transferred to the stream on success.
Then, to make things simple, the ownership is transferred at the end of
stream_new(), in case of success. And the input buffer is updated to point
on BUF_NULL. Thus, in all cases, if the caller try to release it calling
b_free() on it, it is not a problem. Of course, it remains the caller
responsibility to release it on error.
The patch fixes a bug introduced by the commit 26256f86e ("MINOR: stream:
Pass an optional input buffer when a stream is created"). No backport is
needed.
2020-12-04 10:47:41 -05:00
* process_stream ( ) . Target and analysers are null . < input > is used as input
* buffer for the request channel and may contain data . On success , it is
* transfer to the stream and < input > is set to BUF_NULL . On error , < input >
* buffer is unchanged and it is the caller responsibility to release it .
2012-08-31 10:01:23 -04:00
*/
2022-05-17 13:07:51 -04:00
struct stream * stream_new ( struct session * sess , struct stconn * cs , struct buffer * input )
2012-08-31 10:01:23 -04:00
{
2015-04-04 12:08:21 -04:00
struct stream * s ;
2017-08-28 10:22:54 -04:00
struct task * t ;
2012-08-31 10:01:23 -04:00
2019-11-05 10:18:10 -05:00
DBG_TRACE_ENTER ( STRM_EV_STRM_NEW ) ;
2017-11-24 11:34:44 -05:00
if ( unlikely ( ( s = pool_alloc ( pool_head_stream ) ) = = NULL ) )
2017-08-28 10:22:54 -04:00
goto out_fail_alloc ;
2015-04-04 12:08:21 -04:00
/* minimum stream initialization required for an embryonic stream is
* fairly low . We need very little to execute L4 ACLs , then we need a
* task to make the client - side connection live on its own .
* - flags
* - stick - entry tracking
*/
s - > flags = 0 ;
2015-04-05 12:19:23 -04:00
s - > logs . logwait = sess - > fe - > to_log ;
2015-04-04 12:08:21 -04:00
s - > logs . level = 0 ;
2015-04-05 06:03:54 -04:00
tv_zero ( & s - > logs . tv_request ) ;
s - > logs . t_queue = - 1 ;
s - > logs . t_connect = - 1 ;
s - > logs . t_data = - 1 ;
s - > logs . t_close = 0 ;
s - > logs . bytes_in = s - > logs . bytes_out = 0 ;
2018-05-11 12:52:31 -04:00
s - > logs . prx_queue_pos = 0 ; /* we get the number of pending conns before us */
s - > logs . srv_queue_pos = 0 ; /* we will get this number soon */
2019-01-21 02:34:50 -05:00
s - > obj_type = OBJ_TYPE_STREAM ;
2015-04-05 06:03:54 -04:00
2020-09-30 08:03:54 -04:00
s - > logs . accept_date = sess - > accept_date ;
s - > logs . tv_accept = sess - > tv_accept ;
s - > logs . t_handshake = sess - > t_handshake ;
2020-09-30 07:49:56 -04:00
s - > logs . t_idle = sess - > t_idle ;
2018-11-29 09:19:05 -05:00
2015-04-05 06:03:54 -04:00
/* default logging function */
s - > do_log = strm_log ;
/* default error reporting function, may be changed by analysers */
s - > srv_error = default_srv_error ;
2015-04-04 12:08:21 -04:00
/* Initialise the current rule list pointer to NULL. We are sure that
* any rulelist match the NULL pointer .
*/
s - > current_rule_list = NULL ;
2015-07-22 11:10:58 -04:00
s - > current_rule = NULL ;
2020-07-28 05:56:13 -04:00
s - > rules_exp = TICK_ETERNITY ;
2022-03-09 11:23:10 -05:00
s - > last_rule_file = NULL ;
s - > last_rule_line = 0 ;
2015-04-04 12:08:21 -04:00
2015-09-21 11:48:24 -04:00
/* Copy SC counters for the stream. We don't touch refcounts because
* any reference we have is inherited from the session . Since the stream
* doesn ' t exist without the session , the session ' s existence guarantees
* we don ' t lose the entry . During the store operation , the stream won ' t
* touch these ones .
2015-08-18 05:34:18 -04:00
*/
2015-08-16 06:03:39 -04:00
memcpy ( s - > stkctr , sess - > stkctr , sizeof ( s - > stkctr ) ) ;
2015-04-04 12:08:21 -04:00
s - > sess = sess ;
2021-02-24 05:29:51 -05:00
s - > stream_epoch = _HA_ATOMIC_LOAD ( & stream_epoch ) ;
2021-04-06 05:57:41 -04:00
s - > uniq_id = _HA_ATOMIC_FETCH_ADD ( & global . req_count , 1 ) ;
2015-04-04 12:08:21 -04:00
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved over there. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
/* OK, we're keeping the stream, so let's properly initialize the stream */
2012-08-31 10:01:23 -04:00
LIST_INIT ( & s - > back_refs ) ;
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocated, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because they
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independent from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will no longer be put to sleep if no buffer is available. So it
is the responsibility of the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 11:30:18 -05:00
2021-02-20 05:49:49 -05:00
LIST_INIT ( & s - > buffer_wait . list ) ;
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocated, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because they
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independent from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will no longer be put to sleep if no buffer is available. So it
is the responsibility of the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 11:30:18 -05:00
s - > buffer_wait . target = s ;
2018-11-06 09:50:21 -05:00
s - > buffer_wait . wakeup_cb = stream_buf_available ;
2013-10-14 15:32:07 -04:00
2021-04-10 17:00:53 -04:00
s - > call_rate . curr_tick = s - > call_rate . curr_ctr = s - > call_rate . prev_ctr = 0 ;
2018-10-26 08:47:40 -04:00
s - > pcli_next_pid = 0 ;
2018-12-11 10:10:57 -05:00
s - > pcli_flags = 0 ;
2020-03-05 14:19:02 -05:00
s - > unique_id = IST_NULL ;
2012-08-31 10:01:23 -04:00
2021-10-01 12:23:30 -04:00
if ( ( t = task_new_here ( ) ) = = NULL )
2017-08-28 10:22:54 -04:00
goto out_fail_alloc ;
2015-04-04 12:08:21 -04:00
s - > task = t ;
BUG/MEDIUM: stream: Save unprocessed events for a stream
A stream can be awakened for different reasons. During its processing, it can be
early stopped if no buffer is available. In this situation, the reason why the
stream was awakened is lost, because we rely on the task state, which is reset
after each processing loop.
In many cases, that's not a big deal. But it can be useful to accumulate the
task states if the stream processing is interrupted, especially if some filters
need to be called.
To be clearer, here is an simple example:
1) A stream is awakened with the reason TASK_WOKEN_MSG.
2) Because no buffer is available, the processing is interrupted, the stream
is back to sleep. And the task state is reset.
3) Some buffers become available, so the stream is awakened with the reason
TASK_WOKEN_RES. At this step, the previous reason (TASK_WOKEN_MSG) is lost.
Now, the task states are saved for a stream and reset only when the stream
processing is not interrupted. The corresponding bitfield represents the pending
events for a stream. And we use this one instead of the task state during the
stream processing.
Note that TASK_WOKEN_TIMER and TASK_WOKEN_RES are always removed because these
events are always handled during the stream processing.
[wt: backport to 1.7 and 1.6]
2016-12-08 16:33:52 -05:00
s - > pending_events = 0 ;
2022-03-29 09:42:09 -04:00
s - > conn_retries = 0 ;
2022-03-29 13:02:31 -04:00
s - > conn_exp = TICK_ETERNITY ;
2022-03-30 13:39:30 -04:00
s - > conn_err_type = STRM_ET_NONE ;
2022-05-17 13:47:17 -04:00
s - > prev_conn_state = SC_ST_INI ;
2015-04-05 18:25:48 -04:00
t - > process = process_stream ;
2012-08-31 10:01:23 -04:00
t - > context = s ;
t - > expire = TICK_ETERNITY ;
2017-08-28 10:22:54 -04:00
if ( sess - > listener )
t - > nice = sess - > listener - > nice ;
2012-08-31 10:01:23 -04:00
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved over there. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
/* Note: initially, the stream's backend points to the frontend.
2012-08-31 10:01:23 -04:00
* This changes later when switching rules are executed or
* when the default backend is assigned .
*/
2015-04-03 09:40:56 -04:00
s - > be = sess - > fe ;
2015-04-03 16:16:32 -04:00
s - > req_cap = NULL ;
s - > res_cap = NULL ;
2010-11-11 04:56:04 -05:00
2015-06-19 05:59:02 -04:00
/* Initialise all the variables contexts even if not used.
* This permits to prune these contexts without errors .
2015-06-06 13:29:07 -04:00
*/
2021-08-31 02:13:25 -04:00
vars_init_head ( & s - > vars_txn , SCOPE_TXN ) ;
vars_init_head ( & s - > vars_reqres , SCOPE_REQ ) ;
2015-06-06 13:29:07 -04:00
2021-12-23 06:06:45 -05:00
/* Set SF_HTX flag for HTTP frontends. */
if ( sess - > fe - > mode = = PR_MODE_HTTP )
s - > flags | = SF_HTX ;
2022-05-17 13:40:40 -04:00
s - > scf = cs ;
if ( cs_attach_strm ( s - > scf , s ) < 0 )
goto out_fail_attach_scf ;
2022-03-23 06:01:09 -04:00
2022-05-17 13:44:42 -04:00
s - > scb = cs_new_from_strm ( s , SC_FL_ISBACK ) ;
2022-05-17 13:40:40 -04:00
if ( ! s - > scb )
goto out_fail_alloc_scb ;
2021-12-22 08:22:03 -05:00
2022-05-17 13:47:17 -04:00
cs_set_state ( s - > scf , SC_ST_EST ) ;
2022-05-17 13:40:40 -04:00
s - > scf - > hcto = sess - > fe - > timeout . clientfin ;
2021-12-23 06:06:45 -05:00
2021-12-23 11:28:17 -05:00
if ( likely ( sess - > fe - > options2 & PR_O2_INDEPSTR ) )
2022-05-17 13:44:42 -04:00
s - > scf - > flags | = SC_FL_INDEP_STR ;
2021-12-23 06:06:45 -05:00
2022-05-17 13:40:40 -04:00
s - > scb - > hcto = TICK_ETERNITY ;
2021-12-23 06:06:45 -05:00
if ( likely ( sess - > fe - > options2 & PR_O2_INDEPSTR ) )
2022-05-17 13:44:42 -04:00
s - > scb - > flags | = SC_FL_INDEP_STR ;
2017-12-20 10:31:43 -05:00
2022-05-17 11:04:55 -04:00
if ( sc_ep_test ( cs , SE_FL_WEBSOCKET ) )
2021-12-20 09:34:16 -05:00
s - > flags | = SF_WEBSOCKET ;
2022-05-18 10:23:22 -04:00
if ( sc_conn ( cs ) ) {
2022-05-18 11:51:19 -04:00
const struct mux_ops * mux = sc_mux_ops ( cs ) ;
2021-12-17 11:28:35 -05:00
2022-03-30 10:31:41 -04:00
if ( mux & & mux - > flags & MX_FL_HTX )
s - > flags | = SF_HTX ;
2021-12-15 05:42:23 -05:00
}
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved over there. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
stream_init_srv_conn ( s ) ;
2016-12-04 18:26:31 -05:00
s - > target = sess - > listener ? sess - > listener - > default_target : NULL ;
2010-06-01 11:45:26 -04:00
s - > pend_pos = NULL ;
2018-05-11 12:52:31 -04:00
s - > priority_class = 0 ;
s - > priority_offset = 0 ;
2010-06-01 11:45:26 -04:00
/* init store persistence */
s - > store_count = 0 ;
BUG/MEDIUM: stream: Xfer the input buffer to a fully created stream
The input buffer passed as argument to create a new stream must not be
transferred when the request channel is initialized because the channel
flags are not set at this stage. In addition, the API is a bit confusing
regarding the buffer owner when an error occurred. The caller remains the
owner, but reading the code it is not obvious.
So, first of all, to avoid any ambiguities, comments are added on the
calling chain to make it clear. The buffer owner is the caller if any error
occurred. And the ownership is transferred to the stream on success.
Then, to make things simple, the ownership is transferred at the end of
stream_new(), in case of success. And the input buffer is updated to point
on BUF_NULL. Thus, in all cases, if the caller try to release it calling
b_free() on it, it is not a problem. Of course, it remains the caller
responsibility to release it on error.
The patch fixes a bug introduced by the commit 26256f86e ("MINOR: stream:
Pass an optional input buffer when a stream is created"). No backport is
needed.
2020-12-04 10:47:41 -05:00
channel_init ( & s - > req ) ;
2014-11-27 14:45:39 -05:00
s - > req . flags | = CF_READ_ATTACHED ; /* the producer is already connected */
BUG/MEDIUM: stream: Use the front analyzers for new listener-less streams
For now, for a stream, request analyzers are set at 2 stages. The first one
is when the stream is created. The session's listener analyzers, if any, are
set on the request channel. In addition, some HTTP analyzers are set for HTX
streams (AN_REQ_WAIT_HTTP and AN_REQ_HTTP_PROCESS_FE). The second one is
when the backend is set on the stream. At the stage, request analyzers are
updated using the backend settings.
It is an issue for client applets because there is no listener attached to
the stream. In addition, it may have no specific/dedicated backend. Thus,
several request analyzers are missing. Among others, the HTTP analyzers for
HTTP applets. The HTTP client is the only one affected for now.
To fix the bug, when a stream is created without a listener, we use the
frontend to set the request analyzers. Note that there is no issue with the
response channel because its analyzers are set when the server connection is
established.
This patch may be backported to all stable versions. Because only the HTTP
client is affected, it must at least be backported to 2.5. It is related to
the issue #1593.
2022-03-07 09:31:46 -05:00
s - > req . analysers = sess - > listener ? sess - > listener - > analysers : sess - > fe - > fe_req_ana ;
2017-11-18 09:39:10 -05:00
2021-03-15 12:09:27 -04:00
if ( IS_HTX_STRM ( s ) ) {
/* Be sure to have HTTP analysers because in case of
* " destructive " stream upgrade , they may be missing ( e . g
* TCP > H2 )
*/
s - > req . analysers | = AN_REQ_WAIT_HTTP | AN_REQ_HTTP_PROCESS_FE ;
}
2017-11-18 09:39:10 -05:00
if ( ! sess - > fe - > fe_req_ana ) {
channel_auto_connect ( & s - > req ) ; /* don't wait to establish connection */
channel_auto_close ( & s - > req ) ; /* let the producer forward close requests */
}
2015-04-05 12:15:59 -04:00
s - > req . rto = sess - > fe - > timeout . client ;
2014-11-27 14:45:39 -05:00
s - > req . wto = TICK_ETERNITY ;
s - > req . rex = TICK_ETERNITY ;
s - > req . wex = TICK_ETERNITY ;
s - > req . analyse_exp = TICK_ETERNITY ;
2014-11-24 05:36:57 -05:00
BUG/MEDIUM: stream: Xfer the input buffer to a fully created stream
The input buffer passed as argument to create a new stream must not be
transferred when the request channel is initialized because the channel
flags are not set at this stage. In addition, the API is a bit confusing
regarding the buffer owner when an error occurred. The caller remains the
owner, but reading the code it is not obvious.
So, first of all, to avoid any ambiguities, comments are added on the
calling chain to make it clear. The buffer owner is the caller if any error
occurred. And the ownership is transferred to the stream on success.
Then, to make things simple, the ownership is transferred at the end of
stream_new(), in case of success. And the input buffer is updated to point
on BUF_NULL. Thus, in all cases, if the caller try to release it calling
b_free() on it, it is not a problem. Of course, it remains the caller
responsibility to release it on error.
The patch fixes a bug introduced by the commit 26256f86e ("MINOR: stream:
Pass an optional input buffer when a stream is created"). No backport is
needed.
2020-12-04 10:47:41 -05:00
channel_init ( & s - > res ) ;
2014-11-28 08:17:09 -05:00
s - > res . flags | = CF_ISRESP ;
2014-11-27 14:45:39 -05:00
s - > res . analysers = 0 ;
2010-06-01 11:45:26 -04:00
2015-04-03 09:40:56 -04:00
if ( sess - > fe - > options2 & PR_O2_NODELAY ) {
2014-11-27 14:45:39 -05:00
s - > req . flags | = CF_NEVER_WAIT ;
s - > res . flags | = CF_NEVER_WAIT ;
2011-05-30 12:10:30 -04:00
}
2015-04-05 12:15:59 -04:00
s - > res . wto = sess - > fe - > timeout . client ;
2014-11-27 14:45:39 -05:00
s - > res . rto = TICK_ETERNITY ;
s - > res . rex = TICK_ETERNITY ;
s - > res . wex = TICK_ETERNITY ;
s - > res . analyse_exp = TICK_ETERNITY ;
2010-06-01 11:45:26 -04:00
2015-04-03 17:46:31 -04:00
s - > txn = NULL ;
2016-12-17 06:45:32 -05:00
s - > hlua = NULL ;
2015-02-16 14:11:43 -05:00
2020-12-23 11:41:43 -05:00
s - > resolv_ctx . requester = NULL ;
s - > resolv_ctx . hostname_dn = NULL ;
s - > resolv_ctx . hostname_dn_len = 0 ;
s - > resolv_ctx . parent = NULL ;
2019-04-23 11:26:33 -04:00
2020-12-10 07:43:53 -05:00
s - > tunnel_timeout = TICK_ETERNITY ;
2021-09-30 13:02:18 -04:00
LIST_APPEND ( & th_ctx - > streams , & s - > list ) ;
2017-06-30 10:23:45 -04:00
2015-11-05 07:35:03 -05:00
if ( flt_stream_init ( s ) < 0 | | flt_stream_start ( s ) < 0 )
MAJOR: filters: Add filters support
This patch adds the support of filters in HAProxy. The main idea is to have a
way to "easily" extend HAProxy by adding some "modules", called filters, that
will be able to change HAProxy behavior in a programmatic way.
To do so, many entry points have been added in the code to let filters hook up to
different steps of the processing. A filter must define a flt_ops structure
(see include/types/filters.h for details). This structure contains all available
callbacks that a filter can define:
struct flt_ops {
/*
* Callbacks to manage the filter lifecycle
*/
int (*init) (struct proxy *p);
void (*deinit)(struct proxy *p);
int (*check) (struct proxy *p);
/*
* Stream callbacks
*/
void (*stream_start) (struct stream *s);
void (*stream_accept) (struct stream *s);
void (*session_establish)(struct stream *s);
void (*stream_stop) (struct stream *s);
/*
* HTTP callbacks
*/
int (*http_start) (struct stream *s, struct http_msg *msg);
int (*http_start_body) (struct stream *s, struct http_msg *msg);
int (*http_start_chunk) (struct stream *s, struct http_msg *msg);
int (*http_data) (struct stream *s, struct http_msg *msg);
int (*http_last_chunk) (struct stream *s, struct http_msg *msg);
int (*http_end_chunk) (struct stream *s, struct http_msg *msg);
int (*http_chunk_trailers)(struct stream *s, struct http_msg *msg);
int (*http_end_body) (struct stream *s, struct http_msg *msg);
void (*http_end) (struct stream *s, struct http_msg *msg);
void (*http_reset) (struct stream *s, struct http_msg *msg);
int (*http_pre_process) (struct stream *s, struct http_msg *msg);
int (*http_post_process) (struct stream *s, struct http_msg *msg);
void (*http_reply) (struct stream *s, short status,
const struct chunk *msg);
};
To declare and use a filter, in the configuration, the "filter" keyword must be
used in a listener/frontend section:
frontend test
...
filter <FILTER-NAME> [OPTIONS...]
The filter referenced by the <FILTER-NAME> must declare a configuration parser
on its own name to fill flt_ops and filter_conf field in the proxy's
structure. An example will be provided later to make it perfectly clear.
For now, filters cannot be used in backend section. But this is only a matter of
time. Documentation will also be added later. This is the first commit of a long
list about filters.
It is possible to have several filters on the same listener/frontend. These
filters are stored in an array of at most MAX_FILTERS elements (define in
include/types/filters.h). Again, this will be replaced later by a list of
filters.
The filter API has been highly refactored. Main changes are:
* Now, HA supports an infinite number of filters per proxy. To do so, filters
are stored in list.
* Because filters are stored in list, filters state has been moved from the
channel structure to the filter structure. This is cleaner because there is no
more info about filters in channel structure.
* It is possible to define filters on backends only. For such filters,
stream_start/stream_stop callbacks are not called. Of course, it is possible
to mix frontend and backend filters.
* Now, TCP streams are also filtered. All callbacks without the 'http_' prefix
are called for all kind of streams. In addition, 2 new callbacks were added to
filter data exchanged through a TCP stream:
- tcp_data: it is called when new data are available or when old unprocessed
data are still waiting.
- tcp_forward_data: it is called when some data can be consumed.
* New callbacks attached to channel were added:
- channel_start_analyze: it is called when a filter is ready to process data
exchanged through a channel. 2 new analyzers (a frontend and a backend)
are attached to channels to call this callback. For a frontend filter, it
is called before any other analyzer. For a backend filter, it is called
when a backend is attached to a stream. So some processing cannot be
filtered in that case.
- channel_analyze: it is called before each analyzer attached to a channel,
except analyzers responsible for data sending.
- channel_end_analyze: it is called when all other analyzers have finished
their processing. A new analyzers is attached to channels to call this
callback. For a TCP stream, this is always the last one called. For a HTTP
one, the callback is called when a request/response ends, so it is called
one time for each request/response.
* 'session_established' callback has been removed. Everything that is done in
this callback can be handled by 'channel_start_analyze' on the response
channel.
* 'http_pre_process' and 'http_post_process' callbacks have been replaced by
'channel_analyze'.
* 'http_start' callback has been replaced by 'http_headers'. This new one is
called just before headers sending and parsing of the body.
* 'http_end' callback has been replaced by 'channel_end_analyze'.
* It is possible to set a forwarder for TCP channels. It was already possible to
do it for HTTP ones.
* Forwarders can partially consumed forwardable data. For this reason a new
HTTP message state was added before HTTP_MSG_DONE : HTTP_MSG_ENDING.
Now all filters can define corresponding callbacks (http_forward_data
and tcp_forward_data). Each filter owns 2 offsets relative to buf->p, next and
forward, to track, respectively, input data already parsed but not forwarded yet
by the filter and parsed data considered as forwarded by the filter. At any time,
we have the guarantee that a filter cannot parse or forward more input than
previous ones. And, of course, it cannot forward more input than it has
parsed. 2 macros have been added to retrieve these offsets: FLT_NXT and FLT_FWD.
In addition, 2 functions have been added to change the 'next size' and the
'forward size' of a filter. When a filter parses input data, it can alter these
data, so the size of these data can vary. This action has an effect on all
previous filters that must be handled. To do so, the function
'filter_change_next_size' must be called, passing the size variation. In the
same spirit, if a filter alter forwarded data, it must call the function
'filter_change_forward_size'. 'filter_change_next_size' can be called in
'http_data' and 'tcp_data' callbacks and only these ones. And
'filter_change_forward_size' can be called in 'http_forward_data' and
'tcp_forward_data' callbacks and only these ones. The data changes are the
filter's responsibility, but with some limitations. It must not change already
parsed/forwarded data or data that previous filters have not parsed/forwarded
yet.
Because filters can be used on backends, when the backend is set for a
stream, we add filters defined for this backend in the filter list of the
stream. But we must only do that when the backend and the frontend of the stream
are not the same. Else same filters are added a second time leading to undefined
behavior.
The HTTP compression code had to be moved.
So it simplifies http_response_forward_body function. To do so, the way the data
are forwarded has changed. Now, a filter (and only one) can forward data. In a
commit to come, this limitation will be removed to let all filters take part to
data forwarding. There are 2 new functions that filters should use to deal with
this feature:
* flt_set_http_data_forwarder: This function sets the filter (using its id)
that will forward data for the specified HTTP message. It is possible if it
was not already set by another filter _AND_ if no data was yet forwarded
(msg->msg_state <= HTTP_MSG_BODY). It returns -1 if an error occurs.
* flt_http_data_forwarder: This function returns the filter id that will
forward data for the specified HTTP message. If there is no forwarder set, it
returns -1.
When an HTTP data forwarder is set for the response, the HTTP compression is
disabled. Of course, this is not definitive.
2015-04-30 05:48:27 -04:00
goto out_fail_accept ;
2010-06-01 11:45:26 -04:00
/* finish initialization of the accepted file descriptor */
2022-05-18 11:58:02 -04:00
if ( sc_appctx ( cs ) )
2022-05-17 13:40:40 -04:00
cs_want_get ( s - > scf ) ;
2010-06-01 11:45:26 -04:00
2015-04-05 12:19:23 -04:00
if ( sess - > fe - > accept & & sess - > fe - > accept ( s ) < 0 )
2015-04-05 05:52:08 -04:00
goto out_fail_accept ;
2010-06-01 11:45:26 -04:00
BUG/MEDIUM: stream: Xfer the input buffer to a fully created stream
The input buffer passed as argument to create a new stream must not be
transferred when the request channel is initialized because the channel
flags are not set at this stage. In addition, the API is a bit confusing
regarding the buffer owner when an error occurred. The caller remains the
owner, but reading the code it is not obvious.
So, first of all, to avoid any ambiguities, comments are added on the
calling chain to make it clear. The buffer owner is the caller if any error
occurred. And the ownership is transferred to the stream on success.
Then, to make things simple, the ownership is transferred at the end of
stream_new(), in case of success. And the input buffer is updated to point
on BUF_NULL. Thus, in all cases, if the caller try to release it calling
b_free() on it, it is not a problem. Of course, it remains the caller
responsibility to release it on error.
The patch fixes a bug introduced by the commit 26256f86e ("MINOR: stream:
Pass an optional input buffer when a stream is created"). No backport is
needed.
2020-12-04 10:47:41 -05:00
if ( ! b_is_null ( input ) ) {
/* Xfer the input buffer to the request channel. <input> will
* than point to BUF_NULL . From this point , it is the stream
* responsibility to release it .
*/
s - > req . buf = * input ;
* input = BUF_NULL ;
2020-12-04 11:22:49 -05:00
s - > req . total = ( IS_HTX_STRM ( s ) ? htxbuf ( & s - > req . buf ) - > data : b_data ( & s - > req . buf ) ) ;
BUG/MEDIUM: stream: Xfer the input buffer to a fully created stream
The input buffer passed as argument to create a new stream must not be
transferred when the request channel is initialized because the channel
flags are not set at this stage. In addition, the API is a bit confusing
regarding the buffer owner when an error occurred. The caller remains the
owner, but reading the code it is not obvious.
So, first of all, to avoid any ambiguities, comments are added on the
calling chain to make it clear. The buffer owner is the caller if any error
occurred. And the ownership is transferred to the stream on success.
Then, to make things simple, the ownership is transferred at the end of
stream_new(), in case of success. And the input buffer is updated to point
on BUF_NULL. Thus, in all cases, if the caller try to release it calling
b_free() on it, it is not a problem. Of course, it remains the caller
responsibility to release it on error.
The patch fixes a bug introduced by the commit 26256f86e ("MINOR: stream:
Pass an optional input buffer when a stream is created"). No backport is
needed.
2020-12-04 10:47:41 -05:00
s - > req . flags | = ( s - > req . total ? CF_READ_PARTIAL : 0 ) ;
}
2010-06-01 11:45:26 -04:00
/* it is important not to call the wakeup function directly but to
* pass through task_wakeup ( ) , because this one knows how to apply
2017-05-29 09:26:51 -04:00
* priorities to tasks . Using multi thread we must be sure that
* stream is fully initialized before calling task_wakeup . So
* the caller must handle the task_wakeup
2010-06-01 11:45:26 -04:00
*/
2019-11-05 10:18:10 -05:00
DBG_TRACE_LEAVE ( STRM_EV_STRM_NEW , s ) ;
2021-12-20 09:34:16 -05:00
task_wakeup ( s - > task , TASK_WOKEN_INIT ) ;
2015-04-05 06:00:52 -04:00
return s ;
2010-06-01 11:45:26 -04:00
/* Error unrolling */
2015-04-05 05:52:08 -04:00
out_fail_accept :
2015-11-05 07:35:03 -05:00
flt_stream_release ( s , 0 ) ;
2021-04-21 01:32:39 -04:00
LIST_DELETE ( & s - > list ) ;
2022-05-17 13:40:40 -04:00
out_fail_attach_scf :
cs_free ( s - > scb ) ;
out_fail_alloc_scb :
2021-12-23 06:06:45 -05:00
task_destroy ( t ) ;
2017-08-28 10:22:54 -04:00
out_fail_alloc :
2017-11-24 11:34:44 -05:00
pool_free ( pool_head_stream , s ) ;
2019-11-05 10:18:10 -05:00
DBG_TRACE_DEVEL ( " leaving on error " , STRM_EV_STRM_NEW | STRM_EV_STRM_ERR ) ;
2015-04-05 06:00:52 -04:00
return NULL ;
2010-06-01 11:45:26 -04:00
}
2006-06-25 20:48:02 -04:00
/*
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved over there. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
* frees the context associated to a stream . It must have been removed first .
2006-06-25 20:48:02 -04:00
*/
2022-05-12 08:56:55 -04:00
void stream_free ( struct stream * s )
2006-06-25 20:48:02 -04:00
{
2015-04-03 13:19:59 -04:00
struct session * sess = strm_sess ( s ) ;
struct proxy * fe = sess - > fe ;
2008-12-07 14:16:23 -05:00
struct bref * bref , * back ;
2010-06-06 12:28:49 -04:00
int i ;
2007-01-07 09:46:13 -05:00
2019-11-05 10:18:10 -05:00
DBG_TRACE_POINT ( STRM_EV_STRM_FREE , s ) ;
2019-05-17 08:20:05 -04:00
/* detach the stream from its own task before even releasing it so
* that walking over a task list never exhibits a dying stream .
*/
s - > task - > context = NULL ;
__ha_barrier_store ( ) ;
2018-07-25 05:13:53 -04:00
pendconn_free ( s ) ;
2008-12-04 03:33:58 -05:00
2012-11-11 18:42:33 -05:00
if ( objt_server ( s - > target ) ) { /* there may be requests left pending in queue */
2015-04-02 19:14:29 -04:00
if ( s - > flags & SF_CURR_SESS ) {
s - > flags & = ~ SF_CURR_SESS ;
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_DEC ( & __objt_server ( s - > target ) - > cur_sess ) ;
2008-11-11 14:20:02 -05:00
}
2021-12-06 02:01:02 -05:00
if ( may_dequeue_tasks ( __objt_server ( s - > target ) , s - > be ) )
process_srv_queue ( __objt_server ( s - > target ) ) ;
2008-11-11 14:20:02 -05:00
}
2008-12-04 03:33:58 -05:00
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 09:04:11 -04:00
if ( unlikely ( s - > srv_conn ) ) {
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
/* the stream still has a reserved slot on a server, but
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 09:04:11 -04:00
* it should normally be only the same as the one above ,
* so this should not happen in fact .
*/
sess_change_server ( s , NULL ) ;
}
2014-11-27 14:45:39 -05:00
if ( s - > req . pipe )
put_pipe ( s - > req . pipe ) ;
2009-01-18 15:56:21 -05:00
2014-11-27 14:45:39 -05:00
if ( s - > res . pipe )
put_pipe ( s - > res . pipe ) ;
2009-01-18 15:56:21 -05:00
MAJOR: session: implement a wait-queue for sessions who need a buffer
When a session_alloc_buffers() fails to allocate one or two buffers,
it subscribes the session to buffer_wq, and waits for another session
to release buffers. It's then removed from the queue and woken up with
TASK_WAKE_RES, and can attempt its allocation again.
We decide to try to wake as many waiters as we release buffers so
that if we release 2 and two waiters need only once, they both have
their chance. We must never come to the situation where we don't wake
enough tasks up.
It's common to release buffers after the completion of an I/O callback,
which can happen even if the I/O could not be performed due to half a
failure on memory allocation. In this situation, we don't want to move
out of the wait queue the session that was just added, otherwise it
will never get any buffer. Thus, we only force ourselves out of the
queue when freeing the session.
Note: at the moment, since session_alloc_buffers() is not used, no task
is subscribed to the wait queue.
2014-11-25 15:10:35 -05:00
/* We may still be present in the buffer wait queue */
2021-04-21 01:32:39 -04:00
if ( LIST_INLIST ( & s - > buffer_wait . list ) )
2021-02-20 05:49:49 -05:00
LIST_DEL_INIT ( & s - > buffer_wait . list ) ;
2020-02-26 04:39:36 -05:00
2018-07-10 11:43:27 -04:00
if ( s - > req . buf . size | | s - > res . buf . size ) {
2021-02-20 06:02:46 -05:00
int count = ! ! s - > req . buf . size + ! ! s - > res . buf . size ;
2019-08-08 02:06:27 -04:00
b_free ( & s - > req . buf ) ;
b_free ( & s - > res . buf ) ;
2021-02-20 06:02:46 -05:00
offer_buffers ( NULL , count ) ;
MAJOR: session: implement a wait-queue for sessions who need a buffer
When a session_alloc_buffers() fails to allocate one or two buffers,
it subscribes the session to buffer_wq, and waits for another session
to release buffers. It's then removed from the queue and woken up with
TASK_WAKE_RES, and can attempt its allocation again.
We decide to try to wake as many waiters as we release buffers so
that if we release 2 and two waiters need only once, they both have
their chance. We must never come to the situation where we don't wake
enough tasks up.
It's common to release buffers after the completion of an I/O callback,
which can happen even if the I/O could not be performed due to half a
failure on memory allocation. In this situation, we don't want to move
out of the wait queue the session that was just added, otherwise it
will never get any buffer. Thus, we only force ourselves out of the
queue when freeing the session.
Note: at the moment, since session_alloc_buffers() is not used, no task
is subscribed to the wait queue.
2014-11-25 15:10:35 -05:00
}
2012-10-12 17:49:43 -04:00
2020-03-05 14:19:02 -05:00
pool_free ( pool_head_uniqueid , s - > unique_id . ptr ) ;
s - > unique_id = IST_NULL ;
2019-02-01 12:10:46 -05:00
2020-02-24 10:26:55 -05:00
flt_stream_stop ( s ) ;
flt_stream_release ( s , 0 ) ;
2016-12-17 06:45:32 -05:00
hlua_ctx_destroy ( s - > hlua ) ;
s - > hlua = NULL ;
2015-04-03 17:46:31 -04:00
if ( s - > txn )
2021-03-08 13:12:58 -05:00
http_destroy_txn ( s ) ;
2010-01-07 16:51:47 -05:00
2012-10-12 11:50:05 -04:00
/* ensure the client-side transport layer is destroyed */
2021-12-20 09:34:16 -05:00
/* Be sure it is useless !! */
/* if (cli_cs) */
/* cs_close(cli_cs); */
2012-10-12 11:50:05 -04:00
2010-06-06 12:28:49 -04:00
for ( i = 0 ; i < s - > store_count ; i + + ) {
if ( ! s - > store [ i ] . ts )
continue ;
stksess_free ( s - > store [ i ] . table , s - > store [ i ] . ts ) ;
s - > store [ i ] . ts = NULL ;
}
2020-12-23 11:41:43 -05:00
if ( s - > resolv_ctx . requester ) {
2020-12-23 12:01:04 -05:00
__decl_thread ( struct resolvers * resolvers = s - > resolv_ctx . parent - > arg . resolv . resolvers ) ;
2020-07-22 05:46:32 -04:00
HA_SPIN_LOCK ( DNS_LOCK , & resolvers - > lock ) ;
2021-02-20 04:46:51 -05:00
ha_free ( & s - > resolv_ctx . hostname_dn ) ;
2020-12-23 11:41:43 -05:00
s - > resolv_ctx . hostname_dn_len = 0 ;
2021-10-20 08:07:31 -04:00
resolv_unlink_resolution ( s - > resolv_ctx . requester ) ;
2020-07-22 05:46:32 -04:00
HA_SPIN_UNLOCK ( DNS_LOCK , & resolvers - > lock ) ;
2019-01-21 02:34:50 -05:00
2020-12-23 11:41:43 -05:00
pool_free ( resolv_requester_pool , s - > resolv_ctx . requester ) ;
s - > resolv_ctx . requester = NULL ;
2019-01-21 02:34:50 -05:00
}
2007-10-16 11:34:28 -04:00
if ( fe ) {
BUG/MEDIUM: stream: Be sure to release allocated captures for TCP streams
All TCP and HTTP captures are stored in 2 arrays, one for the request and
another for the response. In HAPRoxy 1.5, these arrays are part of the HTTP
transaction and thus are released during its cleanup. Because in this version,
the transaction is part of the stream (in 1.5, streams are still called
sessions), the cleanup is always performed, for HTTP and TCP streams.
In HAProxy 1.6, the HTTP transaction was moved out from the stream and is now
dynamically allocated only when required (becaues of an HTTP proxy or an HTTP
sample fetch). In addition, still in 1.6, the captures arrays were moved from
the HTTP transaction to the stream. This way, it is still possible to capture
elements from TCP rules for a full TCP stream. Unfortunately, the release is
still exclusively performed during the HTTP transaction cleanup. Thus, for a TCP
stream where the HTTP transaction is not required, the TCP captures, if any, are
never released.
Now, all captures are released when the stream is freed. This fixes the memory
leak for TCP streams. For streams with an HTTP transaction, the captures are now
released when the transaction is reset and not systematically during its
cleanup.
This patch must be backported as fas as 1.6.
2019-11-07 08:27:52 -05:00
if ( s - > req_cap ) {
struct cap_hdr * h ;
for ( h = fe - > req_cap ; h ; h = h - > next )
pool_free ( h - > pool , s - > req_cap [ h - > index ] ) ;
}
if ( s - > res_cap ) {
struct cap_hdr * h ;
for ( h = fe - > rsp_cap ; h ; h = h - > next )
pool_free ( h - > pool , s - > res_cap [ h - > index ] ) ;
}
2017-11-24 11:34:44 -05:00
pool_free ( fe - > rsp_cap_pool , s - > res_cap ) ;
pool_free ( fe - > req_cap_pool , s - > req_cap ) ;
2006-06-25 20:48:02 -04:00
}
2009-12-22 09:03:09 -05:00
2015-06-06 13:29:07 -04:00
/* Cleanup all variable contexts. */
2018-10-28 08:44:36 -04:00
if ( ! LIST_ISEMPTY ( & s - > vars_txn . head ) )
vars_prune ( & s - > vars_txn , s - > sess , s ) ;
if ( ! LIST_ISEMPTY ( & s - > vars_reqres . head ) )
vars_prune ( & s - > vars_reqres , s - > sess , s ) ;
2015-06-06 13:29:07 -04:00
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
stream_store_counters ( s ) ;
2010-06-14 15:04:55 -04:00
2008-12-07 14:16:23 -05:00
list_for_each_entry_safe ( bref , back , & s - > back_refs , users ) {
2009-02-22 09:17:24 -05:00
/* we have to unlink all watchers. We must not relink them if
2021-02-24 07:46:12 -05:00
* this stream was the last one in the list . This is safe to do
* here because we ' re touching our thread ' s list so we know
* that other streams are not active , and the watchers will
* only touch their node under thread isolation .
2009-02-22 09:17:24 -05:00
*/
2021-02-24 07:46:12 -05:00
LIST_DEL_INIT ( & bref - > users ) ;
2021-09-30 13:02:18 -04:00
if ( s - > list . n ! = & th_ctx - > streams )
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & LIST_ELEM ( s - > list . n , struct stream * , list ) - > back_refs , & bref - > users ) ;
2008-12-07 14:16:23 -05:00
bref - > ref = s - > list . n ;
2021-02-24 07:46:12 -05:00
__ha_barrier_store ( ) ;
2008-12-07 14:16:23 -05:00
}
2021-04-21 01:32:39 -04:00
LIST_DELETE ( & s - > list ) ;
2017-06-30 10:23:45 -04:00
2022-05-17 13:40:40 -04:00
cs_destroy ( s - > scb ) ;
cs_destroy ( s - > scf ) ;
2018-10-11 11:09:14 -04:00
2017-11-24 11:34:44 -05:00
pool_free ( pool_head_stream , s ) ;
2007-07-11 04:42:35 -04:00
/* We may want to free the maximum amount of pools if the proxy is stopping */
2021-10-06 08:24:19 -04:00
if ( fe & & unlikely ( fe - > flags & ( PR_FL_DISABLED | PR_FL_STOPPED ) ) ) {
2017-11-24 11:34:44 -05:00
pool_flush ( pool_head_buffer ) ;
pool_flush ( pool_head_http_txn ) ;
pool_flush ( pool_head_requri ) ;
pool_flush ( pool_head_capture ) ;
pool_flush ( pool_head_stream ) ;
pool_flush ( pool_head_session ) ;
pool_flush ( pool_head_connection ) ;
pool_flush ( pool_head_pendconn ) ;
pool_flush ( fe - > req_cap_pool ) ;
pool_flush ( fe - > rsp_cap_pool ) ;
2007-07-11 04:42:35 -04:00
}
2007-05-13 13:43:47 -04:00
}
2014-11-25 13:46:36 -05:00
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
/* Allocates a work buffer for stream <s>. It is meant to be called inside
* process_stream ( ) . It will only allocate the side needed for the function
2015-04-20 09:52:18 -04:00
* to work fine , which is the response buffer so that an error message may be
* built and returned . Response buffers may be allocated from the reserve , this
* is critical to ensure that a response may always flow and will never block a
* server from releasing a connection . Returns 0 in case of failure , non - zero
* otherwise .
2014-11-25 13:46:36 -05:00
*/
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 11:30:18 -05:00
static int stream_alloc_work_buffer ( struct stream * s )
2014-11-25 13:46:36 -05:00
{
2021-03-22 09:44:31 -04:00
if ( b_alloc ( & s - > res . buf ) )
2014-11-25 13:46:36 -05:00
return 1 ;
return 0 ;
}
/* releases unused buffers after processing. Typically used at the end of the
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 11:30:18 -05:00
* update ( ) functions . It will try to wake up as many tasks / applets as the
* number of buffers that it releases . In practice , most often streams are
* blocked on a single buffer , so it makes sense to try to wake two up when two
* buffers are released at once .
2014-11-25 13:46:36 -05:00
*/
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
void stream_release_buffers ( struct stream * s )
2014-11-25 13:46:36 -05:00
{
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 11:30:18 -05:00
int offer = 0 ;
2014-11-25 13:46:36 -05:00
2018-07-10 03:53:31 -04:00
if ( c_size ( & s - > req ) & & c_empty ( & s - > req ) ) {
2021-02-20 06:02:46 -05:00
offer + + ;
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 11:30:18 -05:00
b_free ( & s - > req . buf ) ;
}
2018-07-10 03:53:31 -04:00
if ( c_size ( & s - > res ) & & c_empty ( & s - > res ) ) {
2021-02-20 06:02:46 -05:00
offer + + ;
2014-11-27 14:45:39 -05:00
b_free ( & s - > res . buf ) ;
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 11:30:18 -05:00
}
2014-11-25 13:46:36 -05:00
MAJOR: session: implement a wait-queue for sessions who need a buffer
When a session_alloc_buffers() fails to allocate one or two buffers,
it subscribes the session to buffer_wq, and waits for another session
to release buffers. It's then removed from the queue and woken up with
TASK_WAKE_RES, and can attempt its allocation again.
We decide to try to wake as many waiters as we release buffers so
that if we release 2 and two waiters need only once, they both have
their chance. We must never come to the situation where we don't wake
enough tasks up.
It's common to release buffers after the completion of an I/O callback,
which can happen even if the I/O could not be performed due to half a
failure on memory allocation. In this situation, we don't want to move
out of the wait queue the session that was just added, otherwise it
will never get any buffer. Thus, we only force ourselves out of the
queue when freeing the session.
Note: at the moment, since session_alloc_buffers() is not used, no task
is subscribed to the wait queue.
2014-11-25 15:10:35 -05:00
/* if we're certain to have at least 1 buffer available, and there is
* someone waiting , we can wake up a waiter and offer them .
*/
BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
When an entity tries to get a buffer, if it cannot be allocted, for example
because the number of buffers which may be allocated per process is limited,
this entity is added in a list (called <buffer_wq>) and wait for an available
buffer.
Historically, the <buffer_wq> list was logically attached to streams because it
were the only entities likely to be added in it. Now, applets can also be
waiting for a free buffer. And with filters, we could imagine to have more other
entities waiting for a buffer. So it make sense to have a generic list.
Anyway, with the current design there is a bug. When an applet failed to get a
buffer, it will wait. But we add the stream attached to the applet in
<buffer_wq>, instead of the applet itself. So when a buffer is available, we
wake up the stream and not the waiting applet. So, it is possible to have
waiting applets and never awakened.
So, now, <buffer_wq> is independant from streams. And we really add the waiting
entity in <buffer_wq>. To be generic, the entity is responsible to define the
callback used to awaken it.
In addition, applets will still request an input buffer when they become
active. But they will not be sleeped anymore if no buffer are available. So this
is the responsibility to the applet I/O handler to check if this buffer is
allocated or not. This way, an applet can decide if this buffer is required or
not and can do additional processing if not.
[wt: backport to 1.7 and 1.6]
2016-12-09 11:30:18 -05:00
if ( offer )
2021-02-20 06:02:46 -05:00
offer_buffers ( s , offer ) ;
2014-11-25 13:46:36 -05:00
}
2007-05-13 13:43:47 -04:00
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved over there. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
void stream_process_counters ( struct stream * s )
2007-11-26 14:15:35 -05:00
{
2015-04-03 08:46:27 -04:00
struct session * sess = s - > sess ;
2007-11-24 16:12:47 -05:00
unsigned long long bytes ;
2012-12-09 09:55:40 -05:00
int i ;
2007-11-24 16:12:47 -05:00
2014-11-27 14:45:39 -05:00
bytes = s - > req . total - s - > logs . bytes_in ;
s - > logs . bytes_in = s - > req . total ;
if ( bytes ) {
2019-03-08 12:54:51 -05:00
_HA_ATOMIC_ADD ( & sess - > fe - > fe_counters . bytes_in , bytes ) ;
_HA_ATOMIC_ADD ( & s - > be - > be_counters . bytes_in , bytes ) ;
2009-10-04 09:43:17 -04:00
2014-11-27 14:45:39 -05:00
if ( objt_server ( s - > target ) )
2021-12-06 02:01:02 -05:00
_HA_ATOMIC_ADD ( & __objt_server ( s - > target ) - > counters . bytes_in , bytes ) ;
2010-06-18 12:33:32 -04:00
2015-04-03 08:46:27 -04:00
if ( sess - > listener & & sess - > listener - > counters )
2019-03-08 12:54:51 -05:00
_HA_ATOMIC_ADD ( & sess - > listener - > counters - > bytes_in , bytes ) ;
2010-06-20 05:56:30 -04:00
2014-11-27 14:45:39 -05:00
for ( i = 0 ; i < MAX_SESS_STKCTR ; i + + ) {
2020-10-06 07:52:40 -04:00
if ( ! stkctr_inc_bytes_in_ctr ( & s - > stkctr [ i ] , bytes ) )
stkctr_inc_bytes_in_ctr ( & sess - > stkctr [ i ] , bytes ) ;
2007-11-26 14:15:35 -05:00
}
2007-11-24 16:12:47 -05:00
}
2014-11-27 14:45:39 -05:00
bytes = s - > res . total - s - > logs . bytes_out ;
s - > logs . bytes_out = s - > res . total ;
if ( bytes ) {
2019-03-08 12:54:51 -05:00
_HA_ATOMIC_ADD ( & sess - > fe - > fe_counters . bytes_out , bytes ) ;
_HA_ATOMIC_ADD ( & s - > be - > be_counters . bytes_out , bytes ) ;
2009-10-04 09:43:17 -04:00
2014-11-27 14:45:39 -05:00
if ( objt_server ( s - > target ) )
2021-12-06 02:01:02 -05:00
_HA_ATOMIC_ADD ( & __objt_server ( s - > target ) - > counters . bytes_out , bytes ) ;
2010-08-03 10:29:52 -04:00
2015-04-03 08:46:27 -04:00
if ( sess - > listener & & sess - > listener - > counters )
2019-03-08 12:54:51 -05:00
_HA_ATOMIC_ADD ( & sess - > listener - > counters - > bytes_out , bytes ) ;
2010-06-20 05:56:30 -04:00
2014-11-27 14:45:39 -05:00
for ( i = 0 ; i < MAX_SESS_STKCTR ; i + + ) {
2020-10-06 07:52:40 -04:00
if ( ! stkctr_inc_bytes_out_ctr ( & s - > stkctr [ i ] , bytes ) )
stkctr_inc_bytes_out_ctr ( & sess - > stkctr [ i ] , bytes ) ;
2007-11-26 14:15:35 -05:00
}
2007-11-24 16:12:47 -05:00
}
}
2006-06-25 20:48:02 -04:00
2022-03-31 03:47:24 -04:00
/*
* Returns a message to the client ; the connection is shut down for read ,
* and the request is cleared so that no server connection can be initiated .
* The buffer is marked for read shutdown on the other side to protect the
* message , and the buffer write is enabled . The message is contained in a
* " chunk " . If it is null , then an empty message is used . The reply buffer does
* not need to be empty before this , and its contents will not be overwritten .
* The primary goal of this function is to return error messages to a client .
*/
void stream_retnclose ( struct stream * s , const struct buffer * msg )
{
struct channel * ic = & s - > req ;
struct channel * oc = & s - > res ;
channel_auto_read ( ic ) ;
channel_abort ( ic ) ;
channel_auto_close ( ic ) ;
channel_erase ( ic ) ;
channel_truncate ( oc ) ;
if ( likely ( msg & & msg - > data ) )
co_inject ( oc , msg - > area , msg - > data ) ;
oc - > wex = tick_add_ifset ( now_ms , oc - > wto ) ;
channel_auto_read ( oc ) ;
channel_auto_close ( oc ) ;
channel_shutr_now ( oc ) ;
}
2020-12-10 07:43:51 -05:00
int stream_set_timeout ( struct stream * s , enum act_timeout_name name , int timeout )
{
2020-12-10 07:43:52 -05:00
switch ( name ) {
case ACT_TIMEOUT_SERVER :
s - > req . wto = timeout ;
s - > res . rto = timeout ;
return 1 ;
2020-12-10 07:43:53 -05:00
case ACT_TIMEOUT_TUNNEL :
s - > tunnel_timeout = timeout ;
return 1 ;
2020-12-10 07:43:52 -05:00
default :
return 0 ;
}
2020-12-10 07:43:51 -05:00
}
2008-11-30 12:47:21 -05:00
/*
2022-05-17 13:47:17 -04:00
* This function handles the transition between the SC_ST_CON state and the
* SC_ST_EST state . It must only be called after switching from SC_ST_CON ( or
* SC_ST_INI or SC_ST_RDY ) to SC_ST_EST , but only when a - > proto is defined .
* Note that it will switch the interface to SC_ST_DIS if we already have
2019-05-21 11:43:50 -04:00
* the CF_SHUTR flag , it means we were able to forward the request , and
* receive the response , before process_stream ( ) had the opportunity to
2022-05-17 13:47:17 -04:00
* make the switch from SC_ST_CON to SC_ST_EST . When that happens , we want
2020-01-09 12:43:15 -05:00
* to go through back_establish ( ) anyway , to make sure the analysers run .
MEDIUM: stream: re-arrange the connection setup status reporting
Till now when a wakeup happens after a connection is attempted, we go
through sess_update_st_con_tcp() to deal with the various possible events,
then to sess_update_st_cer() to deal with a possible error detected by the
former, or to sess_establish() to complete the connection validation. There
are multiple issues in the way this is handled, which have accumulated over
time. One of them is that any spurious wakeup during SI_ST_CON would validate
the READ_ATTACHED flag and wake the analysers up. Another one is that nobody
feels responsible for clearing SI_FL_EXP if it happened at the same time as
a success (and it is present in all reports of loops to date). And another
issue is that aborts cannot happen after a clean connection setup with no
data transfer (since CF_WRITE_NULL is part of CF_WRITE_ACTIVITY). Last, the
flags cleanup work was hackish, added here and there to please the next
function (typically what had to be donne in commit 7a3367cca to work around
the url_param+reuse issue by moving READ_ATTACHED to CON).
This patch performs a significant lift up of this setup code. First, it
makes sure that the state handlers are the ones responsible for the cleanup
of the stuff they rely on. Typically sess_sestablish() will clean up the
SI_FL_EXP flag because if we decided to validate the connection it means
that we want to ignore this late timeout. Second, it splits the CON and
RDY state handlers because the former only has to deal with failures,
timeouts and non-events, while the latter has to deal with partial or
total successes. Third, everything related to connection success was
moved to sess_establish() since it's the only safe place to do so, and
this function is also called at a few places to deal with synchronous
connections, which are not seen by intermediary state handlers.
The code was made a bit more robust, for example by making sure we
always set SI_FL_NOLINGER when aborting a connection so that we don't
have any risk to leave a connection in SHUTW state in case it was
validated late. The useless return codes of some of these functions
were dropped so that callers only rely on the stream-int's state now
(which was already partially the case anyway).
The code is now a bit cleaner, could be further improved (and functions
renamed) but given the sensitivity of this part, better limit changes to
strictly necessary. It passes all reg tests.
2019-06-05 12:02:04 -04:00
* Timeouts are cleared . Error are reported on the channel so that analysers
* can handle them .
2008-11-30 12:47:21 -05:00
*/
2020-01-09 12:43:15 -05:00
static void back_establish ( struct stream * s )
2008-11-30 12:47:21 -05:00
{
2022-05-18 10:23:22 -04:00
struct connection * conn = sc_conn ( s - > scb ) ;
2014-11-28 09:15:44 -05:00
struct channel * req = & s - > req ;
struct channel * rep = & s - > res ;
2008-11-30 12:47:21 -05:00
2022-03-31 03:16:34 -04:00
DBG_TRACE_ENTER ( STRM_EV_STRM_PROC | STRM_EV_CS_ST , s ) ;
MEDIUM: stream: re-arrange the connection setup status reporting
Till now when a wakeup happens after a connection is attempted, we go
through sess_update_st_con_tcp() to deal with the various possible events,
then to sess_update_st_cer() to deal with a possible error detected by the
former, or to sess_establish() to complete the connection validation. There
are multiple issues in the way this is handled, which have accumulated over
time. One of them is that any spurious wakeup during SI_ST_CON would validate
the READ_ATTACHED flag and wake the analysers up. Another one is that nobody
feels responsible for clearing SI_FL_EXP if it happened at the same time as
a success (and it is present in all reports of loops to date). And another
issue is that aborts cannot happen after a clean connection setup with no
data transfer (since CF_WRITE_NULL is part of CF_WRITE_ACTIVITY). Last, the
flags cleanup work was hackish, added here and there to please the next
function (typically what had to be donne in commit 7a3367cca to work around
the url_param+reuse issue by moving READ_ATTACHED to CON).
This patch performs a significant lift up of this setup code. First, it
makes sure that the state handlers are the ones responsible for the cleanup
of the stuff they rely on. Typically sess_sestablish() will clean up the
SI_FL_EXP flag because if we decided to validate the connection it means
that we want to ignore this late timeout. Second, it splits the CON and
RDY state handlers because the former only has to deal with failures,
timeouts and non-events, while the latter has to deal with partial or
total successes. Third, everything related to connection success was
moved to sess_establish() since it's the only safe place to do so, and
this function is also called at a few places to deal with synchronous
connections, which are not seen by intermediary state handlers.
The code was made a bit more robust, for example by making sure we
always set SI_FL_NOLINGER when aborting a connection so that we don't
have any risk to leave a connection in SHUTW state in case it was
validated late. The useless return codes of some of these functions
were dropped so that callers only rely on the stream-int's state now
(which was already partially the case anyway).
The code is now a bit cleaner, could be further improved (and functions
renamed) but given the sensitivity of this part, better limit changes to
strictly necessary. It passes all reg tests.
2019-06-05 12:02:04 -04:00
/* First, centralize the timers information, and clear any irrelevant
* timeout .
*/
2013-12-31 17:06:46 -05:00
s - > logs . t_connect = tv_ms_elapsed ( & s - > logs . tv_accept , & now ) ;
2022-03-29 13:02:31 -04:00
s - > conn_exp = TICK_ETERNITY ;
s - > flags & = ~ SF_CONN_EXP ;
MEDIUM: stream: re-arrange the connection setup status reporting
Till now when a wakeup happens after a connection is attempted, we go
through sess_update_st_con_tcp() to deal with the various possible events,
then to sess_update_st_cer() to deal with a possible error detected by the
former, or to sess_establish() to complete the connection validation. There
are multiple issues in the way this is handled, which have accumulated over
time. One of them is that any spurious wakeup during SI_ST_CON would validate
the READ_ATTACHED flag and wake the analysers up. Another one is that nobody
feels responsible for clearing SI_FL_EXP if it happened at the same time as
a success (and it is present in all reports of loops to date). And another
issue is that aborts cannot happen after a clean connection setup with no
data transfer (since CF_WRITE_NULL is part of CF_WRITE_ACTIVITY). Last, the
flags cleanup work was hackish, added here and there to please the next
function (typically what had to be donne in commit 7a3367cca to work around
the url_param+reuse issue by moving READ_ATTACHED to CON).
This patch performs a significant lift up of this setup code. First, it
makes sure that the state handlers are the ones responsible for the cleanup
of the stuff they rely on. Typically sess_sestablish() will clean up the
SI_FL_EXP flag because if we decided to validate the connection it means
that we want to ignore this late timeout. Second, it splits the CON and
RDY state handlers because the former only has to deal with failures,
timeouts and non-events, while the latter has to deal with partial or
total successes. Third, everything related to connection success was
moved to sess_establish() since it's the only safe place to do so, and
this function is also called at a few places to deal with synchronous
connections, which are not seen by intermediary state handlers.
The code was made a bit more robust, for example by making sure we
always set SI_FL_NOLINGER when aborting a connection so that we don't
have any risk to leave a connection in SHUTW state in case it was
validated late. The useless return codes of some of these functions
were dropped so that callers only rely on the stream-int's state now
(which was already partially the case anyway).
The code is now a bit cleaner, could be further improved (and functions
renamed) but given the sensitivity of this part, better limit changes to
strictly necessary. It passes all reg tests.
2019-06-05 12:02:04 -04:00
/* errors faced after sending data need to be reported */
2022-05-17 13:40:40 -04:00
if ( sc_ep_test ( s - > scb , SE_FL_ERROR ) & & req - > flags & CF_WROTE_DATA ) {
MEDIUM: stream: re-arrange the connection setup status reporting
Till now when a wakeup happens after a connection is attempted, we go
through sess_update_st_con_tcp() to deal with the various possible events,
then to sess_update_st_cer() to deal with a possible error detected by the
former, or to sess_establish() to complete the connection validation. There
are multiple issues in the way this is handled, which have accumulated over
time. One of them is that any spurious wakeup during SI_ST_CON would validate
the READ_ATTACHED flag and wake the analysers up. Another one is that nobody
feels responsible for clearing SI_FL_EXP if it happened at the same time as
a success (and it is present in all reports of loops to date). And another
issue is that aborts cannot happen after a clean connection setup with no
data transfer (since CF_WRITE_NULL is part of CF_WRITE_ACTIVITY). Last, the
flags cleanup work was hackish, added here and there to please the next
function (typically what had to be donne in commit 7a3367cca to work around
the url_param+reuse issue by moving READ_ATTACHED to CON).
This patch performs a significant lift up of this setup code. First, it
makes sure that the state handlers are the ones responsible for the cleanup
of the stuff they rely on. Typically sess_sestablish() will clean up the
SI_FL_EXP flag because if we decided to validate the connection it means
that we want to ignore this late timeout. Second, it splits the CON and
RDY state handlers because the former only has to deal with failures,
timeouts and non-events, while the latter has to deal with partial or
total successes. Third, everything related to connection success was
moved to sess_establish() since it's the only safe place to do so, and
this function is also called at a few places to deal with synchronous
connections, which are not seen by intermediary state handlers.
The code was made a bit more robust, for example by making sure we
always set SI_FL_NOLINGER when aborting a connection so that we don't
have any risk to leave a connection in SHUTW state in case it was
validated late. The useless return codes of some of these functions
were dropped so that callers only rely on the stream-int's state now
(which was already partially the case anyway).
The code is now a bit cleaner, could be further improved (and functions
renamed) but given the sensitivity of this part, better limit changes to
strictly necessary. It passes all reg tests.
2019-06-05 12:02:04 -04:00
/* Don't add CF_WRITE_ERROR if we're here because
* early data were rejected by the server , or
* http_wait_for_response ( ) will never be called
* to send a 425.
*/
if ( conn & & conn - > err_code ! = CO_ER_SSL_EARLY_FAILED )
req - > flags | = CF_WRITE_ERROR ;
rep - > flags | = CF_READ_ERROR ;
2022-03-30 13:39:30 -04:00
s - > conn_err_type = STRM_ET_DATA_ERR ;
2022-03-31 03:16:34 -04:00
DBG_TRACE_STATE ( " read/write error " , STRM_EV_STRM_PROC | STRM_EV_CS_ST | STRM_EV_STRM_ERR , s ) ;
MEDIUM: stream: re-arrange the connection setup status reporting
Till now when a wakeup happens after a connection is attempted, we go
through sess_update_st_con_tcp() to deal with the various possible events,
then to sess_update_st_cer() to deal with a possible error detected by the
former, or to sess_establish() to complete the connection validation. There
are multiple issues in the way this is handled, which have accumulated over
time. One of them is that any spurious wakeup during SI_ST_CON would validate
the READ_ATTACHED flag and wake the analysers up. Another one is that nobody
feels responsible for clearing SI_FL_EXP if it happened at the same time as
a success (and it is present in all reports of loops to date). And another
issue is that aborts cannot happen after a clean connection setup with no
data transfer (since CF_WRITE_NULL is part of CF_WRITE_ACTIVITY). Last, the
flags cleanup work was hackish, added here and there to please the next
function (typically what had to be donne in commit 7a3367cca to work around
the url_param+reuse issue by moving READ_ATTACHED to CON).
This patch performs a significant lift up of this setup code. First, it
makes sure that the state handlers are the ones responsible for the cleanup
of the stuff they rely on. Typically sess_sestablish() will clean up the
SI_FL_EXP flag because if we decided to validate the connection it means
that we want to ignore this late timeout. Second, it splits the CON and
RDY state handlers because the former only has to deal with failures,
timeouts and non-events, while the latter has to deal with partial or
total successes. Third, everything related to connection success was
moved to sess_establish() since it's the only safe place to do so, and
this function is also called at a few places to deal with synchronous
connections, which are not seen by intermediary state handlers.
The code was made a bit more robust, for example by making sure we
always set SI_FL_NOLINGER when aborting a connection so that we don't
have any risk to leave a connection in SHUTW state in case it was
validated late. The useless return codes of some of these functions
were dropped so that callers only rely on the stream-int's state now
(which was already partially the case anyway).
The code is now a bit cleaner, could be further improved (and functions
renamed) but given the sensitivity of this part, better limit changes to
strictly necessary. It passes all reg tests.
2019-06-05 12:02:04 -04:00
}
2012-11-11 18:42:33 -05:00
if ( objt_server ( s - > target ) )
2021-12-06 02:01:02 -05:00
health_adjust ( __objt_server ( s - > target ) , HANA_STATUS_L4_OK ) ;
2009-12-15 16:31:24 -05:00
2021-03-08 11:57:53 -05:00
if ( ! IS_HTX_STRM ( s ) ) { /* let's allow immediate data connection in this case */
2008-11-30 12:47:21 -05:00
/* if the user wants to log as soon as possible, without counting
* bytes from the server , then this is the right moment . */
2015-04-03 20:10:38 -04:00
if ( ! LIST_ISEMPTY ( & strm_fe ( s ) - > logformat ) & & ! ( s - > logs . logwait & LW_BYTES ) ) {
2018-07-25 00:55:12 -04:00
/* note: no pend_pos here, session is established */
2008-11-30 12:47:21 -05:00
s - > logs . t_close = s - > logs . t_connect ; /* to get a valid end date */
2008-11-30 13:02:32 -05:00
s - > do_log ( s ) ;
2008-11-30 12:47:21 -05:00
}
}
else {
2013-12-31 16:33:13 -05:00
rep - > flags | = CF_READ_DONTWAIT ; /* a single read is enough to get response headers */
2008-11-30 12:47:21 -05:00
}
2018-12-11 12:01:38 -05:00
rep - > analysers | = strm_fe ( s ) - > fe_rsp_ana | s - > be - > be_rsp_ana ;
2015-12-02 03:57:32 -05:00
2022-05-17 13:40:40 -04:00
cs_rx_endp_more ( s - > scb ) ;
2012-08-27 17:14:58 -04:00
rep - > flags | = CF_READ_ATTACHED ; /* producer is now attached */
2021-12-15 03:50:17 -05:00
if ( conn ) {
2020-12-10 07:43:52 -05:00
/* real connections have timeouts
* if already defined , it means that a set - timeout rule has
* been executed so do not overwrite them
*/
if ( ! tick_isset ( req - > wto ) )
req - > wto = s - > be - > timeout . server ;
if ( ! tick_isset ( rep - > rto ) )
rep - > rto = s - > be - > timeout . server ;
2020-12-10 07:43:53 -05:00
if ( ! tick_isset ( s - > tunnel_timeout ) )
s - > tunnel_timeout = s - > be - > timeout . tunnel ;
2020-12-10 07:43:52 -05:00
2018-11-07 11:55:19 -05:00
/* The connection is now established, try to read data from the
* underlying layer , and subscribe to recv events . We use a
* delayed recv here to give a chance to the data to flow back
* by the time we process other tasks .
*/
2022-05-17 13:40:40 -04:00
cs_chk_rcv ( s - > scb ) ;
2010-05-31 06:31:35 -04:00
}
2008-11-30 12:47:21 -05:00
req - > wex = TICK_ETERNITY ;
2019-07-26 08:54:34 -04:00
/* If we managed to get the whole response, and we don't have anything
2022-05-17 13:47:17 -04:00
* left to send , or can ' t , switch to SC_ST_DIS now . */
2019-11-05 10:18:10 -05:00
if ( rep - > flags & ( CF_SHUTR | CF_SHUTW ) ) {
2022-05-17 13:47:17 -04:00
s - > scb - > state = SC_ST_DIS ;
2022-03-31 03:16:34 -04:00
DBG_TRACE_STATE ( " response channel shutdwn for read/write " , STRM_EV_STRM_PROC | STRM_EV_CS_ST | STRM_EV_STRM_ERR , s ) ;
2019-11-05 10:18:10 -05:00
}
2022-03-31 03:16:34 -04:00
DBG_TRACE_LEAVE ( STRM_EV_STRM_PROC | STRM_EV_CS_ST , s ) ;
2008-11-30 12:47:21 -05:00
}
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
/* Set correct stream termination flags in case no analyser has done it. It
2011-06-07 20:19:07 -04:00
* also counts a failed request if the server state has not reached the request
* stage .
*/
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
static void sess_set_term_flags ( struct stream * s )
2011-06-07 20:19:07 -04:00
{
2015-04-02 19:14:29 -04:00
if ( ! ( s - > flags & SF_FINST_MASK ) ) {
2022-05-17 13:47:17 -04:00
if ( s - > scb - > state = = SC_ST_INI ) {
2019-06-05 08:53:22 -04:00
/* anything before REQ in fact */
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & strm_fe ( s ) - > fe_counters . failed_req ) ;
2015-09-23 06:21:21 -04:00
if ( strm_li ( s ) & & strm_li ( s ) - > counters )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & strm_li ( s ) - > counters - > failed_req ) ;
2011-06-07 20:19:07 -04:00
2015-04-02 19:14:29 -04:00
s - > flags | = SF_FINST_R ;
2011-06-07 20:19:07 -04:00
}
2022-05-17 13:47:17 -04:00
else if ( s - > scb - > state = = SC_ST_QUE )
2015-04-02 19:14:29 -04:00
s - > flags | = SF_FINST_Q ;
2022-05-17 13:47:17 -04:00
else if ( cs_state_in ( s - > scb - > state , SC_SB_REQ | SC_SB_TAR | SC_SB_ASS | SC_SB_CON | SC_SB_CER | SC_SB_RDY ) )
2015-04-02 19:14:29 -04:00
s - > flags | = SF_FINST_C ;
2022-05-17 13:47:17 -04:00
else if ( s - > scb - > state = = SC_ST_EST | | s - > prev_conn_state = = SC_ST_EST )
2015-04-02 19:14:29 -04:00
s - > flags | = SF_FINST_D ;
2011-06-07 20:19:07 -04:00
else
2015-04-02 19:14:29 -04:00
s - > flags | = SF_FINST_L ;
2011-06-07 20:19:07 -04:00
}
}
2015-09-27 13:29:33 -04:00
/* This function parses the use-service action ruleset. It executes
* the associated ACL and set an applet as a stream or txn final node .
* it returns ACT_RET_ERR if an error occurs , the proxy left in
2020-05-05 15:53:22 -04:00
* consistent state . It returns ACT_RET_STOP in success case because
2015-09-27 13:29:33 -04:00
* use - service must be a terminal action . Returns ACT_RET_YIELD
* if the initialisation function require more data .
*/
enum act_return process_use_service ( struct act_rule * rule , struct proxy * px ,
struct session * sess , struct stream * s , int flags )
{
struct appctx * appctx ;
/* Initialises the applet if it is required. */
2019-12-18 08:41:51 -05:00
if ( flags & ACT_OPT_FIRST ) {
2015-09-27 13:29:33 -04:00
/* Register applet. this function schedules the applet. */
s - > target = & rule - > applet . obj_type ;
2022-05-17 13:40:40 -04:00
appctx = cs_applet_create ( s - > scb , objt_applet ( s - > target ) ) ;
2022-02-24 07:45:27 -05:00
if ( unlikely ( ! appctx ) )
2015-09-27 13:29:33 -04:00
return ACT_RET_ERR ;
2022-01-19 08:56:50 -05:00
/* Finish initialisation of the context. */
2015-09-27 13:29:33 -04:00
appctx - > rule = rule ;
2022-05-12 08:59:28 -04:00
if ( appctx_init ( appctx ) = = - 1 )
2022-01-13 10:01:35 -05:00
return ACT_RET_ERR ;
2015-09-27 13:29:33 -04:00
}
else
2022-05-18 11:58:02 -04:00
appctx = __sc_appctx ( s - > scb ) ;
2015-09-27 13:29:33 -04:00
2019-03-01 05:44:26 -05:00
if ( rule - > from ! = ACT_F_HTTP_REQ ) {
if ( sess - > fe = = s - > be ) /* report it if the request was intercepted by the frontend */
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . intercepted_req ) ;
2019-03-01 05:44:26 -05:00
/* The flag SF_ASSIGNED prevent from server assignment. */
s - > flags | = SF_ASSIGNED ;
}
2015-09-27 13:29:33 -04:00
/* Now we can schedule the applet. */
2022-05-17 13:40:40 -04:00
cs_cant_get ( s - > scb ) ;
2015-09-27 13:29:33 -04:00
appctx_wakeup ( appctx ) ;
return ACT_RET_STOP ;
}
2009-07-07 09:10:31 -04:00
/* This stream analyser checks the switching rules and changes the backend
2010-01-22 13:10:05 -05:00
* if appropriate . The default_backend rule is also considered , then the
* target backend ' s forced persistence rules are also evaluated last if any .
2009-07-07 09:10:31 -04:00
* It returns 1 if the processing can continue on next analysers , or zero if it
* either needs more data or wants to immediately abort the request .
*/
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
static int process_switching_rules ( struct stream * s , struct channel * req , int an_bit )
2009-07-07 09:10:31 -04:00
{
2010-04-24 18:00:51 -04:00
struct persist_rule * prst_rule ;
2015-04-03 19:47:55 -04:00
struct session * sess = s - > sess ;
struct proxy * fe = sess - > fe ;
2010-01-22 13:10:05 -05:00
2009-07-07 09:10:31 -04:00
req - > analysers & = ~ an_bit ;
req - > analyse_exp = TICK_ETERNITY ;
2019-11-05 10:18:10 -05:00
DBG_TRACE_ENTER ( STRM_EV_STRM_ANA , s ) ;
2009-07-07 09:10:31 -04:00
/* now check whether we have some switching rules for this request */
2015-04-02 19:14:29 -04:00
if ( ! ( s - > flags & SF_BE_ASSIGNED ) ) {
2009-07-07 09:10:31 -04:00
struct switching_rule * rule ;
2015-04-03 09:40:56 -04:00
list_for_each_entry ( rule , & fe - > switching_rules , list ) {
2014-04-22 19:21:56 -04:00
int ret = 1 ;
2009-07-07 09:10:31 -04:00
2014-04-22 19:21:56 -04:00
if ( rule - > cond ) {
2015-04-03 19:47:55 -04:00
ret = acl_exec_cond ( rule - > cond , fe , sess , s , SMP_OPT_DIR_REQ | SMP_OPT_FINAL ) ;
2014-04-22 19:21:56 -04:00
ret = acl_pass ( ret ) ;
if ( rule - > cond - > pol = = ACL_COND_UNLESS )
ret = ! ret ;
}
2009-07-07 09:10:31 -04:00
if ( ret ) {
2013-11-19 05:43:06 -05:00
/* If the backend name is dynamic, try to resolve the name.
* If we can ' t resolve the name , or if any error occurs , break
* the loop and fallback to the default backend .
*/
2017-10-26 05:25:10 -04:00
struct proxy * backend = NULL ;
2013-11-19 05:43:06 -05:00
if ( rule - > dynamic ) {
2018-07-13 05:56:34 -04:00
struct buffer * tmp ;
2017-10-26 05:25:10 -04:00
tmp = alloc_trash_chunk ( ) ;
if ( ! tmp )
goto sw_failed ;
2018-07-13 04:54:26 -04:00
if ( build_logline ( s , tmp - > area , tmp - > size , & rule - > be . expr ) )
backend = proxy_be_by_name ( tmp - > area ) ;
2017-10-26 05:25:10 -04:00
free_trash_chunk ( tmp ) ;
tmp = NULL ;
2013-11-19 05:43:06 -05:00
if ( ! backend )
break ;
}
else
backend = rule - > be . backend ;
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
if ( ! stream_set_backend ( s , backend ) )
2009-07-12 02:27:39 -04:00
goto sw_failed ;
2009-07-07 09:10:31 -04:00
break ;
}
}
/* To ensure correct connection accounting on the backend, we
* have to assign one if it was not set ( eg : a listen ) . This
* measure also takes care of correctly setting the default
2021-03-22 10:07:51 -04:00
* backend if any . Don ' t do anything if an upgrade is already in
* progress .
2009-07-07 09:10:31 -04:00
*/
2021-03-22 10:07:51 -04:00
if ( ! ( s - > flags & ( SF_BE_ASSIGNED | SF_IGNORE ) ) )
2015-04-03 09:40:56 -04:00
if ( ! stream_set_backend ( s , fe - > defbe . be ? fe - > defbe . be : s - > be ) )
2009-07-12 02:27:39 -04:00
goto sw_failed ;
2021-03-22 10:07:51 -04:00
/* No backend assigned but no error reported. It happens when a
* TCP stream is upgraded to HTTP / 2.
*/
if ( ( s - > flags & ( SF_BE_ASSIGNED | SF_IGNORE ) ) = = SF_IGNORE ) {
DBG_TRACE_DEVEL ( " leaving with no backend because of a destructive upgrade " , STRM_EV_STRM_ANA , s ) ;
return 0 ;
}
2009-07-07 09:10:31 -04:00
}
2010-08-03 08:02:05 -04:00
/* we don't want to run the TCP or HTTP filters again if the backend has not changed */
2015-04-03 09:40:56 -04:00
if ( fe = = s - > be ) {
2014-11-27 14:45:39 -05:00
s - > req . analysers & = ~ AN_REQ_INSPECT_BE ;
s - > req . analysers & = ~ AN_REQ_HTTP_PROCESS_BE ;
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to defined analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
With the time, and introduction of new analyzers, this order was broken up. the
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been splitted in 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
s - > req . analysers & = ~ AN_REQ_FLT_START_BE ;
2010-08-03 08:02:05 -04:00
}
2009-07-07 09:10:31 -04:00
2010-04-24 18:00:51 -04:00
/* as soon as we know the backend, we must check if we have a matching forced or ignored
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
* persistence rule , and report that in the stream .
2010-01-22 13:10:05 -05:00
*/
2010-04-24 18:00:51 -04:00
list_for_each_entry ( prst_rule , & s - > be - > persist_rules , list ) {
2010-01-22 13:10:05 -05:00
int ret = 1 ;
if ( prst_rule - > cond ) {
2015-04-03 19:47:55 -04:00
ret = acl_exec_cond ( prst_rule - > cond , s - > be , sess , s , SMP_OPT_DIR_REQ | SMP_OPT_FINAL ) ;
2010-01-22 13:10:05 -05:00
ret = acl_pass ( ret ) ;
if ( prst_rule - > cond - > pol = = ACL_COND_UNLESS )
ret = ! ret ;
}
if ( ret ) {
/* no rule, or the rule matches */
2010-04-24 18:00:51 -04:00
if ( prst_rule - > type = = PERSIST_TYPE_FORCE ) {
2015-04-02 19:14:29 -04:00
s - > flags | = SF_FORCE_PRST ;
2010-04-24 18:00:51 -04:00
} else {
2015-04-02 19:14:29 -04:00
s - > flags | = SF_IGNORE_PRST ;
2010-04-24 18:00:51 -04:00
}
2010-01-22 13:10:05 -05:00
break ;
}
}
2019-11-05 10:18:10 -05:00
DBG_TRACE_LEAVE ( STRM_EV_STRM_ANA , s ) ;
2009-07-07 09:10:31 -04:00
return 1 ;
2009-07-12 02:27:39 -04:00
sw_failed :
/* immediately abort this request in case of allocation failure */
2014-11-27 14:45:39 -05:00
channel_abort ( & s - > req ) ;
channel_abort ( & s - > res ) ;
2009-07-12 02:27:39 -04:00
2015-04-02 19:14:29 -04:00
if ( ! ( s - > flags & SF_ERR_MASK ) )
s - > flags | = SF_ERR_RESOURCE ;
if ( ! ( s - > flags & SF_FINST_MASK ) )
s - > flags | = SF_FINST_R ;
2009-07-12 02:27:39 -04:00
2015-04-03 17:46:31 -04:00
if ( s - > txn )
s - > txn - > status = 500 ;
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to defined analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
With the time, and introduction of new analyzers, this order was broken up. the
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been splitted in 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
s - > req . analysers & = AN_REQ_FLT_END ;
2014-11-27 14:45:39 -05:00
s - > req . analyse_exp = TICK_ETERNITY ;
2019-11-05 10:18:10 -05:00
DBG_TRACE_DEVEL ( " leaving on error " , STRM_EV_STRM_ANA | STRM_EV_STRM_ERR , s ) ;
2009-07-12 02:27:39 -04:00
return 0 ;
2009-07-07 09:10:31 -04:00
}
2012-04-05 15:09:48 -04:00
/* This stream analyser works on a request. It applies all use-server rules on
* it then returns 1. The data must already be present in the buffer otherwise
* they won ' t match . It always returns 1.
*/
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
static int process_server_rules ( struct stream * s , struct channel * req , int an_bit )
2012-04-05 15:09:48 -04:00
{
struct proxy * px = s - > be ;
2015-04-03 19:47:55 -04:00
struct session * sess = s - > sess ;
2012-04-05 15:09:48 -04:00
struct server_rule * rule ;
2019-11-05 10:18:10 -05:00
DBG_TRACE_ENTER ( STRM_EV_STRM_ANA , s ) ;
2012-04-05 15:09:48 -04:00
2015-04-02 19:14:29 -04:00
if ( ! ( s - > flags & SF_ASSIGNED ) ) {
2012-04-05 15:09:48 -04:00
list_for_each_entry ( rule , & px - > server_rules , list ) {
int ret ;
2015-04-03 19:47:55 -04:00
ret = acl_exec_cond ( rule - > cond , s - > be , sess , s , SMP_OPT_DIR_REQ | SMP_OPT_FINAL ) ;
2012-04-05 15:09:48 -04:00
ret = acl_pass ( ret ) ;
if ( rule - > cond - > pol = = ACL_COND_UNLESS )
ret = ! ret ;
if ( ret ) {
2020-03-29 03:37:12 -04:00
struct server * srv ;
if ( rule - > dynamic ) {
struct buffer * tmp = get_trash_chunk ( ) ;
if ( ! build_logline ( s , tmp - > area , tmp - > size , & rule - > expr ) )
break ;
srv = findserver ( s - > be , tmp - > area ) ;
if ( ! srv )
break ;
}
else
srv = rule - > srv . ptr ;
2012-04-05 15:09:48 -04:00
2017-08-31 08:41:55 -04:00
if ( ( srv - > cur_state ! = SRV_ST_STOPPED ) | |
2012-04-05 15:09:48 -04:00
( px - > options & PR_O_PERSIST ) | |
2015-04-02 19:14:29 -04:00
( s - > flags & SF_FORCE_PRST ) ) {
s - > flags | = SF_DIRECT | SF_ASSIGNED ;
2012-11-11 18:42:33 -05:00
s - > target = & srv - > obj_type ;
2012-04-05 15:09:48 -04:00
break ;
}
/* if the server is not UP, let's go on with next rules
* just in case another one is suited .
*/
}
}
}
req - > analysers & = ~ an_bit ;
req - > analyse_exp = TICK_ETERNITY ;
2019-11-05 10:18:10 -05:00
DBG_TRACE_LEAVE ( STRM_EV_STRM_ANA , s ) ;
2012-04-05 15:09:48 -04:00
return 1 ;
}
2019-05-20 04:08:27 -04:00
static inline void sticking_rule_find_target ( struct stream * s ,
struct stktable * t , struct stksess * ts )
{
struct proxy * px = s - > be ;
struct eb32_node * node ;
struct dict_entry * de ;
void * ptr ;
struct server * srv ;
/* Look for the server name previously stored in <t> stick-table */
HA_RWLOCK_RDLOCK ( STK_SESS_LOCK , & ts - > lock ) ;
2020-11-20 03:28:26 -05:00
ptr = __stktable_data_ptr ( t , ts , STKTABLE_DT_SERVER_KEY ) ;
2021-06-30 11:18:28 -04:00
de = stktable_data_cast ( ptr , std_t_dict ) ;
2019-05-20 04:08:27 -04:00
HA_RWLOCK_RDUNLOCK ( STK_SESS_LOCK , & ts - > lock ) ;
if ( de ) {
2020-11-20 03:28:26 -05:00
struct ebpt_node * node ;
2019-05-20 04:08:27 -04:00
2020-11-20 03:28:26 -05:00
if ( t - > server_key_type = = STKTABLE_SRV_NAME ) {
node = ebis_lookup ( & px - > conf . used_server_name , de - > value . key ) ;
if ( node ) {
srv = container_of ( node , struct server , conf . name ) ;
goto found ;
}
} else if ( t - > server_key_type = = STKTABLE_SRV_ADDR ) {
HA_RWLOCK_RDLOCK ( PROXY_LOCK , & px - > lock ) ;
node = ebis_lookup ( & px - > used_server_addr , de - > value . key ) ;
HA_RWLOCK_RDUNLOCK ( PROXY_LOCK , & px - > lock ) ;
if ( node ) {
srv = container_of ( node , struct server , addr_node ) ;
goto found ;
}
2019-05-20 04:08:27 -04:00
}
}
/* Look for the server ID */
HA_RWLOCK_RDLOCK ( STK_SESS_LOCK , & ts - > lock ) ;
ptr = __stktable_data_ptr ( t , ts , STKTABLE_DT_SERVER_ID ) ;
2021-06-30 11:18:28 -04:00
node = eb32_lookup ( & px - > conf . used_server_id , stktable_data_cast ( ptr , std_t_sint ) ) ;
2019-05-20 04:08:27 -04:00
HA_RWLOCK_RDUNLOCK ( STK_SESS_LOCK , & ts - > lock ) ;
if ( ! node )
return ;
srv = container_of ( node , struct server , conf . id ) ;
found :
if ( ( srv - > cur_state ! = SRV_ST_STOPPED ) | |
( px - > options & PR_O_PERSIST ) | | ( s - > flags & SF_FORCE_PRST ) ) {
s - > flags | = SF_DIRECT | SF_ASSIGNED ;
s - > target = & srv - > obj_type ;
}
}
2010-01-04 09:47:17 -05:00
/* This stream analyser works on a request. It applies all sticking rules on
* it then returns 1. The data must already be present in the buffer otherwise
* they won ' t match . It always returns 1.
*/
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
static int process_sticking_rules ( struct stream * s , struct channel * req , int an_bit )
2010-01-04 09:47:17 -05:00
{
struct proxy * px = s - > be ;
2015-04-03 19:47:55 -04:00
struct session * sess = s - > sess ;
2010-01-04 09:47:17 -05:00
struct sticking_rule * rule ;
2019-11-05 10:18:10 -05:00
DBG_TRACE_ENTER ( STRM_EV_STRM_ANA , s ) ;
2010-01-04 09:47:17 -05:00
list_for_each_entry ( rule , & px - > sticking_rules , list ) {
int ret = 1 ;
int i ;
2013-12-09 06:52:13 -05:00
/* Only the first stick store-request of each table is applied
* and other ones are ignored . The purpose is to allow complex
* configurations which look for multiple entries by decreasing
* order of precision and to stop at the first which matches .
* An example could be a store of the IP address from an HTTP
* header first , then from the source if not found .
*/
2020-01-16 11:37:21 -05:00
if ( rule - > flags & STK_IS_STORE ) {
for ( i = 0 ; i < s - > store_count ; i + + ) {
if ( rule - > table . t = = s - > store [ i ] . table )
break ;
}
2010-01-04 09:47:17 -05:00
2020-01-16 11:37:21 -05:00
if ( i ! = s - > store_count )
continue ;
}
2010-01-04 09:47:17 -05:00
if ( rule - > cond ) {
2015-04-03 19:47:55 -04:00
ret = acl_exec_cond ( rule - > cond , px , sess , s , SMP_OPT_DIR_REQ | SMP_OPT_FINAL ) ;
2010-01-04 09:47:17 -05:00
ret = acl_pass ( ret ) ;
if ( rule - > cond - > pol = = ACL_COND_UNLESS )
ret = ! ret ;
}
if ( ret ) {
struct stktable_key * key ;
2015-04-03 19:47:55 -04:00
key = stktable_fetch_key ( rule - > table . t , px , sess , s , SMP_OPT_DIR_REQ | SMP_OPT_FINAL , rule - > expr , NULL ) ;
2010-01-04 09:47:17 -05:00
if ( ! key )
continue ;
if ( rule - > flags & STK_IS_MATCH ) {
struct stksess * ts ;
2010-06-06 09:38:59 -04:00
if ( ( ts = stktable_lookup_key ( rule - > table . t , key ) ) ! = NULL ) {
2019-05-20 04:08:27 -04:00
if ( ! ( s - > flags & SF_ASSIGNED ) )
sticking_rule_find_target ( s , rule - > table . t , ts ) ;
2017-06-13 13:37:32 -04:00
stktable_touch_local ( rule - > table . t , ts , 1 ) ;
2010-01-04 09:47:17 -05:00
}
}
if ( rule - > flags & STK_IS_STORE ) {
if ( s - > store_count < ( sizeof ( s - > store ) / sizeof ( s - > store [ 0 ] ) ) ) {
struct stksess * ts ;
ts = stksess_new ( rule - > table . t , key ) ;
if ( ts ) {
s - > store [ s - > store_count ] . table = rule - > table . t ;
s - > store [ s - > store_count + + ] . ts = ts ;
}
}
}
}
}
req - > analysers & = ~ an_bit ;
req - > analyse_exp = TICK_ETERNITY ;
2019-11-05 10:18:10 -05:00
DBG_TRACE_LEAVE ( STRM_EV_STRM_ANA , s ) ;
2010-01-04 09:47:17 -05:00
return 1 ;
}
/* This stream analyser works on a response. It applies all store rules on it
* then returns 1. The data must already be present in the buffer otherwise
* they won ' t match . It always returns 1.
*/
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
static int process_store_rules ( struct stream * s , struct channel * rep , int an_bit )
2010-01-04 09:47:17 -05:00
{
struct proxy * px = s - > be ;
2015-04-03 19:47:55 -04:00
struct session * sess = s - > sess ;
2010-01-04 09:47:17 -05:00
struct sticking_rule * rule ;
int i ;
2013-12-09 06:52:13 -05:00
int nbreq = s - > store_count ;
2010-01-04 09:47:17 -05:00
2019-11-05 10:18:10 -05:00
DBG_TRACE_ENTER ( STRM_EV_STRM_ANA , s ) ;
2010-01-04 09:47:17 -05:00
list_for_each_entry ( rule , & px - > storersp_rules , list ) {
int ret = 1 ;
2013-12-09 06:52:13 -05:00
/* Only the first stick store-response of each table is applied
* and other ones are ignored . The purpose is to allow complex
* configurations which look for multiple entries by decreasing
* order of precision and to stop at the first which matches .
* An example could be a store of a set - cookie value , with a
* fallback to a parameter found in a 302 redirect .
*
* The store - response rules are not allowed to override the
* store - request rules for the same table , but they may coexist .
* Thus we can have up to one store - request entry and one store -
* response entry for the same table at any time .
*/
for ( i = nbreq ; i < s - > store_count ; i + + ) {
if ( rule - > table . t = = s - > store [ i ] . table )
break ;
}
/* skip existing entries for this table */
if ( i < s - > store_count )
continue ;
2010-01-04 09:47:17 -05:00
if ( rule - > cond ) {
2015-04-03 19:47:55 -04:00
ret = acl_exec_cond ( rule - > cond , px , sess , s , SMP_OPT_DIR_RES | SMP_OPT_FINAL ) ;
2010-01-04 09:47:17 -05:00
ret = acl_pass ( ret ) ;
if ( rule - > cond - > pol = = ACL_COND_UNLESS )
ret = ! ret ;
}
if ( ret ) {
struct stktable_key * key ;
2015-04-03 19:47:55 -04:00
key = stktable_fetch_key ( rule - > table . t , px , sess , s , SMP_OPT_DIR_RES | SMP_OPT_FINAL , rule - > expr , NULL ) ;
2010-01-04 09:47:17 -05:00
if ( ! key )
continue ;
BUG/MEDIUM: stick: completely remove the unused flag from the store entries
The store[] array in the session holds a flag which probably aimed to
differenciate store entries learned from the request from those learned
from the response, and allowing responses to overwrite only the request
ones (eg: have a server set a response cookie which overwrites the request
one).
But this flag is set when a response data is stored, and is never cleared.
So in practice, haproxy always runs with this flag set, meaning that
responses prevent themselves from overriding the request data.
It is desirable anyway to keep the ability not to override data, because
the override is performed only based on the table and not on the key, so
that would mean that it would be impossible to retrieve two different
keys to store into a same table. For example, if a client sets a cookie
and a server another one, both need to be updated in the table in the
proper order. This is especially true when multiple keys may be tracked
on each side into the same table (eg: list of IP addresses in a header).
So the correct fix which also maintains the current behaviour consists in
simply removing this flag and never try to optimize for the overwrite case.
This fix also has the benefit of significantly reducing the session size,
by 64 bytes due to alignment issues caused by this flag!
The bug has been there forever (since 1.4-dev7), so a backport to 1.4
would be appropriate.
2013-12-06 17:05:21 -05:00
if ( s - > store_count < ( sizeof ( s - > store ) / sizeof ( s - > store [ 0 ] ) ) ) {
2010-01-04 09:47:17 -05:00
struct stksess * ts ;
ts = stksess_new ( rule - > table . t , key ) ;
if ( ts ) {
s - > store [ s - > store_count ] . table = rule - > table . t ;
s - > store [ s - > store_count + + ] . ts = ts ;
}
}
}
}
/* process store request and store response */
for ( i = 0 ; i < s - > store_count ; i + + ) {
2010-06-06 09:38:59 -04:00
struct stksess * ts ;
2010-06-06 10:40:39 -04:00
void * ptr ;
2020-11-20 03:28:26 -05:00
char * key ;
2019-05-20 04:08:27 -04:00
struct dict_entry * de ;
2020-11-20 03:28:26 -05:00
struct stktable * t = s - > store [ i ] . table ;
2010-06-06 09:38:59 -04:00
2021-12-06 02:01:02 -05:00
if ( objt_server ( s - > target ) & & __objt_server ( s - > target ) - > flags & SRV_F_NON_STICK ) {
2011-06-24 20:39:49 -04:00
stksess_free ( s - > store [ i ] . table , s - > store [ i ] . ts ) ;
s - > store [ i ] . ts = NULL ;
continue ;
}
2020-11-20 03:28:26 -05:00
ts = stktable_set_entry ( t , s - > store [ i ] . ts ) ;
2017-06-13 13:37:32 -04:00
if ( ts ! = s - > store [ i ] . ts ) {
2010-06-06 09:38:59 -04:00
/* the entry already existed, we can free ours */
2020-11-20 03:28:26 -05:00
stksess_free ( t , s - > store [ i ] . ts ) ;
2010-01-04 09:47:17 -05:00
}
2010-06-06 09:38:59 -04:00
s - > store [ i ] . ts = NULL ;
2017-06-13 13:37:32 -04:00
2017-11-07 04:42:54 -05:00
HA_RWLOCK_WRLOCK ( STK_SESS_LOCK , & ts - > lock ) ;
2020-11-20 03:28:26 -05:00
ptr = __stktable_data_ptr ( t , ts , STKTABLE_DT_SERVER_ID ) ;
2021-06-30 11:18:28 -04:00
stktable_data_cast ( ptr , std_t_sint ) = __objt_server ( s - > target ) - > puid ;
2017-11-07 04:42:54 -05:00
HA_RWLOCK_WRUNLOCK ( STK_SESS_LOCK , & ts - > lock ) ;
2019-05-20 04:08:27 -04:00
2020-11-20 03:28:26 -05:00
if ( t - > server_key_type = = STKTABLE_SRV_NAME )
key = __objt_server ( s - > target ) - > id ;
else if ( t - > server_key_type = = STKTABLE_SRV_ADDR )
key = __objt_server ( s - > target ) - > addr_node . key ;
else
continue ;
2019-05-20 04:08:27 -04:00
HA_RWLOCK_WRLOCK ( STK_SESS_LOCK , & ts - > lock ) ;
2020-11-20 03:28:26 -05:00
de = dict_insert ( & server_key_dict , key ) ;
2019-05-20 04:08:27 -04:00
if ( de ) {
2020-11-20 03:28:26 -05:00
ptr = __stktable_data_ptr ( t , ts , STKTABLE_DT_SERVER_KEY ) ;
2021-06-30 11:18:28 -04:00
stktable_data_cast ( ptr , std_t_dict ) = de ;
2019-05-20 04:08:27 -04:00
}
HA_RWLOCK_WRUNLOCK ( STK_SESS_LOCK , & ts - > lock ) ;
2020-11-20 03:28:26 -05:00
stktable_touch_local ( t , ts , 1 ) ;
2010-01-04 09:47:17 -05:00
}
2010-06-18 03:57:45 -04:00
s - > store_count = 0 ; /* everything is stored */
2010-01-04 09:47:17 -05:00
rep - > analysers & = ~ an_bit ;
rep - > analyse_exp = TICK_ETERNITY ;
2019-11-05 10:18:10 -05:00
DBG_TRACE_LEAVE ( STRM_EV_STRM_ANA , s ) ;
2010-01-04 09:47:17 -05:00
return 1 ;
}
2021-03-15 05:42:02 -04:00
/* Set the stream to HTTP mode, if necessary. The minimal request HTTP analysers
* are set and the client mux is upgraded . It returns 1 if the stream processing
* may continue or 0 if it should be stopped . It happens on error or if the
2021-03-15 07:03:44 -04:00
* upgrade required a new stream . The mux protocol may be specified .
2021-03-15 05:42:02 -04:00
*/
2021-03-15 07:03:44 -04:00
int stream_set_http_mode ( struct stream * s , const struct mux_proto_list * mux_proto )
2021-03-15 05:42:02 -04:00
{
2022-05-17 13:40:40 -04:00
struct stconn * cs = s - > scf ;
2021-03-15 05:42:02 -04:00
struct connection * conn ;
/* Already an HTTP stream */
if ( IS_HTX_STRM ( s ) )
return 1 ;
s - > req . analysers | = AN_REQ_WAIT_HTTP | AN_REQ_HTTP_PROCESS_FE ;
if ( unlikely ( ! s - > txn & & ! http_create_txn ( s ) ) )
return 0 ;
2022-05-18 10:23:22 -04:00
conn = sc_conn ( cs ) ;
2021-12-20 09:34:16 -05:00
if ( conn ) {
2022-05-17 13:40:40 -04:00
cs_rx_endp_more ( s - > scf ) ;
2021-03-15 05:42:02 -04:00
/* Make sure we're unsubscribed, the the new
* mux will probably want to subscribe to
* the underlying XPRT
*/
2022-05-17 13:40:40 -04:00
if ( s - > scf - > wait_event . events )
conn - > mux - > unsubscribe ( cs , s - > scf - > wait_event . events , & ( s - > scf - > wait_event ) ) ;
2021-03-15 07:03:44 -04:00
2021-03-15 05:42:02 -04:00
if ( conn - > mux - > flags & MX_FL_NO_UPG )
return 0 ;
2021-03-15 07:03:44 -04:00
if ( conn_upgrade_mux_fe ( conn , cs , & s - > req . buf ,
( mux_proto ? mux_proto - > token : ist ( " " ) ) ,
PROTO_MODE_HTTP ) = = - 1 )
2021-03-15 05:42:02 -04:00
return 0 ;
s - > req . flags & = ~ ( CF_READ_PARTIAL | CF_AUTO_CONNECT ) ;
s - > req . total = 0 ;
s - > flags | = SF_IGNORE ;
if ( strcmp ( conn - > mux - > name , " H2 " ) = = 0 ) {
2022-05-17 13:07:51 -04:00
/* For HTTP/2, destroy the stream connector, disable logging,
2021-03-15 05:42:02 -04:00
* and abort the stream process . Thus it will be
* silently destroyed . The new mux will create new
* streams .
*/
s - > logs . logwait = 0 ;
s - > logs . level = 0 ;
channel_abort ( & s - > req ) ;
channel_abort ( & s - > res ) ;
s - > req . analysers & = AN_REQ_FLT_END ;
s - > req . analyse_exp = TICK_ETERNITY ;
}
}
return 1 ;
}
2022-05-17 13:07:51 -04:00
/* Updates at once the channel flags, and timers of both stream connectors of a
2022-04-01 08:48:06 -04:00
* same stream , to complete the work after the analysers , then updates the data
* layer below . This will ensure that any synchronous update performed at the
2022-05-17 13:07:51 -04:00
* data layer will be reflected in the channel flags and / or stream connector .
* Note that this does not change the stream connector ' s current state , though
2022-04-01 08:48:06 -04:00
* it updates the previous state to the current one .
*/
static void stream_update_both_cs ( struct stream * s )
{
2022-05-17 13:40:40 -04:00
struct stconn * scf = s - > scf ;
struct stconn * scb = s - > scb ;
2022-04-01 08:48:06 -04:00
struct channel * req = & s - > req ;
struct channel * res = & s - > res ;
req - > flags & = ~ ( CF_READ_NULL | CF_READ_PARTIAL | CF_READ_ATTACHED | CF_WRITE_NULL | CF_WRITE_PARTIAL ) ;
res - > flags & = ~ ( CF_READ_NULL | CF_READ_PARTIAL | CF_READ_ATTACHED | CF_WRITE_NULL | CF_WRITE_PARTIAL ) ;
2022-05-17 13:40:40 -04:00
s - > prev_conn_state = scb - > state ;
2022-04-01 08:48:06 -04:00
/* let's recompute both sides states */
2022-05-17 13:47:17 -04:00
if ( cs_state_in ( scf - > state , SC_SB_RDY | SC_SB_EST ) )
2022-05-17 13:40:40 -04:00
cs_update ( scf ) ;
2022-04-01 08:48:06 -04:00
2022-05-17 13:47:17 -04:00
if ( cs_state_in ( scb - > state , SC_SB_RDY | SC_SB_EST ) )
2022-05-17 13:40:40 -04:00
cs_update ( scb ) ;
2022-04-01 08:48:06 -04:00
2022-05-17 13:07:51 -04:00
/* stream connectors are processed outside of process_stream() and must be
2022-04-01 08:48:06 -04:00
* handled at the latest moment .
*/
2022-05-18 11:58:02 -04:00
if ( sc_appctx ( scf ) ) {
2022-05-24 10:56:55 -04:00
if ( ( cs_rx_endp_ready ( scf ) & & ! cs_rx_blocked ( scf ) & & ! sc_ep_test ( scf , SE_FL_APPLET_NEED_CONN ) & &
! ( req - > flags & CF_SHUTR ) ) | | sc_is_send_allowed ( scf ) )
2022-05-18 11:58:02 -04:00
appctx_wakeup ( __sc_appctx ( scf ) ) ;
2022-04-01 08:48:06 -04:00
}
2022-05-18 11:58:02 -04:00
if ( sc_appctx ( scb ) ) {
2022-05-24 10:56:55 -04:00
if ( ( cs_rx_endp_ready ( scb ) & & ! cs_rx_blocked ( scb ) & & ! sc_ep_test ( scb , SE_FL_APPLET_NEED_CONN ) & &
! ( res - > flags & CF_SHUTR ) ) | | sc_is_send_allowed ( scb ) )
2022-05-18 11:58:02 -04:00
appctx_wakeup ( __sc_appctx ( scb ) ) ;
2022-04-01 08:48:06 -04:00
}
}
2021-03-15 05:42:02 -04:00
2010-01-06 17:53:24 -05:00
/* This macro is very specific to the function below. See the comments in
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
* process_stream ( ) below to understand the logic and the tests .
2010-01-06 17:53:24 -05:00
*/
/* Recomputes the analyser bitmap <list> after analyser <flag> completed, from
 * the channel's live bitmap <real> and the last observed copy <back>. Any
 * analyser enabled in the meantime (present in <real> but absent from <back>)
 * is picked up, while <flag> itself is cleared. The macro then leaves the
 * enclosing loop (break) when no analyser remains, or restarts it (continue)
 * when the lowest remaining analyser bit is located before <flag>, because
 * analysers must always be evaluated in ascending bit order.
 */
#define UPDATE_ANALYSERS(real, list, back, flag) {			\
		list = (((list) & ~(flag)) | ~(back)) & (real);		\
		back = real;						\
		if (!(list))						\
			break;						\
		if (((list) ^ ((list) & ((list) - 1))) < (flag))	\
			continue;					\
	}
2016-05-11 11:06:28 -04:00
/* These 2 following macros call an analyzer for the specified channel if the
* right flag is set . The first one is used for " filterable " analyzers . If a
2016-05-11 11:13:39 -04:00
* stream has some registered filters, pre and post analyze callbacks are
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to defined analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
With the time, and introduction of new analyzers, this order was broken up. the
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been split into 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
* called . The second are used for other analyzers ( AN_REQ / RES_FLT_ * and
2016-05-11 11:06:28 -04:00
* AN_REQ / RES_HTTP_XFER_BODY ) */
/* Calls a "filterable" analyser <fun> for channel <chn> of stream <strm> when
 * analyser bit <flag> is present in <list>. When the stream has registered
 * filters, the flt_pre_analyze()/flt_post_analyze() callbacks are invoked
 * around the analyser itself; any of the three returning zero stops the
 * analyser loop (break). On success the analyser bitmaps are resynchronized
 * via UPDATE_ANALYSERS(), which may also break out of or restart the loop.
 * Must only be used inside process_stream()'s analyser evaluation loop.
 */
#define FLT_ANALYZE(strm, chn, fun, list, back, flag, ...)			\
	{									\
		if ((list) & (flag)) {						\
			if (HAS_FILTERS(strm)) {				\
				if (!flt_pre_analyze((strm), (chn), (flag)))	\
					break;					\
				if (!fun((strm), (chn), (flag), ##__VA_ARGS__))	\
					break;					\
				if (!flt_post_analyze((strm), (chn), (flag)))	\
					break;					\
			}							\
			else {							\
				if (!fun((strm), (chn), (flag), ##__VA_ARGS__))	\
					break;					\
			}							\
			UPDATE_ANALYSERS((chn)->analysers, (list),		\
					 (back), (flag));			\
		}								\
	}
/* Calls the plain analyser <fun> for channel <chn> of stream <strm> when
 * analyser bit <flag> is set in <list>. A zero return from <fun> leaves the
 * analyser loop (break); otherwise the analyser bitmaps are resynchronized
 * via UPDATE_ANALYSERS(), which may itself break out of or restart the loop.
 * Used for the non-filterable analysers (AN_REQ/RES_FLT_* and
 * AN_REQ/RES_HTTP_XFER_BODY). Must only be used inside process_stream()'s
 * analyser evaluation loop.
 */
#define ANALYZE(strm, chn, fun, list, back, flag, ...)			\
	{								\
		if ((list) & (flag)) {					\
			if (!fun((strm), (chn), (flag), ##__VA_ARGS__))	\
				break;					\
			UPDATE_ANALYSERS((chn)->analysers, (list),	\
					 (back), (flag));		\
		}							\
	}
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
/* Processes the client, server, request and response jobs of a stream task,
2008-11-30 12:47:21 -05:00
* then puts it back to the wait queue in a clean state , or cleans up its
* resources if it must be deleted . Returns in < next > the date the task wants
* to be woken up , or TICK_ETERNITY . In order not to call all functions for
* nothing too many times , the request and response buffers flags are monitored
* and each function is called only if at least another function has changed at
* least one flag it is interested in .
*/
2021-03-02 10:09:26 -05:00
struct task * process_stream ( struct task * t , void * context , unsigned int state )
2008-11-30 12:47:21 -05:00
{
2011-03-10 10:55:02 -05:00
struct server * srv ;
2018-05-25 08:04:04 -04:00
struct stream * s = context ;
2015-04-03 08:46:27 -04:00
struct session * sess = s - > sess ;
2008-11-30 12:47:21 -05:00
unsigned int rqf_last , rpf_last ;
2010-07-27 11:15:12 -04:00
unsigned int rq_prod_last , rq_cons_last ;
unsigned int rp_cons_last , rp_prod_last ;
2010-01-06 18:09:04 -05:00
unsigned int req_ana_back ;
2014-11-28 09:07:47 -05:00
struct channel * req , * res ;
2022-05-17 13:40:40 -04:00
struct stconn * scf , * scb ;
2019-04-25 13:15:20 -04:00
unsigned int rate ;
2014-11-28 09:07:47 -05:00
2019-11-05 10:18:10 -05:00
DBG_TRACE_ENTER ( STRM_EV_STRM_PROC , s ) ;
2020-06-17 14:49:49 -04:00
activity [ tid ] . stream_calls + + ;
2018-01-20 13:30:13 -05:00
2014-11-28 09:07:47 -05:00
req = & s - > req ;
res = & s - > res ;
2022-05-17 13:40:40 -04:00
scf = s - > scf ;
scb = s - > scb ;
2008-11-30 12:47:21 -05:00
2018-10-25 04:42:39 -04:00
/* First, attempt to receive pending data from I/O layers */
2022-05-18 12:06:53 -04:00
sc_conn_sync_recv ( scf ) ;
sc_conn_sync_recv ( scb ) ;
2018-11-17 13:51:07 -05:00
BUG/MINOR: stream: make the call_rate only count the no-progress calls
We have an anti-looping protection in process_stream() that detects bugs
that used to affect a few filters like compression in the past which
sometimes forgot to handle a read0 or a particular error, leaving a
thread looping at 100% CPU forever. When such a condition is detected,
an alert it emitted and the process is killed so that it can be replaced
by a sane one:
[ALERT] (19061) : A bogus STREAM [0x274abe0] is spinning at 2057156
calls per second and refuses to die, aborting now! Please
report this error to developers [strm=0x274abe0,3 src=unix
fe=MASTER be=MASTER dst=<MCLI> txn=(nil),0 txn.req=-,0
txn.rsp=-,0 rqf=c02000 rqa=10000 rpf=88000021 rpa=8000000
sif=EST,40008 sib=DIS,84018 af=(nil),0 csf=0x274ab90,8600
ab=0x272fd40,1 csb=(nil),0
cof=0x25d5d80,1300:PASS(0x274aaf0)/RAW((nil))/unix_stream(9)
cob=(nil),0:NONE((nil))/NONE((nil))/NONE(0) filters={}]
call trace(11):
| 0x4dbaab [c7 04 25 01 00 00 00 00]: stream_dump_and_crash+0x17b/0x1b4
| 0x4df31f [e9 bd c8 ff ff 49 83 7c]: process_stream+0x382f/0x53a3
(...)
One problem with this detection is that it used to only count the call
rate because we weren't sure how to make it more accurate, but the
threshold was high enough to prevent accidental false positives.
There is actually one case that manages to trigger it, which is when
sending huge amounts of requests pipelined on the master CLI. Some
short requests such as "show version" are sufficient to be handled
extremely fast and to cause a wake up of an analyser to parse the
next request, then an applet to handle it, back and forth. But this
condition is not an error, since some data are being forwarded by
the stream, and it's easy to detect it.
This patch modifies the detection so that update_freq_ctr() only
applies to calls made without CF_READ_PARTIAL nor CF_WRITE_PARTIAL
set on any of the channels, which really indicates that nothing is
happening at all.
This is greatly sufficient and extremely effective, as the call above
is still caught (shutr being ignored by an analyser) while a loop on
the master CLI now has no effect. The "call_rate" field in the detailed
"show sess" output will now be much lower, except for bogus streams,
which may help spot them. This field is only there for developers
anyway so it's pretty fine to slightly adjust its meaning.
This patch could be backported to stable versions in case of reports
of such an issue, but as that's unlikely, it's not really needed.
2022-01-20 12:42:16 -05:00
/* Let's check if we're looping without making any progress, e.g. due
* to a bogus analyser or the fact that we ' re ignoring a read0 . The
* call_rate counter only counts calls with no progress made .
*/
if ( ! ( ( req - > flags | res - > flags ) & ( CF_READ_PARTIAL | CF_WRITE_PARTIAL ) ) ) {
rate = update_freq_ctr ( & s - > call_rate , 1 ) ;
if ( rate > = 100000 & & s - > call_rate . prev_ctr ) // make sure to wait at least a full second
stream_dump_and_crash ( & s - > obj_type , read_freq_ctr ( & s - > call_rate ) ) ;
2019-04-25 13:15:20 -04:00
}
2018-09-11 12:27:21 -04:00
2010-01-29 13:26:18 -05:00
/* this data may be no longer valid, clear it */
2015-04-03 17:46:31 -04:00
if ( s - > txn )
memset ( & s - > txn - > auth , 0 , sizeof ( s - > txn - > auth ) ) ;
2010-01-29 13:26:18 -05:00
2014-06-23 09:22:31 -04:00
/* This flag must explicitly be set every time */
2014-11-28 09:07:47 -05:00
req - > flags & = ~ ( CF_READ_NOEXP | CF_WAKE_WRITE ) ;
res - > flags & = ~ ( CF_READ_NOEXP | CF_WAKE_WRITE ) ;
2009-06-21 16:03:51 -04:00
/* Keep a copy of req/rep flags so that we can detect shutdowns */
2014-11-28 09:07:47 -05:00
rqf_last = req - > flags & ~ CF_MASK_ANALYSER ;
rpf_last = res - > flags & ~ CF_MASK_ANALYSER ;
2009-06-21 16:03:51 -04:00
2022-05-17 13:07:51 -04:00
/* we don't want the stream connector functions to recursively wake us up */
2022-05-17 13:44:42 -04:00
scf - > flags | = SC_FL_DONT_WAKE ;
scb - > flags | = SC_FL_DONT_WAKE ;
2009-09-05 14:57:35 -04:00
BUG/MEDIUM: stream: Save unprocessed events for a stream
A stream can be awakened for different reasons. During its processing, it can be
early stopped if no buffer is available. In this situation, the reason why the
stream was awakened is lost, because we rely on the task state, which is reset
after each processing loop.
In many cases, that's not a big deal. But it can be useful to accumulate the
task states if the stream processing is interrupted, especially if some filters
need to be called.
To be clearer, here is a simple example:
1) A stream is awakened with the reason TASK_WOKEN_MSG.
2) Because no buffer is available, the processing is interrupted, the stream
is back to sleep. And the task state is reset.
3) Some buffers become available, so the stream is awakened with the reason
TASK_WOKEN_RES. At this step, the previous reason (TASK_WOKEN_MSG) is lost.
Now, the task states are saved for a stream and reset only when the stream
processing is not interrupted. The corresponding bitfield represents the pending
events for a stream. And we use this one instead of the task state during the
stream processing.
Note that TASK_WOKEN_TIMER and TASK_WOKEN_RES are always removed because these
events are always handled during the stream processing.
[wt: backport to 1.7 and 1.6]
2016-12-08 16:33:52 -05:00
/* update pending events */
2018-05-25 08:04:04 -04:00
s - > pending_events | = ( state & TASK_WOKEN_ANY ) ;
BUG/MEDIUM: stream: Save unprocessed events for a stream
A stream can be awakened for different reasons. During its processing, it can be
early stopped if no buffer is available. In this situation, the reason why the
stream was awakened is lost, because we rely on the task state, which is reset
after each processing loop.
In many cases, that's not a big deal. But it can be useful to accumulate the
task states if the stream processing is interrupted, especially if some filters
need to be called.
To be clearer, here is an simple example:
1) A stream is awakened with the reason TASK_WOKEN_MSG.
2) Because no buffer is available, the processing is interrupted, the stream
is back to sleep. And the task state is reset.
3) Some buffers become available, so the stream is awakened with the reason
TASK_WOKEN_RES. At this step, the previous reason (TASK_WOKEN_MSG) is lost.
Now, the task states are saved for a stream and reset only when the stream
processing is not interrupted. The correspoing bitfield represents the pending
events for a stream. And we use this one instead of the task state during the
stream processing.
Note that TASK_WOKEN_TIMER and TASK_WOKEN_RES are always removed because these
events are always handled during the stream processing.
[wt: backport to 1.7 and 1.6]
2016-12-08 16:33:52 -05:00
2008-11-30 12:47:21 -05:00
/* 1a: Check for low level timeouts if needed. We just set a flag on
2022-05-17 13:07:51 -04:00
* stream connectors when their timeouts have expired .
2008-11-30 12:47:21 -05:00
*/
BUG/MEDIUM: stream: Save unprocessed events for a stream
A stream can be awakened for different reasons. During its processing, it can be
early stopped if no buffer is available. In this situation, the reason why the
stream was awakened is lost, because we rely on the task state, which is reset
after each processing loop.
In many cases, that's not a big deal. But it can be useful to accumulate the
task states if the stream processing is interrupted, especially if some filters
need to be called.
To be clearer, here is an simple example:
1) A stream is awakened with the reason TASK_WOKEN_MSG.
2) Because no buffer is available, the processing is interrupted, the stream
is back to sleep. And the task state is reset.
3) Some buffers become available, so the stream is awakened with the reason
TASK_WOKEN_RES. At this step, the previous reason (TASK_WOKEN_MSG) is lost.
Now, the task states are saved for a stream and reset only when the stream
processing is not interrupted. The correspoing bitfield represents the pending
events for a stream. And we use this one instead of the task state during the
stream processing.
Note that TASK_WOKEN_TIMER and TASK_WOKEN_RES are always removed because these
events are always handled during the stream processing.
[wt: backport to 1.7 and 1.6]
2016-12-08 16:33:52 -05:00
if ( unlikely ( s - > pending_events & TASK_WOKEN_TIMER ) ) {
2022-03-29 13:02:31 -04:00
stream_check_conn_timeout ( s ) ;
2009-06-21 16:03:51 -04:00
2022-05-17 13:07:51 -04:00
/* check channel timeouts, and close the corresponding stream connectors
2009-06-21 16:03:51 -04:00
* for future reads or writes . Note : this will also concern upper layers
* but we do not touch any other flag . We must be careful and correctly
* detect state changes when calling them .
*/
2014-11-28 09:07:47 -05:00
channel_check_timeouts ( req ) ;
2009-06-21 16:03:51 -04:00
2014-11-28 09:07:47 -05:00
if ( unlikely ( ( req - > flags & ( CF_SHUTW | CF_WRITE_TIMEOUT ) ) = = CF_WRITE_TIMEOUT ) ) {
2022-05-17 13:44:42 -04:00
scb - > flags | = SC_FL_NOLINGER ;
2022-05-17 13:40:40 -04:00
cs_shutw ( scb ) ;
2009-12-29 08:49:56 -05:00
}
2014-11-28 09:07:47 -05:00
if ( unlikely ( ( req - > flags & ( CF_SHUTR | CF_READ_TIMEOUT ) ) = = CF_READ_TIMEOUT ) ) {
2022-05-17 13:44:42 -04:00
if ( scf - > flags & SC_FL_NOHALF )
scf - > flags | = SC_FL_NOLINGER ;
2022-05-17 13:40:40 -04:00
cs_shutr ( scf ) ;
2012-05-13 08:48:59 -04:00
}
2009-06-21 16:03:51 -04:00
2014-11-28 09:07:47 -05:00
channel_check_timeouts ( res ) ;
2008-11-30 12:47:21 -05:00
2014-11-28 09:07:47 -05:00
if ( unlikely ( ( res - > flags & ( CF_SHUTW | CF_WRITE_TIMEOUT ) ) = = CF_WRITE_TIMEOUT ) ) {
2022-05-17 13:44:42 -04:00
scf - > flags | = SC_FL_NOLINGER ;
2022-05-17 13:40:40 -04:00
cs_shutw ( scf ) ;
2009-12-29 08:49:56 -05:00
}
2014-11-28 09:07:47 -05:00
if ( unlikely ( ( res - > flags & ( CF_SHUTR | CF_READ_TIMEOUT ) ) = = CF_READ_TIMEOUT ) ) {
2022-05-17 13:44:42 -04:00
if ( scb - > flags & SC_FL_NOHALF )
scb - > flags | = SC_FL_NOLINGER ;
2022-05-17 13:40:40 -04:00
cs_shutr ( scb ) ;
2012-05-13 08:48:59 -04:00
}
2012-11-08 08:49:17 -05:00
2016-11-10 08:58:05 -05:00
if ( HAS_FILTERS ( s ) )
flt_stream_check_timeouts ( s ) ;
2012-11-08 08:49:17 -05:00
/* Once in a while we're woken up because the task expires. But
* this does not necessarily mean that a timeout has been reached .
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
* So let ' s not run a whole stream processing if only an expiration
2012-11-08 08:49:17 -05:00
* timeout needs to be refreshed .
*/
2014-11-28 09:07:47 -05:00
if ( ! ( ( req - > flags | res - > flags ) &
2012-11-08 08:49:17 -05:00
( CF_SHUTR | CF_READ_ACTIVITY | CF_READ_TIMEOUT | CF_SHUTW |
2018-10-24 11:17:56 -04:00
CF_WRITE_ACTIVITY | CF_WRITE_TIMEOUT | CF_ANA_TIMEOUT ) ) & &
2022-03-29 13:02:31 -04:00
! ( s - > flags & SF_CONN_EXP ) & &
2022-05-17 13:40:40 -04:00
! ( ( sc_ep_get ( scf ) | scb - > flags ) & SE_FL_ERROR ) & &
BUG/MEDIUM: stream: Save unprocessed events for a stream
A stream can be awakened for different reasons. During its processing, it can be
early stopped if no buffer is available. In this situation, the reason why the
stream was awakened is lost, because we rely on the task state, which is reset
after each processing loop.
In many cases, that's not a big deal. But it can be useful to accumulate the
task states if the stream processing is interrupted, especially if some filters
need to be called.
To be clearer, here is an simple example:
1) A stream is awakened with the reason TASK_WOKEN_MSG.
2) Because no buffer is available, the processing is interrupted, the stream
is back to sleep. And the task state is reset.
3) Some buffers become available, so the stream is awakened with the reason
TASK_WOKEN_RES. At this step, the previous reason (TASK_WOKEN_MSG) is lost.
Now, the task states are saved for a stream and reset only when the stream
processing is not interrupted. The correspoing bitfield represents the pending
events for a stream. And we use this one instead of the task state during the
stream processing.
Note that TASK_WOKEN_TIMER and TASK_WOKEN_RES are always removed because these
events are always handled during the stream processing.
[wt: backport to 1.7 and 1.6]
2016-12-08 16:33:52 -05:00
( ( s - > pending_events & TASK_WOKEN_ANY ) = = TASK_WOKEN_TIMER ) ) {
2022-05-17 13:44:42 -04:00
scf - > flags & = ~ SC_FL_DONT_WAKE ;
scb - > flags & = ~ SC_FL_DONT_WAKE ;
2012-11-08 08:49:17 -05:00
goto update_exp_and_leave ;
2016-05-04 04:18:37 -04:00
}
2009-06-21 16:03:51 -04:00
}
2008-11-30 12:47:21 -05:00
2022-05-17 13:07:51 -04:00
resync_stconns :
MAJOR: session: only allocate buffers when needed
A session doesn't need buffers all the time, especially when they're
empty. With this patch, we don't allocate buffers anymore when the
session is initialized, we only allocate them in two cases :
- during process_session()
- during I/O operations
During process_session(), we try hard to allocate both buffers at once
so that we know for sure that a started operation can complete. Indeed,
a previous version of this patch used to allocate one buffer at a time,
but it can result in a deadlock when all buffers are allocated for
requests for example, and there's no buffer left to emit error responses.
Here, if any of the buffers cannot be allocated, the whole operation is
cancelled and the session is added at the tail of the buffer wait queue.
At the end of process_session(), a call to session_release_buffers() is
done so that we can offer unused buffers to other sessions waiting for
them.
For I/O operations, we only need to allocate a buffer on the Rx path.
For this, we only allocate a single buffer but ensure that at least two
are available to avoid the deadlock situation. In case buffers are not
available, SI_FL_WAIT_ROOM is set on the stream interface and the session
is queued. Unused buffers resulting either from a successful send() or
from an unused read buffer are offered to pending sessions during the
->wake() callback.
2014-11-25 13:46:36 -05:00
/* below we may emit error messages so we have to ensure that we have
BUG/MEDIUM: stream: Abort processing if response buffer allocation fails
In process_stream(), we force the response buffer allocation before any
processing to be able to return an error message. It is important because,
when an error is triggered, the stream is immediately closed. Thus we cannot
wait for the response buffer allocation.
When the allocation fails, the stream analysis is stopped and the expiration
date of the stream's task is updated before exiting process_stream(). But if
the stream was woken up because of a connection or an analysis timeout, the
expiration date remains blocked in the past. This means the stream is woken
up in loop as long as the response buffer is not properly allocated.
Alone, this behavior is already a bug. But because the mechanism to handle
buffer allocation failures is totally broken since a while, this bug becomes
more problematic. Because, most of time, the watchdog will kill HAProxy in
this case because it will detect a spinning loop.
To fix it, at least temporarily, an allocation failure at this stage is now
reported as an error and the processing is aborted. It's not satisfying but
it is better than nothing. If the buffers allocation mechanism is
refactored, this part will be reviewed.
This patch must be backported, probably as far as 2.0. It may be perceived
as a regression, but the actual behavior is probably even worse. And
because it was not reported, it is probably not a common situation.
2022-02-01 12:53:53 -05:00
* our buffers properly allocated . If the allocation failed , an error is
* triggered .
*
* NOTE : An error is returned because the mechanism to queue entities
* waiting for a buffer is totally broken for now . However , this
* part must be refactored . When it will be handled , this part
* must be be reviewed too .
MAJOR: session: only allocate buffers when needed
A session doesn't need buffers all the time, especially when they're
empty. With this patch, we don't allocate buffers anymore when the
session is initialized, we only allocate them in two cases :
- during process_session()
- during I/O operations
During process_session(), we try hard to allocate both buffers at once
so that we know for sure that a started operation can complete. Indeed,
a previous version of this patch used to allocate one buffer at a time,
but it can result in a deadlock when all buffers are allocated for
requests for example, and there's no buffer left to emit error responses.
Here, if any of the buffers cannot be allocated, the whole operation is
cancelled and the session is added at the tail of the buffer wait queue.
At the end of process_session(), a call to session_release_buffers() is
done so that we can offer unused buffers to other sessions waiting for
them.
For I/O operations, we only need to allocate a buffer on the Rx path.
For this, we only allocate a single buffer but ensure that at least two
are available to avoid the deadlock situation. In case buffers are not
available, SI_FL_WAIT_ROOM is set on the stream interface and the session
is queued. Unused buffers resulting either from a successful send() or
from an unused read buffer are offered to pending sessions during the
->wake() callback.
2014-11-25 13:46:36 -05:00
*/
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved over there. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
if ( ! stream_alloc_work_buffer ( s ) ) {
2022-05-17 13:40:40 -04:00
sc_ep_set ( s - > scf , SE_FL_ERROR ) ;
2022-03-30 13:39:30 -04:00
s - > conn_err_type = STRM_ET_CONN_RES ;
BUG/MEDIUM: stream: Abort processing if response buffer allocation fails
In process_stream(), we force the response buffer allocation before any
processing to be able to return an error message. It is important because,
when an error is triggered, the stream is immediately closed. Thus we cannot
wait for the response buffer allocation.
When the allocation fails, the stream analysis is stopped and the expiration
date of the stream's task is updated before exiting process_stream(). But if
the stream was woken up because of a connection or an analysis timeout, the
expiration date remains blocked in the past. This means the stream is woken
up in loop as long as the response buffer is not properly allocated.
Alone, this behavior is already a bug. But because the mechanism to handle
buffer allocation failures is totally broken since a while, this bug becomes
more problematic. Because, most of the time, the watchdog will kill HAProxy in
this case because it will detect a spinning loop.
To fix it, at least temporarily, an allocation failure at this stage is now
reported as an error and the processing is aborted. It's not satisfying but
it is better than nothing. If the buffers allocation mechanism is
refactored, this part will be reviewed.
This patch must be backported, probably as far as 2.0. It may be perceived
as a regression, but the actual behavior is probably even worse. And
because it was not reported, it is probably not a common situation.
2022-02-01 12:53:53 -05:00
2022-05-17 13:40:40 -04:00
sc_ep_set ( s - > scb , SE_FL_ERROR ) ;
2022-03-30 13:39:30 -04:00
s - > conn_err_type = STRM_ET_CONN_RES ;
BUG/MEDIUM: stream: Abort processing if response buffer allocation fails
In process_stream(), we force the response buffer allocation before any
processing to be able to return an error message. It is important because,
when an error is triggered, the stream is immediately closed. Thus we cannot
wait for the response buffer allocation.
When the allocation fails, the stream analysis is stopped and the expiration
date of the stream's task is updated before exiting process_stream(). But if
the stream was woken up because of a connection or an analysis timeout, the
expiration date remains blocked in the past. This means the stream is woken
up in loop as long as the response buffer is not properly allocated.
Alone, this behavior is already a bug. But because the mechanism to handle
buffer allocation failures is totally broken since a while, this bug becomes
more problematic. Because, most of the time, the watchdog will kill HAProxy in
this case because it will detect a spinning loop.
To fix it, at least temporarily, an allocation failure at this stage is now
reported as an error and the processing is aborted. It's not satisfying but
it is better than nothing. If the buffers allocation mechanism is
refactored, this part will be reviewed.
This patch must be backported, probably as far as 2.0. It may be perceived
as a regression, but the actual behavior is probably even worse. And
because it was not reported, it is probably not a common situation.
2022-02-01 12:53:53 -05:00
if ( ! ( s - > flags & SF_ERR_MASK ) )
s - > flags | = SF_ERR_RESOURCE ;
sess_set_term_flags ( s ) ;
MAJOR: session: only allocate buffers when needed
A session doesn't need buffers all the time, especially when they're
empty. With this patch, we don't allocate buffers anymore when the
session is initialized, we only allocate them in two cases :
- during process_session()
- during I/O operations
During process_session(), we try hard to allocate both buffers at once
so that we know for sure that a started operation can complete. Indeed,
a previous version of this patch used to allocate one buffer at a time,
but it can result in a deadlock when all buffers are allocated for
requests for example, and there's no buffer left to emit error responses.
Here, if any of the buffers cannot be allocated, the whole operation is
cancelled and the session is added at the tail of the buffer wait queue.
At the end of process_session(), a call to session_release_buffers() is
done so that we can offer unused buffers to other sessions waiting for
them.
For I/O operations, we only need to allocate a buffer on the Rx path.
For this, we only allocate a single buffer but ensure that at least two
are available to avoid the deadlock situation. In case buffers are not
available, SI_FL_WAIT_ROOM is set on the stream interface and the session
is queued. Unused buffers resulting either from a successful send() or
from an unused read buffer are offered to pending sessions during the
->wake() callback.
2014-11-25 13:46:36 -05:00
}
2022-05-17 13:07:51 -04:00
/* 1b: check for low-level errors reported at the stream connector.
2008-11-30 12:47:21 -05:00
* First we check if it ' s a retryable error ( in which case we don ' t
* want to tell the buffer ) . Otherwise we report the error one level
* upper by setting flags into the buffers . Note that the side towards
* the client cannot have connect ( hence retryable ) errors . Also , the
* connection setup code must be able to deal with any type of abort .
*/
2012-11-11 18:42:33 -05:00
srv = objt_server ( s - > target ) ;
2022-05-17 13:40:40 -04:00
if ( unlikely ( sc_ep_test ( scf , SE_FL_ERROR ) ) ) {
2022-05-17 13:47:17 -04:00
if ( cs_state_in ( scf - > state , SC_SB_EST | SC_SB_DIS ) ) {
2022-05-17 13:40:40 -04:00
cs_shutr ( scf ) ;
cs_shutw ( scf ) ;
cs_report_error ( scf ) ;
2014-11-28 09:07:47 -05:00
if ( ! ( req - > analysers ) & & ! ( res - > analysers ) ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . cli_aborts ) ;
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . cli_aborts ) ;
2020-01-24 05:45:05 -05:00
if ( sess - > listener & & sess - > listener - > counters )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > listener - > counters - > cli_aborts ) ;
2011-03-10 10:55:02 -05:00
if ( srv )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & srv - > counters . cli_aborts ) ;
2015-04-02 19:14:29 -04:00
if ( ! ( s - > flags & SF_ERR_MASK ) )
s - > flags | = SF_ERR_CLICL ;
if ( ! ( s - > flags & SF_FINST_MASK ) )
s - > flags | = SF_FINST_D ;
2008-12-14 05:44:04 -05:00
}
2008-11-30 12:47:21 -05:00
}
}
2022-05-17 13:40:40 -04:00
if ( unlikely ( sc_ep_test ( scb , SE_FL_ERROR ) ) ) {
2022-05-17 13:47:17 -04:00
if ( cs_state_in ( scb - > state , SC_SB_EST | SC_SB_DIS ) ) {
2022-05-17 13:40:40 -04:00
cs_shutr ( scb ) ;
cs_shutw ( scb ) ;
cs_report_error ( scb ) ;
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . failed_resp ) ;
2011-03-10 10:55:02 -05:00
if ( srv )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & srv - > counters . failed_resp ) ;
2014-11-28 09:07:47 -05:00
if ( ! ( req - > analysers ) & & ! ( res - > analysers ) ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . srv_aborts ) ;
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . srv_aborts ) ;
2020-01-24 05:45:05 -05:00
if ( sess - > listener & & sess - > listener - > counters )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > listener - > counters - > srv_aborts ) ;
2011-03-10 10:55:02 -05:00
if ( srv )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & srv - > counters . srv_aborts ) ;
2015-04-02 19:14:29 -04:00
if ( ! ( s - > flags & SF_ERR_MASK ) )
s - > flags | = SF_ERR_SRVCL ;
if ( ! ( s - > flags & SF_FINST_MASK ) )
s - > flags | = SF_FINST_D ;
2008-12-14 05:44:04 -05:00
}
2008-11-30 12:47:21 -05:00
}
/* note: maybe we should process connection errors here ? */
}
2022-05-17 13:47:17 -04:00
if ( cs_state_in ( scb - > state , SC_SB_CON | SC_SB_RDY ) ) {
2008-11-30 12:47:21 -05:00
/* we were trying to establish a connection on the server side,
* maybe it succeeded , maybe it failed , maybe we timed out , . . .
*/
2022-05-17 13:47:17 -04:00
if ( scb - > state = = SC_ST_RDY )
2020-01-09 12:43:15 -05:00
back_handle_st_rdy ( s ) ;
2022-05-17 13:47:17 -04:00
else if ( s - > scb - > state = = SC_ST_CON )
2020-01-09 12:43:15 -05:00
back_handle_st_con ( s ) ;
MEDIUM: stream: re-arrange the connection setup status reporting
Till now when a wakeup happens after a connection is attempted, we go
through sess_update_st_con_tcp() to deal with the various possible events,
then to sess_update_st_cer() to deal with a possible error detected by the
former, or to sess_establish() to complete the connection validation. There
are multiple issues in the way this is handled, which have accumulated over
time. One of them is that any spurious wakeup during SI_ST_CON would validate
the READ_ATTACHED flag and wake the analysers up. Another one is that nobody
feels responsible for clearing SI_FL_EXP if it happened at the same time as
a success (and it is present in all reports of loops to date). And another
issue is that aborts cannot happen after a clean connection setup with no
data transfer (since CF_WRITE_NULL is part of CF_WRITE_ACTIVITY). Last, the
flags cleanup work was hackish, added here and there to please the next
function (typically what had to be done in commit 7a3367cca to work around
the url_param+reuse issue by moving READ_ATTACHED to CON).
This patch performs a significant lift up of this setup code. First, it
makes sure that the state handlers are the ones responsible for the cleanup
of the stuff they rely on. Typically sess_establish() will clean up the
SI_FL_EXP flag because if we decided to validate the connection it means
that we want to ignore this late timeout. Second, it splits the CON and
RDY state handlers because the former only has to deal with failures,
timeouts and non-events, while the latter has to deal with partial or
total successes. Third, everything related to connection success was
moved to sess_establish() since it's the only safe place to do so, and
this function is also called at a few places to deal with synchronous
connections, which are not seen by intermediary state handlers.
The code was made a bit more robust, for example by making sure we
always set SI_FL_NOLINGER when aborting a connection so that we don't
have any risk to leave a connection in SHUTW state in case it was
validated late. The useless return codes of some of these functions
were dropped so that callers only rely on the stream-int's state now
(which was already partially the case anyway).
The code is now a bit cleaner, could be further improved (and functions
renamed) but given the sensitivity of this part, better limit changes to
strictly necessary. It passes all reg tests.
2019-06-05 12:02:04 -04:00
2022-05-17 13:47:17 -04:00
if ( scb - > state = = SC_ST_CER )
2020-01-09 12:43:15 -05:00
back_handle_st_cer ( s ) ;
2022-05-17 13:47:17 -04:00
else if ( scb - > state = = SC_ST_EST )
2020-01-09 12:43:15 -05:00
back_establish ( s ) ;
2008-11-30 12:47:21 -05:00
2022-05-17 13:47:17 -04:00
/* state is now one of SC_ST_CON (still in progress), SC_ST_EST
* ( established ) , SC_ST_DIS ( abort ) , SC_ST_CLO ( last error ) ,
* SC_ST_ASS / SC_ST_TAR / SC_ST_REQ for retryable errors .
2008-11-30 12:47:21 -05:00
*/
}
2022-05-17 13:40:40 -04:00
rq_prod_last = scf - > state ;
rq_cons_last = scb - > state ;
rp_cons_last = scf - > state ;
rp_prod_last = scb - > state ;
2010-07-27 11:15:12 -04:00
2008-11-30 12:47:21 -05:00
/* Check for connection closure */
2019-11-05 10:18:10 -05:00
DBG_TRACE_POINT ( STRM_EV_STRM_PROC , s ) ;
2008-11-30 12:47:21 -05:00
/* nothing special to be done on client side */
2022-05-17 13:47:17 -04:00
if ( unlikely ( scf - > state = = SC_ST_DIS ) ) {
scf - > state = SC_ST_CLO ;
2008-11-30 12:47:21 -05:00
2022-03-30 11:13:02 -04:00
/* This is needed only when debugging is enabled, to indicate
* client - side close .
*/
if ( unlikely ( ( global . mode & MODE_DEBUG ) & &
( ! ( global . mode & MODE_QUIET ) | |
( global . mode & MODE_VERBOSE ) ) ) ) {
chunk_printf ( & trash , " %08x:%s.clicls[%04x:%04x] \n " ,
s - > uniq_id , s - > be - > id ,
2022-05-18 10:23:22 -04:00
( unsigned short ) conn_fd ( sc_conn ( scf ) ) ,
( unsigned short ) conn_fd ( sc_conn ( scb ) ) ) ;
2022-03-30 11:13:02 -04:00
DISGUISE ( write ( 1 , trash . area , trash . data ) ) ;
}
}
2008-11-30 12:47:21 -05:00
/* When a server-side connection is released, we have to count it and
* check for pending connections on this server .
*/
2022-05-17 13:47:17 -04:00
if ( unlikely ( scb - > state = = SC_ST_DIS ) ) {
scb - > state = SC_ST_CLO ;
2012-11-11 18:42:33 -05:00
srv = objt_server ( s - > target ) ;
2011-03-10 10:55:02 -05:00
if ( srv ) {
2015-04-02 19:14:29 -04:00
if ( s - > flags & SF_CURR_SESS ) {
s - > flags & = ~ SF_CURR_SESS ;
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_DEC ( & srv - > cur_sess ) ;
2008-11-30 12:47:21 -05:00
}
sess_change_server ( s , NULL ) ;
2011-03-10 10:55:02 -05:00
if ( may_dequeue_tasks ( srv , s - > be ) )
2021-06-22 12:47:51 -04:00
process_srv_queue ( srv ) ;
2008-11-30 12:47:21 -05:00
}
2022-03-30 11:13:02 -04:00
/* This is needed only when debugging is enabled, to indicate
* server - side close .
*/
if ( unlikely ( ( global . mode & MODE_DEBUG ) & &
( ! ( global . mode & MODE_QUIET ) | |
( global . mode & MODE_VERBOSE ) ) ) ) {
2022-05-17 13:47:17 -04:00
if ( s - > prev_conn_state = = SC_ST_EST ) {
2022-03-30 11:13:02 -04:00
chunk_printf ( & trash , " %08x:%s.srvcls[%04x:%04x] \n " ,
s - > uniq_id , s - > be - > id ,
2022-05-18 10:23:22 -04:00
( unsigned short ) conn_fd ( sc_conn ( scf ) ) ,
( unsigned short ) conn_fd ( sc_conn ( scb ) ) ) ;
2022-03-30 11:13:02 -04:00
DISGUISE ( write ( 1 , trash . area , trash . data ) ) ;
}
}
2008-11-30 12:47:21 -05:00
}
/*
* Note : of the transient states ( REQ , CER , DIS ) , only REQ may remain
* at this point .
*/
2009-03-08 14:20:25 -04:00
resync_request :
2008-11-30 12:47:21 -05:00
/* Analyse request */
2014-11-28 09:07:47 -05:00
if ( ( ( req - > flags & ~ rqf_last ) & CF_MASK_ANALYSER ) | |
( ( req - > flags ^ rqf_last ) & CF_MASK_STATIC ) | |
BUG/MAJOR: stream: ensure analysers are always called upon close
A recent issue affecting HTTP/2 + redirect + cache has uncovered an old
problem affecting all existing versions regarding the way events are
reported to analysers.
It happens that when an event is reported, analysers see it and may
decide to temporarily pause processing and prevent other analysers from
processing the same event. Then the event may be cleared and upon the
next call to the analysers, some of them will never see it.
This is exactly what happens with CF_READ_NULL if it is received before
the request is processed, like during redirects : the first time, some
analysers see it, pause, then the event may be converted to a SHUTW and
cleared, and on next call, there's nothing to process. In practice it's
hard to get the CF_READ_NULL flag during the request because requests
have CF_READ_DONTWAIT, preventing the read0 from happening. But on
HTTP/2 it's presented along with any incoming request. Also on a TCP
frontend the flag is not set and it's possible to read the NULL before
the request is parsed.
This causes a problem when filters are present because flt_end_analyse
needs to be called to release allocated resources and remove the
CF_FLT_ANALYZE flag. And the loss of this event prevents the analyser
from being called and from removing itself, preventing the connection
from ever ending.
This problem just shows that the event processing needs a serious revamp
after 1.8. In the mean time we can deal with the really problematic case
which is that we *want* to call analysers if CF_SHUTW is set on any side
as it's the last opportunity to terminate a processing. It may
occasionally result in some analysers being called for nothing in half-
closed situations but it will take care of the issue.
An example of problematic configuration triggering the bug in 1.7 is :
frontend tcp
bind :4445
default_backend http
backend http
redirect location /
compression algo identity
Then submitting requests which immediately close will have for effect
to accumulate streams which will never be freed :
$ printf "GET / HTTP/1.1\r\n\r\n" >/dev/tcp/0/4445
This fix must be backported to 1.7 as well as any version where commit
c0c672a ("BUG/MINOR: http: Fix conditions to clean up a txn and to
handle the next request") was backported. This commit didn't cause the
bug but made it much more likely to happen.
2017-11-20 09:37:13 -05:00
( req - > analysers & & ( req - > flags & CF_SHUTW ) ) | |
2022-05-17 13:40:40 -04:00
scf - > state ! = rq_prod_last | |
scb - > state ! = rq_cons_last | |
BUG/MEDIUM: stream: Save unprocessed events for a stream
A stream can be awakened for different reasons. During its processing, it can be
early stopped if no buffer is available. In this situation, the reason why the
stream was awakened is lost, because we rely on the task state, which is reset
after each processing loop.
In many cases, that's not a big deal. But it can be useful to accumulate the
task states if the stream processing is interrupted, especially if some filters
need to be called.
To be clearer, here is a simple example:
1) A stream is awakened with the reason TASK_WOKEN_MSG.
2) Because no buffer is available, the processing is interrupted, the stream
is back to sleep. And the task state is reset.
3) Some buffers become available, so the stream is awakened with the reason
TASK_WOKEN_RES. At this step, the previous reason (TASK_WOKEN_MSG) is lost.
Now, the task states are saved for a stream and reset only when the stream
processing is not interrupted. The corresponding bitfield represents the pending
events for a stream. And we use this one instead of the task state during the
stream processing.
Note that TASK_WOKEN_TIMER and TASK_WOKEN_RES are always removed because these
events are always handled during the stream processing.
[wt: backport to 1.7 and 1.6]
2016-12-08 16:33:52 -05:00
s - > pending_events & TASK_WOKEN_MSG ) {
2014-11-28 09:07:47 -05:00
unsigned int flags = req - > flags ;
2008-11-30 12:47:21 -05:00
2022-05-17 13:47:17 -04:00
if ( cs_state_in ( scf - > state , SC_SB_EST | SC_SB_DIS | SC_SB_CLO ) ) {
2010-01-07 18:32:27 -05:00
int max_loops = global . tune . maxpollevents ;
2010-01-06 17:53:24 -05:00
unsigned int ana_list ;
unsigned int ana_back ;
2009-06-28 13:37:53 -04:00
2010-01-06 18:20:41 -05:00
/* it's up to the analysers to stop new connections,
* disable reading or closing . Note : if an analyser
* disables any of these bits , it is responsible for
* enabling them again when it disables itself , so
* that other analysers are called in similar conditions .
*/
2014-11-28 09:07:47 -05:00
channel_auto_read ( req ) ;
channel_auto_connect ( req ) ;
channel_auto_close ( req ) ;
2008-11-30 17:15:34 -05:00
/* We will call all analysers for which a bit is set in
2014-11-28 09:07:47 -05:00
* req - > analysers , following the bit order from LSB
2008-11-30 17:15:34 -05:00
* to MSB . The analysers must remove themselves from
2009-06-28 13:37:53 -04:00
* the list when not needed . Any analyser may return 0
* to break out of the loop , either because of missing
* data to take a decision , or because it decides to
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved over there. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
* kill the stream . We loop at least once through each
2009-06-28 13:37:53 -04:00
* analyser , and we may loop again if other analysers
* are added in the middle .
2010-01-06 17:53:24 -05:00
*
* We build a list of analysers to run . We evaluate all
* of these analysers in the order of the lower bit to
* the higher bit . This ordering is very important .
* An analyser will often add / remove other analysers ,
* including itself . Any changes to itself have no effect
* on the loop . If it removes any other analysers , we
* want those analysers not to be called anymore during
* this loop . If it adds an analyser that is located
* after itself , we want it to be scheduled for being
* processed during the loop . If it adds an analyser
* which is located before it , we want it to switch to
* it immediately , even if it has already been called
* once but removed since .
*
* In order to achieve this , we compare the analyser
* list after the call with a copy of it before the
* call . The work list is fed with analyser bits that
* appeared during the call . Then we compare previous
* work list with the new one , and check the bits that
* appeared . If the lowest of these bits is lower than
* the current bit , it means we have enabled a previous
* analyser and must immediately loop again .
2008-11-30 17:15:34 -05:00
*/
2009-06-28 13:37:53 -04:00
2014-11-28 09:07:47 -05:00
ana_list = ana_back = req - > analysers ;
2010-01-07 18:32:27 -05:00
while ( ana_list & & max_loops - - ) {
2010-01-06 17:53:24 -05:00
/* Warning! ensure that analysers are always placed in ascending order! */
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to define analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
Over time, with the introduction of new analyzers, this order was broken. The
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been split into 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
ANALYZE ( s , req , flt_start_analyze , ana_list , ana_back , AN_REQ_FLT_START_FE ) ;
2016-05-11 11:06:28 -04:00
FLT_ANALYZE ( s , req , tcp_inspect_request , ana_list , ana_back , AN_REQ_INSPECT_FE ) ;
2019-07-16 08:54:53 -04:00
FLT_ANALYZE ( s , req , http_wait_for_request , ana_list , ana_back , AN_REQ_WAIT_HTTP ) ;
FLT_ANALYZE ( s , req , http_wait_for_request_body , ana_list , ana_back , AN_REQ_HTTP_BODY ) ;
FLT_ANALYZE ( s , req , http_process_req_common , ana_list , ana_back , AN_REQ_HTTP_PROCESS_FE , sess - > fe ) ;
2016-05-11 11:06:28 -04:00
FLT_ANALYZE ( s , req , process_switching_rules , ana_list , ana_back , AN_REQ_SWITCHING_RULES ) ;
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to define analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
Over time, with the introduction of new analyzers, this order was broken. The
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been split into 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
ANALYZE ( s , req , flt_start_analyze , ana_list , ana_back , AN_REQ_FLT_START_BE ) ;
2016-05-11 11:06:28 -04:00
FLT_ANALYZE ( s , req , tcp_inspect_request , ana_list , ana_back , AN_REQ_INSPECT_BE ) ;
2019-07-16 08:54:53 -04:00
FLT_ANALYZE ( s , req , http_process_req_common , ana_list , ana_back , AN_REQ_HTTP_PROCESS_BE , s - > be ) ;
FLT_ANALYZE ( s , req , http_process_tarpit , ana_list , ana_back , AN_REQ_HTTP_TARPIT ) ;
2016-05-11 11:06:28 -04:00
FLT_ANALYZE ( s , req , process_server_rules , ana_list , ana_back , AN_REQ_SRV_RULES ) ;
2019-07-16 08:54:53 -04:00
FLT_ANALYZE ( s , req , http_process_request , ana_list , ana_back , AN_REQ_HTTP_INNER ) ;
2016-05-11 11:06:28 -04:00
FLT_ANALYZE ( s , req , tcp_persist_rdp_cookie , ana_list , ana_back , AN_REQ_PRST_RDP_COOKIE ) ;
FLT_ANALYZE ( s , req , process_sticking_rules , ana_list , ana_back , AN_REQ_STICKING_RULES ) ;
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to define analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
Over time, with the introduction of new analyzers, this order was broken. The
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been split into 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
ANALYZE ( s , req , flt_analyze_http_headers , ana_list , ana_back , AN_REQ_FLT_HTTP_HDRS ) ;
2019-07-16 08:54:53 -04:00
ANALYZE ( s , req , http_request_forward_body , ana_list , ana_back , AN_REQ_HTTP_XFER_BODY ) ;
2018-10-26 08:47:40 -04:00
ANALYZE ( s , req , pcli_wait_for_request , ana_list , ana_back , AN_REQ_WAIT_CLI ) ;
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to defined analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
With the time, and introduction of new analyzers, this order was broken up. the
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been splitted in 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
ANALYZE ( s , req , flt_xfer_data , ana_list , ana_back , AN_REQ_FLT_XFER_DATA ) ;
ANALYZE ( s , req , flt_end_analyze , ana_list , ana_back , AN_REQ_FLT_END ) ;
2010-01-07 18:32:27 -05:00
break ;
}
2008-11-30 12:47:21 -05:00
}
2009-03-15 17:34:05 -04:00
2022-05-17 13:40:40 -04:00
rq_prod_last = scf - > state ;
rq_cons_last = scb - > state ;
2014-11-28 09:07:47 -05:00
req - > flags & = ~ CF_WAKE_ONCE ;
rqf_last = req - > flags ;
2010-07-27 11:15:12 -04:00
2019-06-06 08:45:26 -04:00
if ( ( req - > flags ^ flags ) & ( CF_SHUTR | CF_SHUTW ) )
2009-06-21 16:43:05 -04:00
goto resync_request ;
}
2010-01-06 18:09:04 -05:00
/* we'll monitor the request analysers while parsing the response,
* because some response analysers may indirectly enable new request
* analysers ( eg : HTTP keep - alive ) .
*/
2014-11-28 09:07:47 -05:00
req_ana_back = req - > analysers ;
2010-01-06 18:09:04 -05:00
2009-06-21 16:43:05 -04:00
resync_response :
/* Analyse response */
2014-11-28 09:07:47 -05:00
if ( ( ( res - > flags & ~ rpf_last ) & CF_MASK_ANALYSER ) | |
( res - > flags ^ rpf_last ) & CF_MASK_STATIC | |
BUG/MAJOR: stream: ensure analysers are always called upon close
A recent issue affecting HTTP/2 + redirect + cache has uncovered an old
problem affecting all existing versions regarding the way events are
reported to analysers.
It happens that when an event is reported, analysers see it and may
decide to temporarily pause processing and prevent other analysers from
processing the same event. Then the event may be cleared and upon the
next call to the analysers, some of them will never see it.
This is exactly what happens with CF_READ_NULL if it is received before
the request is processed, like during redirects : the first time, some
analysers see it, pause, then the event may be converted to a SHUTW and
cleared, and on next call, there's nothing to process. In practice it's
hard to get the CF_READ_NULL flag during the request because requests
have CF_READ_DONTWAIT, preventing the read0 from happening. But on
HTTP/2 it's presented along with any incoming request. Also on a TCP
frontend the flag is not set and it's possible to read the NULL before
the request is parsed.
This causes a problem when filters are present because flt_end_analyse
needs to be called to release allocated resources and remove the
CF_FLT_ANALYZE flag. And the loss of this event prevents the analyser
from being called and from removing itself, preventing the connection
from ever ending.
This problem just shows that the event processing needs a serious revamp
after 1.8. In the mean time we can deal with the really problematic case
which is that we *want* to call analysers if CF_SHUTW is set on any side
as it's the last opportunity to terminate a processing. It may
occasionally result in some analysers being called for nothing in half-
closed situations but it will take care of the issue.
An example of problematic configuration triggering the bug in 1.7 is :
frontend tcp
bind :4445
default_backend http
backend http
redirect location /
compression algo identity
Then submitting requests which immediately close will have for effect
to accumulate streams which will never be freed :
$ printf "GET / HTTP/1.1\r\n\r\n" >/dev/tcp/0/4445
This fix must be backported to 1.7 as well as any version where commit
c0c672a ("BUG/MINOR: http: Fix conditions to clean up a txn and to
handle the next request") was backported. This commit didn't cause the
bug but made it much more likely to happen.
2017-11-20 09:37:13 -05:00
( res - > analysers & & ( res - > flags & CF_SHUTW ) ) | |
2022-05-17 13:40:40 -04:00
scf - > state ! = rp_cons_last | |
scb - > state ! = rp_prod_last | |
BUG/MEDIUM: stream: Save unprocessed events for a stream
A stream can be awakened for different reasons. During its processing, it can be
early stopped if no buffer is available. In this situation, the reason why the
stream was awakened is lost, because we rely on the task state, which is reset
after each processing loop.
In many cases, that's not a big deal. But it can be useful to accumulate the
task states if the stream processing is interrupted, especially if some filters
need to be called.
To be clearer, here is a simple example:
1) A stream is awakened with the reason TASK_WOKEN_MSG.
2) Because no buffer is available, the processing is interrupted, the stream
is back to sleep. And the task state is reset.
3) Some buffers become available, so the stream is awakened with the reason
TASK_WOKEN_RES. At this step, the previous reason (TASK_WOKEN_MSG) is lost.
Now, the task states are saved for a stream and reset only when the stream
processing is not interrupted. The corresponding bitfield represents the pending
events for a stream. And we use this one instead of the task state during the
stream processing.
Note that TASK_WOKEN_TIMER and TASK_WOKEN_RES are always removed because these
events are always handled during the stream processing.
[wt: backport to 1.7 and 1.6]
2016-12-08 16:33:52 -05:00
s - > pending_events & TASK_WOKEN_MSG ) {
2014-11-28 09:07:47 -05:00
unsigned int flags = res - > flags ;
2009-06-21 16:43:05 -04:00
2022-05-17 13:47:17 -04:00
if ( cs_state_in ( scb - > state , SC_SB_EST | SC_SB_DIS | SC_SB_CLO ) ) {
2010-01-07 18:32:27 -05:00
int max_loops = global . tune . maxpollevents ;
2010-01-06 17:53:24 -05:00
unsigned int ana_list ;
unsigned int ana_back ;
2009-10-18 16:53:08 -04:00
2010-01-06 18:20:41 -05:00
/* it's up to the analysers to stop disable reading or
* closing . Note : if an analyser disables any of these
* bits , it is responsible for enabling them again when
* it disables itself , so that other analysers are called
* in similar conditions .
*/
2014-11-28 09:07:47 -05:00
channel_auto_read ( res ) ;
channel_auto_close ( res ) ;
2009-10-18 16:53:08 -04:00
/* We will call all analysers for which a bit is set in
2014-11-28 09:07:47 -05:00
* res - > analysers , following the bit order from LSB
2009-10-18 16:53:08 -04:00
* to MSB . The analysers must remove themselves from
* the list when not needed . Any analyser may return 0
* to break out of the loop , either because of missing
* data to take a decision , or because it decides to
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved over there. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
* kill the stream . We loop at least once through each
2009-10-18 16:53:08 -04:00
* analyser , and we may loop again if other analysers
* are added in the middle .
*/
2014-11-28 09:07:47 -05:00
ana_list = ana_back = res - > analysers ;
2010-01-07 18:32:27 -05:00
while ( ana_list & & max_loops - - ) {
2010-01-06 17:53:24 -05:00
/* Warning! ensure that analysers are always placed in ascending order! */
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to define analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
Over time, and with the introduction of new analyzers, this order was broken up. The
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been split into 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
ANALYZE ( s , res , flt_start_analyze , ana_list , ana_back , AN_RES_FLT_START_FE ) ;
ANALYZE ( s , res , flt_start_analyze , ana_list , ana_back , AN_RES_FLT_START_BE ) ;
2016-05-11 11:06:28 -04:00
FLT_ANALYZE ( s , res , tcp_inspect_response , ana_list , ana_back , AN_RES_INSPECT ) ;
2019-07-16 08:54:53 -04:00
FLT_ANALYZE ( s , res , http_wait_for_response , ana_list , ana_back , AN_RES_WAIT_HTTP ) ;
2016-05-11 11:06:28 -04:00
FLT_ANALYZE ( s , res , process_store_rules , ana_list , ana_back , AN_RES_STORE_RULES ) ;
2019-07-16 08:54:53 -04:00
FLT_ANALYZE ( s , res , http_process_res_common , ana_list , ana_back , AN_RES_HTTP_PROCESS_BE , s - > be ) ;
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to define analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
Over time, and with the introduction of new analyzers, this order was broken up. The
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been split into 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
ANALYZE ( s , res , flt_analyze_http_headers , ana_list , ana_back , AN_RES_FLT_HTTP_HDRS ) ;
2019-07-16 08:54:53 -04:00
ANALYZE ( s , res , http_response_forward_body , ana_list , ana_back , AN_RES_HTTP_XFER_BODY ) ;
2018-10-26 08:47:40 -04:00
ANALYZE ( s , res , pcli_wait_for_response , ana_list , ana_back , AN_RES_WAIT_CLI ) ;
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to define analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
Over time, and with the introduction of new analyzers, this order was broken up. The
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been split into 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
ANALYZE ( s , res , flt_xfer_data , ana_list , ana_back , AN_RES_FLT_XFER_DATA ) ;
ANALYZE ( s , res , flt_end_analyze , ana_list , ana_back , AN_RES_FLT_END ) ;
2010-01-07 18:32:27 -05:00
break ;
}
2009-06-21 16:43:05 -04:00
}
2022-05-17 13:40:40 -04:00
rp_cons_last = scf - > state ;
rp_prod_last = scb - > state ;
2017-07-06 09:49:30 -04:00
res - > flags & = ~ CF_WAKE_ONCE ;
2014-11-28 09:07:47 -05:00
rpf_last = res - > flags ;
2010-07-27 11:15:12 -04:00
2019-06-06 08:45:26 -04:00
if ( ( res - > flags ^ flags ) & ( CF_SHUTR | CF_SHUTW ) )
2009-06-21 16:43:05 -04:00
goto resync_response ;
}
2010-01-06 18:09:04 -05:00
/* maybe someone has added some request analysers, so we must check and loop */
2014-11-28 09:07:47 -05:00
if ( req - > analysers & ~ req_ana_back )
2010-01-06 18:09:04 -05:00
goto resync_request ;
2014-11-28 09:07:47 -05:00
if ( ( req - > flags & ~ rqf_last ) & CF_MASK_ANALYSER )
2010-12-17 01:13:42 -05:00
goto resync_request ;
2009-06-21 16:43:05 -04:00
/* FIXME: here we should call protocol handlers which rely on
* both buffers .
*/
/*
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved over there. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
* Now we propagate unhandled errors to the stream . Normally
2010-03-04 14:34:23 -05:00
* we ' re just in a data phase here since it means we have not
* seen any analyser who could set an error status .
2009-06-21 16:43:05 -04:00
*/
2012-11-11 18:42:33 -05:00
srv = objt_server ( s - > target ) ;
2015-04-02 19:14:29 -04:00
if ( unlikely ( ! ( s - > flags & SF_ERR_MASK ) ) ) {
2014-11-28 09:07:47 -05:00
if ( req - > flags & ( CF_READ_ERROR | CF_READ_TIMEOUT | CF_WRITE_ERROR | CF_WRITE_TIMEOUT ) ) {
2009-06-21 16:43:05 -04:00
/* Report it if the client got an error or a read timeout expired */
2021-10-18 09:06:20 -04:00
req - > analysers & = AN_REQ_FLT_END ;
2014-11-28 09:07:47 -05:00
if ( req - > flags & CF_READ_ERROR ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . cli_aborts ) ;
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . cli_aborts ) ;
2020-01-24 05:45:05 -05:00
if ( sess - > listener & & sess - > listener - > counters )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > listener - > counters - > cli_aborts ) ;
2011-03-10 10:55:02 -05:00
if ( srv )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & srv - > counters . cli_aborts ) ;
2015-04-02 19:14:29 -04:00
s - > flags | = SF_ERR_CLICL ;
2010-03-04 14:34:23 -05:00
}
2014-11-28 09:07:47 -05:00
else if ( req - > flags & CF_READ_TIMEOUT ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . cli_aborts ) ;
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . cli_aborts ) ;
2020-01-24 05:45:05 -05:00
if ( sess - > listener & & sess - > listener - > counters )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > listener - > counters - > cli_aborts ) ;
2011-03-10 10:55:02 -05:00
if ( srv )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & srv - > counters . cli_aborts ) ;
2015-04-02 19:14:29 -04:00
s - > flags | = SF_ERR_CLITO ;
2010-03-04 14:34:23 -05:00
}
2014-11-28 09:07:47 -05:00
else if ( req - > flags & CF_WRITE_ERROR ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . srv_aborts ) ;
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . srv_aborts ) ;
2020-01-24 05:45:05 -05:00
if ( sess - > listener & & sess - > listener - > counters )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > listener - > counters - > srv_aborts ) ;
2011-03-10 10:55:02 -05:00
if ( srv )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & srv - > counters . srv_aborts ) ;
2015-04-02 19:14:29 -04:00
s - > flags | = SF_ERR_SRVCL ;
2010-03-04 14:34:23 -05:00
}
else {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . srv_aborts ) ;
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . srv_aborts ) ;
2020-01-24 05:45:05 -05:00
if ( sess - > listener & & sess - > listener - > counters )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > listener - > counters - > srv_aborts ) ;
2011-03-10 10:55:02 -05:00
if ( srv )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & srv - > counters . srv_aborts ) ;
2015-04-02 19:14:29 -04:00
s - > flags | = SF_ERR_SRVTO ;
2010-03-04 14:34:23 -05:00
}
2009-03-15 17:34:05 -04:00
sess_set_term_flags ( s ) ;
2019-04-23 11:34:22 -04:00
/* Abort the request if a client error occurred while
2022-05-17 13:47:17 -04:00
* the backend stream connector is in the SC_ST_INI
* state . It is switched into the SC_ST_CLO state and
2019-04-23 11:34:22 -04:00
* the request channel is erased . */
2022-05-17 13:47:17 -04:00
if ( scb - > state = = SC_ST_INI ) {
s - > scb - > state = SC_ST_CLO ;
2019-04-23 11:34:22 -04:00
channel_abort ( req ) ;
if ( IS_HTX_STRM ( s ) )
channel_htx_erase ( req , htxbuf ( & req - > buf ) ) ;
else
channel_erase ( req ) ;
}
2009-03-15 17:34:05 -04:00
}
2014-11-28 09:07:47 -05:00
else if ( res - > flags & ( CF_READ_ERROR | CF_READ_TIMEOUT | CF_WRITE_ERROR | CF_WRITE_TIMEOUT ) ) {
2009-06-21 16:43:05 -04:00
/* Report it if the server got an error or a read timeout expired */
2021-10-18 09:06:20 -04:00
res - > analysers & = AN_RES_FLT_END ;
2014-11-28 09:07:47 -05:00
if ( res - > flags & CF_READ_ERROR ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . srv_aborts ) ;
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . srv_aborts ) ;
2020-01-24 05:45:05 -05:00
if ( sess - > listener & & sess - > listener - > counters )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > listener - > counters - > srv_aborts ) ;
2011-03-10 10:55:02 -05:00
if ( srv )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & srv - > counters . srv_aborts ) ;
2015-04-02 19:14:29 -04:00
s - > flags | = SF_ERR_SRVCL ;
2010-03-04 14:34:23 -05:00
}
2014-11-28 09:07:47 -05:00
else if ( res - > flags & CF_READ_TIMEOUT ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . srv_aborts ) ;
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . srv_aborts ) ;
2020-01-24 05:45:05 -05:00
if ( sess - > listener & & sess - > listener - > counters )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > listener - > counters - > srv_aborts ) ;
2011-03-10 10:55:02 -05:00
if ( srv )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & srv - > counters . srv_aborts ) ;
2015-04-02 19:14:29 -04:00
s - > flags | = SF_ERR_SRVTO ;
2010-03-04 14:34:23 -05:00
}
2014-11-28 09:07:47 -05:00
else if ( res - > flags & CF_WRITE_ERROR ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . cli_aborts ) ;
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . cli_aborts ) ;
2020-01-24 05:45:05 -05:00
if ( sess - > listener & & sess - > listener - > counters )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > listener - > counters - > cli_aborts ) ;
2011-03-10 10:55:02 -05:00
if ( srv )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & srv - > counters . cli_aborts ) ;
2015-04-02 19:14:29 -04:00
s - > flags | = SF_ERR_CLICL ;
2010-03-04 14:34:23 -05:00
}
else {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . cli_aborts ) ;
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . cli_aborts ) ;
2020-01-24 05:45:05 -05:00
if ( sess - > listener & & sess - > listener - > counters )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > listener - > counters - > cli_aborts ) ;
2011-03-10 10:55:02 -05:00
if ( srv )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & srv - > counters . cli_aborts ) ;
2015-04-02 19:14:29 -04:00
s - > flags | = SF_ERR_CLITO ;
2010-03-04 14:34:23 -05:00
}
2009-06-21 16:43:05 -04:00
sess_set_term_flags ( s ) ;
}
2009-03-15 17:34:05 -04:00
}
2009-06-21 16:43:05 -04:00
/*
* Here we take care of forwarding unhandled data . This also includes
* connection establishments and shutdown requests .
*/
2021-01-06 11:20:16 -05:00
/* If no one is interested in analysing data, it's time to forward
2009-09-20 06:07:52 -04:00
* everything . We configure the buffer to forward indefinitely .
2012-08-27 17:14:58 -04:00
* Note that we ' re checking CF_SHUTR_NOW as an indication of a possible
2012-08-27 18:06:31 -04:00
* recent call to channel_abort ( ) .
2008-12-14 11:31:54 -05:00
*/
2017-08-29 10:06:38 -04:00
if ( unlikely ( ( ! req - > analysers | | ( req - > analysers = = AN_REQ_FLT_END & & ! ( req - > flags & CF_FLT_ANALYZE ) ) ) & &
2014-11-28 09:07:47 -05:00
! ( req - > flags & ( CF_SHUTW | CF_SHUTR_NOW ) ) & &
2022-05-17 13:47:17 -04:00
( cs_state_in ( scf - > state , SC_SB_EST | SC_SB_DIS | SC_SB_CLO ) ) & &
2014-11-28 09:07:47 -05:00
( req - > to_forward ! = CHN_INFINITE_FORWARD ) ) ) {
2012-11-11 17:05:39 -05:00
/* This buffer is freewheeling, there's no analyser
2009-03-08 16:38:23 -04:00
* attached to it . If any data are left in , we ' ll permit them to
* move .
*/
2014-11-28 09:07:47 -05:00
channel_auto_read ( req ) ;
channel_auto_connect ( req ) ;
channel_auto_close ( req ) ;
2009-03-08 16:38:23 -04:00
2018-12-18 15:57:24 -05:00
if ( IS_HTX_STRM ( s ) ) {
struct htx * htx = htxbuf ( & req - > buf ) ;
2018-12-05 05:56:15 -05:00
/* We'll let data flow between the producer (if still connected)
2018-12-18 15:57:24 -05:00
* to the consumer .
2018-12-05 05:56:15 -05:00
*/
2018-12-18 15:57:24 -05:00
co_set_data ( req , htx - > data ) ;
2018-12-05 05:56:15 -05:00
if ( ! ( req - > flags & ( CF_SHUTR | CF_SHUTW_NOW ) ) )
2018-12-18 15:57:24 -05:00
channel_htx_forward_forever ( req , htx ) ;
2018-12-05 05:56:15 -05:00
}
else {
/* We'll let data flow between the producer (if still connected)
* to the consumer ( which might possibly not be connected yet ) .
*/
2018-12-18 15:57:24 -05:00
c_adv ( req , ci_data ( req ) ) ;
2018-12-05 05:56:15 -05:00
if ( ! ( req - > flags & ( CF_SHUTR | CF_SHUTW_NOW ) ) )
channel_forward_forever ( req ) ;
}
2008-12-14 11:31:54 -05:00
}
2008-12-13 15:12:26 -05:00
2009-03-08 16:38:23 -04:00
/* check if it is wise to enable kernel splicing to forward request data */
2014-11-28 09:07:47 -05:00
if ( ! ( req - > flags & ( CF_KERN_SPLICING | CF_SHUTR ) ) & &
req - > to_forward & &
2009-03-08 16:38:23 -04:00
( global . tune . options & GTUNE_USE_SPLICE ) & &
2022-05-18 10:23:22 -04:00
( sc_conn ( scf ) & & __sc_conn ( scf ) - > xprt & & __sc_conn ( scf ) - > xprt - > rcv_pipe & &
__sc_conn ( scf ) - > mux & & __sc_conn ( scf ) - > mux - > rcv_pipe ) & &
( sc_conn ( scb ) & & __sc_conn ( scb ) - > xprt & & __sc_conn ( scb ) - > xprt - > snd_pipe & &
__sc_conn ( scb ) - > mux & & __sc_conn ( scb ) - > mux - > snd_pipe ) & &
2009-03-08 16:38:23 -04:00
( pipes_used < global . maxpipes ) & &
2015-04-03 09:40:56 -04:00
( ( ( sess - > fe - > options2 | s - > be - > options2 ) & PR_O2_SPLIC_REQ ) | |
( ( ( sess - > fe - > options2 | s - > be - > options2 ) & PR_O2_SPLIC_AUT ) & &
2014-11-28 09:07:47 -05:00
( req - > flags & CF_STREAMER_FAST ) ) ) ) {
req - > flags | = CF_KERN_SPLICING ;
2009-03-08 16:38:23 -04:00
}
2008-11-30 12:47:21 -05:00
/* reflect what the L7 analysers have seen last */
2014-11-28 09:07:47 -05:00
rqf_last = req - > flags ;
2008-11-30 12:47:21 -05:00
2009-09-19 15:04:57 -04:00
/* it's possible that an upper layer has requested a connection setup or abort.
* There are 2 situations where we decide to establish a new connection :
* - there are data scheduled for emission in the buffer
2012-08-27 17:14:58 -04:00
* - the CF_AUTO_CONNECT flag is set ( active connection )
2009-09-19 15:04:57 -04:00
*/
2022-05-17 13:47:17 -04:00
if ( scb - > state = = SC_ST_INI ) {
2019-04-23 11:34:22 -04:00
if ( ! ( req - > flags & CF_SHUTW ) ) {
2014-11-28 09:07:47 -05:00
if ( ( req - > flags & CF_AUTO_CONNECT ) | | ! channel_is_empty ( req ) ) {
2013-09-29 11:19:56 -04:00
/* If we have an appctx, there is no connect method, so we
* immediately switch to the connected state , otherwise we
* perform a connection request .
2009-09-19 15:04:57 -04:00
*/
2022-05-17 13:47:17 -04:00
scb - > state = SC_ST_REQ ; /* new connection requested */
2022-03-29 10:08:44 -04:00
s - > conn_retries = 0 ;
2021-05-21 07:46:14 -04:00
if ( ( s - > be - > retry_type & ~ PR_RE_CONN_FAILED ) & &
( s - > be - > mode = = PR_MODE_HTTP ) & &
2022-03-29 09:23:40 -04:00
! ( s - > txn - > flags & TX_D_L7_RETRY ) )
s - > txn - > flags | = TX_L7_RETRY ;
2009-09-19 15:04:57 -04:00
}
2009-08-16 12:27:24 -04:00
}
2009-09-20 02:19:25 -04:00
else {
2022-05-17 13:47:17 -04:00
s - > scb - > state = SC_ST_CLO ; /* shutw+ini = abort */
2014-11-28 09:07:47 -05:00
channel_shutw_now ( req ) ; /* fix buffer flags upon abort */
channel_shutr_now ( res ) ;
2009-09-20 02:19:25 -04:00
}
2009-03-06 06:51:23 -05:00
}
2008-11-30 12:47:21 -05:00
/* we may have a pending connection request, or a connection waiting
* for completion .
*/
2022-05-17 13:47:17 -04:00
if ( cs_state_in ( scb - > state , SC_SB_REQ | SC_SB_QUE | SC_SB_TAR | SC_SB_ASS ) ) {
2015-06-06 13:29:07 -04:00
/* prune the request variables and swap to the response variables. */
if ( s - > vars_reqres . scope ! = SCOPE_RES ) {
2019-11-09 12:00:47 -05:00
if ( ! LIST_ISEMPTY ( & s - > vars_reqres . head ) )
2018-10-28 08:44:36 -04:00
vars_prune ( & s - > vars_reqres , s - > sess , s ) ;
2021-08-31 02:13:25 -04:00
vars_init_head ( & s - > vars_reqres , SCOPE_RES ) ;
2015-06-06 13:29:07 -04:00
}
2008-11-30 12:47:21 -05:00
do {
/* nb: step 1 might switch from QUE to ASS, but we first want
* to give a chance to step 2 to perform a redirect if needed .
*/
2022-05-17 13:47:17 -04:00
if ( scb - > state ! = SC_ST_REQ )
2020-01-09 12:43:15 -05:00
back_try_conn_req ( s ) ;
2022-05-17 13:47:17 -04:00
if ( scb - > state = = SC_ST_REQ )
2020-01-09 12:43:15 -05:00
back_handle_st_req ( s ) ;
2008-11-30 12:47:21 -05:00
2020-03-04 10:42:03 -05:00
/* get a chance to complete an immediate connection setup */
2022-05-17 13:47:17 -04:00
if ( scb - > state = = SC_ST_RDY )
2022-05-17 13:07:51 -04:00
goto resync_stconns ;
2020-03-04 10:42:03 -05:00
2013-12-31 17:32:12 -05:00
/* applets directly go to the ESTABLISHED state. Similarly,
* servers experience the same fate when their connection
* is reused .
*/
2022-05-17 13:47:17 -04:00
if ( unlikely ( scb - > state = = SC_ST_EST ) )
2020-01-09 12:43:15 -05:00
back_establish ( s ) ;
2013-12-31 17:16:50 -05:00
2012-11-11 18:42:33 -05:00
srv = objt_server ( s - > target ) ;
2022-05-17 13:47:17 -04:00
if ( scb - > state = = SC_ST_ASS & & srv & & srv - > rdr_len & & ( s - > flags & SF_REDIRECTABLE ) )
2022-05-17 13:40:40 -04:00
http_perform_server_redirect ( s , scb ) ;
2022-05-17 13:47:17 -04:00
} while ( scb - > state = = SC_ST_ASS ) ;
2008-11-30 12:47:21 -05:00
}
2019-06-06 03:17:23 -04:00
/* Let's see if we can send the pending request now */
2022-05-18 12:06:53 -04:00
sc_conn_sync_send ( scb ) ;
2019-06-06 03:17:23 -04:00
/*
* Now forward all shutdown requests between both sides of the request buffer
*/
/* first, let's check if the request buffer needs to shutdown(write), which may
* happen either because the input is closed or because we want to force a close
* once the server has begun to respond . If a half - closed timeout is set , we adjust
2022-04-14 11:39:48 -04:00
* the other side ' s timeout as well . However this doesn ' t have effect during the
* connection setup unless the backend has abortonclose set .
2019-06-06 03:17:23 -04:00
*/
if ( unlikely ( ( req - > flags & ( CF_SHUTW | CF_SHUTW_NOW | CF_AUTO_CLOSE | CF_SHUTR ) ) = =
2022-04-14 11:39:48 -04:00
( CF_AUTO_CLOSE | CF_SHUTR ) & &
2022-05-17 13:47:17 -04:00
( scb - > state ! = SC_ST_CON | | ( s - > be - > options & PR_O_ABRT_CLOSE ) ) ) ) {
2019-06-06 03:17:23 -04:00
channel_shutw_now ( req ) ;
}
/* shutdown(write) pending */
if ( unlikely ( ( req - > flags & ( CF_SHUTW | CF_SHUTW_NOW ) ) = = CF_SHUTW_NOW & &
channel_is_empty ( req ) ) ) {
if ( req - > flags & CF_READ_ERROR )
2022-05-17 13:44:42 -04:00
scb - > flags | = SC_FL_NOLINGER ;
2022-05-17 13:40:40 -04:00
cs_shutw ( scb ) ;
2019-06-06 03:17:23 -04:00
}
/* shutdown(write) done on server side, we must stop the client too */
if ( unlikely ( ( req - > flags & ( CF_SHUTW | CF_SHUTR | CF_SHUTR_NOW ) ) = = CF_SHUTW & &
! req - > analysers ) )
channel_shutr_now ( req ) ;
/* shutdown(read) pending */
if ( unlikely ( ( req - > flags & ( CF_SHUTR | CF_SHUTR_NOW ) ) = = CF_SHUTR_NOW ) ) {
2022-05-17 13:44:42 -04:00
if ( scf - > flags & SC_FL_NOHALF )
scf - > flags | = SC_FL_NOLINGER ;
2022-05-17 13:40:40 -04:00
cs_shutr ( scf ) ;
2019-06-06 03:17:23 -04:00
}
2009-06-21 16:43:05 -04:00
/* Benchmarks have shown that it's optimal to do a full resync now */
2022-05-17 13:47:17 -04:00
if ( scf - > state = = SC_ST_DIS | |
cs_state_in ( scb - > state , SC_SB_RDY | SC_SB_DIS ) | |
( sc_ep_test ( scf , SE_FL_ERROR ) & & scf - > state ! = SC_ST_CLO ) | |
( sc_ep_test ( scb , SE_FL_ERROR ) & & scb - > state ! = SC_ST_CLO ) )
2022-05-17 13:07:51 -04:00
goto resync_stconns ;
2008-11-30 12:47:21 -05:00
2010-07-27 11:15:12 -04:00
/* otherwise we want to check if we need to resync the req buffer or not */
2019-06-06 08:45:26 -04:00
if ( ( req - > flags ^ rqf_last ) & ( CF_SHUTR | CF_SHUTW ) )
2009-03-08 14:20:25 -04:00
goto resync_request ;
2009-06-21 16:43:05 -04:00
/* perform output updates to the response buffer */
2009-03-15 17:34:05 -04:00
2021-01-06 11:20:16 -05:00
/* If no one is interested in analysing data, it's time to forward
2009-09-20 06:07:52 -04:00
* everything . We configure the buffer to forward indefinitely .
2012-08-27 17:14:58 -04:00
* Note that we ' re checking CF_SHUTR_NOW as an indication of a possible
2012-08-27 18:06:31 -04:00
* recent call to channel_abort ( ) .
2008-12-14 11:31:54 -05:00
*/
2017-08-29 10:06:38 -04:00
if ( unlikely ( ( ! res - > analysers | | ( res - > analysers = = AN_RES_FLT_END & & ! ( res - > flags & CF_FLT_ANALYZE ) ) ) & &
2014-11-28 09:07:47 -05:00
! ( res - > flags & ( CF_SHUTW | CF_SHUTR_NOW ) ) & &
2022-05-17 13:47:17 -04:00
cs_state_in ( scb - > state , SC_SB_EST | SC_SB_DIS | SC_SB_CLO ) & &
2014-11-28 09:07:47 -05:00
( res - > to_forward ! = CHN_INFINITE_FORWARD ) ) ) {
2012-11-11 17:05:39 -05:00
/* This buffer is freewheeling, there's no analyser
2009-03-08 16:38:23 -04:00
* attached to it . If any data are left in , we ' ll permit them to
* move .
*/
2014-11-28 09:07:47 -05:00
channel_auto_read ( res ) ;
channel_auto_close ( res ) ;
2010-11-07 14:26:56 -05:00
2018-12-18 15:57:24 -05:00
if ( IS_HTX_STRM ( s ) ) {
struct htx * htx = htxbuf ( & res - > buf ) ;
2012-05-12 06:50:00 -04:00
2018-12-05 05:56:15 -05:00
/* We'll let data flow between the producer (if still connected)
* to the consumer .
*/
2018-12-18 15:57:24 -05:00
co_set_data ( res , htx - > data ) ;
2018-12-05 05:56:15 -05:00
if ( ! ( res - > flags & ( CF_SHUTR | CF_SHUTW_NOW ) ) )
2018-12-18 15:57:24 -05:00
channel_htx_forward_forever ( res , htx ) ;
2018-12-05 05:56:15 -05:00
}
else {
/* We'll let data flow between the producer (if still connected)
* to the consumer .
*/
2018-12-18 15:57:24 -05:00
c_adv ( res , ci_data ( res ) ) ;
2018-12-05 05:56:15 -05:00
if ( ! ( res - > flags & ( CF_SHUTR | CF_SHUTW_NOW ) ) )
channel_forward_forever ( res ) ;
}
2015-07-09 12:38:57 -04:00
2012-05-12 06:50:00 -04:00
/* if we have no analyser anymore in any direction and have a
2014-05-10 08:30:07 -04:00
* tunnel timeout set , use it now . Note that we must respect
* the half - closed timeouts as well .
2012-05-12 06:50:00 -04:00
*/
2020-12-10 07:43:53 -05:00
if ( ! req - > analysers & & s - > tunnel_timeout ) {
2014-11-28 09:07:47 -05:00
req - > rto = req - > wto = res - > rto = res - > wto =
2020-12-10 07:43:53 -05:00
s - > tunnel_timeout ;
2014-05-10 08:30:07 -04:00
2015-04-03 09:40:56 -04:00
if ( ( req - > flags & CF_SHUTR ) & & tick_isset ( sess - > fe - > timeout . clientfin ) )
res - > wto = sess - > fe - > timeout . clientfin ;
2014-11-28 09:07:47 -05:00
if ( ( req - > flags & CF_SHUTW ) & & tick_isset ( s - > be - > timeout . serverfin ) )
res - > rto = s - > be - > timeout . serverfin ;
if ( ( res - > flags & CF_SHUTR ) & & tick_isset ( s - > be - > timeout . serverfin ) )
req - > wto = s - > be - > timeout . serverfin ;
2015-04-03 09:40:56 -04:00
if ( ( res - > flags & CF_SHUTW ) & & tick_isset ( sess - > fe - > timeout . clientfin ) )
req - > rto = sess - > fe - > timeout . clientfin ;
2014-11-28 09:07:47 -05:00
req - > rex = tick_add ( now_ms , req - > rto ) ;
req - > wex = tick_add ( now_ms , req - > wto ) ;
res - > rex = tick_add ( now_ms , res - > rto ) ;
res - > wex = tick_add ( now_ms , res - > wto ) ;
2012-05-12 06:50:00 -04:00
}
2008-12-14 11:31:54 -05:00
}
2008-12-13 15:12:26 -05:00
2009-03-08 16:38:23 -04:00
/* check if it is wise to enable kernel splicing to forward response data */
2014-11-28 09:07:47 -05:00
if ( ! ( res - > flags & ( CF_KERN_SPLICING | CF_SHUTR ) ) & &
res - > to_forward & &
2009-03-08 16:38:23 -04:00
( global . tune . options & GTUNE_USE_SPLICE ) & &
2022-05-18 10:23:22 -04:00
( sc_conn ( scf ) & & __sc_conn ( scf ) - > xprt & & __sc_conn ( scf ) - > xprt - > snd_pipe & &
__sc_conn ( scf ) - > mux & & __sc_conn ( scf ) - > mux - > snd_pipe ) & &
( sc_conn ( scb ) & & __sc_conn ( scb ) - > xprt & & __sc_conn ( scb ) - > xprt - > rcv_pipe & &
__sc_conn ( scb ) - > mux & & __sc_conn ( scb ) - > mux - > rcv_pipe ) & &
2009-03-08 16:38:23 -04:00
( pipes_used < global . maxpipes ) & &
2015-04-03 09:40:56 -04:00
( ( ( sess - > fe - > options2 | s - > be - > options2 ) & PR_O2_SPLIC_RTR ) | |
( ( ( sess - > fe - > options2 | s - > be - > options2 ) & PR_O2_SPLIC_AUT ) & &
2014-11-28 09:07:47 -05:00
( res - > flags & CF_STREAMER_FAST ) ) ) ) {
res - > flags | = CF_KERN_SPLICING ;
2009-03-08 16:38:23 -04:00
}
2008-11-30 12:47:21 -05:00
/* reflect what the L7 analysers have seen last */
2014-11-28 09:07:47 -05:00
rpf_last = res - > flags ;
2008-11-30 12:47:21 -05:00
2019-06-06 03:17:23 -04:00
/* Let's see if we can send the pending response now */
2022-05-18 12:06:53 -04:00
sc_conn_sync_send ( scf ) ;
2019-06-06 03:17:23 -04:00
2008-11-30 12:47:21 -05:00
/*
* Now forward all shutdown requests between both sides of the buffer
*/
/*
* FIXME : this is probably where we should produce error responses .
*/
2008-12-14 11:31:54 -05:00
/* first, let's check if the response buffer needs to shutdown(write) */
2014-11-28 09:07:47 -05:00
if ( unlikely ( ( res - > flags & ( CF_SHUTW | CF_SHUTW_NOW | CF_AUTO_CLOSE | CF_SHUTR ) ) = =
2014-05-10 08:30:07 -04:00
( CF_AUTO_CLOSE | CF_SHUTR ) ) ) {
2014-11-28 09:07:47 -05:00
channel_shutw_now ( res ) ;
2014-05-10 08:30:07 -04:00
}
2008-11-30 12:47:21 -05:00
/* shutdown(write) pending */
2014-11-28 09:07:47 -05:00
if ( unlikely ( ( res - > flags & ( CF_SHUTW | CF_SHUTW_NOW ) ) = = CF_SHUTW_NOW & &
channel_is_empty ( res ) ) ) {
2022-05-17 13:40:40 -04:00
cs_shutw ( scf ) ;
2014-05-10 08:30:07 -04:00
}
2008-11-30 12:47:21 -05:00
/* shutdown(write) done on the client side, we must stop the server too */
2014-11-28 09:07:47 -05:00
if ( unlikely ( ( res - > flags & ( CF_SHUTW | CF_SHUTR | CF_SHUTR_NOW ) ) = = CF_SHUTW ) & &
! res - > analysers )
channel_shutr_now ( res ) ;
2008-11-30 12:47:21 -05:00
/* shutdown(read) pending */
2014-11-28 09:07:47 -05:00
if ( unlikely ( ( res - > flags & ( CF_SHUTR | CF_SHUTR_NOW ) ) = = CF_SHUTR_NOW ) ) {
2022-05-17 13:44:42 -04:00
if ( scb - > flags & SC_FL_NOHALF )
scb - > flags | = SC_FL_NOLINGER ;
2022-05-17 13:40:40 -04:00
cs_shutr ( scb ) ;
2012-05-13 08:48:59 -04:00
}
2008-11-30 12:47:21 -05:00
2022-05-17 13:47:17 -04:00
if ( scf - > state = = SC_ST_DIS | |
cs_state_in ( scb - > state , SC_SB_RDY | SC_SB_DIS ) | |
( sc_ep_test ( scf , SE_FL_ERROR ) & & scf - > state ! = SC_ST_CLO ) | |
( sc_ep_test ( scb , SE_FL_ERROR ) & & scb - > state ! = SC_ST_CLO ) )
2022-05-17 13:07:51 -04:00
goto resync_stconns ;
2008-11-30 12:47:21 -05:00
2019-06-06 08:32:49 -04:00
if ( ( req - > flags & ~ rqf_last ) & CF_MASK_ANALYSER )
2009-03-08 14:20:25 -04:00
goto resync_request ;
2014-11-28 09:07:47 -05:00
if ( ( res - > flags ^ rpf_last ) & CF_MASK_STATIC )
2009-03-08 14:20:25 -04:00
goto resync_response ;
2008-11-30 12:47:21 -05:00
2019-06-06 03:17:23 -04:00
if ( ( ( req - > flags ^ rqf_last ) | ( res - > flags ^ rpf_last ) ) & CF_MASK_ANALYSER )
goto resync_request ;
2009-09-05 14:57:35 -04:00
/* we're interested in getting wakeups again */
2022-05-17 13:44:42 -04:00
scf - > flags & = ~ SC_FL_DONT_WAKE ;
scb - > flags & = ~ SC_FL_DONT_WAKE ;
2009-09-05 14:57:35 -04:00
2022-05-17 13:47:17 -04:00
if ( likely ( ( scf - > state ! = SC_ST_CLO ) | | ! cs_state_in ( scb - > state , SC_SB_INI | SC_SB_CLO ) | |
2019-11-13 05:12:32 -05:00
( req - > analysers & AN_REQ_FLT_END ) | | ( res - > analysers & AN_RES_FLT_END ) ) ) {
2019-07-31 12:05:26 -04:00
if ( ( sess - > fe - > options & PR_O_CONTSTATS ) & & ( s - > flags & SF_BE_ASSIGNED ) & & ! ( s - > flags & SF_IGNORE ) )
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved over there. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
stream_process_counters ( s ) ;
2008-11-30 12:47:21 -05:00
2022-04-01 08:48:06 -04:00
stream_update_both_cs ( s ) ;
2008-11-30 12:47:21 -05:00
2014-06-23 09:22:31 -04:00
/* Trick: if a request is being waiting for the server to respond,
* and if we know the server can timeout , we don ' t want the timeout
* to expire on the client side first , but we ' re still interested
* in passing data from the client to the server ( eg : POST ) . Thus ,
* we can cancel the client ' s request timeout if the server ' s
* request timeout is set and the server has not yet sent a response .
*/
2014-11-28 09:07:47 -05:00
if ( ( res - > flags & ( CF_AUTO_CLOSE | CF_SHUTR ) ) = = 0 & &
( tick_isset ( req - > wex ) | | tick_isset ( res - > rex ) ) ) {
req - > flags | = CF_READ_NOEXP ;
req - > rex = TICK_ETERNITY ;
2014-06-23 09:22:31 -04:00
}
BUG/MEDIUM: stream: Save unprocessed events for a stream
A stream can be awakened for different reasons. During its processing, it can be
early stopped if no buffer is available. In this situation, the reason why the
stream was awakened is lost, because we rely on the task state, which is reset
after each processing loop.
In many cases, that's not a big deal. But it can be useful to accumulate the
task states if the stream processing is interrupted, especially if some filters
need to be called.
To be clearer, here is a simple example:
1) A stream is awakened with the reason TASK_WOKEN_MSG.
2) Because no buffer is available, the processing is interrupted, the stream
is back to sleep. And the task state is reset.
3) Some buffers become available, so the stream is awakened with the reason
TASK_WOKEN_RES. At this step, the previous reason (TASK_WOKEN_MSG) is lost.
Now, the task states are saved for a stream and reset only when the stream
processing is not interrupted. The corresponding bitfield represents the pending
events for a stream. And we use this one instead of the task state during the
stream processing.
Note that TASK_WOKEN_TIMER and TASK_WOKEN_RES are always removed because these
events are always handled during the stream processing.
[wt: backport to 1.7 and 1.6]
2016-12-08 16:33:52 -05:00
/* Reset pending events now */
s - > pending_events = 0 ;
2012-11-08 08:49:17 -05:00
update_exp_and_leave :
2022-05-17 13:44:42 -04:00
/* Note: please ensure that if you branch here you disable SC_FL_DONT_WAKE */
2016-11-10 08:58:05 -05:00
t - > expire = tick_first ( ( tick_is_expired ( t - > expire , now_ms ) ? 0 : t - > expire ) ,
tick_first ( tick_first ( req - > rex , req - > wex ) ,
tick_first ( res - > rex , res - > wex ) ) ) ;
2016-11-08 16:03:00 -05:00
if ( ! req - > analysers )
req - > analyse_exp = TICK_ETERNITY ;
if ( ( sess - > fe - > options & PR_O_CONTSTATS ) & & ( s - > flags & SF_BE_ASSIGNED ) & &
( ! tick_isset ( req - > analyse_exp ) | | tick_is_expired ( req - > analyse_exp , now_ms ) ) )
req - > analyse_exp = tick_add ( now_ms , 5000 ) ;
t - > expire = tick_first ( t - > expire , req - > analyse_exp ) ;
2008-11-30 12:47:21 -05:00
2017-11-10 11:14:23 -05:00
t - > expire = tick_first ( t - > expire , res - > analyse_exp ) ;
2022-03-29 13:02:31 -04:00
t - > expire = tick_first ( t - > expire , s - > conn_exp ) ;
2008-11-30 12:47:21 -05:00
BUG/MEDIUM: stream: Save unprocessed events for a stream
A stream can be awakened for different reasons. During its processing, it can be
early stopped if no buffer is available. In this situation, the reason why the
stream was awakened is lost, because we rely on the task state, which is reset
after each processing loop.
In many cases, that's not a big deal. But it can be useful to accumulate the
task states if the stream processing is interrupted, especially if some filters
need to be called.
To be clearer, here is a simple example:
1) A stream is awakened with the reason TASK_WOKEN_MSG.
2) Because no buffer is available, the processing is interrupted, the stream
is back to sleep. And the task state is reset.
3) Some buffers become available, so the stream is awakened with the reason
TASK_WOKEN_RES. At this step, the previous reason (TASK_WOKEN_MSG) is lost.
Now, the task states are saved for a stream and reset only when the stream
processing is not interrupted. The corresponding bitfield represents the pending
events for a stream. And we use this one instead of the task state during the
stream processing.
Note that TASK_WOKEN_TIMER and TASK_WOKEN_RES are always removed because these
events are always handled during the stream processing.
[wt: backport to 1.7 and 1.6]
2016-12-08 16:33:52 -05:00
s - > pending_events & = ~ ( TASK_WOKEN_TIMER | TASK_WOKEN_RES ) ;
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
stream_release_buffers ( s ) ;
2019-11-05 10:18:10 -05:00
DBG_TRACE_DEVEL ( " queuing " , STRM_EV_STRM_PROC , s ) ;
2009-03-08 04:38:41 -04:00
return t ; /* nothing more to do */
2008-11-30 12:47:21 -05:00
}
2019-11-05 10:18:10 -05:00
DBG_TRACE_DEVEL ( " releasing " , STRM_EV_STRM_PROC , s ) ;
2015-04-02 19:14:29 -04:00
if ( s - > flags & SF_BE_ASSIGNED )
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_DEC ( & s - > be - > beconn ) ;
2017-09-15 03:07:56 -04:00
2008-11-30 12:47:21 -05:00
if ( unlikely ( ( global . mode & MODE_DEBUG ) & &
( ! ( global . mode & MODE_QUIET ) | | ( global . mode & MODE_VERBOSE ) ) ) ) {
2012-10-29 11:51:55 -04:00
chunk_printf ( & trash , " %08x:%s.closed[%04x:%04x] \n " ,
2021-12-15 03:50:17 -05:00
s - > uniq_id , s - > be - > id ,
2022-05-18 10:23:22 -04:00
( unsigned short ) conn_fd ( sc_conn ( scf ) ) ,
( unsigned short ) conn_fd ( sc_conn ( scb ) ) ) ;
2020-03-14 06:03:20 -04:00
DISGUISE ( write ( 1 , trash . area , trash . data ) ) ;
2008-11-30 12:47:21 -05:00
}
2021-01-21 11:10:44 -05:00
if ( ! ( s - > flags & SF_IGNORE ) ) {
s - > logs . t_close = tv_ms_elapsed ( & s - > logs . tv_accept , & now ) ;
2019-07-31 12:05:26 -04:00
stream_process_counters ( s ) ;
2008-11-30 12:47:21 -05:00
2021-01-21 11:10:44 -05:00
if ( s - > txn & & s - > txn - > status ) {
int n ;
2009-10-24 09:36:15 -04:00
2021-01-21 11:10:44 -05:00
n = s - > txn - > status / 100 ;
if ( n < 1 | | n > 5 )
n = 0 ;
2009-10-24 09:36:15 -04:00
2021-01-21 11:10:44 -05:00
if ( sess - > fe - > mode = = PR_MODE_HTTP ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & sess - > fe - > fe_counters . p . http . rsp [ n ] ) ;
2021-01-21 11:10:44 -05:00
}
if ( ( s - > flags & SF_BE_ASSIGNED ) & &
( s - > be - > mode = = PR_MODE_HTTP ) ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & s - > be - > be_counters . p . http . rsp [ n ] ) ;
_HA_ATOMIC_INC ( & s - > be - > be_counters . p . http . cum_req ) ;
2021-01-21 11:10:44 -05:00
}
2012-11-24 08:54:13 -05:00
}
2021-01-21 11:10:44 -05:00
/* let's do a final log if we need it */
if ( ! LIST_ISEMPTY ( & sess - > fe - > logformat ) & & s - > logs . logwait & &
! ( s - > flags & SF_MONITOR ) & &
( ! ( sess - > fe - > options & PR_O_NULLNOLOG ) | | req - > total ) ) {
/* we may need to know the position in the queue */
pendconn_free ( s ) ;
s - > do_log ( s ) ;
2012-11-24 08:54:13 -05:00
}
2009-10-24 09:36:15 -04:00
2021-01-21 11:10:44 -05:00
/* update time stats for this stream */
stream_update_time_stats ( s ) ;
2008-11-30 12:47:21 -05:00
}
/* the task MUST not be in the run queue anymore */
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
stream_free ( s ) ;
2019-04-17 16:51:06 -04:00
task_destroy ( t ) ;
2009-03-08 04:38:41 -04:00
return NULL ;
2008-11-30 12:47:21 -05:00
}
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
/* Update the stream's backend and server time stats */
void stream_update_time_stats ( struct stream * s )
2014-06-17 06:19:18 -04:00
{
int t_request ;
int t_queue ;
int t_connect ;
int t_data ;
int t_close ;
struct server * srv ;
2020-05-15 14:02:40 -04:00
unsigned int samples_window ;
2014-06-17 06:19:18 -04:00
t_request = 0 ;
t_queue = s - > logs . t_queue ;
t_connect = s - > logs . t_connect ;
t_close = s - > logs . t_close ;
t_data = s - > logs . t_data ;
if ( s - > be - > mode ! = PR_MODE_HTTP )
t_data = t_connect ;
if ( t_connect < 0 | | t_data < 0 )
return ;
if ( tv_isge ( & s - > logs . tv_request , & s - > logs . tv_accept ) )
t_request = tv_ms_elapsed ( & s - > logs . tv_accept , & s - > logs . tv_request ) ;
t_data - = t_connect ;
t_connect - = t_queue ;
t_queue - = t_request ;
srv = objt_server ( s - > target ) ;
if ( srv ) {
2020-05-15 14:02:40 -04:00
samples_window = ( ( ( s - > be - > mode = = PR_MODE_HTTP ) ?
srv - > counters . p . http . cum_req : srv - > counters . cum_lbconn ) > TIME_STATS_SAMPLES ) ? TIME_STATS_SAMPLES : 0 ;
swrate_add_dynamic ( & srv - > counters . q_time , samples_window , t_queue ) ;
swrate_add_dynamic ( & srv - > counters . c_time , samples_window , t_connect ) ;
swrate_add_dynamic ( & srv - > counters . d_time , samples_window , t_data ) ;
swrate_add_dynamic ( & srv - > counters . t_time , samples_window , t_close ) ;
MINOR: counters: Add fields to store the max observed for {q,c,d,t}_time
For backends and servers, some average times for last 1024 connections are
already calculated. For the moment, the averages for the time passed in the
queue, the connect time, the response time (for HTTP session only) and the total
time are calculated. Now, in addition, the maximum time observed for these
values are also stored.
In addition, These new counters are cleared as all other max values with the CLI
command "clear counters".
This patch is related to #272.
2019-11-08 08:53:15 -05:00
HA_ATOMIC_UPDATE_MAX ( & srv - > counters . qtime_max , t_queue ) ;
HA_ATOMIC_UPDATE_MAX ( & srv - > counters . ctime_max , t_connect ) ;
HA_ATOMIC_UPDATE_MAX ( & srv - > counters . dtime_max , t_data ) ;
HA_ATOMIC_UPDATE_MAX ( & srv - > counters . ttime_max , t_close ) ;
2014-06-17 06:19:18 -04:00
}
2020-05-15 14:02:40 -04:00
samples_window = ( ( ( s - > be - > mode = = PR_MODE_HTTP ) ?
s - > be - > be_counters . p . http . cum_req : s - > be - > be_counters . cum_lbconn ) > TIME_STATS_SAMPLES ) ? TIME_STATS_SAMPLES : 0 ;
swrate_add_dynamic ( & s - > be - > be_counters . q_time , samples_window , t_queue ) ;
swrate_add_dynamic ( & s - > be - > be_counters . c_time , samples_window , t_connect ) ;
swrate_add_dynamic ( & s - > be - > be_counters . d_time , samples_window , t_data ) ;
swrate_add_dynamic ( & s - > be - > be_counters . t_time , samples_window , t_close ) ;
MINOR: counters: Add fields to store the max observed for {q,c,d,t}_time
For backends and servers, some average times for last 1024 connections are
already calculated. For the moment, the averages for the time passed in the
queue, the connect time, the response time (for HTTP session only) and the total
time are calculated. Now, in addition, the maximum time observed for these
values are also stored.
In addition, These new counters are cleared as all other max values with the CLI
command "clear counters".
This patch is related to #272.
2019-11-08 08:53:15 -05:00
HA_ATOMIC_UPDATE_MAX ( & s - > be - > be_counters . qtime_max , t_queue ) ;
HA_ATOMIC_UPDATE_MAX ( & s - > be - > be_counters . ctime_max , t_connect ) ;
HA_ATOMIC_UPDATE_MAX ( & s - > be - > be_counters . dtime_max , t_data ) ;
HA_ATOMIC_UPDATE_MAX ( & s - > be - > be_counters . ttime_max , t_close ) ;
2014-06-17 06:19:18 -04:00
}
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 09:04:11 -04:00
/*
* This function adjusts sess - > srv_conn and maintains the previous and new
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
* server ' s served stream counts . Setting newsrv to NULL is enough to release
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 09:04:11 -04:00
* current connection slot . This function also notifies any LB algo which might
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
* expect to be informed about any change in the number of active streams on a
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 09:04:11 -04:00
* server .
*/
2021-03-09 09:43:32 -05:00
void sess_change_server ( struct stream * strm , struct server * newsrv )
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 09:04:11 -04:00
{
2021-03-09 09:43:32 -05:00
struct server * oldsrv = strm - > srv_conn ;
2021-02-17 07:33:24 -05:00
if ( oldsrv = = newsrv )
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 09:04:11 -04:00
return ;
2021-02-17 07:33:24 -05:00
if ( oldsrv ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_DEC ( & oldsrv - > served ) ;
_HA_ATOMIC_DEC ( & oldsrv - > proxy - > served ) ;
2019-03-08 12:54:51 -05:00
__ha_barrier_atomic_store ( ) ;
2021-02-17 10:01:37 -05:00
if ( oldsrv - > proxy - > lbprm . server_drop_conn )
2021-06-18 12:29:25 -04:00
oldsrv - > proxy - > lbprm . server_drop_conn ( oldsrv ) ;
2021-03-09 09:43:32 -05:00
stream_del_srv_conn ( strm ) ;
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 09:04:11 -04:00
}
if ( newsrv ) {
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & newsrv - > served ) ;
_HA_ATOMIC_INC ( & newsrv - > proxy - > served ) ;
2019-03-08 12:54:51 -05:00
__ha_barrier_atomic_store ( ) ;
2021-02-17 10:01:37 -05:00
if ( newsrv - > proxy - > lbprm . server_take_conn )
2021-06-18 12:29:25 -04:00
newsrv - > proxy - > lbprm . server_take_conn ( newsrv ) ;
2021-03-09 09:43:32 -05:00
stream_add_srv_conn ( strm , newsrv ) ;
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 09:04:11 -04:00
}
}
2009-03-15 17:34:05 -04:00
/* Handle server-side errors for default protocols. It is called whenever a a
* connection setup is aborted or a request is aborted in queue . It sets the
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
* stream termination flags so that the caller does not have to worry about
2022-05-17 13:07:51 -04:00
* them . It ' s installed as - > srv_error for the server - side stream connector .
2009-03-15 17:34:05 -04:00
*/
2022-05-17 13:07:51 -04:00
void default_srv_error ( struct stream * s , struct stconn * cs )
2009-03-15 17:34:05 -04:00
{
2022-03-30 13:39:30 -04:00
int err_type = s - > conn_err_type ;
2009-03-15 17:34:05 -04:00
int err = 0 , fin = 0 ;
2022-03-30 13:39:30 -04:00
if ( err_type & STRM_ET_QUEUE_ABRT ) {
2015-04-02 19:14:29 -04:00
err = SF_ERR_CLICL ;
fin = SF_FINST_Q ;
2009-03-15 17:34:05 -04:00
}
2022-03-30 13:39:30 -04:00
else if ( err_type & STRM_ET_CONN_ABRT ) {
2015-04-02 19:14:29 -04:00
err = SF_ERR_CLICL ;
fin = SF_FINST_C ;
2009-03-15 17:34:05 -04:00
}
2022-03-30 13:39:30 -04:00
else if ( err_type & STRM_ET_QUEUE_TO ) {
2015-04-02 19:14:29 -04:00
err = SF_ERR_SRVTO ;
fin = SF_FINST_Q ;
2009-03-15 17:34:05 -04:00
}
2022-03-30 13:39:30 -04:00
else if ( err_type & STRM_ET_QUEUE_ERR ) {
2015-04-02 19:14:29 -04:00
err = SF_ERR_SRVCL ;
fin = SF_FINST_Q ;
2009-03-15 17:34:05 -04:00
}
2022-03-30 13:39:30 -04:00
else if ( err_type & STRM_ET_CONN_TO ) {
2015-04-02 19:14:29 -04:00
err = SF_ERR_SRVTO ;
fin = SF_FINST_C ;
2009-03-15 17:34:05 -04:00
}
2022-03-30 13:39:30 -04:00
else if ( err_type & STRM_ET_CONN_ERR ) {
2015-04-02 19:14:29 -04:00
err = SF_ERR_SRVCL ;
fin = SF_FINST_C ;
2009-03-15 17:34:05 -04:00
}
2022-03-30 13:39:30 -04:00
else if ( err_type & STRM_ET_CONN_RES ) {
2015-04-02 19:14:29 -04:00
err = SF_ERR_RESOURCE ;
fin = SF_FINST_C ;
2012-05-14 06:11:47 -04:00
}
2022-03-30 13:39:30 -04:00
else /* STRM_ET_CONN_OTHER and others */ {
2015-04-02 19:14:29 -04:00
err = SF_ERR_INTERNAL ;
fin = SF_FINST_C ;
2009-03-15 17:34:05 -04:00
}
2015-04-02 19:14:29 -04:00
if ( ! ( s - > flags & SF_ERR_MASK ) )
2009-03-15 17:34:05 -04:00
s - > flags | = err ;
2015-04-02 19:14:29 -04:00
if ( ! ( s - > flags & SF_FINST_MASK ) )
2009-03-15 17:34:05 -04:00
s - > flags | = fin ;
}
[BUG] fix the dequeuing logic to ensure that all requests get served
The dequeuing logic was completely wrong. First, a task was assigned
to all servers to process the queue, but this task was never scheduled
and was only woken up on session free. Second, there was no reservation
of server entries when a task was assigned a server. This means that
as long as the task was not connected to the server, its presence was
not accounted for. This was causing trouble when detecting whether or
not a server had reached maxconn. Third, during a redispatch, a session
could lose its place at the server's and get blocked because another
session at the same moment would have stolen the entry. Fourth, the
redispatch option did not work when maxqueue was reached for a server,
and it was not possible to do so without indefinitely hanging a session.
The root cause of all those problems was the lack of pre-reservation of
connections at the server's, and the lack of tracking of servers during
a redispatch. Everything relied on combinations of flags which could
appear similarly in quite distinct situations.
This patch is a major rework but there was no other solution, as the
internal logic was deeply flawed. The resulting code is cleaner, more
understandable, uses less magics and is overall more robust.
As an added bonus, "option redispatch" now works when maxqueue has
been reached on a server.
2008-06-20 09:04:11 -04:00
2015-04-02 19:14:29 -04:00
/* kill a stream and set the termination flags to <why> (one of SF_ERR_*) */
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
void stream_shutdown ( struct stream * stream , int why )
2011-09-07 17:01:56 -04:00
{
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
if ( stream - > req . flags & ( CF_SHUTW | CF_SHUTW_NOW ) )
2011-09-07 17:01:56 -04:00
return ;
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
channel_shutw_now ( & stream - > req ) ;
channel_shutr_now ( & stream - > res ) ;
stream - > task - > nice = 1024 ;
2015-04-02 19:14:29 -04:00
if ( ! ( stream - > flags & SF_ERR_MASK ) )
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
stream - > flags | = why ;
task_wakeup ( stream - > task , TASK_WOKEN_OTHER ) ;
2011-09-07 17:01:56 -04:00
}
2010-06-14 15:04:55 -04:00
2019-05-22 03:33:03 -04:00
/* Appends a dump of the state of stream <s> into buffer <buf> which must have
 * preliminary be prepared by its caller, with each line prepended by prefix
 * <pfx>, and each line terminated by character <eol>. Accepts a NULL or
 * invalid <s> and reports it as such instead of dereferencing it.
 */
void stream_dump(struct buffer *buf, const struct stream *s, const char *pfx, char eol)
{
	const struct stconn *scf, *scb;       /* front/back stream connectors */
	const struct connection *cof, *cob;   /* front/back connections, may be NULL */
	const struct appctx *acf, *acb;       /* front/back applets, may be NULL */
	const struct server *srv;
	const char *src = "unknown";
	const char *dst = "unknown";
	char pn[INET6_ADDRSTRLEN];
	const struct channel *req, *res;

	/* a NULL stream still produces a minimal line so callers need no check */
	if (!s) {
		chunk_appendf(buf, "%sstrm=%p%c", pfx, s, eol);
		return;
	}

	/* catch pointers that do not designate a stream (corruption, wrong cast) */
	if (s->obj_type != OBJ_TYPE_STREAM) {
		chunk_appendf(buf, "%sstrm=%p [invalid type=%d(%s)]%c",
			      pfx, s, s->obj_type, obj_type_name(&s->obj_type), eol);
		return;
	}

	req = &s->req;
	res = &s->res;

	/* front side: prefer the peer address, fall back to the applet name */
	scf = s->scf;
	cof = sc_conn(scf);
	acf = sc_appctx(scf);
	if (cof && cof->src && addr_to_str(cof->src, pn, sizeof(pn)) >= 0)
		src = pn;
	else if (acf)
		src = acf->applet->name;

	/* back side: prefer the target server id, fall back to the applet name */
	scb = s->scb;
	cob = sc_conn(scb);
	acb = sc_appctx(scb);
	srv = objt_server(s->target);
	if (srv)
		dst = srv->id;
	else if (acb)
		dst = acb->applet->name;

	/* one logical line per %c: stream, txn, channels, connectors, applets,
	 * then both connections. All pointer fields are NULL-guarded since
	 * this runs from crash/debug paths on possibly inconsistent streams.
	 */
	chunk_appendf(buf,
	              "%sstrm=%p,%x src=%s fe=%s be=%s dst=%s%c"
	              "%stxn=%p,%x txn.req=%s,%x txn.rsp=%s,%x%c"
	              "%srqf=%x rqa=%x rpf=%x rpa=%x%c"
	              "%sscf=%p,%s,%x scb=%p,%s,%x%c"
	              "%saf=%p,%u sab=%p,%u%c"
	              "%scof=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
	              "%scob=%p,%x:%s(%p)/%s(%p)/%s(%d)%c"
	              "",
	              pfx, s, s->flags, src, s->sess->fe->id, s->be->id, dst, eol,
	              pfx, s->txn, (s->txn ? s->txn->flags : 0),
	              (s->txn ? h1_msg_state_str(s->txn->req.msg_state) : "-"), (s->txn ? s->txn->req.flags : 0),
	              (s->txn ? h1_msg_state_str(s->txn->rsp.msg_state) : "-"), (s->txn ? s->txn->rsp.flags : 0), eol,
	              pfx, req->flags, req->analysers, res->flags, res->analysers, eol,
	              pfx, scf, cs_state_str(scf->state), scf->flags, scb, cs_state_str(scb->state), scb->flags, eol,
	              pfx, acf, acf ? acf->st0 : 0, acb, acb ? acb->st0 : 0, eol,
	              pfx, cof, cof ? cof->flags : 0, conn_get_mux_name(cof), cof ? cof->ctx : 0, conn_get_xprt_name(cof),
	              cof ? cof->xprt_ctx : 0, conn_get_ctrl_name(cof), conn_fd(cof), eol,
	              pfx, cob, cob ? cob->flags : 0, conn_get_mux_name(cob), cob ? cob->ctx : 0, conn_get_xprt_name(cob),
	              cob ? cob->xprt_ctx : 0, conn_get_ctrl_name(cob), conn_fd(cob), eol);
}
/* Dumps an error message for the object designated by <obj> (a stream, or an
 * appctx from which the stream is retrieved), having reached loop rate <rate>
 * calls per second, then aborts hoping to retrieve a core. Does nothing if no
 * stream can be derived from <obj>.
 */
void stream_dump_and_crash(enum obj_type *obj, int rate)
{
	const struct stream *s;
	char *msg = NULL;
	const void *ptr;

	ptr = s = objt_stream(obj);
	if (!s) {
		/* not a stream: maybe an applet whose stream we can report */
		const struct appctx *appctx = objt_appctx(obj);
		if (!appctx)
			return;
		ptr = appctx;
		s = appctx_strm(appctx);
		if (!s)
			return;
	}

	/* build the full state dump in the shared trash chunk */
	chunk_reset(&trash);
	stream_dump(&trash, s, "", ' ');

	/* append the list of active filters, often involved in spinning streams */
	chunk_appendf(&trash, "filters={");
	if (HAS_FILTERS(s)) {
		struct filter *filter;

		list_for_each_entry(filter, &s->strm_flt.filters, list) {
			/* separate entries after the first one */
			if (filter->list.p != &s->strm_flt.filters)
				chunk_appendf(&trash, ", ");
			chunk_appendf(&trash, "%p=\"%s\"", filter, FLT_ID(filter));
		}
	}
	chunk_appendf(&trash, "}");

	memprintf(&msg,
	          "A bogus %s [%p] is spinning at %d calls per second and refuses to die, "
	          "aborting now! Please report this error to developers "
	          "[%s]\n",
	          obj_type_name(obj), ptr, rate, trash.area);

	/* report on stderr and in the logs, then die; <msg> is deliberately
	 * not freed since we abort right after.
	 */
	ha_alert("%s", msg);
	send_log(NULL, LOG_EMERG, "%s", msg);
	ABORT_NOW();
}
2021-02-24 04:37:01 -05:00
/* initialize the require structures */
static void init_stream ( )
{
int thr ;
for ( thr = 0 ; thr < MAX_THREADS ; thr + + )
2021-09-30 13:02:18 -04:00
LIST_INIT ( & ha_thread_ctx [ thr ] . streams ) ;
2021-02-24 04:37:01 -05:00
}
INITCALL0 ( STG_INIT , init_stream ) ;
2020-02-28 09:13:33 -05:00
/* Generates a unique ID based on the given <format>, stores it in the given <strm> and
2020-03-05 14:19:02 -05:00
* returns the unique ID .
2022-05-17 18:22:15 -04:00
*
2020-03-05 14:19:02 -05:00
* If this function fails to allocate memory IST_NULL is returned .
2020-02-28 09:13:33 -05:00
*
2020-03-05 14:19:02 -05:00
* If an ID is already stored within the stream nothing happens existing unique ID is
* returned .
2020-02-28 09:13:33 -05:00
*/
2020-03-05 14:19:02 -05:00
struct ist stream_generate_unique_id ( struct stream * strm , struct list * format )
2020-02-28 09:13:33 -05:00
{
2020-03-05 14:19:02 -05:00
if ( isttest ( strm - > unique_id ) ) {
return strm - > unique_id ;
2020-02-28 09:13:33 -05:00
}
else {
char * unique_id ;
2020-03-05 14:19:02 -05:00
int length ;
2020-02-28 09:13:33 -05:00
if ( ( unique_id = pool_alloc ( pool_head_uniqueid ) ) = = NULL )
2020-03-05 14:19:02 -05:00
return IST_NULL ;
2020-02-28 09:13:33 -05:00
2020-03-05 14:19:02 -05:00
length = build_logline ( strm , unique_id , UNIQUEID_LEN , format ) ;
strm - > unique_id = ist2 ( unique_id , length ) ;
2020-02-28 09:13:33 -05:00
2020-03-05 14:19:02 -05:00
return strm - > unique_id ;
2020-02-28 09:13:33 -05:00
}
}
2010-06-18 11:46:06 -04:00
/************************************************************************/
/* All supported ACL keywords must be declared here. */
/************************************************************************/
2021-06-25 08:35:29 -04:00
static enum act_return stream_action_set_log_level ( struct act_rule * rule , struct proxy * px ,
struct session * sess , struct stream * s , int flags )
{
s - > logs . level = ( uintptr_t ) rule - > arg . act . p [ 0 ] ;
return ACT_RET_CONT ;
}
/* Parse a "set-log-level" action. It takes the level value as argument. It
* returns ACT_RET_PRS_OK on success , ACT_RET_PRS_ERR on error .
*/
static enum act_parse_ret stream_parse_set_log_level ( const char * * args , int * cur_arg , struct proxy * px ,
struct act_rule * rule , char * * err )
{
int level ;
if ( ! * args [ * cur_arg ] ) {
bad_log_level :
memprintf ( err , " expects exactly 1 argument (log level name or 'silent') " ) ;
return ACT_RET_PRS_ERR ;
}
if ( strcmp ( args [ * cur_arg ] , " silent " ) = = 0 )
level = - 1 ;
else if ( ( level = get_log_level ( args [ * cur_arg ] ) + 1 ) = = 0 )
goto bad_log_level ;
( * cur_arg ) + + ;
/* Register processing function. */
rule - > action_ptr = stream_action_set_log_level ;
rule - > action = ACT_CUSTOM ;
rule - > arg . act . p [ 0 ] = ( void * ) ( uintptr_t ) level ;
return ACT_RET_PRS_OK ;
}
2021-06-25 08:46:02 -04:00
static enum act_return stream_action_set_nice ( struct act_rule * rule , struct proxy * px ,
struct session * sess , struct stream * s , int flags )
{
s - > task - > nice = ( uintptr_t ) rule - > arg . act . p [ 0 ] ;
return ACT_RET_CONT ;
}
/* Parse a "set-nice" action. It takes the nice value as argument. It returns
* ACT_RET_PRS_OK on success , ACT_RET_PRS_ERR on error .
*/
static enum act_parse_ret stream_parse_set_nice ( const char * * args , int * cur_arg , struct proxy * px ,
struct act_rule * rule , char * * err )
{
int nice ;
if ( ! * args [ * cur_arg ] ) {
bad_log_level :
memprintf ( err , " expects exactly 1 argument (integer value) " ) ;
return ACT_RET_PRS_ERR ;
}
nice = atoi ( args [ * cur_arg ] ) ;
if ( nice < - 1024 )
nice = - 1024 ;
else if ( nice > 1024 )
nice = 1024 ;
( * cur_arg ) + + ;
/* Register processing function. */
rule - > action_ptr = stream_action_set_nice ;
rule - > action = ACT_CUSTOM ;
rule - > arg . act . p [ 0 ] = ( void * ) ( uintptr_t ) nice ;
return ACT_RET_PRS_OK ;
}
2021-06-25 08:35:29 -04:00
2021-03-15 07:03:44 -04:00
static enum act_return tcp_action_switch_stream_mode ( struct act_rule * rule , struct proxy * px ,
struct session * sess , struct stream * s , int flags )
{
enum pr_mode mode = ( uintptr_t ) rule - > arg . act . p [ 0 ] ;
const struct mux_proto_list * mux_proto = rule - > arg . act . p [ 1 ] ;
if ( ! IS_HTX_STRM ( s ) & & mode = = PR_MODE_HTTP ) {
if ( ! stream_set_http_mode ( s , mux_proto ) ) {
channel_abort ( & s - > req ) ;
channel_abort ( & s - > res ) ;
return ACT_RET_ABRT ;
}
}
return ACT_RET_STOP ;
}
static int check_tcp_switch_stream_mode ( struct act_rule * rule , struct proxy * px , char * * err )
{
const struct mux_proto_list * mux_ent ;
const struct mux_proto_list * mux_proto = rule - > arg . act . p [ 1 ] ;
enum pr_mode pr_mode = ( uintptr_t ) rule - > arg . act . p [ 0 ] ;
enum proto_proxy_mode mode = ( 1 < < ( pr_mode = = PR_MODE_HTTP ) ) ;
2021-03-15 10:10:38 -04:00
if ( pr_mode = = PR_MODE_HTTP )
px - > options | = PR_O_HTTP_UPG ;
2021-03-15 07:03:44 -04:00
if ( mux_proto ) {
mux_ent = conn_get_best_mux_entry ( mux_proto - > token , PROTO_SIDE_FE , mode ) ;
if ( ! mux_ent | | ! isteq ( mux_ent - > token , mux_proto - > token ) ) {
memprintf ( err , " MUX protocol '%.*s' is not compatible with the selected mode " ,
( int ) mux_proto - > token . len , mux_proto - > token . ptr ) ;
return 0 ;
}
}
else {
mux_ent = conn_get_best_mux_entry ( IST_NULL , PROTO_SIDE_FE , mode ) ;
if ( ! mux_ent ) {
memprintf ( err , " Unable to find compatible MUX protocol with the selected mode " ) ;
return 0 ;
}
}
/* Update the mux */
rule - > arg . act . p [ 1 ] = ( void * ) mux_ent ;
return 1 ;
}
/* Parses a "switch-mode" action: expects a mode ("http" only for now) and an
 * optional "proto <name>" pair selecting a specific MUX protocol. Only valid
 * on proxies with the frontend capability. Registers
 * tcp_action_switch_stream_mode()/check_tcp_switch_stream_mode() on the rule.
 * Returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR with <err> filled on
 * error.
 */
static enum act_parse_ret stream_parse_switch_mode(const char **args, int *cur_arg,
                                                   struct proxy *px, struct act_rule *rule,
                                                   char **err)
{
	const struct mux_proto_list *mux_proto = NULL;
	struct ist proto;
	enum pr_mode mode;

	/* must have at least the mode */
	if (*(args[*cur_arg]) == 0) {
		memprintf(err, "'%s %s' expects a mode as argument.", args[0], args[*cur_arg-1]);
		return ACT_RET_PRS_ERR;
	}

	/* upgrades only make sense on a frontend */
	if (!(px->cap & PR_CAP_FE)) {
		memprintf(err, "'%s %s' not allowed because %s '%s' has no frontend capability",
			  args[0], args[*cur_arg-1], proxy_type_str(px), px->id);
		return ACT_RET_PRS_ERR;
	}
	/* Check if the mode. For now "tcp" is disabled because downgrade is not
	 * supported and PT is the only TCP mux.
	 */
	if (strcmp(args[*cur_arg], "http") == 0)
		mode = PR_MODE_HTTP;
	else {
		memprintf(err, "'%s %s' expects a valid mode (got '%s').", args[0], args[*cur_arg-1], args[*cur_arg]);
		return ACT_RET_PRS_ERR;
	}

	/* check the proto, if specified */
	if (*(args[*cur_arg+1]) && strcmp(args[*cur_arg+1], "proto") == 0) {
		if (*(args[*cur_arg+2]) == 0) {
			memprintf(err, "'%s %s': '%s' expects a protocol as argument.",
				  args[0], args[*cur_arg-1], args[*cur_arg+1]);
			return ACT_RET_PRS_ERR;
		}

		proto = ist(args[*cur_arg + 2]);
		mux_proto = get_mux_proto(proto);
		if (!mux_proto) {
			memprintf(err, "'%s %s': '%s' expects a valid MUX protocol, if specified (got '%s')",
				  args[0], args[*cur_arg-1], args[*cur_arg+1], args[*cur_arg+2]);
			return ACT_RET_PRS_ERR;
		}
		/* consumed "proto" and its value */
		*cur_arg += 2;
	}

	(*cur_arg)++;

	/* Register processing function. */
	rule->action_ptr = tcp_action_switch_stream_mode;
	rule->check_ptr = check_tcp_switch_stream_mode;
	rule->action = ACT_CUSTOM;
	rule->arg.act.p[0] = (void *)(uintptr_t)mode;
	rule->arg.act.p[1] = (void *)mux_proto;
	return ACT_RET_PRS_OK;
}
2010-06-18 11:46:06 -04:00
2015-09-27 13:29:33 -04:00
/* 0=OK, <0=Alert, >0=Warning */
static enum act_parse_ret stream_parse_use_service ( const char * * args , int * cur_arg ,
struct proxy * px , struct act_rule * rule ,
char * * err )
{
struct action_kw * kw ;
/* Check if the service name exists. */
if ( * ( args [ * cur_arg ] ) = = 0 ) {
memprintf ( err , " '%s' expects a service name. " , args [ 0 ] ) ;
2015-11-26 13:48:04 -05:00
return ACT_RET_PRS_ERR ;
2015-09-27 13:29:33 -04:00
}
/* lookup for keyword corresponding to a service. */
kw = action_lookup ( & service_keywords , args [ * cur_arg ] ) ;
if ( ! kw ) {
memprintf ( err , " '%s' unknown service name. " , args [ 1 ] ) ;
return ACT_RET_PRS_ERR ;
}
( * cur_arg ) + + ;
/* executes specific rule parser. */
rule - > kw = kw ;
if ( kw - > parse ( ( const char * * ) args , cur_arg , px , rule , err ) = = ACT_RET_PRS_ERR )
return ACT_RET_PRS_ERR ;
/* Register processing function. */
rule - > action_ptr = process_use_service ;
rule - > action = ACT_CUSTOM ;
return ACT_RET_PRS_OK ;
}
/* Appends a list of service action keywords to the global registry so that
 * stream_parse_use_service() can resolve them by name.
 */
void service_keywords_register(struct action_kw_list *kw_list)
{
	LIST_APPEND(&service_keywords, &kw_list->list);
}
2020-11-28 13:32:14 -05:00
/* Returns the registered service action keyword matching <kw>, or NULL if
 * none matches.
 */
struct action_kw *service_find(const char *kw)
{
	return action_lookup(&service_keywords, kw);
}
2022-03-29 09:10:44 -04:00
/* Lists the known services on <out>. If <out> is null, emit them on stdout one
 * per line.
 */
void list_services(FILE *out)
{
	const struct action_kw *akwp, *akwn;
	struct action_kw_list *kw_list;
	int found = 0;
	int i;

	if (out)
		fprintf(out, "Available services :");

	/* Emit the keywords in sorted order without any allocation: on each
	 * outer pass, scan all registered keyword lists and pick the smallest
	 * keyword strictly greater than the previously emitted one (akwp),
	 * tracked in akwn. Stop when no further candidate is found.
	 */
	for (akwn = akwp = NULL;; akwp = akwn) {
		list_for_each_entry(kw_list, &service_keywords, list) {
			for (i = 0; kw_list->kw[i].kw != NULL; i++) {
				if (strordered(akwp ? akwp->kw : NULL,
					       kw_list->kw[i].kw,
					       akwn != akwp ? akwn->kw : NULL))
					akwn = &kw_list->kw[i];
				found = 1; /* at least one service keyword exists */
			}
		}
		if (akwn == akwp)
			break;
		if (out)
			fprintf(out, " %s", akwn->kw);
		else
			printf("%s\n", akwn->kw);
	}
	if (!found && out)
		fprintf(out, " none\n");
}
2016-11-21 02:51:11 -05:00
2022-05-03 04:49:00 -04:00
/* appctx context used by the "show sess" command; stored in the applet's
 * svcctx and persisted across successive I/O handler invocations so that
 * an interrupted dump can resume where it stopped.
 */
struct show_sess_ctx {
	struct bref bref;	/* back-reference from the session being dumped */
	void *target;		/* session we want to dump, or NULL for all */
	unsigned int thr;	/* the thread number being explored (0..MAX_THREADS-1) */
	unsigned int uid;	/* if non-null, the uniq_id of the session being dumped */
	int section;		/* section of the session being dumped */
	int pos;		/* last position of the current session's buffer */
};
2022-05-17 13:07:51 -04:00
/* This function dumps a complete stream state onto the stream connector's
 * read buffer. The stream has to be set in strm. It returns 0 if the output
 * buffer is full and it needs to be called again, otherwise non-zero. It is
 * designed to be called from stats_dump_strm_to_buffer() below.
 */
static int stats_dump_full_strm_to_buffer(struct stconn *cs, struct stream *strm)
{
	struct appctx *appctx = __sc_appctx(cs);
	struct show_sess_ctx *ctx = appctx->svcctx;
	struct stconn *scf, *scb;
	struct tm tm;
	extern const char *monthname[12];
	char pn[INET6_ADDRSTRLEN];
	struct connection *conn;
	struct appctx *tmpctx;

	chunk_reset(&trash);

	if (ctx->section > 0 && ctx->uid != strm->uniq_id) {
		/* stream changed, no need to go any further */
		chunk_appendf(&trash, "  *** session terminated while we were watching it ***\n");
		if (applet_putchk(appctx, &trash) == -1)
			goto full;
		goto done;
	}

	switch (ctx->section) {
	case 0: /* main status of the stream */
		ctx->uid = strm->uniq_id;
		ctx->section = 1;
		/* fall through */

	case 1:
		/* accept date/time and front protocol */
		get_localtime(strm->logs.accept_date.tv_sec, &tm);
		chunk_appendf(&trash,
			     "%p: [%02d/%s/%04d:%02d:%02d:%02d.%06d] id=%u proto=%s",
			     strm,
			     tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
			     tm.tm_hour, tm.tm_min, tm.tm_sec, (int)(strm->logs.accept_date.tv_usec),
			     strm->uniq_id,
			     strm_li(strm) ? strm_li(strm)->rx.proto->name : "?");

		/* client source address, if known */
		conn = objt_conn(strm_orig(strm));
		switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
		case AF_INET:
		case AF_INET6:
			chunk_appendf(&trash, " source=%s:%d\n",
			              pn, get_host_port(conn->src));
			break;
		case AF_UNIX:
			chunk_appendf(&trash, " source=unix:%d\n", strm_li(strm)->luid);
			break;
		default:
			/* no more information to print right now */
			chunk_appendf(&trash, "\n");
			break;
		}

		chunk_appendf(&trash,
			     "  flags=0x%x, conn_retries=%d, conn_exp=%s conn_et=0x%03x srv_conn=%p, pend_pos=%p waiting=%d epoch=%#x\n",
			     strm->flags, strm->conn_retries,
			     strm->conn_exp ?
			             tick_is_expired(strm->conn_exp, now_ms) ? "<PAST>" :
			                     human_time(TICKS_TO_MS(strm->conn_exp - now_ms),
			                     TICKS_TO_MS(1000)) : "<NEVER>",
			     strm->conn_err_type, strm->srv_conn, strm->pend_pos,
			     LIST_INLIST(&strm->buffer_wait.list), strm->stream_epoch);

		chunk_appendf(&trash,
			     "  frontend=%s (id=%u mode=%s), listener=%s (id=%u)",
			     strm_fe(strm)->id, strm_fe(strm)->uuid, proxy_mode_str(strm_fe(strm)->mode),
			     strm_li(strm) ? strm_li(strm)->name ? strm_li(strm)->name : "?" : "?",
			     strm_li(strm) ? strm_li(strm)->luid : 0);

		/* frontend-side local address */
		switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
		case AF_INET:
		case AF_INET6:
			chunk_appendf(&trash, " addr=%s:%d\n",
			              pn, get_host_port(conn->dst));
			break;
		case AF_UNIX:
			chunk_appendf(&trash, " addr=unix:%d\n", strm_li(strm)->luid);
			break;
		default:
			/* no more information to print right now */
			chunk_appendf(&trash, "\n");
			break;
		}

		if (strm->be->cap & PR_CAP_BE)
			chunk_appendf(&trash,
				     "  backend=%s (id=%u mode=%s)",
				     strm->be->id,
				     strm->be->uuid, proxy_mode_str(strm->be->mode));
		else
			chunk_appendf(&trash, "  backend=<NONE> (id=-1 mode=-)");

		/* from here on, <conn> is the backend connection */
		conn = sc_conn(strm->scb);
		switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
		case AF_INET:
		case AF_INET6:
			chunk_appendf(&trash, " addr=%s:%d\n",
			              pn, get_host_port(conn->src));
			break;
		case AF_UNIX:
			chunk_appendf(&trash, " addr=unix\n");
			break;
		default:
			/* no more information to print right now */
			chunk_appendf(&trash, "\n");
			break;
		}

		if (strm->be->cap & PR_CAP_BE)
			chunk_appendf(&trash,
				     "  server=%s (id=%u)",
				     objt_server(strm->target) ? __objt_server(strm->target)->id : "<none>",
				     objt_server(strm->target) ? __objt_server(strm->target)->puid : 0);
		else
			chunk_appendf(&trash, "  server=<NONE> (id=-1)");

		switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
		case AF_INET:
		case AF_INET6:
			chunk_appendf(&trash, " addr=%s:%d\n",
			              pn, get_host_port(conn->dst));
			break;
		case AF_UNIX:
			chunk_appendf(&trash, " addr=unix\n");
			break;
		default:
			/* no more information to print right now */
			chunk_appendf(&trash, "\n");
			break;
		}

		/* stream task state */
		chunk_appendf(&trash,
			     "  task=%p (state=0x%02x nice=%d calls=%u rate=%u exp=%s tmask=0x%lx%s",
			     strm->task,
			     strm->task->state,
			     strm->task->nice, strm->task->calls, read_freq_ctr(&strm->call_rate),
			     strm->task->expire ?
			             tick_is_expired(strm->task->expire, now_ms) ? "<PAST>" :
			                     human_time(TICKS_TO_MS(strm->task->expire - now_ms),
			                     TICKS_TO_MS(1000)) : "<NEVER>",
			     strm->task->thread_mask,
			     task_in_rq(strm->task) ? ", running" : "");

		chunk_appendf(&trash,
			     " age=%s)\n",
			     human_time(now.tv_sec - strm->logs.accept_date.tv_sec, 1));

		/* HTTP transaction, only when one exists */
		if (strm->txn)
			chunk_appendf(&trash,
			      "  txn=%p flags=0x%x meth=%d status=%d req.st=%s rsp.st=%s req.f=0x%02x rsp.f=0x%02x\n",
			      strm->txn, strm->txn->flags, strm->txn->meth, strm->txn->status,
			      h1_msg_state_str(strm->txn->req.msg_state), h1_msg_state_str(strm->txn->rsp.msg_state),
			      strm->txn->req.flags, strm->txn->rsp.flags);

		/* front stream connector, then its connection or applet */
		scf = strm->scf;
		chunk_appendf(&trash, "  scf=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d\n",
			      scf, scf->flags, cs_state_str(scf->state),
			      (sc_ep_test(scf, SE_FL_T_MUX) ? "CONN" : (sc_ep_test(scf, SE_FL_T_APPLET) ? "APPCTX" : "NONE")),
			      scf->sedesc->se, sc_ep_get(scf), scf->wait_event.events);

		if ((conn = sc_conn(scf)) != NULL) {
			chunk_appendf(&trash,
			              "      co0=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
				      conn,
				      conn_get_ctrl_name(conn),
				      conn_get_xprt_name(conn),
				      conn_get_mux_name(conn),
				      sc_get_data_name(scf),
			              obj_type_name(conn->target),
			              obj_base_ptr(conn->target));

			chunk_appendf(&trash,
			              "      flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
			              conn->flags,
			              conn_fd(conn),
			              conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
			              conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & tid_bit) : 0,
			              conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask : 0);
		}
		else if ((tmpctx = sc_appctx(scf)) != NULL) {
			chunk_appendf(&trash,
			              "      app0=%p st0=%d st1=%d st2=%d applet=%s tmask=0x%lx nice=%d calls=%u rate=%u cpu=%llu lat=%llu\n",
				      tmpctx,
				      tmpctx->st0,
				      tmpctx->st1,
				      tmpctx->_st2,
			              tmpctx->applet->name,
			              tmpctx->t->thread_mask,
			              tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate),
			              (unsigned long long)tmpctx->t->cpu_time, (unsigned long long)tmpctx->t->lat_time);
		}

		/* back stream connector, then its connection or applet */
		scb = strm->scb;
		chunk_appendf(&trash, "  scb=%p flags=0x%08x state=%s endp=%s,%p,0x%08x sub=%d\n",
			      scb, scb->flags, cs_state_str(scb->state),
			      (sc_ep_test(scb, SE_FL_T_MUX) ? "CONN" : (sc_ep_test(scb, SE_FL_T_APPLET) ? "APPCTX" : "NONE")),
			      scb->sedesc->se, sc_ep_get(scb), scb->wait_event.events);

		if ((conn = sc_conn(scb)) != NULL) {
			chunk_appendf(&trash,
			              "      co1=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n",
				      conn,
				      conn_get_ctrl_name(conn),
				      conn_get_xprt_name(conn),
				      conn_get_mux_name(conn),
				      sc_get_data_name(scb),
			              obj_type_name(conn->target),
			              obj_base_ptr(conn->target));

			chunk_appendf(&trash,
			              "      flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n",
			              conn->flags,
			              conn_fd(conn),
			              conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
			              conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & tid_bit) : 0,
			              conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask : 0);
		}
		else if ((tmpctx = sc_appctx(scb)) != NULL) {
			chunk_appendf(&trash,
			              "      app1=%p st0=%d st1=%d st2=%d applet=%s tmask=0x%lx nice=%d calls=%u rate=%u cpu=%llu lat=%llu\n",
				      tmpctx,
				      tmpctx->st0,
				      tmpctx->st1,
				      tmpctx->_st2,
			              tmpctx->applet->name,
			              tmpctx->t->thread_mask,
			              tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate),
			              (unsigned long long)tmpctx->t->cpu_time, (unsigned long long)tmpctx->t->lat_time);
		}

		/* request channel state and buffer */
		chunk_appendf(&trash,
			     "  req=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
			     "      an_exp=%s",
			     &strm->req,
			     strm->req.flags, strm->req.analysers,
			     strm->req.pipe ? strm->req.pipe->data : 0,
			     strm->req.to_forward, strm->req.total,
			     strm->req.analyse_exp ?
			     human_time(TICKS_TO_MS(strm->req.analyse_exp - now_ms),
			                TICKS_TO_MS(1000)) : "<NEVER>");

		chunk_appendf(&trash,
			     " rex=%s",
			     strm->req.rex ?
			     human_time(TICKS_TO_MS(strm->req.rex - now_ms),
			                TICKS_TO_MS(1000)) : "<NEVER>");

		chunk_appendf(&trash,
			     " wex=%s\n"
			     "      buf=%p data=%p o=%u p=%u i=%u size=%u\n",
			     strm->req.wex ?
			     human_time(TICKS_TO_MS(strm->req.wex - now_ms),
			                TICKS_TO_MS(1000)) : "<NEVER>",
			     &strm->req.buf,
			     b_orig(&strm->req.buf), (unsigned int)co_data(&strm->req),
			     (unsigned int)ci_head_ofs(&strm->req), (unsigned int)ci_data(&strm->req),
			     (unsigned int)strm->req.buf.size);

		if (IS_HTX_STRM(strm)) {
			struct htx *htx = htxbuf(&strm->req.buf);

			chunk_appendf(&trash,
				      "      htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
				      htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
				      (htx->tail >= htx->head) ? "NO" : "YES",
				      (unsigned long long)htx->extra);
		}
		if (HAS_FILTERS(strm) && strm_flt(strm)->current[0]) {
			struct filter *flt = strm_flt(strm)->current[0];

			chunk_appendf(&trash, "      current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x)\n",
				      flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
		}

		/* response channel state and buffer */
		chunk_appendf(&trash,
			     "  res=%p (f=0x%06x an=0x%x pipe=%d tofwd=%d total=%lld)\n"
			     "      an_exp=%s",
			     &strm->res,
			     strm->res.flags, strm->res.analysers,
			     strm->res.pipe ? strm->res.pipe->data : 0,
			     strm->res.to_forward, strm->res.total,
			     strm->res.analyse_exp ?
			     human_time(TICKS_TO_MS(strm->res.analyse_exp - now_ms),
			                TICKS_TO_MS(1000)) : "<NEVER>");

		chunk_appendf(&trash,
			     " rex=%s",
			     strm->res.rex ?
			     human_time(TICKS_TO_MS(strm->res.rex - now_ms),
			                TICKS_TO_MS(1000)) : "<NEVER>");

		chunk_appendf(&trash,
			     " wex=%s\n"
			     "      buf=%p data=%p o=%u p=%u i=%u size=%u\n",
			     strm->res.wex ?
			     human_time(TICKS_TO_MS(strm->res.wex - now_ms),
			                TICKS_TO_MS(1000)) : "<NEVER>",
			     &strm->res.buf,
			     b_orig(&strm->res.buf), (unsigned int)co_data(&strm->res),
			     (unsigned int)ci_head_ofs(&strm->res), (unsigned int)ci_data(&strm->res),
			     (unsigned int)strm->res.buf.size);

		if (IS_HTX_STRM(strm)) {
			struct htx *htx = htxbuf(&strm->res.buf);

			chunk_appendf(&trash,
				      "      htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n",
				      htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
				      (htx->tail >= htx->head) ? "NO" : "YES",
				      (unsigned long long)htx->extra);
		}
		if (HAS_FILTERS(strm) && strm_flt(strm)->current[1]) {
			struct filter *flt = strm_flt(strm)->current[1];

			chunk_appendf(&trash, "      current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x)\n",
				      flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
		}

		if (strm->current_rule_list && strm->current_rule) {
			const struct act_rule *rule = strm->current_rule;

			chunk_appendf(&trash, "  current_rule=\"%s\" [%s:%d]\n", rule->kw->kw, rule->conf.file, rule->conf.line);
		}

		if (applet_putchk(appctx, &trash) == -1)
			goto full;

		/* use other states to dump the contents */
	}
	/* end of dump */
 done:
	ctx->uid = 0;
	ctx->section = 0;
	return 1;
 full:
	return 0;
}
2018-04-18 07:26:46 -04:00
/* Parses the "show sess" command: optional argument is "all" (dump every
 * stream in full) or a stream pointer (dump that single stream in full);
 * with no argument, a one-line summary of each stream is produced by the
 * I/O handler. Returns 1 on permission error, otherwise 0 to let the
 * I/O handler run.
 */
static int cli_parse_show_sess(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct show_sess_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));

	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
		return 1;

	if (*args[2] && strcmp(args[2], "all") == 0)
		ctx->target = (void *)-1; /* sentinel: dump all streams in full */
	else if (*args[2])
		ctx->target = (void *)strtoul(args[2], NULL, 0); /* specific stream pointer */
	else
		ctx->target = NULL;
	ctx->section = 0; /* start with stream status */
	ctx->pos = 0;
	ctx->thr = 0;

	/* The back-ref must be reset, it will be detected and set by
	 * the dump code upon first invocation.
	 */
	LIST_INIT(&ctx->bref.users);

	/* let's set our own stream's epoch to the current one and increment
	 * it so that we know which streams were already there before us.
	 */
	appctx_strm(appctx)->stream_epoch = _HA_ATOMIC_FETCH_ADD(&stream_epoch, 1);
	return 0;
}
2022-05-17 13:07:51 -04:00
/* This function dumps all streams' states onto the stream connector's
 * read buffer. It returns 0 if the output buffer is full and it needs
 * to be called again, otherwise non-zero. It proceeds in an isolated
 * thread so there is no thread safety issue here.
 */
static int cli_io_handler_dump_sess(struct appctx *appctx)
{
	struct show_sess_ctx *ctx = appctx->svcctx;
	struct stconn *cs = appctx_cs(appctx);
	struct connection *conn;

	thread_isolate();

	if (ctx->thr >= global.nbthread) {
		/* already terminated */
		goto done;
	}

	if (unlikely(sc_ic(cs)->flags & (CF_WRITE_ERROR|CF_SHUTW))) {
		/* If we're forced to shut down, we might have to remove our
		 * reference to the last stream being dumped.
		 */
		if (!LIST_ISEMPTY(&ctx->bref.users)) {
			LIST_DELETE(&ctx->bref.users);
			LIST_INIT(&ctx->bref.users);
		}
		goto done;
	}

	chunk_reset(&trash);

	/* first, let's detach the back-ref from a possible previous stream */
	if (!LIST_ISEMPTY(&ctx->bref.users)) {
		LIST_DELETE(&ctx->bref.users);
		LIST_INIT(&ctx->bref.users);
	} else if (!ctx->bref.ref) {
		/* first call, start with first stream */
		ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
	}

	/* and start from where we stopped */
	while (1) {
		char pn[INET6_ADDRSTRLEN];
		struct stream *curr_strm;
		int done = 0;

		if (ctx->bref.ref == &ha_thread_ctx[ctx->thr].streams)
			done = 1; /* reached the end of this thread's list */
		else {
			/* check if we've found a stream created after issuing the "show sess" */
			curr_strm = LIST_ELEM(ctx->bref.ref, struct stream *, list);
			if ((int)(curr_strm->stream_epoch - appctx_strm(appctx)->stream_epoch) > 0)
				done = 1;
		}

		if (done) {
			/* move on to the next thread's stream list */
			ctx->thr++;
			if (ctx->thr >= global.nbthread)
				break;
			ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
			continue;
		}

		if (ctx->target) {
			/* full dump mode ("all" or a specific pointer) */
			if (ctx->target != (void *)-1 && ctx->target != curr_strm)
				goto next_sess;

			LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
			/* call the proper dump() function and return if we're missing space */
			if (!stats_dump_full_strm_to_buffer(cs, curr_strm))
				goto full;

			/* stream dump complete */
			LIST_DELETE(&ctx->bref.users);
			LIST_INIT(&ctx->bref.users);
			if (ctx->target != (void *)-1) {
				ctx->target = NULL;
				break;
			}
			else
				goto next_sess;
		}

		/* summary mode: one line per stream */
		chunk_appendf(&trash,
			     "%p: proto=%s",
			     curr_strm,
			     strm_li(curr_strm) ? strm_li(curr_strm)->rx.proto->name : "?");

		conn = objt_conn(strm_orig(curr_strm));
		switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
		case AF_INET:
		case AF_INET6:
			chunk_appendf(&trash,
				     " src=%s:%d fe=%s be=%s srv=%s",
				     pn,
				     get_host_port(conn->src),
				     strm_fe(curr_strm)->id,
				     (curr_strm->be->cap & PR_CAP_BE) ? curr_strm->be->id : "<NONE>",
				     objt_server(curr_strm->target) ? __objt_server(curr_strm->target)->id : "<none>"
				     );
			break;
		case AF_UNIX:
			chunk_appendf(&trash,
				     " src=unix:%d fe=%s be=%s srv=%s",
				     strm_li(curr_strm)->luid,
				     strm_fe(curr_strm)->id,
				     (curr_strm->be->cap & PR_CAP_BE) ? curr_strm->be->id : "<NONE>",
				     objt_server(curr_strm->target) ? __objt_server(curr_strm->target)->id : "<none>"
				     );
			break;
		}

		chunk_appendf(&trash,
			     " ts=%02x epoch=%#x age=%s calls=%u rate=%u cpu=%llu lat=%llu",
			     curr_strm->task->state, curr_strm->stream_epoch,
			     human_time(now.tv_sec - curr_strm->logs.tv_accept.tv_sec, 1),
			     curr_strm->task->calls, read_freq_ctr(&curr_strm->call_rate),
			     (unsigned long long)curr_strm->task->cpu_time, (unsigned long long)curr_strm->task->lat_time);

		chunk_appendf(&trash,
			     " rq[f=%06xh,i=%u,an=%02xh,rx=%s",
			     curr_strm->req.flags,
			     (unsigned int)ci_data(&curr_strm->req),
			     curr_strm->req.analysers,
			     curr_strm->req.rex ?
			     human_time(TICKS_TO_MS(curr_strm->req.rex - now_ms),
			                TICKS_TO_MS(1000)) : "");

		chunk_appendf(&trash,
			     ",wx=%s",
			     curr_strm->req.wex ?
			     human_time(TICKS_TO_MS(curr_strm->req.wex - now_ms),
			                TICKS_TO_MS(1000)) : "");

		chunk_appendf(&trash,
			     ",ax=%s]",
			     curr_strm->req.analyse_exp ?
			     human_time(TICKS_TO_MS(curr_strm->req.analyse_exp - now_ms),
			                TICKS_TO_MS(1000)) : "");

		chunk_appendf(&trash,
			     " rp[f=%06xh,i=%u,an=%02xh,rx=%s",
			     curr_strm->res.flags,
			     (unsigned int)ci_data(&curr_strm->res),
			     curr_strm->res.analysers,
			     curr_strm->res.rex ?
			     human_time(TICKS_TO_MS(curr_strm->res.rex - now_ms),
			                TICKS_TO_MS(1000)) : "");

		chunk_appendf(&trash,
			     ",wx=%s",
			     curr_strm->res.wex ?
			     human_time(TICKS_TO_MS(curr_strm->res.wex - now_ms),
			                TICKS_TO_MS(1000)) : "");

		chunk_appendf(&trash,
			     ",ax=%s]",
			     curr_strm->res.analyse_exp ?
			     human_time(TICKS_TO_MS(curr_strm->res.analyse_exp - now_ms),
			                TICKS_TO_MS(1000)) : "");

		conn = sc_conn(curr_strm->scf);
		chunk_appendf(&trash,
			     " scf=[%d,%1xh,fd=%d]",
			     curr_strm->scf->state,
			     curr_strm->scf->flags,
			     conn_fd(conn));

		conn = sc_conn(curr_strm->scb);
		chunk_appendf(&trash,
			     " scb=[%d,%1xh,fd=%d]",
			     curr_strm->scb->state,
			     curr_strm->scb->flags,
			     conn_fd(conn));

		chunk_appendf(&trash,
			     " exp=%s rc=%d c_exp=%s",
			     curr_strm->task->expire ?
			     human_time(TICKS_TO_MS(curr_strm->task->expire - now_ms),
			                TICKS_TO_MS(1000)) : "",
			     curr_strm->conn_retries,
			     curr_strm->conn_exp ?
			     human_time(TICKS_TO_MS(curr_strm->conn_exp - now_ms),
			                TICKS_TO_MS(1000)) : "");
		if (task_in_rq(curr_strm->task))
			chunk_appendf(&trash, " run(nice=%d)", curr_strm->task->nice);

		chunk_appendf(&trash, "\n");

		if (applet_putchk(appctx, &trash) == -1) {
			/* let's try again later from this stream. We add ourselves into
			 * this stream's users so that it can remove us upon termination.
			 */
			LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
			goto full;
		}

	next_sess:
		ctx->bref.ref = curr_strm->list.n;
	}

	if (ctx->target && ctx->target != (void *)-1) {
		/* specified stream not found */
		if (ctx->section > 0)
			chunk_appendf(&trash, "  *** session terminated while we were watching it ***\n");
		else
			chunk_appendf(&trash, "Session not found.\n");

		if (applet_putchk(appctx, &trash) == -1)
			goto full;

		ctx->target = NULL;
		ctx->uid = 0;
		goto done;
	}

 done:
	thread_release();
	return 1;
 full:
	thread_release();
	return 0;
}
static void cli_release_show_sess ( struct appctx * appctx )
{
2022-05-03 04:49:00 -04:00
struct show_sess_ctx * ctx = appctx - > svcctx ;
2022-05-03 05:10:19 -04:00
if ( ctx - > thr < global . nbthread ) {
2021-02-24 07:46:12 -05:00
/* a dump was aborted, either in error or timeout. We need to
* safely detach from the target stream ' s list . It ' s mandatory
* to lock because a stream on the target thread could be moving
* our node .
*/
thread_isolate ( ) ;
2022-05-03 04:49:00 -04:00
if ( ! LIST_ISEMPTY ( & ctx - > bref . users ) )
LIST_DELETE ( & ctx - > bref . users ) ;
2021-02-24 07:46:12 -05:00
thread_release ( ) ;
2016-11-21 02:51:11 -05:00
}
}
2016-11-24 05:09:25 -05:00
/* Parses the "shutdown session" directive, it always returns 1 */
2018-04-18 07:26:46 -04:00
static int cli_parse_shutdown_session ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-24 05:09:25 -05:00
{
struct stream * strm , * ptr ;
2021-02-24 04:37:01 -05:00
int thr ;
2016-11-24 05:09:25 -05:00
if ( ! cli_has_level ( appctx , ACCESS_LVL_ADMIN ) )
return 1 ;
2022-03-31 08:49:45 -04:00
ptr = ( void * ) strtoul ( args [ 2 ] , NULL , 0 ) ;
if ( ! ptr )
2019-08-09 05:21:01 -04:00
return cli_err ( appctx , " Session pointer expected (use 'show sess') . \ n " ) ;
2016-11-24 05:09:25 -05:00
2021-02-24 04:37:01 -05:00
strm = NULL ;
2016-11-24 05:09:25 -05:00
2021-02-24 05:11:06 -05:00
thread_isolate ( ) ;
2016-11-24 05:09:25 -05:00
/* first, look for the requested stream in the stream table */
2022-03-31 08:49:45 -04:00
for ( thr = 0 ; strm ! = ptr & & thr < global . nbthread ; thr + + ) {
2021-09-30 13:02:18 -04:00
list_for_each_entry ( strm , & ha_thread_ctx [ thr ] . streams , list ) {
2021-02-24 04:37:01 -05:00
if ( strm = = ptr ) {
stream_shutdown ( strm , SF_ERR_KILLED ) ;
break ;
}
2021-02-24 05:11:06 -05:00
}
2016-11-24 05:09:25 -05:00
}
2021-02-24 05:11:06 -05:00
thread_release ( ) ;
2016-11-24 05:09:25 -05:00
/* do we have the stream ? */
2022-03-31 08:49:45 -04:00
if ( strm ! = ptr )
2019-08-09 05:21:01 -04:00
return cli_err ( appctx , " No such session (use 'show sess') . \ n " ) ;
2016-11-24 05:09:25 -05:00
return 1 ;
}
2016-11-23 10:50:48 -05:00
/* Parses the "shutdown session server" directive, it always returns 1 */
2018-04-18 07:26:46 -04:00
static int cli_parse_shutdown_sessions_server ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-23 10:50:48 -05:00
{
struct server * sv ;
if ( ! cli_has_level ( appctx , ACCESS_LVL_ADMIN ) )
return 1 ;
sv = cli_find_server ( appctx , args [ 3 ] ) ;
if ( ! sv )
return 1 ;
/* kill all the stream that are on this server */
2017-11-07 04:42:54 -05:00
HA_SPIN_LOCK ( SERVER_LOCK , & sv - > lock ) ;
2019-11-14 10:37:16 -05:00
srv_shutdown_streams ( sv , SF_ERR_KILLED ) ;
2017-11-07 04:42:54 -05:00
HA_SPIN_UNLOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-23 10:50:48 -05:00
return 1 ;
}
/* register cli keywords: each entry maps a CLI command prefix to its parse,
 * I/O and release handlers (NULL handlers are simply unused).
 */
static struct cli_kw_list cli_kws = {{ },{
	{ { "show", "sess", NULL }, "show sess [id] : report the list of current sessions or dump this exact session", cli_parse_show_sess, cli_io_handler_dump_sess, cli_release_show_sess },
	{ { "shutdown", "session", NULL }, "shutdown session [id] : kill a specific session", cli_parse_shutdown_session, NULL, NULL },
	{ { "shutdown", "sessions", "server" }, "shutdown sessions server <bk>/<srv> : kill sessions on a server", cli_parse_shutdown_sessions_server, NULL, NULL },
	{{},}
}};

/* registered at boot via the init-call mechanism, no runtime registration */
INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
2015-09-27 13:29:33 -04:00
/* main configuration keyword registration: actions usable in
 * "tcp-request content" rules.
 */
static struct action_kw_list stream_tcp_req_keywords = { ILH, {
	{ "set-log-level", stream_parse_set_log_level },
	{ "set-nice",      stream_parse_set_nice },
	{ "switch-mode",   stream_parse_switch_mode },
	{ "use-service",   stream_parse_use_service },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &stream_tcp_req_keywords);
/* main configuration keyword registration: actions usable in
 * "tcp-response content" rules.
 */
static struct action_kw_list stream_tcp_res_keywords = { ILH, {
	{ "set-log-level", stream_parse_set_log_level },
	{ "set-nice",      stream_parse_set_nice },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &stream_tcp_res_keywords);
/* actions usable in "http-request" rules */
static struct action_kw_list stream_http_req_keywords = { ILH, {
	{ "set-log-level", stream_parse_set_log_level },
	{ "set-nice",      stream_parse_set_nice },
	{ "use-service",   stream_parse_use_service },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, http_req_keywords_register, &stream_http_req_keywords);
/* actions usable in "http-response" rules */
static struct action_kw_list stream_http_res_keywords = { ILH, {
	{ "set-log-level", stream_parse_set_log_level },
	{ "set-nice",      stream_parse_set_nice },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, http_res_keywords_register, &stream_http_res_keywords);
2010-06-18 11:46:06 -04:00
2020-12-10 07:43:58 -05:00
static int smp_fetch_cur_server_timeout ( const struct arg * args , struct sample * smp , const char * km , void * private )
{
smp - > flags = SMP_F_VOL_TXN ;
smp - > data . type = SMP_T_SINT ;
if ( ! smp - > strm )
return 0 ;
smp - > data . u . sint = TICKS_TO_MS ( smp - > strm - > res . rto ) ;
return 1 ;
}
static int smp_fetch_cur_tunnel_timeout ( const struct arg * args , struct sample * smp , const char * km , void * private )
{
smp - > flags = SMP_F_VOL_TXN ;
smp - > data . type = SMP_T_SINT ;
if ( ! smp - > strm )
return 0 ;
smp - > data . u . sint = TICKS_TO_MS ( smp - > strm - > tunnel_timeout ) ;
return 1 ;
}
2022-03-09 11:33:05 -05:00
static int smp_fetch_last_rule_file ( const struct arg * args , struct sample * smp , const char * km , void * private )
{
smp - > flags = SMP_F_VOL_TXN ;
smp - > data . type = SMP_T_STR ;
if ( ! smp - > strm | | ! smp - > strm - > last_rule_file )
return 0 ;
smp - > flags | = SMP_F_CONST ;
smp - > data . u . str . area = ( char * ) smp - > strm - > last_rule_file ;
smp - > data . u . str . data = strlen ( smp - > strm - > last_rule_file ) ;
return 1 ;
}
static int smp_fetch_last_rule_line ( const struct arg * args , struct sample * smp , const char * km , void * private )
{
smp - > flags = SMP_F_VOL_TXN ;
smp - > data . type = SMP_T_SINT ;
if ( ! smp - > strm | | ! smp - > strm - > last_rule_line )
return 0 ;
smp - > data . u . sint = smp - > strm - > last_rule_line ;
return 1 ;
}
/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted.
 */
static struct sample_fetch_kw_list smp_kws = { ILH, {
	{ "cur_server_timeout", smp_fetch_cur_server_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
	{ "cur_tunnel_timeout", smp_fetch_cur_tunnel_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
	{ "last_rule_file",     smp_fetch_last_rule_file,     0, NULL, SMP_T_STR,  SMP_USE_INTRN, },
	{ "last_rule_line",     smp_fetch_last_rule_line,     0, NULL, SMP_T_SINT, SMP_USE_INTRN, },
	{ NULL, NULL, 0, 0, 0 },
}};

/* registered at boot via the init-call mechanism */
INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
2006-06-25 20:48:02 -04:00
/*
* Local variables :
* c - indent - level : 8
* c - basic - offset : 8
* End :
*/