2015-04-03 07:53:24 -04:00
|
|
|
/*
|
2020-06-04 12:58:52 -04:00
|
|
|
* include/haproxy/session.h
|
|
|
|
|
* This file contains functions used to manage sessions.
|
2015-04-03 07:53:24 -04:00
|
|
|
*
|
2020-06-04 12:58:52 -04:00
|
|
|
* Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
|
2015-04-03 07:53:24 -04:00
|
|
|
*
|
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
|
* License as published by the Free Software Foundation, version 2.1
|
|
|
|
|
* exclusively.
|
|
|
|
|
*
|
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
|
*
|
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
|
|
|
*/
|
|
|
|
|
|
2020-06-04 12:58:52 -04:00
|
|
|
#ifndef _HAPROXY_SESSION_H
|
|
|
|
|
#define _HAPROXY_SESSION_H
|
2015-04-03 07:53:24 -04:00
|
|
|
|
2020-05-27 06:58:42 -04:00
|
|
|
#include <haproxy/api.h>
|
2021-10-22 09:41:57 -04:00
|
|
|
#include <haproxy/connection.h>
|
2020-06-04 11:05:57 -04:00
|
|
|
#include <haproxy/global-t.h>
|
2020-06-04 12:58:52 -04:00
|
|
|
#include <haproxy/obj_type-t.h>
|
|
|
|
|
#include <haproxy/pool.h>
|
2020-06-04 17:20:13 -04:00
|
|
|
#include <haproxy/server.h>
|
2020-06-04 12:58:52 -04:00
|
|
|
#include <haproxy/session-t.h>
|
2020-06-04 12:46:44 -04:00
|
|
|
#include <haproxy/stick_table.h>
|
2015-04-04 10:31:16 -04:00
|
|
|
|
2017-11-24 11:34:44 -05:00
|
|
|
extern struct pool_head *pool_head_session;
|
2018-12-27 11:20:54 -05:00
|
|
|
extern struct pool_head *pool_head_sess_srv_list;
|
|
|
|
|
|
2015-04-04 18:38:48 -04:00
|
|
|
struct session *session_new(struct proxy *fe, struct listener *li, enum obj_type *origin);
|
2015-04-04 09:54:03 -04:00
|
|
|
void session_free(struct session *sess);
|
2020-10-14 11:37:17 -04:00
|
|
|
int session_accept_fd(struct connection *cli_conn);
|
2020-01-22 12:08:48 -05:00
|
|
|
int conn_complete_session(struct connection *conn);
|
2021-03-02 10:09:26 -05:00
|
|
|
struct task *session_expire_embryonic(struct task *t, void *context, unsigned int state);
|
2015-04-03 07:53:24 -04:00
|
|
|
|
2015-04-04 10:31:16 -04:00
|
|
|
/* Remove the refcount from the session to the tracked counters, and clear the
 * pointer to ensure this is only performed once. The caller is responsible for
 * ensuring that the pointer is valid first.
 *
 * For each tracked counter slot of <sess>: the CONN_CUR gauge of the stick
 * table entry is decremented under the entry's write lock, the local table is
 * touched to re-schedule a sync, then the slot is cleared and the entry is
 * passed to stksess_kill_if_expired() so it can be released if nothing
 * references it anymore.
 */
static inline void session_store_counters(struct session *sess)
{
	void *ptr;
	int i;
	struct stksess *ts;

	for (i = 0; i < MAX_SESS_STKCTR; i++) {
		struct stkctr *stkctr = &sess->stkctr[i];

		ts = stkctr_entry(stkctr);
		/* slot not tracking any entry: nothing to release */
		if (!ts)
			continue;

		ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_CONN_CUR);
		if (ptr) {
			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);

			/* guard against underflow: only decrement a positive gauge */
			if (stktable_data_cast(ptr, std_t_uint) > 0)
				stktable_data_cast(ptr, std_t_uint)--;

			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);

			/* If data was modified, we need to touch to re-schedule sync */
			stktable_touch_local(stkctr->table, ts, 0);
		}

		/* clear the slot before killing the entry so it cannot be reused */
		stkctr_set_entry(stkctr, NULL);
		stksess_kill_if_expired(stkctr->table, ts, 1);
	}
}
|
|
|
|
|
|
2020-10-06 08:23:34 -04:00
|
|
|
/* Increase the number of cumulated HTTP requests in the tracked counters */
|
|
|
|
|
static inline void session_inc_http_req_ctr(struct session *sess)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < MAX_SESS_STKCTR; i++)
|
|
|
|
|
stkctr_inc_http_req_ctr(&sess->stkctr[i]);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Increase the number of cumulated failed HTTP requests in the tracked
|
|
|
|
|
* counters. Only 4xx requests should be counted here so that we can
|
|
|
|
|
* distinguish between errors caused by client behaviour and other ones.
|
|
|
|
|
* Note that even 404 are interesting because they're generally caused by
|
|
|
|
|
* vulnerability scans.
|
|
|
|
|
*/
|
|
|
|
|
static inline void session_inc_http_err_ctr(struct session *sess)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < MAX_SESS_STKCTR; i++)
|
|
|
|
|
stkctr_inc_http_err_ctr(&sess->stkctr[i]);
|
|
|
|
|
}
|
|
|
|
|
|
MINOR: stick-tables/counters: add http_fail_cnt and http_fail_rate data types
Historically we've been counting lots of client-triggered events in stick
tables to help detect misbehaving ones, but we've been missing the same on
the server side, and there's been repeated requests for being able to count
the server errors per URL in order to precisely monitor the quality of
service or even to avoid routing requests to certain dead services, which
is also called "circuit breaking" nowadays.
This commit introduces http_fail_cnt and http_fail_rate, which work like
http_err_cnt and http_err_rate in that they respectively count events and
their frequency, but they only consider server-side issues such as network
errors, unparsable and truncated responses, and 5xx status codes other
than 501 and 505 (since these ones are usually triggered by the client).
Note that retryable errors are purposely not accounted for, so that only
what the client really sees is considered.
With this it becomes very simple to put some protective measures in place
to perform a redirect or return an excuse page when the error rate goes
beyond a certain threshold for a given URL, and give more chances to the
server to recover from this condition. Typically it could look like this
to bypass a URL causing more than 10 requests per second:
stick-table type string len 80 size 4k expire 1m store http_fail_rate(1m)
http-request track-sc0 base # track host+path, ignore query string
http-request return status 503 content-type text/html \
lf-file excuse.html if { sc0_http_fail_rate gt 10 }
A more advanced mechanism using gpt0 could even implement high/low rates
to disable/enable the service.
Reg-test converteers_ref_cnt_never_dec.vtc was updated to test it.
2021-02-10 06:07:15 -05:00
|
|
|
/* Increase the number of cumulated failed HTTP responses in the tracked
|
|
|
|
|
* counters. Only some 5xx responses should be counted here so that we can
|
|
|
|
|
* distinguish between server failures and errors triggered by the client
|
|
|
|
|
* (i.e. 501 and 505 may be triggered and must be ignored).
|
|
|
|
|
*/
|
|
|
|
|
static inline void session_inc_http_fail_ctr(struct session *sess)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < MAX_SESS_STKCTR; i++)
|
|
|
|
|
stkctr_inc_http_fail_ctr(&sess->stkctr[i]);
|
|
|
|
|
}
|
|
|
|
|
|
2020-10-06 08:23:34 -04:00
|
|
|
|
2018-12-27 11:20:54 -05:00
|
|
|
/* Remove the connection from the session list, and destroy the srv_list if it's
 * now empty. The connection's owner pointer is cleared since the session does
 * not own it anymore. Safe to call on a connection that is not linked: it
 * returns immediately in that case.
 */
static inline void session_unown_conn(struct session *sess, struct connection *conn)
{
	struct sess_srv_list *srv_list = NULL;

	/* only backend connections may sit in a session's server list;
	 * conn->target must never be a listener here.
	 */
	BUG_ON(objt_listener(conn->target));

	/* WT: this currently is a workaround for an inconsistency between
	 * the link status of the connection in the session list and the
	 * connection's owner. This should be removed as soon as all this
	 * is addressed. Right now it's possible to enter here with a non-null
	 * conn->owner that points to a dead session, but in this case the
	 * element is not linked.
	 */
	if (!LIST_INLIST(&conn->session_list))
		return;

	/* keep the session's idle connection accounting in sync */
	if (conn->flags & CO_FL_SESS_IDLE)
		sess->idle_conns--;
	LIST_DEL_INIT(&conn->session_list);
	/* the session no longer owns this connection */
	conn->owner = NULL;
	/* find the per-server entry for this target and release it if this
	 * was its last connection.
	 */
	list_for_each_entry(srv_list, &sess->srv_list, srv_list) {
		if (srv_list->target == conn->target) {
			if (LIST_ISEMPTY(&srv_list->conn_list)) {
				LIST_DELETE(&srv_list->srv_list);
				pool_free(pool_head_sess_srv_list, srv_list);
			}
			break;
		}
	}
}
|
|
|
|
|
|
MEDIUM: connection: Add private connections synchronously in session server list
When a connection is marked as private, it is now added in the session server
list. We don't wait a stream is detached from the mux to do so. When the
connection is created, this happens after the mux creation. Otherwise, it is
performed when the connection is marked as private.
To allow that, when a connection is created, the session is systematically set
as the connectin owner. Thus, a backend connection has always a owner during its
creation. And a private connection has always a owner until its death.
Note that outside the detach() callback, if the call to session_add_conn()
failed, the error is ignored. In this situation, we retry to add the connection
into the session server list in the detach() callback. If this fails at this
step, the multiplexer is destroyed and the connection is closed.
2020-07-01 10:10:06 -04:00
|
|
|
/* Add the connection <conn> to the server list of the session <sess>. This
|
|
|
|
|
* function is called only if the connection is private. Nothing is performed if
|
|
|
|
|
* the connection is already in the session sever list or if the session does
|
|
|
|
|
* not own the connection.
|
|
|
|
|
*/
|
2018-12-27 11:20:54 -05:00
|
|
|
static inline int session_add_conn(struct session *sess, struct connection *conn, void *target)
|
|
|
|
|
{
|
|
|
|
|
struct sess_srv_list *srv_list = NULL;
|
|
|
|
|
int found = 0;
|
2018-11-30 11:24:55 -05:00
|
|
|
|
2021-05-03 08:28:30 -04:00
|
|
|
BUG_ON(objt_listener(conn->target));
|
|
|
|
|
|
MEDIUM: connection: Add private connections synchronously in session server list
When a connection is marked as private, it is now added in the session server
list. We don't wait a stream is detached from the mux to do so. When the
connection is created, this happens after the mux creation. Otherwise, it is
performed when the connection is marked as private.
To allow that, when a connection is created, the session is systematically set
as the connectin owner. Thus, a backend connection has always a owner during its
creation. And a private connection has always a owner until its death.
Note that outside the detach() callback, if the call to session_add_conn()
failed, the error is ignored. In this situation, we retry to add the connection
into the session server list in the detach() callback. If this fails at this
step, the multiplexer is destroyed and the connection is closed.
2020-07-01 10:10:06 -04:00
|
|
|
/* Already attach to the session or not the connection owner */
|
2020-11-20 11:08:15 -05:00
|
|
|
if (!LIST_ISEMPTY(&conn->session_list) || (conn->owner && conn->owner != sess))
|
MEDIUM: connection: Add private connections synchronously in session server list
When a connection is marked as private, it is now added in the session server
list. We don't wait a stream is detached from the mux to do so. When the
connection is created, this happens after the mux creation. Otherwise, it is
performed when the connection is marked as private.
To allow that, when a connection is created, the session is systematically set
as the connectin owner. Thus, a backend connection has always a owner during its
creation. And a private connection has always a owner until its death.
Note that outside the detach() callback, if the call to session_add_conn()
failed, the error is ignored. In this situation, we retry to add the connection
into the session server list in the detach() callback. If this fails at this
step, the multiplexer is destroyed and the connection is closed.
2020-07-01 10:10:06 -04:00
|
|
|
return 1;
|
|
|
|
|
|
2018-12-27 11:20:54 -05:00
|
|
|
list_for_each_entry(srv_list, &sess->srv_list, srv_list) {
|
|
|
|
|
if (srv_list->target == target) {
|
|
|
|
|
found = 1;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (!found) {
|
|
|
|
|
/* The session has no connection for the server, create a new entry */
|
|
|
|
|
srv_list = pool_alloc(pool_head_sess_srv_list);
|
|
|
|
|
if (!srv_list)
|
|
|
|
|
return 0;
|
|
|
|
|
srv_list->target = target;
|
|
|
|
|
LIST_INIT(&srv_list->conn_list);
|
2021-04-21 01:32:39 -04:00
|
|
|
LIST_APPEND(&sess->srv_list, &srv_list->srv_list);
|
2018-11-30 11:24:55 -05:00
|
|
|
}
|
2021-04-21 01:32:39 -04:00
|
|
|
LIST_APPEND(&srv_list->conn_list, &conn->session_list);
|
2018-12-27 11:20:54 -05:00
|
|
|
return 1;
|
2018-11-30 11:24:55 -05:00
|
|
|
}
|
|
|
|
|
|
MEDIUM: connection: Add private connections synchronously in session server list
When a connection is marked as private, it is now added in the session server
list. We don't wait a stream is detached from the mux to do so. When the
connection is created, this happens after the mux creation. Otherwise, it is
performed when the connection is marked as private.
To allow that, when a connection is created, the session is systematically set
as the connectin owner. Thus, a backend connection has always a owner during its
creation. And a private connection has always a owner until its death.
Note that outside the detach() callback, if the call to session_add_conn()
failed, the error is ignored. In this situation, we retry to add the connection
into the session server list in the detach() callback. If this fails at this
step, the multiplexer is destroyed and the connection is closed.
2020-07-01 10:10:06 -04:00
|
|
|
/* Returns 0 if the session can keep the idle conn, -1 if it was destroyed. The
|
|
|
|
|
* connection must be private.
|
|
|
|
|
*/
|
2018-12-14 13:27:06 -05:00
|
|
|
static inline int session_check_idle_conn(struct session *sess, struct connection *conn)
|
|
|
|
|
{
|
MEDIUM: connection: Add private connections synchronously in session server list
When a connection is marked as private, it is now added in the session server
list. We don't wait a stream is detached from the mux to do so. When the
connection is created, this happens after the mux creation. Otherwise, it is
performed when the connection is marked as private.
To allow that, when a connection is created, the session is systematically set
as the connectin owner. Thus, a backend connection has always a owner during its
creation. And a private connection has always a owner until its death.
Note that outside the detach() callback, if the call to session_add_conn()
failed, the error is ignored. In this situation, we retry to add the connection
into the session server list in the detach() callback. If this fails at this
step, the multiplexer is destroyed and the connection is closed.
2020-07-01 10:10:06 -04:00
|
|
|
/* Another session owns this connection */
|
|
|
|
|
if (conn->owner != sess)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
if (sess->idle_conns >= sess->fe->max_out_conns) {
|
2018-12-27 11:20:54 -05:00
|
|
|
session_unown_conn(sess, conn);
|
2018-12-14 13:27:06 -05:00
|
|
|
conn->owner = NULL;
|
2019-05-02 06:04:15 -04:00
|
|
|
conn->flags &= ~CO_FL_SESS_IDLE;
|
2020-04-27 09:53:41 -04:00
|
|
|
conn->mux->destroy(conn->ctx);
|
2020-01-20 07:56:01 -05:00
|
|
|
return -1;
|
2018-12-28 12:50:57 -05:00
|
|
|
} else {
|
|
|
|
|
conn->flags |= CO_FL_SESS_IDLE;
|
|
|
|
|
sess->idle_conns++;
|
2018-12-14 13:27:06 -05:00
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
2015-04-04 10:31:16 -04:00
|
|
|
|
2020-07-01 10:36:51 -04:00
|
|
|
/* Look for an available connection matching the target <target> in the server
|
|
|
|
|
* list of the session <sess>. It returns a connection if found. Otherwise it
|
|
|
|
|
* returns NULL.
|
|
|
|
|
*/
|
2021-01-25 04:29:35 -05:00
|
|
|
static inline struct connection *session_get_conn(struct session *sess, void *target, int64_t hash)
|
2020-07-01 10:36:51 -04:00
|
|
|
{
|
|
|
|
|
struct connection *srv_conn = NULL;
|
|
|
|
|
struct sess_srv_list *srv_list;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(srv_list, &sess->srv_list, srv_list) {
|
|
|
|
|
if (srv_list->target == target) {
|
|
|
|
|
list_for_each_entry(srv_conn, &srv_list->conn_list, session_list) {
|
BUG/MAJOR: conn-idle: fix hash indexing issues on idle conns
Idle connections do not work on 32-bit machines due to an alignment issue
causing the connection nodes to be indexed with their lower 32-bits set to
zero and the higher 32 ones containing the 32 lower bitss of the hash. The
cause is the use of ebmb_node with an aligned data, as on this platform
ebmb_node is only 32-bit aligned, leaving a hole before the following hash
which is a uint64_t:
$ pahole -C conn_hash_node ./haproxy
struct conn_hash_node {
struct ebmb_node node; /* 0 20 */
/* XXX 4 bytes hole, try to pack */
int64_t hash; /* 24 8 */
struct connection * conn; /* 32 4 */
/* size: 40, cachelines: 1, members: 3 */
/* sum members: 32, holes: 1, sum holes: 4 */
/* padding: 4 */
/* last cacheline: 40 bytes */
};
Instead, eb64 nodes should be used when it comes to simply storing a
64-bit key, and that is what this patch does.
For backports, a variant consisting in simply marking the "hash" member
with a "packed" attribute on the struct also does the job (tested), and
might be preferable if the fix is difficult to adapt. Only 2.6 and 2.5
are affected by this.
2022-09-29 14:32:43 -04:00
|
|
|
if ((srv_conn->hash_node && srv_conn->hash_node->node.key == hash) &&
|
2021-01-25 04:29:35 -05:00
|
|
|
srv_conn->mux &&
|
|
|
|
|
(srv_conn->mux->avail_streams(srv_conn) > 0) &&
|
2021-01-26 08:14:37 -05:00
|
|
|
!(srv_conn->flags & CO_FL_WAIT_XPRT)) {
|
2020-07-01 10:36:51 -04:00
|
|
|
if (srv_conn->flags & CO_FL_SESS_IDLE) {
|
|
|
|
|
srv_conn->flags &= ~CO_FL_SESS_IDLE;
|
|
|
|
|
sess->idle_conns--;
|
|
|
|
|
}
|
|
|
|
|
goto end;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
srv_conn = NULL; /* No available connection found */
|
|
|
|
|
goto end;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
end:
|
|
|
|
|
return srv_conn;
|
|
|
|
|
}
|
|
|
|
|
|
2021-10-22 09:41:57 -04:00
|
|
|
/* Returns the source address of the session and fallbacks on the client
|
2021-11-20 13:11:12 -05:00
|
|
|
* connection if not set. It returns a const address on success or NULL on
|
2021-10-22 09:41:57 -04:00
|
|
|
* failure.
|
|
|
|
|
*/
|
|
|
|
|
static inline const struct sockaddr_storage *sess_src(struct session *sess)
|
|
|
|
|
{
|
|
|
|
|
struct connection *cli_conn = objt_conn(sess->origin);
|
|
|
|
|
|
2022-05-02 11:51:51 -04:00
|
|
|
if (sess->src)
|
2021-10-22 09:41:57 -04:00
|
|
|
return sess->src;
|
|
|
|
|
if (cli_conn && conn_get_src(cli_conn))
|
|
|
|
|
return conn_src(cli_conn);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Returns the destination address of the session and fallbacks on the client
|
2021-11-20 13:11:12 -05:00
|
|
|
* connection if not set. It returns a const address on success or NULL on
|
2021-10-22 09:41:57 -04:00
|
|
|
* failure.
|
|
|
|
|
*/
|
|
|
|
|
static inline const struct sockaddr_storage *sess_dst(struct session *sess)
|
|
|
|
|
{
|
|
|
|
|
struct connection *cli_conn = objt_conn(sess->origin);
|
|
|
|
|
|
2022-05-02 11:51:51 -04:00
|
|
|
if (sess->dst)
|
2021-10-22 09:41:57 -04:00
|
|
|
return sess->dst;
|
|
|
|
|
if (cli_conn && conn_get_dst(cli_conn))
|
|
|
|
|
return conn_dst(cli_conn);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Retrieves the source address of the session <sess>. Returns non-zero on
|
|
|
|
|
* success or zero on failure. The operation is only performed once and the
|
|
|
|
|
* address is stored in the session for future use. On the first call, the
|
|
|
|
|
* session source address is copied from the client connection one.
|
|
|
|
|
*/
|
|
|
|
|
static inline int sess_get_src(struct session *sess)
|
|
|
|
|
{
|
|
|
|
|
struct connection *cli_conn = objt_conn(sess->origin);
|
|
|
|
|
const struct sockaddr_storage *src = NULL;
|
|
|
|
|
|
2022-05-02 11:51:51 -04:00
|
|
|
if (sess->src)
|
2021-10-22 09:41:57 -04:00
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
if (cli_conn && conn_get_src(cli_conn))
|
|
|
|
|
src = conn_src(cli_conn);
|
|
|
|
|
if (!src)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
if (!sockaddr_alloc(&sess->src, src, sizeof(*src)))
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Retrieves the destination address of the session <sess>. Returns non-zero on
|
|
|
|
|
* success or zero on failure. The operation is only performed once and the
|
|
|
|
|
* address is stored in the session for future use. On the first call, the
|
|
|
|
|
* session destination address is copied from the client connection one.
|
|
|
|
|
*/
|
|
|
|
|
static inline int sess_get_dst(struct session *sess)
|
|
|
|
|
{
|
|
|
|
|
struct connection *cli_conn = objt_conn(sess->origin);
|
|
|
|
|
const struct sockaddr_storage *dst = NULL;
|
|
|
|
|
|
2022-05-02 11:51:51 -04:00
|
|
|
if (sess->dst)
|
2021-10-22 09:41:57 -04:00
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
if (cli_conn && conn_get_dst(cli_conn))
|
|
|
|
|
dst = conn_dst(cli_conn);
|
|
|
|
|
if (!dst)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
if (!sockaddr_alloc(&sess->dst, dst, sizeof(*dst)))
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-04 12:58:52 -04:00
|
|
|
#endif /* _HAPROXY_SESSION_H */
|
2015-04-03 07:53:24 -04:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Local variables:
|
|
|
|
|
* c-indent-level: 8
|
|
|
|
|
* c-basic-offset: 8
|
|
|
|
|
* End:
|
|
|
|
|
*/
|