2012-07-06 08:13:49 -04:00
|
|
|
/*
|
2020-06-04 12:02:10 -04:00
|
|
|
* include/haproxy/connection.h
|
2012-07-06 08:13:49 -04:00
|
|
|
* This file contains connection function prototypes
|
|
|
|
|
*
|
2020-06-04 12:02:10 -04:00
|
|
|
* Copyright (C) 2000-2002 Willy Tarreau - w@1wt.eu
|
2012-07-06 08:13:49 -04:00
|
|
|
*
|
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
|
* License as published by the Free Software Foundation, version 2.1
|
|
|
|
|
* exclusively.
|
|
|
|
|
*
|
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
|
*
|
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
|
|
|
*/
|
|
|
|
|
|
2020-06-04 12:02:10 -04:00
|
|
|
#ifndef _HAPROXY_CONNECTION_H
|
|
|
|
|
#define _HAPROXY_CONNECTION_H
|
2012-07-06 08:13:49 -04:00
|
|
|
|
2020-05-27 10:21:26 -04:00
|
|
|
#include <import/ist.h>
|
2020-06-04 12:02:10 -04:00
|
|
|
|
2020-06-04 05:29:21 -04:00
|
|
|
#include <haproxy/api.h>
|
2021-03-22 12:06:24 -04:00
|
|
|
#include <haproxy/buf.h>
|
2020-06-04 12:02:10 -04:00
|
|
|
#include <haproxy/connection-t.h>
|
2022-05-27 03:47:12 -04:00
|
|
|
#include <haproxy/stconn-t.h>
|
2020-06-04 12:02:10 -04:00
|
|
|
#include <haproxy/fd.h>
|
2021-10-06 12:48:01 -04:00
|
|
|
#include <haproxy/list.h>
|
2020-06-04 08:58:24 -04:00
|
|
|
#include <haproxy/listener-t.h>
|
2020-06-04 05:29:21 -04:00
|
|
|
#include <haproxy/obj_type.h>
|
2021-10-06 13:11:10 -04:00
|
|
|
#include <haproxy/pool-t.h>
|
2021-10-06 12:48:01 -04:00
|
|
|
#include <haproxy/server.h>
|
2021-10-06 13:03:12 -04:00
|
|
|
#include <haproxy/session-t.h>
|
2020-06-04 12:02:10 -04:00
|
|
|
#include <haproxy/task-t.h>
|
2012-07-06 08:13:49 -04:00
|
|
|
|
2017-11-24 11:34:44 -05:00
|
|
|
extern struct pool_head *pool_head_connection;
|
2021-02-19 09:29:16 -05:00
|
|
|
extern struct pool_head *pool_head_conn_hash_node;
|
2019-07-17 12:37:02 -04:00
|
|
|
extern struct pool_head *pool_head_sockaddr;
|
2023-08-16 09:35:04 -04:00
|
|
|
extern struct pool_head *pool_head_pp_tlv_128;
|
|
|
|
|
extern struct pool_head *pool_head_pp_tlv_256;
|
2021-12-16 11:32:56 -05:00
|
|
|
extern struct pool_head *pool_head_uniqueid;
|
2016-12-22 14:25:26 -05:00
|
|
|
extern struct xprt_ops *registered_xprt[XPRT_ENTRIES];
|
2018-04-10 08:33:41 -04:00
|
|
|
extern struct mux_proto_list mux_proto_list;
|
2021-05-03 04:47:51 -04:00
|
|
|
extern struct mux_stopping_data mux_stopping_data[MAX_THREADS];
|
2012-10-26 14:10:28 -04:00
|
|
|
|
2020-04-16 04:03:58 -04:00
|
|
|
#define IS_HTX_CONN(conn) ((conn)->mux && ((conn)->mux->flags & MX_FL_HTX))
|
|
|
|
|
|
2012-08-31 11:43:29 -04:00
|
|
|
/* receive a PROXY protocol header over a connection */
|
|
|
|
|
int conn_recv_proxy(struct connection *conn, int flag);
|
2022-04-01 07:22:50 -04:00
|
|
|
int conn_send_proxy(struct connection *conn, unsigned int flag);
|
2020-03-13 07:34:24 -04:00
|
|
|
int make_proxy_line(char *buf, int buf_len, struct server *srv, struct connection *remote, struct stream *strm);
|
2023-08-16 09:35:04 -04:00
|
|
|
struct conn_tlv_list *conn_get_tlv(struct connection *conn, int type);
|
2012-08-31 11:43:29 -04:00
|
|
|
|
2021-06-16 11:35:20 -04:00
|
|
|
int conn_append_debug_info(struct buffer *buf, const struct connection *conn, const char *pfx);
|
|
|
|
|
|
2020-01-17 01:52:13 -05:00
|
|
|
int conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es);
|
|
|
|
|
int conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es);
|
2018-09-28 11:57:58 -04:00
|
|
|
|
2016-06-04 10:11:10 -04:00
|
|
|
/* receive a NetScaler Client IP insertion header over a connection */
|
|
|
|
|
int conn_recv_netscaler_cip(struct connection *conn, int flag);
|
|
|
|
|
|
2015-03-12 18:56:52 -04:00
|
|
|
/* raw send() directly on the socket */
|
2020-12-11 09:26:55 -05:00
|
|
|
int conn_ctrl_send(struct connection *conn, const void *buf, int len, int flags);
|
2015-03-12 18:56:52 -04:00
|
|
|
|
2015-03-12 19:40:28 -04:00
|
|
|
/* drains any pending bytes from the socket */
|
2020-12-11 10:20:34 -05:00
|
|
|
int conn_ctrl_drain(struct connection *conn);
|
2015-03-12 19:40:28 -04:00
|
|
|
|
2019-05-22 07:44:48 -04:00
|
|
|
/* SOCKS4 proxy handshake */
|
|
|
|
|
int conn_send_socks4_proxy_request(struct connection *conn);
|
|
|
|
|
int conn_recv_socks4_proxy_response(struct connection *conn);
|
|
|
|
|
|
2020-01-22 12:08:48 -05:00
|
|
|
/* If we delayed the mux creation because we were waiting for the handshake, do it now */
|
|
|
|
|
int conn_create_mux(struct connection *conn);
|
2021-10-06 12:27:28 -04:00
|
|
|
int conn_notify_mux(struct connection *conn, int old_flags, int forced_wake);
|
2021-10-06 12:48:28 -04:00
|
|
|
int conn_upgrade_mux_fe(struct connection *conn, void *ctx, struct buffer *buf,
|
|
|
|
|
struct ist mux_proto, int mode);
|
|
|
|
|
int conn_install_mux_fe(struct connection *conn, void *ctx);
|
2021-10-28 10:36:11 -04:00
|
|
|
int conn_install_mux_be(struct connection *conn, void *ctx, struct session *sess,
|
|
|
|
|
const struct mux_ops *force_mux_ops);
|
2021-10-06 12:48:28 -04:00
|
|
|
int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *sess);
|
|
|
|
|
|
2023-08-21 08:24:17 -04:00
|
|
|
void conn_delete_from_tree(struct connection *conn);
|
2020-01-22 12:08:48 -05:00
|
|
|
|
2021-10-06 12:48:28 -04:00
|
|
|
void conn_init(struct connection *conn, void *target);
|
|
|
|
|
struct connection *conn_new(void *target);
|
|
|
|
|
void conn_free(struct connection *conn);
|
2021-10-06 13:11:10 -04:00
|
|
|
struct conn_hash_node *conn_alloc_hash_node(struct connection *conn);
|
|
|
|
|
struct sockaddr_storage *sockaddr_alloc(struct sockaddr_storage **sap, const struct sockaddr_storage *orig, socklen_t len);
|
|
|
|
|
void sockaddr_free(struct sockaddr_storage **sap);
|
|
|
|
|
|
2021-10-06 12:48:28 -04:00
|
|
|
|
2021-10-06 11:14:49 -04:00
|
|
|
/* connection hash stuff */
|
|
|
|
|
uint64_t conn_calculate_hash(const struct conn_hash_params *params);
|
|
|
|
|
uint64_t conn_hash_prehash(char *buf, size_t size);
|
|
|
|
|
void conn_hash_update(char *buf, size_t *idx,
|
|
|
|
|
const void *data, size_t size,
|
|
|
|
|
enum conn_hash_params_t *flags,
|
|
|
|
|
enum conn_hash_params_t type);
|
|
|
|
|
uint64_t conn_hash_digest(char *buf, size_t bufsize,
|
|
|
|
|
enum conn_hash_params_t flags);
|
2023-07-27 09:56:34 -04:00
|
|
|
|
|
|
|
|
int conn_reverse(struct connection *conn);
|
|
|
|
|
|
2021-10-06 12:48:28 -04:00
|
|
|
const char *conn_err_code_str(struct connection *c);
|
|
|
|
|
int xprt_add_hs(struct connection *conn);
|
2022-03-02 08:46:45 -05:00
|
|
|
void register_mux_proto(struct mux_proto_list *list);
|
2021-10-06 11:14:49 -04:00
|
|
|
|
2020-06-27 18:19:17 -04:00
|
|
|
extern struct idle_conns idle_conns[MAX_THREADS];
|
BUG/MEDIUM: servers: Fix a race condition with idle connections.
When we're purging idle connections, there's a race condition, when we're
removing the connection from the idle list, to add it to the list of
connections to free, if the thread owning the connection tries to free it
at the same time.
To fix this, simply add a per-thread lock, that has to be hold before
removing the connection from the idle list, and when, in conn_free(), we're
about to remove the connection from every list. That way, we know for sure
the connection will stay valid while we remove it from the idle list, to add
it to the list of connections to free.
This should happen rarely enough that it shouldn't have any impact on
performances.
This has not been reported yet, but could provoke random segfaults.
This should be backported to 2.0.
2019-07-11 09:49:00 -04:00
|
|
|
|
2021-03-05 17:37:48 -05:00
|
|
|
/* Returns true (non-zero) if the transport layer of this connection has been
 * initialized and not yet closed, i.e. CO_FL_XPRT_READY is set. The returned
 * value is the raw flag bit, so callers must only test it for truth.
 */
static inline int conn_xprt_ready(const struct connection *conn)
{
	return (conn->flags & CO_FL_XPRT_READY);
}
|
|
|
|
|
|
2021-03-05 17:37:48 -05:00
|
|
|
/* Returns true (non-zero) if the control layer of this connection is
 * initialized (e.g. its file descriptor is registered), i.e. CO_FL_CTRL_READY
 * is set. The returned value is the raw flag bit, so callers must only test
 * it for truth.
 */
static inline int conn_ctrl_ready(const struct connection *conn)
{
	return (conn->flags & CO_FL_CTRL_READY);
}
|
|
|
|
|
|
2021-03-05 17:37:48 -05:00
|
|
|
/*
|
|
|
|
|
* Calls the start() function of the transport layer, if needed.
|
|
|
|
|
* Returns < 0 in case of error.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static inline int conn_xprt_start(struct connection *conn)
|
2012-08-31 07:54:11 -04:00
|
|
|
{
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the control layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous : it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independantly. After that we're still having an issue :
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_full_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
|
|
|
int ret = 0;
|
|
|
|
|
|
2021-03-05 17:37:48 -05:00
|
|
|
if (!conn_xprt_ready(conn) && conn->xprt && conn->xprt->start)
|
|
|
|
|
ret = conn->xprt->start(conn, conn->xprt_ctx);
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the control layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous : it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independantly. After that we're still having an issue :
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_full_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
|
|
|
|
|
|
|
|
if (ret >= 0)
|
|
|
|
|
conn->flags |= CO_FL_XPRT_READY;
|
|
|
|
|
|
|
|
|
|
return ret;
|
2012-08-31 07:54:11 -04:00
|
|
|
}
|
|
|
|
|
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the control layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous : it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independantly. After that we're still having an issue :
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_full_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
|
|
|
/* Calls the close() function of the transport layer if any and if not done
|
2021-03-05 17:37:48 -05:00
|
|
|
* yet, and clears the CO_FL_XPRT_READY flags
|
|
|
|
|
* However this is not done if the CO_FL_XPRT_TRACKED flag is set,
|
|
|
|
|
* which allows logs to take data from the transport layer very late if needed.
|
2012-10-12 11:00:05 -04:00
|
|
|
*/
|
REORG: connection: rename the data layer the "transport layer"
While working on the changes required to make the health checks use the
new connections, it started to become obvious that some naming was not
logical at all in the connections. Specifically, it is not logical to
call the "data layer" the layer which is in charge for all the handshake
and which does not yet provide a data layer once established until a
session has allocated all the required buffers.
In fact, it's more a transport layer, which makes much more sense. The
transport layer offers a medium on which data can transit, and it offers
the functions to move these data when the upper layer requests this. And
it is the upper layer which iterates over the transport layer's functions
to move data which should be called the data layer.
The use case where it's obvious is with embryonic sessions : an incoming
SSL connection is accepted. Only the connection is allocated, not the
buffers nor stream interface, etc... The connection handles the SSL
handshake by itself. Once this handshake is complete, we can't use the
data functions because the buffers and stream interface are not there
yet. Hence we have to first call a specific function to complete the
session initialization, after which we'll be able to use the data
functions. This clearly proves that SSL here is only a transport layer
and that the stream interface constitutes the data layer.
A similar change will be performed to rename app_cb => data, but the
two could not be in the same commit for obvious reasons.
2012-10-02 18:19:48 -04:00
|
|
|
static inline void conn_xprt_close(struct connection *conn)
|
2012-08-06 09:06:49 -04:00
|
|
|
{
|
2021-03-05 17:37:48 -05:00
|
|
|
if (conn->xprt && !(conn->flags & CO_FL_XPRT_TRACKED)) {
|
2014-01-23 08:21:42 -05:00
|
|
|
if (conn->xprt->close)
|
2019-03-21 13:27:17 -04:00
|
|
|
conn->xprt->close(conn, conn->xprt_ctx);
|
2019-05-13 13:10:46 -04:00
|
|
|
conn->xprt_ctx = NULL;
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the control layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous : it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independantly. After that we're still having an issue :
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_full_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
|
|
|
conn->flags &= ~CO_FL_XPRT_READY;
|
2021-03-05 17:37:48 -05:00
|
|
|
conn->xprt = NULL;
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the control layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous : it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independantly. After that we're still having an issue :
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_full_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Initializes the connection's control layer which essentially consists in
|
2020-12-08 09:53:45 -05:00
|
|
|
* registering the connection handle (e.g. file descriptor) for events and
|
|
|
|
|
* setting the CO_FL_CTRL_READY flag. The caller is responsible for ensuring
|
|
|
|
|
* that the control layer is already assigned to the connection prior to the
|
|
|
|
|
* call.
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the control layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous : it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independantly. After that we're still having an issue :
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_full_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
|
|
|
*/
|
|
|
|
|
static inline void conn_ctrl_init(struct connection *conn)
|
|
|
|
|
{
|
2014-01-23 07:50:42 -05:00
|
|
|
if (!conn_ctrl_ready(conn)) {
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the control layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous : it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independantly. After that we're still having an issue :
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_full_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
|
|
|
conn->flags |= CO_FL_CTRL_READY;
|
2020-12-08 09:53:45 -05:00
|
|
|
if (conn->ctrl->ctrl_init)
|
|
|
|
|
conn->ctrl->ctrl_init(conn);
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the control layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous : it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independantly. After that we're still having an issue :
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_full_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-12-08 09:53:45 -05:00
|
|
|
/* Deletes the connection's handle (e.g. FD) if the transport layer is already
|
|
|
|
|
* gone, and removes the CO_FL_CTRL_READY flag.
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the control layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous : it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independantly. After that we're still having an issue :
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_full_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
|
|
|
*/
|
|
|
|
|
static inline void conn_ctrl_close(struct connection *conn)
|
|
|
|
|
{
|
2021-03-05 17:37:48 -05:00
|
|
|
if (!conn->xprt && (conn->flags & CO_FL_CTRL_READY)) {
|
2021-10-21 15:31:42 -04:00
|
|
|
if ((conn->flags & (CO_FL_WANT_DRAIN | CO_FL_SOCK_RD_SH)) == CO_FL_WANT_DRAIN)
|
|
|
|
|
conn_ctrl_drain(conn);
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the control layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous : it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independantly. After that we're still having an issue :
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_full_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
|
|
|
conn->flags &= ~CO_FL_CTRL_READY;
|
2020-12-08 09:53:45 -05:00
|
|
|
if (conn->ctrl->ctrl_close)
|
|
|
|
|
conn->ctrl->ctrl_close(conn);
|
2012-10-12 11:00:05 -04:00
|
|
|
}
|
2012-08-06 09:06:49 -04:00
|
|
|
}
|
|
|
|
|
|
2012-11-23 11:32:21 -05:00
|
|
|
/* If the connection still has a transport layer, then call its close() function
 * if any, and delete the file descriptor if a control layer is set. This is
 * used to close everything at once and atomically. However this is not done if
 * the CO_FL_XPRT_TRACKED flag is set, which allows logs to take data from the
 * transport layer very late if needed.
 */
static inline void conn_full_close(struct connection *conn)
{
	/* order matters: the transport must be released before the control
	 * layer deletes the handle. Each call is a no-op if its layer was
	 * already closed (or, for the xprt, if it is still tracked).
	 */
	conn_xprt_close(conn);
	conn_ctrl_close(conn);
}
|
|
|
|
|
|
2017-10-05 12:09:20 -04:00
|
|
|
/* stop tracking a connection, allowing conn_full_close() to always
 * succeed.
 */
static inline void conn_stop_tracking(struct connection *conn)
{
	/* clearing CO_FL_XPRT_TRACKED re-enables the transport close path */
	conn->flags &= ~CO_FL_XPRT_TRACKED;
}
|
|
|
|
|
|
2022-04-11 11:54:46 -04:00
|
|
|
/* returns the connection's FD if the connection exists, its control is ready,
|
|
|
|
|
* and the connection has an FD, otherwise -1.
|
|
|
|
|
*/
|
|
|
|
|
static inline int conn_fd(const struct connection *conn)
|
|
|
|
|
{
|
|
|
|
|
if (!conn || !conn_ctrl_ready(conn) || (conn->flags & CO_FL_FDLESS))
|
|
|
|
|
return -1;
|
|
|
|
|
return conn->handle.fd;
|
|
|
|
|
}
|
|
|
|
|
|
2017-10-25 03:59:22 -04:00
|
|
|
/* read shutdown, called from the rcv_buf/rcv_pipe handlers when
 * detecting an end of connection.
 */
static inline void conn_sock_read0(struct connection *c)
{
	c->flags |= CO_FL_SOCK_RD_SH;
	if (conn_ctrl_ready(c)) {
		/* we don't risk keeping ports unusable if we found the
		 * zero from the other side.
		 */
		/* FD-less connections have no fdtab entry to update */
		BUG_ON(c->flags & CO_FL_FDLESS);
		HA_ATOMIC_AND(&fdtab[c->handle.fd].state, ~FD_LINGER_RISK);
	}
}
|
|
|
|
|
|
2017-10-25 03:59:22 -04:00
|
|
|
/* write shutdown, indication that the upper layer is not willing to send
 * anything anymore and wants to close after pending data are sent. The
 * <clean> argument will allow not to perform the socket layer shutdown if
 * equal to 0.
 */
static inline void conn_sock_shutw(struct connection *c, int clean)
{
	c->flags |= CO_FL_SOCK_WR_SH;
	if (conn_ctrl_ready(c)) {
		/* don't perform a clean shutdown if we're going to reset or
		 * if the shutr was already received.
		 */
		/* FD-less connections must never reach the socket shutdown */
		BUG_ON(c->flags & CO_FL_FDLESS);
		if (!(c->flags & CO_FL_SOCK_RD_SH) && clean)
			shutdown(c->handle.fd, SHUT_WR);
	}
}
|
|
|
|
|
|
2017-09-13 12:30:23 -04:00
|
|
|
static inline void conn_xprt_shutw(struct connection *c)
|
2012-08-20 10:55:48 -04:00
|
|
|
{
|
2015-03-12 17:51:10 -04:00
|
|
|
/* clean data-layer shutdown */
|
|
|
|
|
if (c->xprt && c->xprt->shutw)
|
2019-03-21 13:27:17 -04:00
|
|
|
c->xprt->shutw(c, c->xprt_ctx, 1);
|
2015-03-12 17:51:10 -04:00
|
|
|
}
|
|
|
|
|
|
2017-09-13 12:30:23 -04:00
|
|
|
static inline void conn_xprt_shutw_hard(struct connection *c)
|
2015-03-12 17:51:10 -04:00
|
|
|
{
|
|
|
|
|
/* unclean data-layer shutdown */
|
|
|
|
|
if (c->xprt && c->xprt->shutw)
|
2019-03-21 13:27:17 -04:00
|
|
|
c->xprt->shutw(c, c->xprt_ctx, 0);
|
2012-08-20 10:55:48 -04:00
|
|
|
}
|
|
|
|
|
|
2018-12-19 11:59:30 -05:00
|
|
|
|
2012-08-20 10:55:48 -04:00
|
|
|
/* detect sock->data read0 transition */
|
2017-09-13 12:30:23 -04:00
|
|
|
static inline int conn_xprt_read0_pending(struct connection *c)
|
2012-08-20 10:55:48 -04:00
|
|
|
{
|
2017-08-30 01:35:35 -04:00
|
|
|
return (c->flags & CO_FL_SOCK_RD_SH) != 0;
|
2012-08-20 10:55:48 -04:00
|
|
|
}
|
|
|
|
|
|
2013-10-24 09:08:37 -04:00
|
|
|
/* prepares a connection to work with protocol <proto> and transport <xprt>.
|
MEDIUM: connection: start to introduce a mux layer between xprt and data
For HTTP/2 and QUIC, we'll need to deal with multiplexed streams inside
a connection. After quite a long brainstorming, it appears that the
connection interface to the existing streams is appropriate just like
the connection interface to the lower layers. In fact we need to have
the mux layer in the middle of the connection, between the transport
and the data layer.
A mux can exist on two directions/sides. On the inbound direction, it
instanciates new streams from incoming connections, while on the outbound
direction it muxes streams into outgoing connections. The difference is
visible on the mux->init() call : in one case, an upper context is already
known (outgoing connection), and in the other case, the upper context is
not yet known (incoming connection) and will have to be allocated by the
mux. The session doesn't have to create the new streams anymore, as this
is performed by the mux itself.
This patch introduces this and creates a pass-through mux called
"mux_pt" which is used for all new connections and which only
calls the data layer's recv,send,wake() calls. One incoming stream
is immediately created when init() is called on the inbound direction.
There should not be any visible impact.
Note that the connection's mux is purposely not set until the session
is completed so that we don't accidently run with the wrong mux. This
must not cause any issue as the xprt_done_cb function is always called
prior to using mux's recv/send functions.
2017-08-28 04:53:00 -04:00
|
|
|
* The transport's is initialized as well, and the mux and its context are
|
2018-09-06 05:45:30 -04:00
|
|
|
* cleared. The target is not reinitialized and it is recommended that it is
|
|
|
|
|
* set prior to calling this function so that the function may make use of it
|
|
|
|
|
* in the future to refine the mux choice if needed.
|
2013-10-24 09:08:37 -04:00
|
|
|
*/
|
2021-03-05 17:37:48 -05:00
|
|
|
static inline int conn_prepare(struct connection *conn, const struct protocol *proto, const struct xprt_ops *xprt)
|
2013-10-24 09:08:37 -04:00
|
|
|
{
|
2021-03-05 17:37:48 -05:00
|
|
|
int ret = 0;
|
|
|
|
|
|
2013-10-24 09:08:37 -04:00
|
|
|
conn->ctrl = proto;
|
|
|
|
|
conn->xprt = xprt;
|
MEDIUM: connection: start to introduce a mux layer between xprt and data
For HTTP/2 and QUIC, we'll need to deal with multiplexed streams inside
a connection. After quite a long brainstorming, it appears that the
connection interface to the existing streams is appropriate just like
the connection interface to the lower layers. In fact we need to have
the mux layer in the middle of the connection, between the transport
and the data layer.
A mux can exist on two directions/sides. On the inbound direction, it
instanciates new streams from incoming connections, while on the outbound
direction it muxes streams into outgoing connections. The difference is
visible on the mux->init() call : in one case, an upper context is already
known (outgoing connection), and in the other case, the upper context is
not yet known (incoming connection) and will have to be allocated by the
mux. The session doesn't have to create the new streams anymore, as this
is performed by the mux itself.
This patch introduces this and creates a pass-through mux called
"mux_pt" which is used for all new connections and which only
calls the data layer's recv,send,wake() calls. One incoming stream
is immediately created when init() is called on the inbound direction.
There should not be any visible impact.
Note that the connection's mux is purposely not set until the session
is completed so that we don't accidently run with the wrong mux. This
must not cause any issue as the xprt_done_cb function is always called
prior to using mux's recv/send functions.
2017-08-28 04:53:00 -04:00
|
|
|
conn->mux = NULL;
|
2013-10-24 09:08:37 -04:00
|
|
|
conn->xprt_ctx = NULL;
|
2018-12-19 08:12:10 -05:00
|
|
|
conn->ctx = NULL;
|
2021-03-05 17:37:48 -05:00
|
|
|
if (xprt->init) {
|
|
|
|
|
ret = xprt->init(conn, &conn->xprt_ctx);
|
|
|
|
|
if (ret < 0)
|
|
|
|
|
conn->xprt = NULL;
|
|
|
|
|
}
|
|
|
|
|
return ret;
|
2013-10-24 09:08:37 -04:00
|
|
|
}
|
|
|
|
|
|
2021-05-03 08:28:30 -04:00
|
|
|
/* returns 0 if the connection is valid and is a frontend connection, otherwise
|
|
|
|
|
* returns 1 indicating it's a backend connection. And uninitialized connection
|
|
|
|
|
* also returns 1 to better handle the usage in the middle of initialization.
|
|
|
|
|
*/
|
|
|
|
|
static inline int conn_is_back(const struct connection *conn)
|
|
|
|
|
{
|
|
|
|
|
return !objt_listener(conn->target);
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-28 09:46:01 -04:00
|
|
|
/* sets <owner> as the connection's owner */
|
2017-10-08 05:16:46 -04:00
|
|
|
static inline void conn_set_owner(struct connection *conn, void *owner, void (*cb)(struct connection *))
|
2017-08-28 09:46:01 -04:00
|
|
|
{
|
|
|
|
|
conn->owner = owner;
|
2017-10-08 05:16:46 -04:00
|
|
|
conn->destroy_cb = cb;
|
2017-08-28 09:46:01 -04:00
|
|
|
}
|
|
|
|
|
|
2020-07-01 09:26:14 -04:00
|
|
|
|
|
|
|
|
/* Mark the connection <conn> as private and remove it from the available connection list */
|
|
|
|
|
static inline void conn_set_private(struct connection *conn)
|
|
|
|
|
{
|
2020-07-02 10:03:30 -04:00
|
|
|
if (!(conn->flags & CO_FL_PRIVATE)) {
|
|
|
|
|
conn->flags |= CO_FL_PRIVATE;
|
2020-07-01 09:26:14 -04:00
|
|
|
|
2020-07-02 10:03:30 -04:00
|
|
|
if (obj_type(conn->target) == OBJ_TYPE_SERVER)
|
2021-01-06 10:14:12 -05:00
|
|
|
srv_release_conn(__objt_server(conn->target), conn);
|
2020-07-02 10:03:30 -04:00
|
|
|
}
|
2020-07-01 09:26:14 -04:00
|
|
|
}
|
|
|
|
|
|
2018-10-20 18:32:01 -04:00
|
|
|
static inline void conn_force_unsubscribe(struct connection *conn)
|
2013-10-20 16:56:45 -04:00
|
|
|
{
|
2020-01-10 01:06:05 -05:00
|
|
|
if (!conn->subs)
|
|
|
|
|
return;
|
|
|
|
|
conn->subs->events = 0;
|
|
|
|
|
conn->subs = NULL;
|
2018-10-20 18:32:01 -04:00
|
|
|
}
|
|
|
|
|
|
2021-10-22 10:33:28 -04:00
|
|
|
/* Returns the source address of the connection or NULL if not set */
|
|
|
|
|
static inline const struct sockaddr_storage *conn_src(struct connection *conn)
|
|
|
|
|
{
|
2022-05-02 11:47:46 -04:00
|
|
|
return conn->src;
|
2021-10-22 10:33:28 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Returns the destination address of the connection or NULL if not set */
|
|
|
|
|
static inline const struct sockaddr_storage *conn_dst(struct connection *conn)
|
|
|
|
|
{
|
2022-05-02 11:47:46 -04:00
|
|
|
return conn->dst;
|
2021-10-22 10:33:28 -04:00
|
|
|
}
|
|
|
|
|
|
2019-07-17 04:48:33 -04:00
|
|
|
/* Retrieves the connection's original source address. Returns non-zero on
|
|
|
|
|
* success or zero on failure. The operation is only performed once and the
|
|
|
|
|
* address is stored in the connection for future use.
|
|
|
|
|
*/
|
|
|
|
|
static inline int conn_get_src(struct connection *conn)
|
|
|
|
|
{
|
2022-05-02 11:47:46 -04:00
|
|
|
if (conn->src)
|
2019-07-17 04:48:33 -04:00
|
|
|
return 1;
|
|
|
|
|
|
2022-04-08 12:05:41 -04:00
|
|
|
if (!conn_ctrl_ready(conn))
|
|
|
|
|
goto fail;
|
2019-07-17 04:48:33 -04:00
|
|
|
|
2020-10-15 01:32:10 -04:00
|
|
|
if (!sockaddr_alloc(&conn->src, NULL, 0))
|
2022-04-08 12:05:41 -04:00
|
|
|
goto fail;
|
2019-07-17 13:04:47 -04:00
|
|
|
|
2022-04-08 07:49:17 -04:00
|
|
|
/* some stream protocols may provide their own get_src/dst functions */
|
|
|
|
|
if (conn->ctrl->get_src &&
|
|
|
|
|
conn->ctrl->get_src(conn, (struct sockaddr *)conn->src, sizeof(*conn->src)) != -1)
|
|
|
|
|
goto done;
|
|
|
|
|
|
2022-04-08 12:05:41 -04:00
|
|
|
if (conn->ctrl->proto_type != PROTO_TYPE_STREAM)
|
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
|
|
/* most other socket-based stream protocols will use their socket family's functions */
|
2022-04-11 12:04:33 -04:00
|
|
|
if (conn->ctrl->fam->get_src && !(conn->flags & CO_FL_FDLESS) &&
|
|
|
|
|
conn->ctrl->fam->get_src(conn->handle.fd, (struct sockaddr *)conn->src,
|
2019-07-17 08:46:00 -04:00
|
|
|
sizeof(*conn->src),
|
2022-04-08 12:05:41 -04:00
|
|
|
obj_type(conn->target) != OBJ_TYPE_LISTENER) != -1)
|
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
|
|
/* no other means */
|
|
|
|
|
fail:
|
|
|
|
|
sockaddr_free(&conn->src);
|
|
|
|
|
return 0;
|
|
|
|
|
done:
|
2019-07-17 04:48:33 -04:00
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
/* Retrieves the connection's original destination address. Returns non-zero on
 * success or zero on failure. The operation is only performed once and the
 * address is stored in the connection for future use.
 */
static inline int conn_get_dst(struct connection *conn)
{
	/* already resolved on a previous call */
	if (conn->dst)
		return 1;

	if (!conn_ctrl_ready(conn))
		goto fail;

	if (!sockaddr_alloc(&conn->dst, NULL, 0))
		goto fail;

	/* some stream protocols may provide their own get_src/dst functions */
	if (conn->ctrl->get_dst &&
	    conn->ctrl->get_dst(conn, (struct sockaddr *)conn->dst, sizeof(*conn->dst)) != -1)
		goto done;

	if (conn->ctrl->proto_type != PROTO_TYPE_STREAM)
		goto fail;

	/* most other socket-based stream protocols will use their socket family's functions */
	if (conn->ctrl->fam->get_dst && !(conn->flags & CO_FL_FDLESS) &&
	    conn->ctrl->fam->get_dst(conn->handle.fd, (struct sockaddr *)conn->dst,
	                             sizeof(*conn->dst),
	                             obj_type(conn->target) != OBJ_TYPE_LISTENER) != -1)
		goto done;

	/* no other means */
 fail:
	/* release the allocation so the next call may retry from scratch */
	sockaddr_free(&conn->dst);
	return 0;
 done:
	return 1;
}
|
|
|
|
|
|
REORG: connection: centralize the conn_set_{tos,mark,quickack} functions
There were a number of ugly setsockopt() calls spread all over
proto_http.c, proto_htx.c and hlua.c just to manipulate the front
connection's TOS, mark or TCP quick-ack. These ones entirely relied
on the connection, its existence, its control layer's presence, and
its addresses. Worse, inet_set_tos() was placed in proto_http.c,
exported and used from the two other ones, surrounded in #ifdefs.
This patch moves this code to connection.h and makes the other ones
rely on it without ifdefs.
2018-12-11 10:37:42 -05:00
|
|
|
/* Sets the TOS header in IPv4 and the traffic class header in IPv6 packets
|
|
|
|
|
* (as per RFC3260 #4 and BCP37 #4.2 and #5.2). The connection is tested and if
|
|
|
|
|
* it is null, nothing is done.
|
|
|
|
|
*/
|
|
|
|
|
static inline void conn_set_tos(const struct connection *conn, int tos)
|
|
|
|
|
{
|
2022-04-11 12:04:33 -04:00
|
|
|
if (!conn || !conn_ctrl_ready(conn) || (conn->flags & CO_FL_FDLESS))
|
REORG: connection: centralize the conn_set_{tos,mark,quickack} functions
There were a number of ugly setsockopt() calls spread all over
proto_http.c, proto_htx.c and hlua.c just to manipulate the front
connection's TOS, mark or TCP quick-ack. These ones entirely relied
on the connection, its existence, its control layer's presence, and
its addresses. Worse, inet_set_tos() was placed in proto_http.c,
exported and used from the two other ones, surrounded in #ifdefs.
This patch moves this code to connection.h and makes the other ones
rely on it without ifdefs.
2018-12-11 10:37:42 -05:00
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
#ifdef IP_TOS
|
2019-07-17 08:46:00 -04:00
|
|
|
if (conn->src->ss_family == AF_INET)
|
REORG: connection: centralize the conn_set_{tos,mark,quickack} functions
There were a number of ugly setsockopt() calls spread all over
proto_http.c, proto_htx.c and hlua.c just to manipulate the front
connection's TOS, mark or TCP quick-ack. These ones entirely relied
on the connection, its existence, its control layer's presence, and
its addresses. Worse, inet_set_tos() was placed in proto_http.c,
exported and used from the two other ones, surrounded in #ifdefs.
This patch moves this code to connection.h and makes the other ones
rely on it without ifdefs.
2018-12-11 10:37:42 -05:00
|
|
|
setsockopt(conn->handle.fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
|
|
|
|
|
#endif
|
|
|
|
|
#ifdef IPV6_TCLASS
|
2019-07-17 08:46:00 -04:00
|
|
|
if (conn->src->ss_family == AF_INET6) {
|
|
|
|
|
if (IN6_IS_ADDR_V4MAPPED(&((struct sockaddr_in6 *)conn->src)->sin6_addr))
|
REORG: connection: centralize the conn_set_{tos,mark,quickack} functions
There were a number of ugly setsockopt() calls spread all over
proto_http.c, proto_htx.c and hlua.c just to manipulate the front
connection's TOS, mark or TCP quick-ack. These ones entirely relied
on the connection, its existence, its control layer's presence, and
its addresses. Worse, inet_set_tos() was placed in proto_http.c,
exported and used from the two other ones, surrounded in #ifdefs.
This patch moves this code to connection.h and makes the other ones
rely on it without ifdefs.
2018-12-11 10:37:42 -05:00
|
|
|
/* v4-mapped addresses need IP_TOS */
|
|
|
|
|
setsockopt(conn->handle.fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
|
|
|
|
|
else
|
|
|
|
|
setsockopt(conn->handle.fd, IPPROTO_IPV6, IPV6_TCLASS, &tos, sizeof(tos));
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Sets the netfilter mark on the connection's socket. The connection is tested
|
|
|
|
|
* and if it is null, nothing is done.
|
|
|
|
|
*/
|
|
|
|
|
static inline void conn_set_mark(const struct connection *conn, int mark)
|
|
|
|
|
{
|
2022-04-11 12:04:33 -04:00
|
|
|
if (!conn || !conn_ctrl_ready(conn) || (conn->flags & CO_FL_FDLESS))
|
REORG: connection: centralize the conn_set_{tos,mark,quickack} functions
There were a number of ugly setsockopt() calls spread all over
proto_http.c, proto_htx.c and hlua.c just to manipulate the front
connection's TOS, mark or TCP quick-ack. These ones entirely relied
on the connection, its existence, its control layer's presence, and
its addresses. Worse, inet_set_tos() was placed in proto_http.c,
exported and used from the two other ones, surrounded in #ifdefs.
This patch moves this code to connection.h and makes the other ones
rely on it without ifdefs.
2018-12-11 10:37:42 -05:00
|
|
|
return;
|
|
|
|
|
|
2021-06-26 07:04:36 -04:00
|
|
|
#if defined(SO_MARK)
|
REORG: connection: centralize the conn_set_{tos,mark,quickack} functions
There were a number of ugly setsockopt() calls spread all over
proto_http.c, proto_htx.c and hlua.c just to manipulate the front
connection's TOS, mark or TCP quick-ack. These ones entirely relied
on the connection, its existence, its control layer's presence, and
its addresses. Worse, inet_set_tos() was placed in proto_http.c,
exported and used from the two other ones, surrounded in #ifdefs.
This patch moves this code to connection.h and makes the other ones
rely on it without ifdefs.
2018-12-11 10:37:42 -05:00
|
|
|
setsockopt(conn->handle.fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
|
2021-06-26 07:04:36 -04:00
|
|
|
#elif defined(SO_USER_COOKIE)
|
2021-10-08 09:52:27 -04:00
|
|
|
setsockopt(conn->handle.fd, SOL_SOCKET, SO_USER_COOKIE, &mark, sizeof(mark));
|
2021-07-03 05:15:15 -04:00
|
|
|
#elif defined(SO_RTABLE)
|
|
|
|
|
setsockopt(conn->handle.fd, SOL_SOCKET, SO_RTABLE, &mark, sizeof(mark));
|
REORG: connection: centralize the conn_set_{tos,mark,quickack} functions
There were a number of ugly setsockopt() calls spread all over
proto_http.c, proto_htx.c and hlua.c just to manipulate the front
connection's TOS, mark or TCP quick-ack. These ones entirely relied
on the connection, its existence, its control layer's presence, and
its addresses. Worse, inet_set_tos() was placed in proto_http.c,
exported and used from the two other ones, surrounded in #ifdefs.
This patch moves this code to connection.h and makes the other ones
rely on it without ifdefs.
2018-12-11 10:37:42 -05:00
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Sets adjust the TCP quick-ack feature on the connection's socket. The
|
|
|
|
|
* connection is tested and if it is null, nothing is done.
|
|
|
|
|
*/
|
|
|
|
|
static inline void conn_set_quickack(const struct connection *conn, int value)
|
|
|
|
|
{
|
2022-04-11 12:04:33 -04:00
|
|
|
if (!conn || !conn_ctrl_ready(conn) || (conn->flags & CO_FL_FDLESS))
|
REORG: connection: centralize the conn_set_{tos,mark,quickack} functions
There were a number of ugly setsockopt() calls spread all over
proto_http.c, proto_htx.c and hlua.c just to manipulate the front
connection's TOS, mark or TCP quick-ack. These ones entirely relied
on the connection, its existence, its control layer's presence, and
its addresses. Worse, inet_set_tos() was placed in proto_http.c,
exported and used from the two other ones, surrounded in #ifdefs.
This patch moves this code to connection.h and makes the other ones
rely on it without ifdefs.
2018-12-11 10:37:42 -05:00
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
#ifdef TCP_QUICKACK
|
|
|
|
|
setsockopt(conn->handle.fd, IPPROTO_TCP, TCP_QUICKACK, &value, sizeof(value));
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
2021-03-02 10:09:26 -05:00
|
|
|
static inline struct wait_event *wl_set_waitcb(struct wait_event *wl, struct task *(*cb)(struct task *, void *, unsigned int), void *ctx)
|
2018-07-18 02:18:20 -04:00
|
|
|
{
|
2019-06-14 08:42:29 -04:00
|
|
|
if (!wl->tasklet->process) {
|
|
|
|
|
wl->tasklet->process = cb;
|
|
|
|
|
wl->tasklet->context = ctx;
|
2018-07-18 02:18:20 -04:00
|
|
|
}
|
|
|
|
|
return wl;
|
|
|
|
|
}
|
|
|
|
|
|
MEDIUM: connection: start to introduce a mux layer between xprt and data
For HTTP/2 and QUIC, we'll need to deal with multiplexed streams inside
a connection. After quite a long brainstorming, it appears that the
connection interface to the existing streams is appropriate just like
the connection interface to the lower layers. In fact we need to have
the mux layer in the middle of the connection, between the transport
and the data layer.
A mux can exist on two directions/sides. On the inbound direction, it
instanciates new streams from incoming connections, while on the outbound
direction it muxes streams into outgoing connections. The difference is
visible on the mux->init() call : in one case, an upper context is already
known (outgoing connection), and in the other case, the upper context is
not yet known (incoming connection) and will have to be allocated by the
mux. The session doesn't have to create the new streams anymore, as this
is performed by the mux itself.
This patch introduces this and creates a pass-through mux called
"mux_pt" which is used for all new connections and which only
calls the data layer's recv,send,wake() calls. One incoming stream
is immediately created when init() is called on the inbound direction.
There should not be any visible impact.
Note that the connection's mux is purposely not set until the session
is completed so that we don't accidently run with the wrong mux. This
must not cause any issue as the xprt_done_cb function is always called
prior to using mux's recv/send functions.
2017-08-28 04:53:00 -04:00
|
|
|
/* Installs the connection's mux layer for upper context <ctx>.
|
|
|
|
|
* Returns < 0 on error.
|
|
|
|
|
*/
|
2018-09-12 06:02:05 -04:00
|
|
|
static inline int conn_install_mux(struct connection *conn, const struct mux_ops *mux,
|
2018-12-14 13:42:40 -05:00
|
|
|
void *ctx, struct proxy *prx, struct session *sess)
|
MEDIUM: connection: start to introduce a mux layer between xprt and data
For HTTP/2 and QUIC, we'll need to deal with multiplexed streams inside
a connection. After quite a long brainstorming, it appears that the
connection interface to the existing streams is appropriate just like
the connection interface to the lower layers. In fact we need to have
the mux layer in the middle of the connection, between the transport
and the data layer.
A mux can exist on two directions/sides. On the inbound direction, it
instanciates new streams from incoming connections, while on the outbound
direction it muxes streams into outgoing connections. The difference is
visible on the mux->init() call : in one case, an upper context is already
known (outgoing connection), and in the other case, the upper context is
not yet known (incoming connection) and will have to be allocated by the
mux. The session doesn't have to create the new streams anymore, as this
is performed by the mux itself.
This patch introduces this and creates a pass-through mux called
"mux_pt" which is used for all new connections and which only
calls the data layer's recv,send,wake() calls. One incoming stream
is immediately created when init() is called on the inbound direction.
There should not be any visible impact.
Note that the connection's mux is purposely not set until the session
is completed so that we don't accidently run with the wrong mux. This
must not cause any issue as the xprt_done_cb function is always called
prior to using mux's recv/send functions.
2017-08-28 04:53:00 -04:00
|
|
|
{
|
2019-01-10 04:33:32 -05:00
|
|
|
int ret;
|
|
|
|
|
|
MEDIUM: connection: start to introduce a mux layer between xprt and data
For HTTP/2 and QUIC, we'll need to deal with multiplexed streams inside
a connection. After quite a long brainstorming, it appears that the
connection interface to the existing streams is appropriate just like
the connection interface to the lower layers. In fact we need to have
the mux layer in the middle of the connection, between the transport
and the data layer.
A mux can exist on two directions/sides. On the inbound direction, it
instanciates new streams from incoming connections, while on the outbound
direction it muxes streams into outgoing connections. The difference is
visible on the mux->init() call : in one case, an upper context is already
known (outgoing connection), and in the other case, the upper context is
not yet known (incoming connection) and will have to be allocated by the
mux. The session doesn't have to create the new streams anymore, as this
is performed by the mux itself.
This patch introduces this and creates a pass-through mux called
"mux_pt" which is used for all new connections and which only
calls the data layer's recv,send,wake() calls. One incoming stream
is immediately created when init() is called on the inbound direction.
There should not be any visible impact.
Note that the connection's mux is purposely not set until the session
is completed so that we don't accidently run with the wrong mux. This
must not cause any issue as the xprt_done_cb function is always called
prior to using mux's recv/send functions.
2017-08-28 04:53:00 -04:00
|
|
|
conn->mux = mux;
|
2018-12-19 08:12:10 -05:00
|
|
|
conn->ctx = ctx;
|
2019-04-08 05:22:47 -04:00
|
|
|
ret = mux->init ? mux->init(conn, prx, sess, &BUF_NULL) : 0;
|
2019-01-10 04:33:32 -05:00
|
|
|
if (ret < 0) {
|
|
|
|
|
conn->mux = NULL;
|
|
|
|
|
conn->ctx = NULL;
|
|
|
|
|
}
|
|
|
|
|
return ret;
|
MEDIUM: connection: start to introduce a mux layer between xprt and data
For HTTP/2 and QUIC, we'll need to deal with multiplexed streams inside
a connection. After quite a long brainstorming, it appears that the
connection interface to the existing streams is appropriate just like
the connection interface to the lower layers. In fact we need to have
the mux layer in the middle of the connection, between the transport
and the data layer.
A mux can exist on two directions/sides. On the inbound direction, it
instanciates new streams from incoming connections, while on the outbound
direction it muxes streams into outgoing connections. The difference is
visible on the mux->init() call : in one case, an upper context is already
known (outgoing connection), and in the other case, the upper context is
not yet known (incoming connection) and will have to be allocated by the
mux. The session doesn't have to create the new streams anymore, as this
is performed by the mux itself.
This patch introduces this and creates a pass-through mux called
"mux_pt" which is used for all new connections and which only
calls the data layer's recv,send,wake() calls. One incoming stream
is immediately created when init() is called on the inbound direction.
There should not be any visible impact.
Note that the connection's mux is purposely not set until the session
is completed so that we don't accidently run with the wrong mux. This
must not cause any issue as the xprt_done_cb function is always called
prior to using mux's recv/send functions.
2017-08-28 04:53:00 -04:00
|
|
|
}
|
|
|
|
|
|
2022-05-18 12:11:27 -04:00
|
|
|
/* Retrieves any valid stream connector from this connection, preferably the first
|
|
|
|
|
* valid one. The purpose is to be able to figure one other end of a private
|
|
|
|
|
* connection for purposes like source binding or proxy protocol header
|
|
|
|
|
* emission. In such cases, any stream connector is expected to be valid so the
|
|
|
|
|
* mux is encouraged to return the first one it finds. If the connection has
|
2022-05-27 05:00:59 -04:00
|
|
|
* no mux or the mux has no get_first_sc() method or the mux has no valid
|
2022-05-18 12:11:27 -04:00
|
|
|
* stream connector, NULL is returned. The output pointer is purposely marked
|
|
|
|
|
* const to discourage the caller from modifying anything there.
|
|
|
|
|
*/
|
|
|
|
|
static inline struct stconn *conn_get_first_sc(const struct connection *conn)
|
|
|
|
|
{
|
|
|
|
|
BUG_ON(!conn || !conn->mux);
|
|
|
|
|
|
2022-05-27 05:00:59 -04:00
|
|
|
if (!conn->mux->get_first_sc)
|
2022-05-18 12:11:27 -04:00
|
|
|
return NULL;
|
2022-05-27 05:00:59 -04:00
|
|
|
return conn->mux->get_first_sc(conn);
|
2022-05-18 12:11:27 -04:00
|
|
|
}
|
|
|
|
|
|
2021-10-18 08:32:36 -04:00
|
|
|
int conn_update_alpn(struct connection *conn, const struct ist alpn, int force);
|
|
|
|
|
|
2016-11-23 12:00:08 -05:00
|
|
|
static inline const char *conn_get_ctrl_name(const struct connection *conn)
|
|
|
|
|
{
|
2019-04-25 12:35:49 -04:00
|
|
|
if (!conn || !conn_ctrl_ready(conn))
|
2016-11-23 12:00:08 -05:00
|
|
|
return "NONE";
|
|
|
|
|
return conn->ctrl->name;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline const char *conn_get_xprt_name(const struct connection *conn)
|
|
|
|
|
{
|
2021-03-05 17:37:48 -05:00
|
|
|
if (!conn || !conn->xprt)
|
2016-11-23 12:00:08 -05:00
|
|
|
return "NONE";
|
2016-11-24 10:58:12 -05:00
|
|
|
return conn->xprt->name;
|
2016-11-23 12:00:08 -05:00
|
|
|
}
|
|
|
|
|
|
MEDIUM: connection: start to introduce a mux layer between xprt and data
For HTTP/2 and QUIC, we'll need to deal with multiplexed streams inside
a connection. After quite a long brainstorming, it appears that the
connection interface to the existing streams is appropriate just like
the connection interface to the lower layers. In fact we need to have
the mux layer in the middle of the connection, between the transport
and the data layer.
A mux can exist on two directions/sides. On the inbound direction, it
instanciates new streams from incoming connections, while on the outbound
direction it muxes streams into outgoing connections. The difference is
visible on the mux->init() call : in one case, an upper context is already
known (outgoing connection), and in the other case, the upper context is
not yet known (incoming connection) and will have to be allocated by the
mux. The session doesn't have to create the new streams anymore, as this
is performed by the mux itself.
This patch introduces this and creates a pass-through mux called
"mux_pt" which is used for all new connections and which only
calls the data layer's recv,send,wake() calls. One incoming stream
is immediately created when init() is called on the inbound direction.
There should not be any visible impact.
Note that the connection's mux is purposely not set until the session
is completed so that we don't accidently run with the wrong mux. This
must not cause any issue as the xprt_done_cb function is always called
prior to using mux's recv/send functions.
2017-08-28 04:53:00 -04:00
|
|
|
static inline const char *conn_get_mux_name(const struct connection *conn)
|
|
|
|
|
{
|
2019-04-25 12:35:49 -04:00
|
|
|
if (!conn || !conn->mux)
|
MEDIUM: connection: start to introduce a mux layer between xprt and data
For HTTP/2 and QUIC, we'll need to deal with multiplexed streams inside
a connection. After quite a long brainstorming, it appears that the
connection interface to the existing streams is appropriate just like
the connection interface to the lower layers. In fact we need to have
the mux layer in the middle of the connection, between the transport
and the data layer.
A mux can exist on two directions/sides. On the inbound direction, it
instanciates new streams from incoming connections, while on the outbound
direction it muxes streams into outgoing connections. The difference is
visible on the mux->init() call : in one case, an upper context is already
known (outgoing connection), and in the other case, the upper context is
not yet known (incoming connection) and will have to be allocated by the
mux. The session doesn't have to create the new streams anymore, as this
is performed by the mux itself.
This patch introduces this and creates a pass-through mux called
"mux_pt" which is used for all new connections and which only
calls the data layer's recv,send,wake() calls. One incoming stream
is immediately created when init() is called on the inbound direction.
There should not be any visible impact.
Note that the connection's mux is purposely not set until the session
is completed so that we don't accidently run with the wrong mux. This
must not cause any issue as the xprt_done_cb function is always called
prior to using mux's recv/send functions.
2017-08-28 04:53:00 -04:00
|
|
|
return "NONE";
|
|
|
|
|
return conn->mux->name;
|
|
|
|
|
}
|
|
|
|
|
|
2016-12-22 14:25:26 -05:00
|
|
|
/* registers pointer to transport layer <id> (XPRT_*) */
|
|
|
|
|
static inline void xprt_register(int id, struct xprt_ops *xprt)
|
|
|
|
|
{
|
|
|
|
|
if (id >= XPRT_ENTRIES)
|
|
|
|
|
return;
|
|
|
|
|
registered_xprt[id] = xprt;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* returns pointer to transport layer <id> (XPRT_*) or NULL if not registered */
|
|
|
|
|
static inline struct xprt_ops *xprt_get(int id)
|
|
|
|
|
{
|
|
|
|
|
if (id >= XPRT_ENTRIES)
|
|
|
|
|
return NULL;
|
|
|
|
|
return registered_xprt[id];
|
|
|
|
|
}
|
2016-11-23 12:00:08 -05:00
|
|
|
|
2021-03-02 11:27:58 -05:00
|
|
|
/* notify the next xprt that the connection is about to become idle and that it
|
|
|
|
|
* may be stolen at any time after the function returns and that any tasklet in
|
|
|
|
|
* the chain must be careful before dereferencing its context.
|
|
|
|
|
*/
|
|
|
|
|
static inline void xprt_set_idle(struct connection *conn, const struct xprt_ops *xprt, void *xprt_ctx)
|
|
|
|
|
{
|
|
|
|
|
if (xprt->set_idle)
|
|
|
|
|
xprt->set_idle(conn, conn->xprt_ctx);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* notify the next xprt that the connection is not idle anymore and that it may
|
|
|
|
|
* not be stolen before the next xprt_set_idle().
|
|
|
|
|
*/
|
|
|
|
|
static inline void xprt_set_used(struct connection *conn, const struct xprt_ops *xprt, void *xprt_ctx)
|
|
|
|
|
{
|
|
|
|
|
if (xprt->set_used)
|
|
|
|
|
xprt->set_used(conn, conn->xprt_ctx);
|
|
|
|
|
}
|
|
|
|
|
|
2016-12-04 12:42:09 -05:00
|
|
|
static inline int conn_get_alpn(const struct connection *conn, const char **str, int *len)
|
|
|
|
|
{
|
|
|
|
|
if (!conn_xprt_ready(conn) || !conn->xprt->get_alpn)
|
|
|
|
|
return 0;
|
2019-03-21 13:27:17 -04:00
|
|
|
return conn->xprt->get_alpn(conn, conn->xprt_ctx, str, len);
|
2016-12-04 12:42:09 -05:00
|
|
|
}
|
|
|
|
|
|
2018-04-10 08:33:41 -04:00
|
|
|
/* unregisters proto mux list <list> */
|
|
|
|
|
static inline void unregister_mux_proto(struct mux_proto_list *list)
|
2017-09-21 13:40:52 -04:00
|
|
|
{
|
2021-04-21 01:32:39 -04:00
|
|
|
LIST_DELETE(&list->list);
|
2017-09-21 13:40:52 -04:00
|
|
|
LIST_INIT(&list->list);
|
|
|
|
|
}
|
|
|
|
|
|
2018-08-08 04:21:56 -04:00
|
|
|
static inline struct mux_proto_list *get_mux_proto(const struct ist proto)
|
2018-04-10 08:37:32 -04:00
|
|
|
{
|
|
|
|
|
struct mux_proto_list *item;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(item, &mux_proto_list.list, list) {
|
|
|
|
|
if (isteq(proto, item->token))
|
|
|
|
|
return item;
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2021-05-08 08:06:09 -04:00
|
|
|
void list_mux_proto(FILE *out);
|
2018-12-02 07:04:43 -05:00
|
|
|
/* returns the first mux entry in the list matching the exact same <mux_proto>
|
|
|
|
|
* and compatible with the <proto_side> (FE or BE) and the <proto_mode> (TCP or
|
2018-04-10 09:01:45 -04:00
|
|
|
* HTTP). <mux_proto> can be empty. Will fall back to the first compatible mux
|
|
|
|
|
* with exactly the same <proto_mode> or with an empty name. May return
|
|
|
|
|
* null if the code improperly registered the default mux to use as a fallback.
|
2023-10-19 10:06:03 -04:00
|
|
|
*
|
|
|
|
|
* <proto_mode> expects PROTO_MODE_* value only: PROXY_MODE_* values should
|
|
|
|
|
* never be used directly here (but you may use conn_pr_mode_to_proto_mode()
|
|
|
|
|
* to map proxy mode to corresponding proto mode before calling the function).
|
2017-09-21 13:40:52 -04:00
|
|
|
*/
|
2018-12-02 07:04:43 -05:00
|
|
|
static inline const struct mux_proto_list *conn_get_best_mux_entry(
|
|
|
|
|
const struct ist mux_proto,
|
|
|
|
|
int proto_side, int proto_mode)
|
2017-09-21 13:40:52 -04:00
|
|
|
{
|
2018-04-10 08:33:41 -04:00
|
|
|
struct mux_proto_list *item;
|
2018-04-10 09:01:45 -04:00
|
|
|
struct mux_proto_list *fallback = NULL;
|
2017-09-21 13:40:52 -04:00
|
|
|
|
2018-04-10 08:33:41 -04:00
|
|
|
list_for_each_entry(item, &mux_proto_list.list, list) {
|
2018-04-10 09:01:45 -04:00
|
|
|
if (!(item->side & proto_side) || !(item->mode & proto_mode))
|
2017-09-21 13:40:52 -04:00
|
|
|
continue;
|
2018-04-10 09:01:45 -04:00
|
|
|
if (istlen(mux_proto) && isteq(mux_proto, item->token))
|
2018-12-02 07:04:43 -05:00
|
|
|
return item;
|
2018-04-10 09:01:45 -04:00
|
|
|
else if (!istlen(item->token)) {
|
|
|
|
|
if (!fallback || (item->mode == proto_mode && fallback->mode != proto_mode))
|
|
|
|
|
fallback = item;
|
|
|
|
|
}
|
2017-09-21 13:40:52 -04:00
|
|
|
}
|
2018-12-02 07:04:43 -05:00
|
|
|
return fallback;
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* returns the first mux in the list matching the exact same <mux_proto> and
|
|
|
|
|
* compatible with the <proto_side> (FE or BE) and the <proto_mode> (TCP or
|
|
|
|
|
* HTTP). <mux_proto> can be empty. Will fall back to the first compatible mux
|
|
|
|
|
* with exactly the same <proto_mode> or with an empty name. May return
|
|
|
|
|
* null if the code improperly registered the default mux to use as a fallback.
|
|
|
|
|
*/
|
|
|
|
|
static inline const struct mux_ops *conn_get_best_mux(struct connection *conn,
|
|
|
|
|
const struct ist mux_proto,
|
|
|
|
|
int proto_side, int proto_mode)
|
|
|
|
|
{
|
|
|
|
|
const struct mux_proto_list *item;
|
|
|
|
|
|
|
|
|
|
item = conn_get_best_mux_entry(mux_proto, proto_side, proto_mode);
|
2018-04-10 09:01:45 -04:00
|
|
|
|
2018-12-02 07:04:43 -05:00
|
|
|
return item ? item->mux : NULL;
|
2017-09-21 13:40:52 -04:00
|
|
|
}
|
2018-09-06 08:52:21 -04:00
|
|
|
|
2018-09-06 05:48:44 -04:00
|
|
|
/* returns a pointer to the proxy associated with this connection. For a front
|
|
|
|
|
* connection it returns a pointer to the frontend ; for a back connection, it
|
|
|
|
|
* returns a pointer to the backend.
|
|
|
|
|
*/
|
|
|
|
|
static inline struct proxy *conn_get_proxy(const struct connection *conn)
|
|
|
|
|
{
|
|
|
|
|
struct listener *l;
|
|
|
|
|
struct server *s;
|
|
|
|
|
|
|
|
|
|
/* check if it's a frontend connection */
|
|
|
|
|
l = objt_listener(conn->target);
|
|
|
|
|
if (l)
|
|
|
|
|
return l->bind_conf->frontend;
|
|
|
|
|
|
|
|
|
|
/* check if it's a backend connection */
|
|
|
|
|
s = objt_server(conn->target);
|
|
|
|
|
if (s)
|
|
|
|
|
return s->proxy;
|
|
|
|
|
|
|
|
|
|
return objt_proxy(conn->target);
|
|
|
|
|
}
|
|
|
|
|
|
2022-04-12 01:31:06 -04:00
|
|
|
/* unconditionally retrieves the ssl_sock_ctx for this connection. Prefer using
|
|
|
|
|
* the standard form conn_get_ssl_sock_ctx() which checks the transport layer
|
|
|
|
|
* and the availability of the method.
|
|
|
|
|
*/
|
|
|
|
|
static inline struct ssl_sock_ctx *__conn_get_ssl_sock_ctx(struct connection *conn)
|
|
|
|
|
{
|
|
|
|
|
return conn->xprt->get_ssl_sock_ctx(conn);
|
|
|
|
|
}
|
|
|
|
|
|
2022-04-11 04:43:28 -04:00
|
|
|
/* retrieves the ssl_sock_ctx for this connection otherwise NULL */
|
|
|
|
|
static inline struct ssl_sock_ctx *conn_get_ssl_sock_ctx(struct connection *conn)
|
|
|
|
|
{
|
|
|
|
|
if (!conn || !conn->xprt || !conn->xprt->get_ssl_sock_ctx)
|
|
|
|
|
return NULL;
|
|
|
|
|
return conn->xprt->get_ssl_sock_ctx(conn);
|
|
|
|
|
}
|
2019-04-08 04:42:41 -04:00
|
|
|
|
2021-10-06 05:38:44 -04:00
|
|
|
/* boolean, returns true if connection is over SSL */
|
2022-04-11 04:43:28 -04:00
|
|
|
static inline int conn_is_ssl(struct connection *conn)
|
2021-10-06 05:38:44 -04:00
|
|
|
{
|
2022-04-11 04:43:28 -04:00
|
|
|
return !!conn_get_ssl_sock_ctx(conn);
|
2021-10-06 05:38:44 -04:00
|
|
|
}
|
|
|
|
|
|
2023-07-27 09:58:08 -04:00
|
|
|
/* Returns true if connection must be reversed. */
|
|
|
|
|
static inline int conn_is_reverse(const struct connection *conn)
|
|
|
|
|
{
|
|
|
|
|
return !!(conn->reverse.target);
|
|
|
|
|
}
|
|
|
|
|
|
2023-08-23 11:16:07 -04:00
|
|
|
/* Returns true if connection must be actively reversed or waiting to be accepted. */
|
|
|
|
|
static inline int conn_reverse_in_preconnect(const struct connection *conn)
|
|
|
|
|
{
|
|
|
|
|
return conn_is_back(conn) ? !!(conn->reverse.target) :
|
2023-11-16 11:13:28 -05:00
|
|
|
!!(conn->flags & CO_FL_ACT_REVERSING);
|
2023-08-23 11:16:07 -04:00
|
|
|
}
|
|
|
|
|
|
2023-07-25 09:59:30 -04:00
|
|
|
/* Initialize <conn> as a reverse connection to <target>. */
|
|
|
|
|
static inline void conn_set_reverse(struct connection *conn, enum obj_type *target)
|
|
|
|
|
{
|
|
|
|
|
/* Ensure the correct target type is used depending on the connection side before reverse. */
|
2023-08-23 11:16:07 -04:00
|
|
|
BUG_ON((!conn_is_back(conn) && !objt_server(target)) ||
|
|
|
|
|
(conn_is_back(conn) && !objt_listener(target)));
|
2023-07-25 09:59:30 -04:00
|
|
|
|
|
|
|
|
conn->reverse.target = target;
|
|
|
|
|
}
|
|
|
|
|
|
2023-08-23 11:16:07 -04:00
|
|
|
/* Returns the listener instance for connection used for active reverse. */
|
|
|
|
|
static inline struct listener *conn_active_reverse_listener(const struct connection *conn)
|
|
|
|
|
{
|
|
|
|
|
return conn_is_back(conn) ? __objt_listener(conn->reverse.target) :
|
|
|
|
|
__objt_listener(conn->target);
|
|
|
|
|
}
|
|
|
|
|
|
2023-08-16 10:18:33 -04:00
|
|
|
/*
|
|
|
|
|
* Prepare TLV argument for redirecting fetches.
|
|
|
|
|
* Note that it is not possible to use an argument check function
|
|
|
|
|
* as that would require us to allow arguments for functions
|
|
|
|
|
* that do not need it. Alternatively, the sample logic could be
|
|
|
|
|
* adjusted to perform checks for no arguments and allocate
|
|
|
|
|
* in the check function. However, this does not seem worth the trouble.
|
|
|
|
|
*/
|
|
|
|
|
static inline void set_tlv_arg(int tlv_type, struct arg *tlv_arg)
|
|
|
|
|
{
|
|
|
|
|
tlv_arg->type = ARGT_SINT;
|
|
|
|
|
tlv_arg->data.sint = tlv_type;
|
|
|
|
|
}
|
|
|
|
|
|
2023-10-19 10:06:03 -04:00
|
|
|
/*
|
|
|
|
|
* Map proxy mode (PR_MODE_*) to equivalent proto_proxy_mode (PROTO_MODE_*)
|
|
|
|
|
*/
|
|
|
|
|
static inline int conn_pr_mode_to_proto_mode(int proxy_mode)
|
|
|
|
|
{
|
|
|
|
|
int mode;
|
|
|
|
|
|
|
|
|
|
/* for now we only support TCP and HTTP proto_modes, so we
|
|
|
|
|
* consider that if it's not HTTP, then it's TCP
|
|
|
|
|
*/
|
|
|
|
|
mode = 1 << (proxy_mode == PR_MODE_HTTP);
|
|
|
|
|
|
|
|
|
|
return mode;
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-04 12:02:10 -04:00
|
|
|
#endif /* _HAPROXY_CONNECTION_H */
|
2012-07-06 08:13:49 -04:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Local variables:
|
|
|
|
|
* c-indent-level: 8
|
|
|
|
|
* c-basic-offset: 8
|
|
|
|
|
* End:
|
|
|
|
|
*/
|