mirror of
https://github.com/haproxy/haproxy.git
synced 2026-04-21 14:17:30 -04:00
On the backend side, QUIC MUX may be started preemptively before the ALPN negotiation. This is useful notably for 0-RTT implementation. However, this was a source of crashes: ALPN was expected to be retrieved from the server cache, yet QUIC MUX still used the ALPN from the transport layer. This could cause a crash, especially when several connections run in parallel, as the server cache is shared among threads. Thanks to the previous patch which reworks QUIC MUX init, this can now be fixed. Indeed, if conn_get_alpn() is not successful, the MUX can look at the server cache again to use the expected value. Note that this could still prevent the MUX from working as expected if the server cache is reset between connect_server() and MUX init. Thus, the ultimate solution would be to copy the cached ALPN into the connection. This problem is not specific to QUIC though, and must be fixed in a separate patch.
280 lines
7.5 KiB
C
280 lines
7.5 KiB
C
/*
|
|
* QUIC xprt layer. Act as an abstraction between quic_conn and MUX layers.
|
|
*
|
|
* Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the License, or (at your option) any later version.
|
|
*
|
|
*/
|
|
|
|
#include <haproxy/api.h>
|
|
#include <haproxy/buf.h>
|
|
#include <haproxy/connection.h>
|
|
#include <haproxy/quic_conn.h>
|
|
#include <haproxy/quic_sock.h>
|
|
#include <haproxy/quic_ssl.h>
|
|
#include <haproxy/ssl_sock.h>
|
|
#include <haproxy/quic_trace.h>
|
|
#include <haproxy/trace.h>
|
|
|
|
/* Returns true if conn layer above <qc> has not been yet fully initialized, i.e. MUX is not yet opened. */
|
|
int qc_wait_for_conn(const struct quic_conn *qc)
|
|
{
|
|
return (!qc->conn || !qc->conn->mux) &&
|
|
!(qc->flags & QUIC_FL_CONN_XPRT_CLOSED);
|
|
}
|
|
|
|
/* Returns true if conn layer above <qc> is fully initialized and available. */
|
|
int qc_is_conn_ready(const struct quic_conn *qc)
|
|
{
|
|
return qc->conn && qc->conn->mux;
|
|
}
|
|
|
|
/* xprt close callback : detach the quic_conn from the upper connection layer
 * and either release it immediately or schedule a CONNECTION_CLOSE emission
 * so that it terminates gracefully on its own.
 */
static void quic_close(struct connection *conn, void *xprt_ctx)
{
	struct ssl_sock_ctx *conn_ctx = xprt_ctx;
	struct quic_conn *qc = conn_ctx->qc;

	TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);

	/* Mark the xprt as closed and break the link to the connection : from
	 * now on the quic_conn survives on its own.
	 */
	qc->flags |= QUIC_FL_CONN_XPRT_CLOSED;
	qc->conn = NULL;

	/* Immediately release the connection in the following cases :
	 * - idle timeout already expired
	 * - connection in closing state
	 * - backend conn with no FD avail (after connect() failure)
	 */
	if (qc->flags & (QUIC_FL_CONN_EXP_TIMER|QUIC_FL_CONN_CLOSING) ||
	    (qc_is_back(qc) && !qc_test_fd(qc))) {
		quic_conn_release(qc);
		/* <qc> is freed : reset it so TRACE_LEAVE does not use it. */
		qc = NULL;
		goto leave;
	}

	/* Schedule a CONNECTION_CLOSE emission. If process stopping is in
	 * progress, quic-conn idle-timer will be scheduled immediately after
	 * its emission to ensure an immediate connection closing.
	 */
	qc_check_close_on_released_mux(qc);
leave:
	TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
}
|
|
|
|
/* Called from the upper layer, to subscribe <es> to events <event_type>. The
|
|
* event subscriber <es> is not allowed to change from a previous call as long
|
|
* as at least one event is still subscribed. The <event_type> must only be a
|
|
* combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
|
|
*/
|
|
static int quic_conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
|
|
{
|
|
struct quic_conn *qc = conn->handle.qc;
|
|
|
|
TRACE_ENTER(QUIC_EV_CONN_SUB, qc);
|
|
|
|
BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
|
|
BUG_ON(qc->subs && qc->subs != es);
|
|
|
|
es->events |= event_type;
|
|
qc->subs = es;
|
|
|
|
/* TODO implement a check_events to detect if subscriber should be
|
|
* woken up immediately ?
|
|
*/
|
|
|
|
if (event_type & SUB_RETRY_RECV)
|
|
TRACE_DEVEL("subscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);
|
|
|
|
if (event_type & SUB_RETRY_SEND)
|
|
TRACE_DEVEL("subscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);
|
|
|
|
TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
|
|
* The <es> pointer is not allowed to differ from the one passed to the
|
|
* subscribe() call. It always returns zero.
|
|
*/
|
|
static int quic_conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
|
|
{
|
|
struct quic_conn *qc = conn->handle.qc;
|
|
|
|
TRACE_ENTER(QUIC_EV_CONN_SUB, qc);
|
|
|
|
if (event_type & SUB_RETRY_RECV)
|
|
TRACE_DEVEL("unsubscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);
|
|
if (event_type & SUB_RETRY_SEND)
|
|
TRACE_DEVEL("unsubscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);
|
|
|
|
es->events &= ~event_type;
|
|
if (!es->events)
|
|
qc->subs = NULL;
|
|
|
|
/* TODO implement ignore_events similar to conn_unsubscribe() ? */
|
|
|
|
TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* xprt init callback : store in <xprt_ctx> the context attached to <conn>.
 * On the frontend side the quic_conn must already be attached to <conn>. On
 * the backend side, a brand new quic_conn is allocated along with its first
 * connection ID.
 * Returns 0 on success, -1 on error.
 */
static int qc_conn_init(struct connection *conn, void **xprt_ctx)
{
	int ret = -1;
	struct quic_conn *qc = NULL;
	struct quic_connection_id *conn_id;

	TRACE_ENTER(QUIC_EV_CONN_NEW, qc);

	if (objt_listener(conn->target)) {
		/* Frontend : the quic_conn was created on packet reception and
		 * must already be attached to the connection handle.
		 */
		qc = conn->handle.qc;
		if (!qc)
			goto out;
	}
	else {
		int retry_rand_cid = 3; /* Number of random retries on CID collision. */
		struct server *srv = objt_server(conn->target);

		/* Backend : allocate a locally generated CID for a new conn. */
		conn_id = quic_cid_alloc(QUIC_CID_SIDE_BE);
		if (!conn_id) {
			TRACE_ERROR("error on CID allocation", QUIC_EV_CONN_NEW);
			goto out;
		}

		/* Draw a random CID and try to insert it in the shared tree,
		 * retrying a few times on the unlikely collision.
		 */
		while (retry_rand_cid--) {
			if (quic_cid_generate_random(conn_id)) {
				TRACE_ERROR("error on CID generation", QUIC_EV_CONN_NEW);
				pool_free(pool_head_quic_connection_id, conn_id);
				goto out;
			}

			if (quic_cid_insert(conn_id, NULL) == 0)
				break;
		}

		if (retry_rand_cid < 0) {
			/* Every random draw collided with an existing CID. */
			TRACE_ERROR("CID pool exhausted", QUIC_EV_CONN_NEW);
			pool_free(pool_head_quic_connection_id, conn_id);
			goto out;
		}

		qc = qc_new_conn(srv, NULL, NULL, conn, conn_id, NULL, &srv->addr);
		if (!qc) {
			/* <conn_id> ownership was not transferred on failure. */
			pool_free(pool_head_quic_connection_id, conn_id);
			goto out;
		}

		quic_cid_register_seq_num(conn_id, qc);

		conn->flags |= CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN | CO_FL_FDLESS;
		conn->handle.qc = qc;
	}

	ret = 0;
	/* Ensure thread connection migration is finalized ASAP. */
	if (qc->flags & QUIC_FL_CONN_TID_REBIND)
		qc_finalize_tid_rebind(qc);

	/* do not store the context if already set */
	if (*xprt_ctx)
		goto out;

	*xprt_ctx = qc->xprt_ctx;

out:
	TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);

	return ret;
}
|
|
|
|
/* xprt start callback : start the QUIC transport layer once init is done.
 * Returns 1 on success, 0 on error.
 */
static int qc_xprt_start(struct connection *conn, void *ctx)
{
	int ret = 0;
	struct quic_conn *qc;

	qc = conn->handle.qc;
	TRACE_ENTER(QUIC_EV_CONN_NEW, qc);

	/* On the backend side, launch the handshake right away. This has as
	 * side effect to create a SSL_SESSION object attached to the SSL
	 * object.
	 */
	if (qc_is_back(qc) && !qc_ssl_do_hanshake(qc, ctx))
		goto out;

	/* Schedule quic-conn to ensure post handshake frames are emitted. This
	 * is not done for 0-RTT as xprt->start happens before handshake
	 * completion.
	 * Note that, when 0-RTT is enabled for backend connections, it is
	 * possible that the early-data secrets could not be derived. This is
	 * the case when the server does not support 0-RTT.
	 */
	if ((qc_is_back(qc) && (!qc_is_conn_ready(qc) || !qc->eel)) ||
	    (qc->flags & QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS))
		tasklet_wakeup(qc->wait_event.tasklet);

	ret = 1;
out:
	TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
	return ret;
}
|
|
|
|
static struct ssl_sock_ctx *qc_get_ssl_sock_ctx(struct connection *conn)
|
|
{
|
|
if (!conn || conn->xprt != xprt_get(XPRT_QUIC) || !conn->handle.qc || !conn->xprt_ctx)
|
|
return NULL;
|
|
|
|
return conn->handle.qc->xprt_ctx;
|
|
}
|
|
|
|
static void qc_xprt_dump_info(struct buffer *msg, const struct connection *conn)
|
|
{
|
|
quic_dump_qc_info(msg, conn->handle.qc);
|
|
}
|
|
|
|
static int qc_get_alpn(const struct connection *conn, void *xprt_ctx, const char **str, int *len)
|
|
{
|
|
struct quic_conn *qc = conn->handle.qc;
|
|
int ret = 0;
|
|
|
|
TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
|
|
|
|
if (qc->alpn) {
|
|
*str = qc->alpn;
|
|
*len = strlen(qc->alpn);
|
|
ret = 1;
|
|
}
|
|
|
|
TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
|
|
return ret;
|
|
}
|
|
|
|
/* transport-layer operations for QUIC connections. */
static struct xprt_ops ssl_quic = {
	/* lifecycle and event callbacks specific to QUIC */
	.close = quic_close,
	.subscribe = quic_conn_subscribe,
	.unsubscribe = quic_conn_unsubscribe,
	.init = qc_conn_init,
	.start = qc_xprt_start,
	/* SSL configuration handling is shared with the TCP/SSL stack */
	.prepare_bind_conf = ssl_sock_prepare_bind_conf,
	.destroy_bind_conf = ssl_sock_destroy_bind_conf,
	.prepare_srv = ssl_sock_prepare_srv_ctx,
	.destroy_srv = ssl_sock_free_srv_ctx,
	/* introspection helpers */
	.get_alpn = qc_get_alpn,
	.get_ssl_sock_ctx = qc_get_ssl_sock_ctx,
	.dump_info = qc_xprt_dump_info,
	.name = "QUIC",
};
|
|
|
|
/* Register the QUIC transport layer operations at startup. */
static void __quic_conn_init(void)
{
	xprt_register(XPRT_QUIC, &ssl_quic);
}

INITCALL0(STG_REGISTER, __quic_conn_init);
|