2015-12-02 13:01:29 -05:00
2012-05-18 09:47:34 -04:00
/*
 * SSL/TLS transport layer over SOCK_STREAM sockets
 *
 * Copyright (C) 2012 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Acknowledgement:
 *   We'd like to specially thank the Stud project authors for a very clean
 *   and well documented code which helped us understand how the OpenSSL API
 *   ought to be used in non-blocking mode. This is one difficult part which
 *   is not easy to get from the OpenSSL doc, and reading the Stud code made
 *   it much more obvious than the examples in the OpenSSL package. Keep up
 *   the good works, guys!
 *
 *   Stud is an extremely efficient and scalable SSL/TLS proxy which combines
 *   particularly well with haproxy. For more info about this project, visit:
 *       https://github.com/bumptech/stud
 *
 */
2019-05-10 03:35:00 -04:00
/* Note: do NOT include openssl/xxx.h here, do it in openssl-compat.h */
2012-05-18 09:47:34 -04:00
# define _GNU_SOURCE
2012-09-07 11:30:07 -04:00
# include <ctype.h>
# include <dirent.h>
2012-05-18 09:47:34 -04:00
# include <errno.h>
# include <stdio.h>
# include <stdlib.h>
2012-09-07 11:30:07 -04:00
# include <string.h>
# include <unistd.h>
2012-05-18 09:47:34 -04:00
# include <sys/socket.h>
# include <sys/stat.h>
# include <sys/types.h>
2015-06-09 11:29:50 -04:00
# include <netdb.h>
2012-05-18 09:47:34 -04:00
# include <netinet/tcp.h>
2020-06-09 03:07:15 -04:00
# include <import/ebpttree.h>
# include <import/ebsttree.h>
2015-06-09 11:29:50 -04:00
# include <import/lru.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/api.h>
2022-05-05 02:50:17 -04:00
# include <haproxy/applet.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/arg.h>
# include <haproxy/base64.h>
2020-06-04 15:07:02 -04:00
# include <haproxy/channel.h>
2020-06-02 04:22:45 -04:00
# include <haproxy/chunk.h>
2020-06-04 14:19:54 -04:00
# include <haproxy/cli.h>
2020-06-04 12:02:10 -04:00
# include <haproxy/connection.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/dynbuf.h>
2020-05-27 10:10:29 -04:00
# include <haproxy/errors.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/fd.h>
# include <haproxy/freq_ctr.h>
2020-06-04 05:23:07 -04:00
# include <haproxy/frontend.h>
2020-06-04 11:05:57 -04:00
# include <haproxy/global.h>
2020-06-04 05:40:28 -04:00
# include <haproxy/http_rules.h>
2020-06-04 16:01:04 -04:00
# include <haproxy/log.h>
2020-05-27 10:26:00 -04:00
# include <haproxy/openssl-compat.h>
2020-06-04 09:06:28 -04:00
# include <haproxy/pattern-t.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/proto_tcp.h>
2020-06-04 16:29:18 -04:00
# include <haproxy/proxy.h>
2022-09-30 12:11:13 -04:00
# include <haproxy/quic_conn.h>
2023-12-21 10:11:35 -05:00
# include <haproxy/quic_openssl_compat.h>
2022-05-21 17:58:40 -04:00
# include <haproxy/quic_tp.h>
2023-12-21 10:11:35 -05:00
# include <haproxy/sample.h>
# include <haproxy/sc_strm.h>
2020-06-04 17:20:13 -04:00
# include <haproxy/server.h>
2020-06-03 12:38:48 -04:00
# include <haproxy/shctx.h>
2020-06-04 08:25:47 -04:00
# include <haproxy/ssl_ckch.h>
2020-06-04 08:29:23 -04:00
# include <haproxy/ssl_crtlist.h>
2024-01-12 09:23:49 -05:00
# include <haproxy/ssl_gencert.h>
2020-06-04 14:30:20 -04:00
# include <haproxy/ssl_sock.h>
2020-06-04 08:21:22 -04:00
# include <haproxy/ssl_utils.h>
2020-11-03 11:10:00 -05:00
# include <haproxy/stats.h>
2022-05-27 03:47:12 -04:00
# include <haproxy/stconn.h>
2020-06-04 17:46:14 -04:00
# include <haproxy/stream-t.h>
2020-06-04 11:25:40 -04:00
# include <haproxy/task.h>
2020-06-02 12:15:32 -04:00
# include <haproxy/ticks.h>
2020-06-01 05:05:15 -04:00
# include <haproxy/time.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/tools.h>
2020-06-04 10:25:31 -04:00
# include <haproxy/vars.h>
2021-09-11 11:51:13 -04:00
# include <haproxy/xxhash.h>
2022-01-11 04:11:10 -05:00
# include <haproxy/istbuf.h>
2022-12-20 05:11:17 -05:00
# include <haproxy/ssl_ocsp.h>
2012-05-18 09:47:34 -04:00
2019-05-10 03:22:53 -04:00
/* ***** READ THIS before adding code here! *****
 *
 * Due to API incompatibilities between multiple OpenSSL versions and their
 * derivatives, it's often tempting to add macros to (re-)define certain
 * symbols. Please do not do this here, and do it in common/openssl-compat.h
 * exclusively so that the whole code consistently uses the same macros.
 *
 * Whenever possible if a macro is missing in certain versions, it's better
 * to conditionally define it in openssl-compat.h than using lots of ifdefs.
 */
2017-12-06 07:51:49 -05:00
/* number of SSL engines declared so far */
int nb_engines = 0;

static struct eb_root cert_issuer_tree = EB_ROOT; /* issuers tree from "issuers-chain-path" */
2020-05-07 09:20:43 -04:00
/* Global SSL tuning knobs, filled with build-time defaults here and later
 * adjusted from the configuration ("tune.ssl.*", "ssl-default-*" keywords).
 */
struct global_ssl global_ssl = {
#ifdef LISTEN_DEFAULT_CIPHERS
	.listen_default_ciphers = LISTEN_DEFAULT_CIPHERS,
#endif
#ifdef CONNECT_DEFAULT_CIPHERS
	.connect_default_ciphers = CONNECT_DEFAULT_CIPHERS,
#endif
#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
	/* TLS 1.3 ciphersuites are configured separately from older ciphers */
	.listen_default_ciphersuites = LISTEN_DEFAULT_CIPHERSUITES,
	.connect_default_ciphersuites = CONNECT_DEFAULT_CIPHERSUITES,
#endif
	.listen_default_ssloptions = BC_SSL_O_NONE,
	.connect_default_ssloptions = SRV_SSL_O_NONE,

	/* no min/max TLS version forced by default, all methods allowed */
	.listen_default_sslmethods.flags = MC_SSL_O_ALL,
	.listen_default_sslmethods.min = CONF_TLSV_NONE,
	.listen_default_sslmethods.max = CONF_TLSV_NONE,
	.connect_default_sslmethods.flags = MC_SSL_O_ALL,
	.connect_default_sslmethods.min = CONF_TLSV_NONE,
	.connect_default_sslmethods.max = CONF_TLSV_NONE,

#ifdef DEFAULT_SSL_MAX_RECORD
	.max_record = DEFAULT_SSL_MAX_RECORD,
#endif
	.hard_max_record = 0,
	.default_dh_param = SSL_DEFAULT_DH_PARAM,
	.ctx_cache = DEFAULT_SSL_CTX_CACHE,
	.capture_buffer_size = 0, /* ClientHello capture disabled by default */
	.extra_files = SSL_GF_ALL,
	.extra_files_noext = 0,
#ifdef HAVE_SSL_KEYLOG
	.keylog = 0, /* TLS key logging disabled by default */
#endif
	.security_level = -1, /* -1 = do not touch the library's security level */
#ifndef OPENSSL_NO_OCSP
	.ocsp_update.delay_max = SSL_OCSP_UPDATE_DELAY_MAX,
	.ocsp_update.delay_min = SSL_OCSP_UPDATE_DELAY_MIN,
	.ocsp_update.mode = SSL_SOCK_OCSP_UPDATE_DFLT,
#endif
};
2019-04-07 16:00:38 -04:00
/* custom BIO method routing OpenSSL I/O through haproxy's transport layer */
static BIO_METHOD *ha_meth;

/* pool for the per-connection struct ssl_sock_ctx */
DECLARE_STATIC_POOL(ssl_sock_ctx_pool, "ssl_sock_ctx", sizeof(struct ssl_sock_ctx));

/* pool used to store a client's SNI (max host name length plus NUL) */
DECLARE_STATIC_POOL(ssl_sock_client_sni_pool, "ssl_sock_client_sni", TLSEXT_MAXLEN_host_name + 1);
2022-01-07 11:12:01 -05:00
2020-11-03 11:10:00 -05:00
/* ssl stats module */
enum {
	SSL_ST_SESS,
	SSL_ST_REUSED_SESS,
	SSL_ST_FAILED_HANDSHAKE,

	SSL_ST_STATS_COUNT /* must be the last member of the enum */
};

/* name and description of each exported field, indexed by the enum above */
static struct name_desc ssl_stats[] = {
	[SSL_ST_SESS]             = { .name = "ssl_sess",
	                              .desc = "Total number of ssl sessions established" },
	[SSL_ST_REUSED_SESS]      = { .name = "ssl_reused_sess",
	                              .desc = "Total number of ssl sessions reused" },
	[SSL_ST_FAILED_HANDSHAKE] = { .name = "ssl_failed_handshake",
	                              .desc = "Total number of failed handshake" },
};

/* runtime counters backing the fields above */
static struct ssl_counters {
	long long sess;
	long long reused_sess;
	long long failed_handshake;
} ssl_counters;
2024-01-29 10:35:19 -05:00
static int ssl_fill_stats ( void * data , struct field * stats , unsigned int * selected_field )
2020-11-03 11:10:00 -05:00
{
2020-11-03 11:10:01 -05:00
struct ssl_counters * counters = data ;
2024-01-29 10:35:19 -05:00
unsigned int current_field = ( selected_field ! = NULL ? * selected_field : 0 ) ;
for ( ; current_field < SSL_ST_STATS_COUNT ; current_field + + ) {
struct field metric = { 0 } ;
switch ( current_field ) {
case SSL_ST_SESS :
metric = mkf_u64 ( FN_COUNTER , counters - > sess ) ;
break ;
case SSL_ST_REUSED_SESS :
metric = mkf_u64 ( FN_COUNTER , counters - > reused_sess ) ;
break ;
case SSL_ST_FAILED_HANDSHAKE :
metric = mkf_u64 ( FN_COUNTER , counters - > failed_handshake ) ;
break ;
default :
/* not used for frontends. If a specific metric
* is requested , return an error . Otherwise continue .
*/
if ( selected_field ! = NULL )
return 0 ;
continue ;
}
stats [ current_field ] = metric ;
if ( selected_field ! = NULL )
break ;
}
return 1 ;
2020-11-03 11:10:00 -05:00
}
/* descriptor registering the ssl counters with the stats subsystem */
static struct stats_module ssl_stats_module = {
	.name          = "ssl",
	.fill_stats    = ssl_fill_stats,
	.stats         = ssl_stats,
	.stats_count   = SSL_ST_STATS_COUNT,
	.counters      = &ssl_counters,
	.counters_size = sizeof(ssl_counters),
	.domain_flags  = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE | STATS_PX_CAP_LI | STATS_PX_CAP_BE | STATS_PX_CAP_SRV),
	.clearable     = 1,
};

INITCALL1(STG_REGISTER, stats_register_module, &ssl_stats_module);
2022-05-05 02:50:17 -04:00
/* CLI context for "show tls-keys" */
struct show_keys_ctx {
	struct tls_keys_ref *next_ref; /* next reference to be dumped */
	int names_only;                /* non-zero = only show file names */
	int next_index;                /* next index to be dumped */
	int dump_entries;              /* dump entries also */
	enum {
		SHOW_KEYS_INIT = 0,
		SHOW_KEYS_LIST,
		SHOW_KEYS_DONE,
	} state;                       /* phase of the current dump */
};
2021-01-20 08:55:01 -05:00
/* ssl_sock_io_cb is exported to see it resolved in "show fd" */
struct task *ssl_sock_io_cb(struct task *, void *, unsigned int);

/* forward declaration of the handshake handler defined later in this file */
static int ssl_sock_handshake(struct connection *conn, unsigned int flag);
2019-04-07 16:00:38 -04:00
/* Methods to implement OpenSSL BIO */
/* BIO write callback: forwards <num> bytes from <buf> to the underlying
 * transport layer (ctx->xprt->snd_buf). Returns the number of bytes sent,
 * or -1 with the write retry flag set when nothing could be sent while
 * the connection is neither in error nor shut for writes.
 */
static int ha_ssl_write(BIO *h, const char *buf, int num)
{
	struct ssl_sock_ctx *ctx = BIO_get_data(h);
	struct buffer tmpbuf;
	uint flags;
	int ret;

	/* wrap the caller's buffer into a temporary struct buffer */
	tmpbuf.size = num;
	tmpbuf.area = (void *)(uintptr_t)buf;
	tmpbuf.data = num;
	tmpbuf.head = 0;

	flags = (ctx->xprt_st & SSL_SOCK_SEND_MORE) ? CO_SFL_MSG_MORE : 0;
	ret = ctx->xprt->snd_buf(ctx->conn, ctx->xprt_ctx, &tmpbuf, num, flags);

	/* Always clear the retry flags before any I/O attempt, like OpenSSL's
	 * own socket BIO does, otherwise SSL_do_handshake() may keep reporting
	 * WANT_READ/WANT_WRITE without performing any I/O and cause permanent
	 * wakeups (GH issue #2403).
	 */
	BIO_clear_retry_flags(h);

	if (ret == 0 && !(ctx->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_WR_SH))) {
		BIO_set_retry_write(h);
		ret = -1;
	}
	return ret;
}
/* BIO gets callback: line-oriented reads are not supported on this BIO,
 * so it always reports zero bytes.
 */
static int ha_ssl_gets(BIO *h, char *buf, int size)
{
	return 0;
}
/* BIO puts callback: writes the NUL-terminated string <str> through the
 * regular write callback.
 */
static int ha_ssl_puts(BIO *h, const char *str)
{
	return ha_ssl_write(h, str, strlen(str));
}
/* BIO read callback: reads up to <size> bytes from the underlying transport
 * layer (ctx->xprt->rcv_buf) into <buf>. Returns the number of bytes read,
 * or -1 with the read retry flag set when no data is available while the
 * connection is neither in error nor shut for reads.
 */
static int ha_ssl_read(BIO *h, char *buf, int size)
{
	struct ssl_sock_ctx *ctx = BIO_get_data(h);
	struct buffer tmpbuf;
	int ret;

	/* wrap the caller's buffer into an empty temporary struct buffer */
	tmpbuf.size = size;
	tmpbuf.area = buf;
	tmpbuf.data = 0;
	tmpbuf.head = 0;

	ret = ctx->xprt->rcv_buf(ctx->conn, ctx->xprt_ctx, &tmpbuf, size, 0);

	/* Always clear the retry flags before any I/O attempt, like OpenSSL's
	 * own socket BIO does, otherwise SSL_do_handshake() may keep reporting
	 * WANT_READ/WANT_WRITE without performing any I/O and cause permanent
	 * wakeups (GH issue #2403).
	 */
	BIO_clear_retry_flags(h);

	if (ret == 0 && !(ctx->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH))) {
		BIO_set_retry_read(h);
		ret = -1;
	}
	return ret;
}
/* BIO ctrl callback: only duplication and flush requests are supported,
 * and both are no-ops reporting success; any other command fails.
 */
static long ha_ssl_ctrl(BIO *h, int cmd, long arg1, void *arg2)
{
	if (cmd == BIO_CTRL_DUP || cmd == BIO_CTRL_FLUSH)
		return 1;
	return 0;
}
/* BIO create callback: marks the BIO as initialized with no attached
 * context and no flag set. Always succeeds.
 */
static int ha_ssl_new(BIO *h)
{
	BIO_set_init(h, 1);
	BIO_set_data(h, NULL);
	BIO_clear_flags(h, ~0);
	return 1;
}
/* BIO destroy callback: nothing to release here, the attached data pointer
 * is not owned by the BIO. Always succeeds.
 */
static int ha_ssl_free(BIO *data)
{
	return 1;
}
2019-05-09 08:13:35 -04:00
#if defined(USE_THREAD) && (HA_OPENSSL_VERSION_NUMBER < 0x10100000L)

/* one rwlock per internal OpenSSL lock (only needed for OpenSSL < 1.1.0) */
static HA_RWLOCK_T *ssl_rwlocks;

/* OpenSSL thread-id callback: returns the current haproxy thread id */
unsigned long ssl_id_function(void)
{
	return (unsigned long)tid;
}
/* OpenSSL (< 1.1.0) locking callback: acquires or releases lock <n> in
 * read or write mode depending on the CRYPTO_LOCK and CRYPTO_READ bits
 * of <mode>. <file> and <line> identify the caller and are unused here.
 */
void ssl_locking_function(int mode, int n, const char *file, int line)
{
	if (mode & CRYPTO_LOCK) {
		if (mode & CRYPTO_READ)
			HA_RWLOCK_RDLOCK(SSL_LOCK, &ssl_rwlocks[n]);
		else
			HA_RWLOCK_WRLOCK(SSL_LOCK, &ssl_rwlocks[n]);
	}
	else {
		if (mode & CRYPTO_READ)
			HA_RWLOCK_RDUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
		else
			HA_RWLOCK_WRUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
	}
}
static int ssl_locking_init ( void )
{
int i ;
ssl_rwlocks = malloc ( sizeof ( HA_RWLOCK_T ) * CRYPTO_num_locks ( ) ) ;
if ( ! ssl_rwlocks )
return - 1 ;
for ( i = 0 ; i < CRYPTO_num_locks ( ) ; i + + )
2017-11-07 04:42:54 -05:00
HA_RWLOCK_INIT ( & ssl_rwlocks [ i ] ) ;
2017-06-15 10:37:39 -04:00
CRYPTO_set_id_callback ( ssl_id_function ) ;
CRYPTO_set_locking_callback ( ssl_locking_function ) ;
return 0 ;
}
2017-11-13 04:34:01 -05:00
2017-06-15 10:37:39 -04:00
# endif
2020-06-05 02:40:51 -04:00
__decl_thread(HA_SPINLOCK_T ckch_lock); /* protects access to the ckch store */
2017-06-15 10:37:39 -04:00
2019-10-29 18:48:19 -04:00
2019-10-24 05:32:47 -04:00
/* mimic what X509_STORE_load_locations do with store_ctx */
static int ssl_set_cert_crl_file(X509_STORE *store_ctx, char *path)
{
	struct cafile_entry *ca_e = ssl_store_get_cafile_entry(path, 0);
	X509_STORE *store = ca_e ? ca_e->ca_store : NULL;
	STACK_OF(X509_OBJECT) *objs;
	X509_OBJECT *obj;
	int idx;

	/* nothing to copy without both a destination and a loaded source store */
	if (!store_ctx || !store)
		return 0;

	/* copy every certificate and CRL of the cached store into <store_ctx> */
	objs = X509_STORE_get0_objects(store);
	for (idx = 0; idx < sk_X509_OBJECT_num(objs); idx++) {
		obj = sk_X509_OBJECT_value(objs, idx);
		switch (X509_OBJECT_get_type(obj)) {
		case X509_LU_X509:
			X509_STORE_add_cert(store_ctx, X509_OBJECT_get0_X509(obj));
			break;
		case X509_LU_CRL:
			X509_STORE_add_crl(store_ctx, X509_OBJECT_get0_X509_CRL(obj));
			break;
		default:
			break;
		}
	}
	return 1;
}
2020-03-10 03:06:11 -04:00
/* SSL_CTX_load_verify_locations substitute, internally call X509_STORE_load_locations */
2019-10-24 05:32:47 -04:00
static int ssl_set_verify_locations_file ( SSL_CTX * ctx , char * path )
{
X509_STORE * store_ctx = SSL_CTX_get_cert_store ( ctx ) ;
return ssl_set_cert_crl_file ( store_ctx , path ) ;
}
2019-10-24 12:08:51 -04:00
/*
 * Extract CA_list from CA_file already in tree.
 * Duplicate ca_name is tracking with ebtree. It's simplify openssl compatibility.
 * Return a shared ca_list: SSL_dup_CA_list must be used before set it on SSL_CTX.
 */
static STACK_OF(X509_NAME) *ssl_get_client_ca_file(char *path)
{
	struct ebmb_node *eb;
	struct cafile_entry *ca_e;

	/* the CA file must already have been loaded into the cafile tree */
	eb = ebst_lookup(&cafile_tree, path);
	if (!eb)
		return NULL;
	ca_e = ebmb_entry(eb, struct cafile_entry, node);

	/* build the subject-name list once, then cache it in the entry */
	if (ca_e->ca_list == NULL) {
		int i;
		unsigned long key;
		struct eb_root ca_name_tree = EB_ROOT; /* temporary dedup index */
		struct eb64_node *node, *back;
		struct {
			struct eb64_node node;
			X509_NAME *xname;
		} *ca_name;
		STACK_OF(X509_OBJECT) *objs;
		STACK_OF(X509_NAME) *skn;
		X509 *x;
		X509_NAME *xn;

		skn = sk_X509_NAME_new_null();
		/* take x509 from cafile_tree */
		objs = X509_STORE_get0_objects(ca_e->ca_store);
		for (i = 0; i < sk_X509_OBJECT_num(objs); i++) {
			x = X509_OBJECT_get0_X509(sk_X509_OBJECT_value(objs, i));
			if (!x)
				continue;
			xn = X509_get_subject_name(x);
			if (!xn)
				continue;
			/* Check for duplicates. */
			key = X509_NAME_hash(xn);
			/* walk all entries hashing to <key>; ca_name is left
			 * non-NULL only when an equal name is found
			 */
			for (node = eb64_lookup(&ca_name_tree, key), ca_name = NULL;
			     node && ca_name == NULL;
			     node = eb64_next(node)) {
				ca_name = container_of(node, typeof(*ca_name), node);
				if (X509_NAME_cmp(xn, ca_name->xname) != 0)
					ca_name = NULL;
			}
			/* find a duplicate */
			if (ca_name)
				continue;
			ca_name = calloc(1, sizeof *ca_name);
			xn = X509_NAME_dup(xn);
			if (!ca_name ||
			    !xn ||
			    !sk_X509_NAME_push(skn, xn)) {
				/* allocation failure: release everything built
				 * so far and leave the cached list NULL.
				 * NOTE(review): sk_X509_NAME_pop_free() already
				 * frees the stack itself, so the following
				 * sk_X509_NAME_free() looks redundant — confirm
				 * against the OpenSSL STACK API.
				 */
				free(ca_name);
				X509_NAME_free(xn);
				sk_X509_NAME_pop_free(skn, X509_NAME_free);
				sk_X509_NAME_free(skn);
				skn = NULL;
				break;
			}
			ca_name->node.key = key;
			ca_name->xname = xn;
			eb64_insert(&ca_name_tree, &ca_name->node);
		}
		ca_e->ca_list = skn;
		/* remove temporary ca_name tree */
		node = eb64_first(&ca_name_tree);
		while (node) {
			ca_name = container_of(node, typeof(*ca_name), node);
			back = eb64_next(node);
			eb64_delete(node);
			free(ca_name);
			node = back;
		}
	}
	return ca_e->ca_list;
}
2021-04-10 11:23:00 -04:00
/* pool for captured ClientHello data */
struct pool_head *pool_head_ssl_capture __read_mostly = NULL;
/* SSL ex_data index where the capture is stored */
int ssl_capture_ptr_index = -1;
/* SSL ex_data index linking an SSL object back to its application data */
int ssl_app_data_index = -1;
#ifdef USE_QUIC
/* SSL ex_data index of the QUIC connection's application data */
int ssl_qc_app_data_index = -1;
#endif /* USE_QUIC */
2016-12-22 17:12:01 -05:00
2021-06-09 10:46:12 -04:00
#ifdef HAVE_SSL_KEYLOG
/* SSL ex_data index where the struct ssl_keylog is stored */
int ssl_keylog_index = -1;
/* pools for the keylog structure and its hexadecimal secret strings,
 * used when "tune.ssl.keylog on" is set
 */
struct pool_head *pool_head_ssl_keylog __read_mostly = NULL;
struct pool_head *pool_head_ssl_keylog_str __read_mostly = NULL;
#endif

/* SSL ex_data index of the client certificate reference */
int ssl_client_crt_ref_index = -1;

/* Used to store the client's SNI in case of ClientHello callback error */
int ssl_client_sni_index = -1;

#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
/* list of all TLS ticket key references */
struct list tlskeys_reference = LIST_HEAD_INIT(tlskeys_reference);
#endif
2022-04-11 12:41:24 -04:00
#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
/* number of engines successfully initialized */
unsigned int openssl_engines_initialized;
/* list of registered OpenSSL engines */
struct list openssl_engines = LIST_HEAD_INIT(openssl_engines);
struct ssl_engine_list {
	struct list list;
	ENGINE *e;
};
#endif

#ifdef HAVE_SSL_PROVIDERS
/* list of registered OpenSSL providers */
struct list openssl_providers = LIST_HEAD_INIT(openssl_providers);
struct ssl_provider_list {
	struct list list;
	OSSL_PROVIDER *provider;
};
#endif

#ifndef OPENSSL_NO_DH
/* ex_data index used to attach DH parameters */
static int ssl_dh_ptr_index = -1;
/* global DH parameters and per key-size fallbacks */
static HASSL_DH *global_dh = NULL;
static HASSL_DH *local_dh_1024 = NULL;
static HASSL_DH *local_dh_2048 = NULL;
static HASSL_DH *local_dh_4096 = NULL;
#endif /* OPENSSL_NO_DH */

/* The order here matters for picking a default context,
 * keep the most common keytype at the bottom of the list
 */
const char *SSL_SOCK_KEYTYPE_NAMES[] = {
	"dsa",
	"ecdsa",
	"rsa"
};
2017-11-28 05:04:43 -05:00
static struct shared_context *ssl_shctx = NULL; /* ssl shared session cache */
static struct eb_root *sh_ssl_sess_tree;        /* ssl shared session tree */
2020-05-11 09:51:45 -04:00
/* Dedicated callback functions for heartbeat and clienthello.
*/
# ifdef TLS1_RT_HEARTBEAT
static void ssl_sock_parse_heartbeat ( struct connection * conn , int write_p , int version ,
int content_type , const void * buf , size_t len ,
SSL * ssl ) ;
# endif
static void ssl_sock_parse_clienthello ( struct connection * conn , int write_p , int version ,
int content_type , const void * buf , size_t len ,
SSL * ssl ) ;
2021-06-09 10:46:12 -04:00
# ifdef HAVE_SSL_KEYLOG
WIP/MINOR: ssl: add sample fetches for keylog in frontend
OpenSSL 1.1.1 provides a callback registering function
SSL_CTX_set_keylog_callback, which allows one to receive a string
containing the keys to decipher TLSv1.3.
Unfortunately it is not possible to store this data in binary form and
we can only get this information using the callback, which means that we
need to store it until the connection is closed.
This patch adds 2 pools: the first one, pool_head_ssl_keylog, is used to
store a struct ssl_keylog which will be inserted as an ex_data in a SSL *.
The second one is pool_head_ssl_keylog_str, which will be used to store
the hexadecimal strings.
To enable the capture of the keys, you need to set "tune.ssl.keylog on"
in your configuration.
The following fetches were implemented:
ssl_fc_client_early_traffic_secret,
ssl_fc_client_handshake_traffic_secret,
ssl_fc_server_handshake_traffic_secret,
ssl_fc_client_traffic_secret_0,
ssl_fc_server_traffic_secret_0,
ssl_fc_exporter_secret,
ssl_fc_early_exporter_secret
2020-07-06 05:41:30 -04:00
static void ssl_init_keylog ( struct connection * conn , int write_p , int version ,
int content_type , const void * buf , size_t len ,
SSL * ssl ) ;
# endif
2020-05-08 12:30:00 -04:00
/* List head of all registered SSL/TLS protocol message callbacks. */
struct list ssl_sock_msg_callbacks = LIST_HEAD_INIT ( ssl_sock_msg_callbacks ) ;
/* Registers the function <func> in order to be called on SSL/TLS protocol
* message processing . It will return 0 if the function < func > is not set
* or if it fails to allocate memory .
*/
int ssl_sock_register_msg_callback ( ssl_sock_msg_callback_func func )
{
struct ssl_sock_msg_callback * cbk ;
if ( ! func )
return 0 ;
cbk = calloc ( 1 , sizeof ( * cbk ) ) ;
if ( ! cbk ) {
ha_alert ( " out of memory in ssl_sock_register_msg_callback(). \n " ) ;
return 0 ;
}
cbk - > func = func ;
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & ssl_sock_msg_callbacks , & cbk - > list ) ;
2020-05-08 12:30:00 -04:00
return 1 ;
}
2020-05-11 09:51:45 -04:00
/* Used to register dedicated SSL/TLS protocol message callbacks.
*/
static int ssl_sock_register_msg_callbacks ( void )
{
# ifdef TLS1_RT_HEARTBEAT
if ( ! ssl_sock_register_msg_callback ( ssl_sock_parse_heartbeat ) )
return ERR_ABORT ;
# endif
2021-07-13 13:04:24 -04:00
if ( global_ssl . capture_buffer_size > 0 ) {
2020-05-11 09:51:45 -04:00
if ( ! ssl_sock_register_msg_callback ( ssl_sock_parse_clienthello ) )
return ERR_ABORT ;
}
2021-06-09 10:46:12 -04:00
# ifdef HAVE_SSL_KEYLOG
WIP/MINOR: ssl: add sample fetches for keylog in frontend
OpenSSL 1.1.1 provides a callback registering function
SSL_CTX_set_keylog_callback, which allows one to receive a string
containing the keys to deciphers TLSv1.3.
Unfortunately it is not possible to store this data in binary form and
we can only get this information using the callback. Which means that we
need to store it until the connection is closed.
This patches add 2 pools, the first one, pool_head_ssl_keylog is used to
store a struct ssl_keylog which will be inserted as a ex_data in a SSL *.
The second one is pool_head_ssl_keylog_str which will be used to store
the hexadecimal strings.
To enable the capture of the keys, you need to set "tune.ssl.keylog on"
in your configuration.
The following fetches were implemented:
ssl_fc_client_early_traffic_secret,
ssl_fc_client_handshake_traffic_secret,
ssl_fc_server_handshake_traffic_secret,
ssl_fc_client_traffic_secret_0,
ssl_fc_server_traffic_secret_0,
ssl_fc_exporter_secret,
ssl_fc_early_exporter_secret
2020-07-06 05:41:30 -04:00
if ( global_ssl . keylog > 0 ) {
if ( ! ssl_sock_register_msg_callback ( ssl_init_keylog ) )
return ERR_ABORT ;
}
# endif
2023-12-21 10:11:35 -05:00
# ifdef USE_QUIC_OPENSSL_COMPAT
if ( ! ssl_sock_register_msg_callback ( quic_tls_compat_msg_callback ) )
return ERR_ABORT ;
# endif
WIP/MINOR: ssl: add sample fetches for keylog in frontend
OpenSSL 1.1.1 provides a callback registering function
SSL_CTX_set_keylog_callback, which allows one to receive a string
containing the keys to deciphers TLSv1.3.
Unfortunately it is not possible to store this data in binary form and
we can only get this information using the callback. Which means that we
need to store it until the connection is closed.
This patches add 2 pools, the first one, pool_head_ssl_keylog is used to
store a struct ssl_keylog which will be inserted as a ex_data in a SSL *.
The second one is pool_head_ssl_keylog_str which will be used to store
the hexadecimal strings.
To enable the capture of the keys, you need to set "tune.ssl.keylog on"
in your configuration.
The following fetches were implemented:
ssl_fc_client_early_traffic_secret,
ssl_fc_client_handshake_traffic_secret,
ssl_fc_server_handshake_traffic_secret,
ssl_fc_client_traffic_secret_0,
ssl_fc_server_traffic_secret_0,
ssl_fc_exporter_secret,
ssl_fc_early_exporter_secret
2020-07-06 05:41:30 -04:00
2020-11-06 09:24:23 -05:00
return ERR_NONE ;
2020-05-11 09:51:45 -04:00
}
2020-05-08 12:30:00 -04:00
/* Used to free all SSL/TLS protocol message callbacks that were
* registered by using ssl_sock_register_msg_callback ( ) .
*/
static void ssl_sock_unregister_msg_callbacks ( void )
{
struct ssl_sock_msg_callback * cbk , * cbkback ;
list_for_each_entry_safe ( cbk , cbkback , & ssl_sock_msg_callbacks , list ) {
2021-04-21 01:32:39 -04:00
LIST_DELETE ( & cbk - > list ) ;
2020-05-08 12:30:00 -04:00
free ( cbk ) ;
}
}
2022-04-11 04:43:28 -04:00
static struct ssl_sock_ctx * ssl_sock_get_ctx ( struct connection * conn )
{
if ( ! conn | | conn - > xprt ! = xprt_get ( XPRT_SSL ) | | ! conn - > xprt_ctx )
return NULL ;
return ( struct ssl_sock_ctx * ) conn - > xprt_ctx ;
}
2020-05-11 11:17:06 -04:00
SSL * ssl_sock_get_ssl_object ( struct connection * conn )
{
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2020-05-11 11:17:06 -04:00
2022-04-11 05:29:11 -04:00
return ctx ? ctx - > ssl : NULL ;
2020-05-11 11:17:06 -04:00
}
2017-01-13 11:48:18 -05:00
/*
* This function gives the detail of the SSL error . It is used only
* if the debug mode and the verbose mode are activated . It dump all
* the SSL error until the stack was empty .
*/
2022-09-06 13:37:08 -04:00
static forceinline void ssl_sock_dump_errors ( struct connection * conn ,
struct quic_conn * qc )
2017-01-13 11:48:18 -05:00
{
unsigned long ret ;
if ( unlikely ( global . mode & MODE_DEBUG ) ) {
while ( 1 ) {
2022-02-11 06:04:44 -05:00
const char * func = NULL ;
ERR_peek_error_func ( & func ) ;
2017-01-13 11:48:18 -05:00
ret = ERR_get_error ( ) ;
if ( ret = = 0 )
return ;
2022-09-06 13:37:08 -04:00
if ( conn ) {
fprintf ( stderr , " fd[%#x] OpenSSL error[0x%lx] %s: %s \n " ,
conn_fd ( conn ) , ret ,
func , ERR_reason_error_string ( ret ) ) ;
}
# ifdef USE_QUIC
else {
/* TODO: we are not sure <conn> is always initialized for QUIC connections */
fprintf ( stderr , " qc @%p OpenSSL error[0x%lx] %s: %s \n " , qc , ret ,
func , ERR_reason_error_string ( ret ) ) ;
}
# endif
2017-01-13 11:48:18 -05:00
}
}
}
2015-12-10 15:07:30 -05:00
2022-04-11 12:41:24 -04:00
# if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
2020-05-14 11:47:32 -04:00
int ssl_init_single_engine ( const char * engine_id , const char * def_algorithms )
2017-01-20 20:10:18 -05:00
{
int err_code = ERR_ABORT ;
ENGINE * engine ;
struct ssl_engine_list * el ;
/* grab the structural reference to the engine */
engine = ENGINE_by_id ( engine_id ) ;
if ( engine = = NULL ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " ssl-engine %s: failed to get structural reference \n " , engine_id ) ;
2017-01-20 20:10:18 -05:00
goto fail_get ;
}
if ( ! ENGINE_init ( engine ) ) {
/* the engine couldn't initialise, release it */
2017-11-24 10:50:31 -05:00
ha_alert ( " ssl-engine %s: failed to initialize \n " , engine_id ) ;
2017-01-20 20:10:18 -05:00
goto fail_init ;
}
if ( ENGINE_set_default_string ( engine , def_algorithms ) = = 0 ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " ssl-engine %s: failed on ENGINE_set_default_string \n " , engine_id ) ;
2017-01-20 20:10:18 -05:00
goto fail_set_method ;
}
el = calloc ( 1 , sizeof ( * el ) ) ;
2021-05-12 11:45:21 -04:00
if ( ! el )
goto fail_alloc ;
2017-01-20 20:10:18 -05:00
el - > e = engine ;
2021-04-21 01:32:39 -04:00
LIST_INSERT ( & openssl_engines , & el - > list ) ;
2017-12-06 07:51:49 -05:00
nb_engines + + ;
if ( global_ssl . async )
global . ssl_used_async_engines = nb_engines ;
2017-01-20 20:10:18 -05:00
return 0 ;
2021-05-12 11:45:21 -04:00
fail_alloc :
2017-01-20 20:10:18 -05:00
fail_set_method :
/* release the functional reference from ENGINE_init() */
ENGINE_finish ( engine ) ;
fail_init :
/* release the structural reference from ENGINE_by_id() */
ENGINE_free ( engine ) ;
fail_get :
return err_code ;
}
2017-05-29 08:36:20 -04:00
# endif
2017-01-20 20:10:18 -05:00
2022-05-16 10:24:33 -04:00
# ifdef HAVE_SSL_PROVIDERS
int ssl_init_provider ( const char * provider_name )
{
int err_code = ERR_ABORT ;
struct ssl_provider_list * prov = NULL ;
prov = calloc ( 1 , sizeof ( * prov ) ) ;
if ( ! prov ) {
ha_alert ( " ssl-provider %s: memory allocation failure \n " , provider_name ) ;
goto error ;
}
if ( ( prov - > provider = OSSL_PROVIDER_load ( NULL , provider_name ) ) = = NULL ) {
ha_alert ( " ssl-provider %s: unknown provider \n " , provider_name ) ;
goto error ;
}
LIST_INSERT ( & openssl_providers , & prov - > list ) ;
return 0 ;
error :
ha_free ( & prov ) ;
return err_code ;
}
# endif /* HAVE_SSL_PROVIDERS */
2020-11-13 15:56:34 -05:00
# ifdef SSL_MODE_ASYNC
2017-05-17 14:42:48 -04:00
/*
* openssl async fd handler
*/
2019-04-19 11:15:28 -04:00
void ssl_async_fd_handler ( int fd )
2017-01-13 20:42:15 -05:00
{
2019-05-20 08:02:16 -04:00
struct ssl_sock_ctx * ctx = fdtab [ fd ] . owner ;
2017-01-13 20:42:15 -05:00
2017-05-17 14:42:48 -04:00
/* fd is an async enfine fd, we must stop
2017-01-13 20:42:15 -05:00
* to poll this fd until it is requested
*/
2017-06-02 11:54:06 -04:00
fd_stop_recv ( fd ) ;
2017-01-13 20:42:15 -05:00
fd_cant_recv ( fd ) ;
/* crypto engine is available, let's notify the associated
* connection that it can pursue its processing .
*/
BUG/MEDIUM: ssl: Don't call ssl_sock_io_cb() directly.
In the SSL code, when we were waiting for the availability of the crypto
engine, once it is ready and its fd's I/O handler is called, don't call
ssl_sock_io_cb() directly, instead, call tasklet_wakeup() on the
ssl_sock_ctx's tasklet. We were calling ssl_sock_io_cb() with NULL as
a tasklet, which used to be fine, but it is no longer true since the
fd takeover changes. We could just provide the tasklet, but let's just
wake the tasklet, as is done for other FDs, for fairness.
This should fix github issue #856.
This should be backported into 2.2.
2020-09-15 16:16:02 -04:00
tasklet_wakeup ( ctx - > wait_event . tasklet ) ;
2017-01-13 20:42:15 -05:00
}
2017-05-17 14:42:48 -04:00
/*
* openssl async delayed SSL_free handler
*/
2019-04-19 11:15:28 -04:00
void ssl_async_fd_free ( int fd )
2017-01-13 20:42:15 -05:00
{
SSL * ssl = fdtab [ fd ] . owner ;
2017-05-17 14:42:48 -04:00
OSSL_ASYNC_FD all_fd [ 32 ] ;
size_t num_all_fds = 0 ;
int i ;
/* We suppose that the async job for a same SSL *
* are serialized . So if we are awake it is
* because the running job has just finished
* and we can remove all async fds safely
*/
SSL_get_all_async_fds ( ssl , NULL , & num_all_fds ) ;
if ( num_all_fds > 32 ) {
send_log ( NULL , LOG_EMERG , " haproxy: openssl returns too many async fds. It seems a bug. Process may crash \n " ) ;
return ;
}
2017-01-13 20:42:15 -05:00
2017-05-17 14:42:48 -04:00
SSL_get_all_async_fds ( ssl , all_fd , & num_all_fds ) ;
2022-07-01 11:36:50 -04:00
for ( i = 0 ; i < num_all_fds ; i + + ) {
/* We want to remove the fd from the fdtab
* but we flag it to disown because the
* close is performed by the engine itself
*/
fdtab [ all_fd [ i ] ] . state | = FD_DISOWN ;
fd_delete ( all_fd [ i ] ) ;
}
2017-05-17 14:42:48 -04:00
/* Now we can safely call SSL_free, no more pending job in engines */
2017-01-13 20:42:15 -05:00
SSL_free ( ssl ) ;
2021-10-06 06:15:18 -04:00
_HA_ATOMIC_DEC ( & global . sslconns ) ;
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_DEC ( & jobs ) ;
2017-01-13 20:42:15 -05:00
}
/*
2017-05-17 14:42:48 -04:00
* function used to manage a returned SSL_ERROR_WANT_ASYNC
* and enable / disable polling for async fds
2017-01-13 20:42:15 -05:00
*/
2019-05-20 08:02:16 -04:00
static inline void ssl_async_process_fds ( struct ssl_sock_ctx * ctx )
2017-01-13 20:42:15 -05:00
{
2018-01-25 01:22:13 -05:00
OSSL_ASYNC_FD add_fd [ 32 ] ;
2017-05-17 14:42:48 -04:00
OSSL_ASYNC_FD del_fd [ 32 ] ;
2019-05-20 08:02:16 -04:00
SSL * ssl = ctx - > ssl ;
2017-01-13 20:42:15 -05:00
size_t num_add_fds = 0 ;
size_t num_del_fds = 0 ;
2017-05-17 14:42:48 -04:00
int i ;
2017-01-13 20:42:15 -05:00
SSL_get_changed_async_fds ( ssl , NULL , & num_add_fds , NULL ,
& num_del_fds ) ;
2017-05-17 14:42:48 -04:00
if ( num_add_fds > 32 | | num_del_fds > 32 ) {
send_log ( NULL , LOG_EMERG , " haproxy: openssl returns too many async fds. It seems a bug. Process may crash \n " ) ;
2017-01-13 20:42:15 -05:00
return ;
}
2017-05-17 14:42:48 -04:00
SSL_get_changed_async_fds ( ssl , add_fd , & num_add_fds , del_fd , & num_del_fds ) ;
2017-01-13 20:42:15 -05:00
2017-05-17 14:42:48 -04:00
/* We remove unused fds from the fdtab */
2022-07-01 11:36:50 -04:00
for ( i = 0 ; i < num_del_fds ; i + + ) {
/* We want to remove the fd from the fdtab
* but we flag it to disown because the
* close is performed by the engine itself
*/
fdtab [ del_fd [ i ] ] . state | = FD_DISOWN ;
fd_delete ( del_fd [ i ] ) ;
}
2017-01-13 20:42:15 -05:00
2017-05-17 14:42:48 -04:00
/* We add new fds to the fdtab */
for ( i = 0 ; i < num_add_fds ; i + + ) {
2022-07-07 02:29:00 -04:00
fd_insert ( add_fd [ i ] , ctx , ssl_async_fd_handler , tgid , ti - > ltid_bit ) ;
2017-01-13 20:42:15 -05:00
}
2017-05-17 14:42:48 -04:00
num_add_fds = 0 ;
SSL_get_all_async_fds ( ssl , NULL , & num_add_fds ) ;
if ( num_add_fds > 32 ) {
send_log ( NULL , LOG_EMERG , " haproxy: openssl returns too many async fds. It seems a bug. Process may crash \n " ) ;
return ;
2017-01-13 20:42:15 -05:00
}
2017-05-17 14:42:48 -04:00
/* We activate the polling for all known async fds */
SSL_get_all_async_fds ( ssl , add_fd , & num_add_fds ) ;
2017-05-31 06:02:53 -04:00
for ( i = 0 ; i < num_add_fds ; i + + ) {
2017-05-17 14:42:48 -04:00
fd_want_recv ( add_fd [ i ] ) ;
2017-05-31 06:02:53 -04:00
/* To ensure that the fd cache won't be used
* We ' ll prefer to catch a real RD event
* because handling an EAGAIN on this fd will
* result in a context switch and also
* some engines uses a fd in blocking mode .
*/
fd_cant_recv ( add_fd [ i ] ) ;
}
2017-05-17 14:42:48 -04:00
2017-01-13 20:42:15 -05:00
}
# endif
2022-02-08 11:45:59 -05:00
/*
* Initialize an HMAC context < hctx > using the < key > and < md > parameters .
* Returns - 1 in case of error , 1 otherwise .
*/
static int ssl_hmac_init ( MAC_CTX * hctx , unsigned char * key , int key_len , const EVP_MD * md )
{
# ifdef HAVE_OSSL_PARAM
OSSL_PARAM params [ 3 ] ;
params [ 0 ] = OSSL_PARAM_construct_octet_string ( OSSL_MAC_PARAM_KEY , key , key_len ) ;
params [ 1 ] = OSSL_PARAM_construct_utf8_string ( OSSL_MAC_PARAM_DIGEST , ( char * ) EVP_MD_name ( md ) , 0 ) ;
params [ 2 ] = OSSL_PARAM_construct_end ( ) ;
if ( EVP_MAC_CTX_set_params ( hctx , params ) = = 0 )
return - 1 ; /* error in mac initialisation */
# else
HMAC_Init_ex ( hctx , key , key_len , md , NULL ) ;
# endif
return 1 ;
}
2015-02-27 13:56:49 -05:00
# if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
2022-02-08 11:45:58 -05:00
static int ssl_tlsext_ticket_key_cb ( SSL * s , unsigned char key_name [ 16 ] , unsigned char * iv , EVP_CIPHER_CTX * ectx , MAC_CTX * hctx , int enc )
2015-02-27 13:56:49 -05:00
{
2022-09-06 11:04:55 -04:00
struct tls_keys_ref * ref = NULL ;
2019-01-10 11:51:55 -05:00
union tls_sess_key * keys ;
2015-02-27 13:56:49 -05:00
int head ;
int i ;
2018-02-16 05:23:49 -05:00
int ret = - 1 ; /* error by default */
2022-09-06 11:04:55 -04:00
struct connection * conn = SSL_get_ex_data ( s , ssl_app_data_index ) ;
# ifdef USE_QUIC
struct quic_conn * qc = SSL_get_ex_data ( s , ssl_qc_app_data_index ) ;
# endif
if ( conn )
ref = __objt_listener ( conn - > target ) - > bind_conf - > keys_ref ;
# ifdef USE_QUIC
else if ( qc )
ref = qc - > li - > bind_conf - > keys_ref ;
# endif
if ( ! ref ) {
/* must never happen */
ABORT_NOW ( ) ;
}
2015-02-27 13:56:49 -05:00
2018-02-16 05:23:49 -05:00
HA_RWLOCK_RDLOCK ( TLSKEYS_REF_LOCK , & ref - > lock ) ;
keys = ref - > tlskeys ;
head = ref - > tls_ticket_enc_index ;
2015-02-27 13:56:49 -05:00
if ( enc ) {
memcpy ( key_name , keys [ head ] . name , 16 ) ;
if ( ! RAND_pseudo_bytes ( iv , EVP_MAX_IV_LENGTH ) )
2018-02-16 05:23:49 -05:00
goto end ;
2015-02-27 13:56:49 -05:00
2019-01-10 11:51:55 -05:00
if ( ref - > key_size_bits = = 128 ) {
2015-02-27 13:56:49 -05:00
2019-01-10 11:51:55 -05:00
if ( ! EVP_EncryptInit_ex ( ectx , EVP_aes_128_cbc ( ) , NULL , keys [ head ] . key_128 . aes_key , iv ) )
goto end ;
2022-02-08 11:45:59 -05:00
if ( ssl_hmac_init ( hctx , keys [ head ] . key_128 . hmac_key , 16 , TLS_TICKET_HASH_FUNCT ( ) ) < 0 )
goto end ;
2019-01-10 11:51:55 -05:00
ret = 1 ;
}
else if ( ref - > key_size_bits = = 256 ) {
if ( ! EVP_EncryptInit_ex ( ectx , EVP_aes_256_cbc ( ) , NULL , keys [ head ] . key_256 . aes_key , iv ) )
goto end ;
2022-02-08 11:45:59 -05:00
if ( ssl_hmac_init ( hctx , keys [ head ] . key_256 . hmac_key , 32 , TLS_TICKET_HASH_FUNCT ( ) ) < 0 )
goto end ;
2019-01-10 11:51:55 -05:00
ret = 1 ;
}
2015-02-27 13:56:49 -05:00
} else {
for ( i = 0 ; i < TLS_TICKETS_NO ; i + + ) {
if ( ! memcmp ( key_name , keys [ ( head + i ) % TLS_TICKETS_NO ] . name , 16 ) )
goto found ;
}
2018-02-16 05:23:49 -05:00
ret = 0 ;
goto end ;
2015-02-27 13:56:49 -05:00
2018-02-16 05:23:49 -05:00
found :
2019-01-10 11:51:55 -05:00
if ( ref - > key_size_bits = = 128 ) {
2022-02-08 11:45:59 -05:00
if ( ssl_hmac_init ( hctx , keys [ ( head + i ) % TLS_TICKETS_NO ] . key_128 . hmac_key , 16 , TLS_TICKET_HASH_FUNCT ( ) ) < 0 )
goto end ;
2019-01-10 11:51:55 -05:00
if ( ! EVP_DecryptInit_ex ( ectx , EVP_aes_128_cbc ( ) , NULL , keys [ ( head + i ) % TLS_TICKETS_NO ] . key_128 . aes_key , iv ) )
goto end ;
/* 2 for key renewal, 1 if current key is still valid */
ret = i ? 2 : 1 ;
}
else if ( ref - > key_size_bits = = 256 ) {
2022-02-08 11:45:59 -05:00
if ( ssl_hmac_init ( hctx , keys [ ( head + i ) % TLS_TICKETS_NO ] . key_256 . hmac_key , 32 , TLS_TICKET_HASH_FUNCT ( ) ) < 0 )
goto end ;
2019-01-10 11:51:55 -05:00
if ( ! EVP_DecryptInit_ex ( ectx , EVP_aes_256_cbc ( ) , NULL , keys [ ( head + i ) % TLS_TICKETS_NO ] . key_256 . aes_key , iv ) )
goto end ;
/* 2 for key renewal, 1 if current key is still valid */
ret = i ? 2 : 1 ;
}
2015-02-27 13:56:49 -05:00
}
2019-01-10 11:51:55 -05:00
2018-02-16 05:23:49 -05:00
end :
HA_RWLOCK_RDUNLOCK ( TLSKEYS_REF_LOCK , & ref - > lock ) ;
return ret ;
2015-02-27 13:56:49 -05:00
}
2015-05-09 02:46:01 -04:00
struct tls_keys_ref * tlskeys_ref_lookup ( const char * filename )
{
struct tls_keys_ref * ref ;
list_for_each_entry ( ref , & tlskeys_reference , list )
if ( ref - > filename & & strcmp ( filename , ref - > filename ) = = 0 )
return ref ;
return NULL ;
}
struct tls_keys_ref * tlskeys_ref_lookupid ( int unique_id )
{
struct tls_keys_ref * ref ;
list_for_each_entry ( ref , & tlskeys_reference , list )
if ( ref - > unique_id = = unique_id )
return ref ;
return NULL ;
}
2020-03-10 03:06:11 -04:00
/* Update the key into ref: if keysize doesn't
2019-01-10 11:51:55 -05:00
* match existing ones , this function returns - 1
* else it returns 0 on success .
*/
int ssl_sock_update_tlskey_ref ( struct tls_keys_ref * ref ,
2018-07-13 05:56:34 -04:00
struct buffer * tlskey )
2018-02-16 05:23:49 -05:00
{
2019-01-10 11:51:55 -05:00
if ( ref - > key_size_bits = = 128 ) {
if ( tlskey - > data ! = sizeof ( struct tls_sess_key_128 ) )
return - 1 ;
}
else if ( ref - > key_size_bits = = 256 ) {
if ( tlskey - > data ! = sizeof ( struct tls_sess_key_256 ) )
return - 1 ;
}
else
return - 1 ;
2018-02-16 05:23:49 -05:00
HA_RWLOCK_WRLOCK ( TLSKEYS_REF_LOCK , & ref - > lock ) ;
2018-07-13 04:54:26 -04:00
memcpy ( ( char * ) ( ref - > tlskeys + ( ( ref - > tls_ticket_enc_index + 2 ) % TLS_TICKETS_NO ) ) ,
tlskey - > area , tlskey - > data ) ;
2018-02-16 05:23:49 -05:00
ref - > tls_ticket_enc_index = ( ref - > tls_ticket_enc_index + 1 ) % TLS_TICKETS_NO ;
HA_RWLOCK_WRUNLOCK ( TLSKEYS_REF_LOCK , & ref - > lock ) ;
2019-01-10 11:51:55 -05:00
return 0 ;
2018-02-16 05:23:49 -05:00
}
2018-07-13 05:56:34 -04:00
int ssl_sock_update_tlskey ( char * filename , struct buffer * tlskey , char * * err )
2018-02-16 05:23:49 -05:00
{
2015-05-09 02:46:01 -04:00
struct tls_keys_ref * ref = tlskeys_ref_lookup ( filename ) ;
if ( ! ref ) {
memprintf ( err , " Unable to locate the referenced filename: %s " , filename ) ;
return 1 ;
}
2019-01-10 11:51:55 -05:00
if ( ssl_sock_update_tlskey_ref ( ref , tlskey ) < 0 ) {
memprintf ( err , " Invalid key size " ) ;
return 1 ;
}
2015-05-09 02:46:01 -04:00
return 0 ;
}
/* This function finalize the configuration parsing. Its set all the
2016-12-22 16:46:15 -05:00
* automatic ids . It ' s called just after the basic checks . It returns
* 0 on success otherwise ERR_ * .
2015-05-09 02:46:01 -04:00
*/
2016-12-22 16:46:15 -05:00
static int tlskeys_finalize_config ( void )
2015-05-09 02:46:01 -04:00
{
int i = 0 ;
struct tls_keys_ref * ref , * ref2 , * ref3 ;
struct list tkr = LIST_HEAD_INIT ( tkr ) ;
list_for_each_entry ( ref , & tlskeys_reference , list ) {
if ( ref - > unique_id = = - 1 ) {
/* Look for the first free id. */
while ( 1 ) {
list_for_each_entry ( ref2 , & tlskeys_reference , list ) {
if ( ref2 - > unique_id = = i ) {
i + + ;
break ;
}
}
if ( & ref2 - > list = = & tlskeys_reference )
break ;
}
/* Uses the unique id and increment it for the next entry. */
ref - > unique_id = i ;
i + + ;
}
}
/* This sort the reference list by id. */
list_for_each_entry_safe ( ref , ref2 , & tlskeys_reference , list ) {
2021-04-21 01:32:39 -04:00
LIST_DELETE ( & ref - > list ) ;
2015-05-09 02:46:01 -04:00
list_for_each_entry ( ref3 , & tkr , list ) {
if ( ref - > unique_id < ref3 - > unique_id ) {
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & ref3 - > list , & ref - > list ) ;
2015-05-09 02:46:01 -04:00
break ;
}
}
if ( & ref3 - > list = = & tkr )
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & tkr , & ref - > list ) ;
2015-05-09 02:46:01 -04:00
}
/* swap root */
2023-05-09 08:15:57 -04:00
LIST_SPLICE ( & tlskeys_reference , & tkr ) ;
2020-11-06 09:24:23 -05:00
return ERR_NONE ;
2015-05-09 02:46:01 -04:00
}
2015-02-27 13:56:49 -05:00
# endif /* SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB */
2019-10-14 08:51:41 -04:00
2020-10-18 00:11:50 -04:00
# if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
2014-06-16 12:36:30 -04:00
/*
* This function enables the handling of OCSP status extension on ' ctx ' if a
2019-10-11 02:59:13 -04:00
* ocsp_response buffer was found in the cert_key_and_chain . To enable OCSP
* status extension , the issuer ' s certificate is mandatory . It should be
* present in ckch - > ocsp_issuer .
2014-06-16 12:36:30 -04:00
*
2019-10-11 02:59:13 -04:00
* In addition , the ckch - > ocsp_reponse buffer is loaded as a DER format of an
* OCSP response . If file is empty or content is not a valid OCSP response ,
* OCSP status extension is enabled but OCSP response is ignored ( a warning is
* displayed ) .
2014-06-16 12:36:30 -04:00
*
* Returns 1 if no " .ocsp " file found , 0 if OCSP status extension is
2018-11-15 12:07:59 -05:00
* successfully enabled , or - 1 in other error case .
2014-06-16 12:36:30 -04:00
*/
2023-03-01 10:11:50 -05:00
static int ssl_sock_load_ocsp ( const char * path , SSL_CTX * ctx , struct ckch_data * data , STACK_OF ( X509 ) * chain )
2014-06-16 12:36:30 -04:00
{
2020-02-18 09:56:39 -05:00
X509 * x , * issuer ;
2014-06-16 12:36:30 -04:00
int i , ret = - 1 ;
struct certificate_ocsp * ocsp = NULL , * iocsp ;
char * warn = NULL ;
unsigned char * p ;
2022-11-23 10:41:25 -05:00
# ifndef USE_OPENSSL_WOLFSSL
2023-07-06 18:41:46 -04:00
# if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
int ( * callback ) ( SSL * , void * ) ;
# else
2016-08-29 07:26:37 -04:00
void ( * callback ) ( void ) ;
2023-07-06 18:41:46 -04:00
# endif
2022-11-23 10:41:25 -05:00
# else
tlsextStatusCb callback ;
# endif
2022-12-20 05:11:12 -05:00
struct buffer * ocsp_uri = get_trash_chunk ( ) ;
2023-03-13 10:56:32 -04:00
char * err = NULL ;
2023-04-07 11:49:37 -04:00
size_t path_len ;
BUG/MAJOR: ocsp: Separate refcount per instance and per store
With the current way OCSP responses are stored, a single OCSP response
is stored (in a certificate_ocsp structure) when it is loaded during a
certificate parsing, and each SSL_CTX that references it increments its
refcount. The reference to the certificate_ocsp is kept in the SSL_CTX
linked to each ckch_inst, in an ex_data entry that gets freed when the
context is freed.
One of the downsides of this implementation is that if every ckch_inst
referencing a certificate_ocsp gets destroyed, then the OCSP response is
removed from the system. So if we were to remove all crt-list lines
containing a given certificate (that has an OCSP response), and if all
the corresponding SSL_CTXs were destroyed (no ongoing connection using
them), the OCSP response would be destroyed even if the certificate
remains in the system (as an unused certificate).
In such a case, we would want the OCSP response not to be "usable",
since it is not used by any ckch_inst, but still remain in the OCSP
response tree so that if the certificate gets reused (via an "add ssl
crt-list" command for instance), its OCSP response is still known as
well.
But we would also like such an entry not to be updated automatically
anymore once no instance uses it. An easy way to do it could have been
to keep a reference to the certificate_ocsp structure in the ckch_store
as well, on top of all the ones in the ckch_instances, and to remove the
ocsp response from the update tree once the refcount falls to 1, but it
would not work because of the way the ocsp response tree keys are
calculated. They are decorrelated from the ckch_store and are the actual
OCSP_CERTIDs, which is a combination of the issuer's name hash and key
hash, and the certificate's serial number. So two copies of the same
certificate but with different names would still point to the same ocsp
response tree entry.
The solution that answers all the needs expressed above is actually
to have two reference counters in the certificate_ocsp structure, one
actual reference counter corresponding to the number of "live" pointers
on the certificate_ocsp structure, incremented for every SSL_CTX using
it, and one for the ckch stores.
If the ckch_store reference counter falls to 0, the corresponding
certificate must have been removed via CLI calls ('set ssl cert' for
instance).
If the actual refcount falls to 0, then no live SSL_CTX uses the
response anymore. It could happen if all the corresponding crt-list
lines were removed and there are no live SSL sessions using the
certificate anymore.
If any of the two refcounts becomes 0, we will always remove the
response from the auto update tree, because there's no point in spending
time updating an OCSP response that no new SSL connection will be able
to use. But the certificate_ocsp object won't be removed from the tree
unless both refcounts are 0.
Must be backported up to 2.8. Wait a little bit before backporting.
2024-03-14 10:38:30 -04:00
int inc_refcount_store = 0 ;
2024-03-25 11:50:25 -04:00
int enable_auto_update = ( data - > ocsp_update_mode = = SSL_SOCK_OCSP_UPDATE_ON | |
( data - > ocsp_update_mode = = SSL_SOCK_OCSP_UPDATE_DFLT & &
global_ssl . ocsp_update . mode = = SSL_SOCK_OCSP_UPDATE_ON ) ) ;
2014-06-16 12:36:30 -04:00
2022-11-22 05:51:53 -05:00
x = data - > cert ;
2014-06-16 12:36:30 -04:00
if ( ! x )
goto out ;
2023-03-13 10:56:32 -04:00
ssl_ocsp_get_uri_from_cert ( x , ocsp_uri , & err ) ;
2023-07-21 11:21:15 -04:00
if ( ! data - > ocsp_response & & ! data - > ocsp_cid ) {
2024-03-25 11:50:25 -04:00
/* In case of ocsp update mode set to 'on', this function might
* be called with no known ocsp response . If no ocsp uri can be
* found in the certificate , nothing needs to be done here . */
if ( ! enable_auto_update | | b_data ( ocsp_uri ) = = 0 ) {
2022-12-20 05:11:12 -05:00
ret = 0 ;
goto out ;
}
2024-03-25 11:50:25 -04:00
} else {
/* If we have an OCSP response provided and the ocsp auto update
* enabled , we must raise an error if no OCSP URI was found . */
if ( data - > ocsp_update_mode = = SSL_SOCK_OCSP_UPDATE_ON & & b_data ( ocsp_uri ) = = 0 )
goto out ;
2022-12-20 05:11:12 -05:00
}
2022-11-22 05:51:53 -05:00
issuer = data - > ocsp_issuer ;
2020-02-18 09:56:39 -05:00
/* take issuer from chain over ocsp_issuer, which is what has been done historically */
if ( chain ) {
/* check if one of the certificate of the chain is the issuer */
for ( i = 0 ; i < sk_X509_num ( chain ) ; i + + ) {
X509 * ti = sk_X509_value ( chain , i ) ;
if ( X509_check_issued ( ti , x ) = = X509_V_OK ) {
issuer = ti ;
break ;
}
}
}
2019-10-11 02:59:13 -04:00
if ( ! issuer )
goto out ;
2014-06-16 12:36:30 -04:00
BUG/MAJOR: ocsp: Separate refcount per instance and per store
With the current way OCSP responses are stored, a single OCSP response
is stored (in a certificate_ocsp structure) when it is loaded during a
certificate parsing, and each SSL_CTX that references it increments its
refcount. The reference to the certificate_ocsp is kept in the SSL_CTX
linked to each ckch_inst, in an ex_data entry that gets freed when the
context is freed.
One of the downsides of this implementation is that if every ckch_inst
referencing a certificate_ocsp gets destroyed, then the OCSP response is
removed from the system. So if we were to remove all crt-list lines
containing a given certificate (that has an OCSP response), and if all
the corresponding SSL_CTXs were destroyed (no ongoing connection using
them), the OCSP response would be destroyed even if the certificate
remains in the system (as an unused certificate).
In such a case, we would want the OCSP response not to be "usable",
since it is not used by any ckch_inst, but still remain in the OCSP
response tree so that if the certificate gets reused (via an "add ssl
crt-list" command for instance), its OCSP response is still known as
well.
But we would also like such an entry not to be updated automatically
anymore once no instance uses it. An easy way to do it could have been
to keep a reference to the certificate_ocsp structure in the ckch_store
as well, on top of all the ones in the ckch_instances, and to remove the
ocsp response from the update tree once the refcount falls to 1, but it
would not work because of the way the ocsp response tree keys are
calculated. They are decorrelated from the ckch_store and are the actual
OCSP_CERTIDs, which is a combination of the issuer's name hash and key
hash, and the certificate's serial number. So two copies of the same
certificate but with different names would still point to the same ocsp
response tree entry.
The solution that answers all the needs expressed above is actually
to have two reference counters in the certificate_ocsp structure, one
actual reference counter corresponding to the number of "live" pointers
on the certificate_ocsp structure, incremented for every SSL_CTX using
it, and one for the ckch stores.
If the ckch_store reference counter falls to 0, the corresponding
certificate must have been removed via CLI calls ('set ssl cert' for
instance).
If the actual refcount falls to 0, then no live SSL_CTX uses the
response anymore. It could happen if all the corresponding crt-list
lines were removed and there are no live SSL sessions using the
certificate anymore.
If any of the two refcounts becomes 0, we will always remove the
response from the auto update tree, because there's no point in spending
time updating an OCSP response that no new SSL connection will be able
to use. But the certificate_ocsp object won't be removed from the tree
unless both refcounts are 0.
Must be backported up to 2.8. Wait a little bit before backporting.
2024-03-14 10:38:30 -04:00
if ( ! data - > ocsp_cid ) {
2023-01-09 06:02:47 -05:00
data - > ocsp_cid = OCSP_cert_to_id ( 0 , x , issuer ) ;
BUG/MAJOR: ocsp: Separate refcount per instance and per store
With the current way OCSP responses are stored, a single OCSP response
is stored (in a certificate_ocsp structure) when it is loaded during a
certificate parsing, and each SSL_CTX that references it increments its
refcount. The reference to the certificate_ocsp is kept in the SSL_CTX
linked to each ckch_inst, in an ex_data entry that gets freed when the
context is freed.
One of the downsides of this implementation is that if every ckch_inst
referencing a certificate_ocsp gets destroyed, then the OCSP response is
removed from the system. So if we were to remove all crt-list lines
containing a given certificate (that has an OCSP response), and if all
the corresponding SSL_CTXs were destroyed (no ongoing connection using
them), the OCSP response would be destroyed even if the certificate
remains in the system (as an unused certificate).
In such a case, we would want the OCSP response not to be "usable",
since it is not used by any ckch_inst, but still remain in the OCSP
response tree so that if the certificate gets reused (via an "add ssl
crt-list" command for instance), its OCSP response is still known as
well.
But we would also like such an entry not to be updated automatically
anymore once no instance uses it. An easy way to do it could have been
to keep a reference to the certificate_ocsp structure in the ckch_store
as well, on top of all the ones in the ckch_instances, and to remove the
ocsp response from the update tree once the refcount falls to 1, but it
would not work because of the way the ocsp response tree keys are
calculated. They are decorrelated from the ckch_store and are the actual
OCSP_CERTIDs, which is a combination of the issuer's name hash and key
hash, and the certificate's serial number. So two copies of the same
certificate but with different names would still point to the same ocsp
response tree entry.
The solution that answers all the needs expressed above is actually
to have two reference counters in the certificate_ocsp structure, one
actual reference counter corresponding to the number of "live" pointers
on the certificate_ocsp structure, incremented for every SSL_CTX using
it, and one for the ckch stores.
If the ckch_store reference counter falls to 0, the corresponding
certificate must have been removed via CLI calls ('set ssl cert' for
instance).
If the actual refcount falls to 0, then no live SSL_CTX uses the
response anymore. It could happen if all the corresponding crt-list
lines were removed and there are no live SSL sessions using the
certificate anymore.
If any of the two refcounts becomes 0, we will always remove the
response from the auto update tree, because there's no point in spending
time updating an OCSP response that no new SSL connection will be able
to use. But the certificate_ocsp object won't be removed from the tree
unless both refcounts are 0.
Must be backported up to 2.8. Wait a little bit before backporting.
2024-03-14 10:38:30 -04:00
inc_refcount_store = 1 ;
}
2022-12-20 05:11:08 -05:00
if ( ! data - > ocsp_cid )
2014-06-16 12:36:30 -04:00
goto out ;
2022-12-20 05:11:08 -05:00
i = i2d_OCSP_CERTID ( data - > ocsp_cid , NULL ) ;
2014-06-16 12:36:30 -04:00
if ( ! i | | ( i > OCSP_MAX_CERTID_ASN1_LENGTH ) )
goto out ;
2023-04-07 11:49:37 -04:00
path_len = strlen ( path ) ;
ocsp = calloc ( 1 , sizeof ( * ocsp ) + path_len + 1 ) ;
2014-06-16 12:36:30 -04:00
if ( ! ocsp )
goto out ;
p = ocsp - > key_data ;
2022-12-20 05:11:08 -05:00
ocsp - > key_length = i2d_OCSP_CERTID ( data - > ocsp_cid , & p ) ;
2014-06-16 12:36:30 -04:00
2022-12-20 05:11:02 -05:00
HA_SPIN_LOCK ( OCSP_LOCK , & ocsp_tree_lock ) ;
2014-06-16 12:36:30 -04:00
iocsp = ( struct certificate_ocsp * ) ebmb_insert ( & cert_ocsp_tree , & ocsp - > key , OCSP_MAX_CERTID_ASN1_LENGTH ) ;
if ( iocsp = = ocsp )
ocsp = NULL ;
2016-08-29 07:26:37 -04:00
# ifndef SSL_CTX_get_tlsext_status_cb
# define SSL_CTX_get_tlsext_status_cb(ctx, cb) \
* cb = ( void ( * ) ( void ) ) ctx - > tlsext_status_cb ;
# endif
SSL_CTX_get_tlsext_status_cb ( ctx , & callback ) ;
BUG/MAJOR: ocsp: Separate refcount per instance and per store
With the current way OCSP responses are stored, a single OCSP response
is stored (in a certificate_ocsp structure) when it is loaded during a
certificate parsing, and each SSL_CTX that references it increments its
refcount. The reference to the certificate_ocsp is kept in the SSL_CTX
linked to each ckch_inst, in an ex_data entry that gets freed when the
context is freed.
One of the downsides of this implementation is that if every ckch_inst
referencing a certificate_ocsp gets destroyed, then the OCSP response is
removed from the system. So if we were to remove all crt-list lines
containing a given certificate (that has an OCSP response), and if all
the corresponding SSL_CTXs were destroyed (no ongoing connection using
them), the OCSP response would be destroyed even if the certificate
remains in the system (as an unused certificate).
In such a case, we would want the OCSP response not to be "usable",
since it is not used by any ckch_inst, but still remain in the OCSP
response tree so that if the certificate gets reused (via an "add ssl
crt-list" command for instance), its OCSP response is still known as
well.
But we would also like such an entry not to be updated automatically
anymore once no instance uses it. An easy way to do it could have been
to keep a reference to the certificate_ocsp structure in the ckch_store
as well, on top of all the ones in the ckch_instances, and to remove the
ocsp response from the update tree once the refcount falls to 1, but it
would not work because of the way the ocsp response tree keys are
calculated. They are decorrelated from the ckch_store and are the actual
OCSP_CERTIDs, which is a combination of the issuer's name hash and key
hash, and the certificate's serial number. So two copies of the same
certificate but with different names would still point to the same ocsp
response tree entry.
The solution that answers all the needs expressed above is actually
to have two reference counters in the certificate_ocsp structure, one
actual reference counter corresponding to the number of "live" pointers
on the certificate_ocsp structure, incremented for every SSL_CTX using
it, and one for the ckch stores.
If the ckch_store reference counter falls to 0, the corresponding
certificate must have been removed via CLI calls ('set ssl cert' for
instance).
If the actual refcount falls to 0, then no live SSL_CTX uses the
response anymore. It could happen if all the corresponding crt-list
lines were removed and there are no live SSL sessions using the
certificate anymore.
If any of the two refcounts becomes 0, we will always remove the
response from the auto update tree, because there's no point in spending
time updating an OCSP response that no new SSL connection will be able
to use. But the certificate_ocsp object won't be removed from the tree
unless both refcounts are 0.
Must be backported up to 2.8. Wait a little bit before backporting.
2024-03-14 10:38:30 -04:00
if ( inc_refcount_store )
iocsp - > refcount_store + + ;
2016-08-29 07:26:37 -04:00
if ( ! callback ) {
2020-07-31 05:43:20 -04:00
struct ocsp_cbk_arg * cb_arg ;
2017-01-06 06:57:46 -05:00
EVP_PKEY * pkey ;
2015-12-10 15:07:30 -05:00
2020-07-31 05:43:20 -04:00
cb_arg = calloc ( 1 , sizeof ( * cb_arg ) ) ;
if ( ! cb_arg )
goto out ;
2015-12-10 15:07:30 -05:00
cb_arg - > is_single = 1 ;
cb_arg - > s_ocsp = iocsp ;
2024-02-26 11:53:02 -05:00
iocsp - > refcount + + ;
2016-08-29 07:26:37 -04:00
2017-01-06 06:57:46 -05:00
pkey = X509_get_pubkey ( x ) ;
cb_arg - > single_kt = EVP_PKEY_base_id ( pkey ) ;
EVP_PKEY_free ( pkey ) ;
2015-12-10 15:07:30 -05:00
SSL_CTX_set_tlsext_status_cb ( ctx , ssl_sock_ocsp_stapling_cbk ) ;
2020-08-04 11:41:39 -04:00
SSL_CTX_set_ex_data ( ctx , ocsp_ex_index , cb_arg ) ; /* we use the ex_data instead of the cb_arg function here, so we can use the cleanup callback to free */
2015-12-10 15:07:30 -05:00
} else {
/*
* If the ctx has a status CB , then we have previously set an OCSP staple for this ctx
* Update that cb_arg with the new cert ' s staple
*/
2016-08-29 07:26:37 -04:00
struct ocsp_cbk_arg * cb_arg ;
2015-12-10 15:07:30 -05:00
struct certificate_ocsp * tmp_ocsp ;
int index ;
2016-08-29 07:26:37 -04:00
int key_type ;
2017-01-06 06:57:46 -05:00
EVP_PKEY * pkey ;
2016-08-29 07:26:37 -04:00
2020-08-04 11:41:39 -04:00
cb_arg = SSL_CTX_get_ex_data ( ctx , ocsp_ex_index ) ;
2015-12-10 15:07:30 -05:00
/*
* The following few lines will convert cb_arg from a single ocsp to multi ocsp
* the order of operations below matter , take care when changing it
*/
tmp_ocsp = cb_arg - > s_ocsp ;
index = ssl_sock_get_ocsp_arg_kt_index ( cb_arg - > single_kt ) ;
cb_arg - > s_ocsp = NULL ;
cb_arg - > m_ocsp [ index ] = tmp_ocsp ;
cb_arg - > is_single = 0 ;
cb_arg - > single_kt = 0 ;
2017-01-06 06:57:46 -05:00
pkey = X509_get_pubkey ( x ) ;
key_type = EVP_PKEY_base_id ( pkey ) ;
EVP_PKEY_free ( pkey ) ;
2016-08-29 07:26:37 -04:00
index = ssl_sock_get_ocsp_arg_kt_index ( key_type ) ;
2020-08-04 11:41:39 -04:00
if ( index > = 0 & & ! cb_arg - > m_ocsp [ index ] ) {
2015-12-10 15:07:30 -05:00
cb_arg - > m_ocsp [ index ] = iocsp ;
2024-02-26 11:53:02 -05:00
iocsp - > refcount + + ;
2020-08-04 11:41:39 -04:00
}
2015-12-10 15:07:30 -05:00
}
2022-12-20 05:11:02 -05:00
HA_SPIN_UNLOCK ( OCSP_LOCK , & ocsp_tree_lock ) ;
2014-06-16 12:36:30 -04:00
ret = 0 ;
warn = NULL ;
2022-12-20 05:11:12 -05:00
if ( data - > ocsp_response & & ssl_sock_load_ocsp_response ( data - > ocsp_response , iocsp , data - > ocsp_cid , & warn ) ) {
2019-10-16 12:05:05 -04:00
memprintf ( & warn , " Loading: %s. Content will be ignored " , warn ? warn : " failure " ) ;
2017-11-24 10:50:31 -05:00
ha_warning ( " %s. \n " , warn ) ;
2014-06-16 12:36:30 -04:00
}
2022-12-20 05:11:12 -05:00
2023-03-13 10:56:32 -04:00
/* Do not insert the same certificate_ocsp structure in the
* update tree more than once . */
if ( ! ocsp ) {
/* Issuer certificate is not included in the certificate
* chain , it will have to be treated separately during
* ocsp response validation . */
if ( issuer = = data - > ocsp_issuer ) {
iocsp - > issuer = issuer ;
X509_up_ref ( issuer ) ;
}
if ( data - > chain )
iocsp - > chain = X509_chain_up_ref ( data - > chain ) ;
iocsp - > uri = calloc ( 1 , sizeof ( * iocsp - > uri ) ) ;
if ( ! chunk_dup ( iocsp - > uri , ocsp_uri ) ) {
ha_free ( & iocsp - > uri ) ;
goto out ;
}
2022-12-20 05:11:12 -05:00
2023-04-07 11:49:37 -04:00
/* Note: if we arrive here, ocsp==NULL because iocsp==ocsp
* after the ebmb_insert ( ) , which indicates that we ' ve
* just inserted this new node and that it ' s the one for
* which we previously allocated enough room for path_len + 1
* chars .
*/
memcpy ( iocsp - > path , path , path_len + 1 ) ;
2023-03-01 10:11:50 -05:00
2024-03-25 11:50:25 -04:00
if ( enable_auto_update ) {
2022-12-20 05:11:12 -05:00
ssl_ocsp_update_insert ( iocsp ) ;
2023-03-02 09:49:53 -05:00
/* If we are during init the update task is not
* scheduled yet so a wakeup won ' t do anything .
* Otherwise , if the OCSP was added through the CLI , we
* wake the task up to manage the case of a new entry
* that needs to be updated before the previous first
2024-02-07 10:38:45 -05:00
* entry .
*/
if ( ocsp_update_task )
task_wakeup ( ocsp_update_task , TASK_WOKEN_MSG ) ;
}
2024-03-25 11:50:25 -04:00
} else if ( iocsp - > uri & & enable_auto_update ) {
2024-02-07 10:38:45 -05:00
/* This unlikely case can happen if a series of "del ssl
* crt - list " / " add ssl crt - list " commands are made on the CLI.
* In such a case , the OCSP response tree entry will be created
* prior to the activation of the ocsp auto update and in such a
* case we must " force " insertion in the auto update tree .
*/
if ( iocsp - > next_update . node . leaf_p = = NULL ) {
ssl_ocsp_update_insert ( iocsp ) ;
/* If we are during init the update task is not
* scheduled yet so a wakeup won ' t do anything .
* Otherwise , if the OCSP was added through the CLI , we
* wake the task up to manage the case of a new entry
* that needs to be updated before the previous first
2023-03-02 09:49:53 -05:00
* entry .
*/
if ( ocsp_update_task )
task_wakeup ( ocsp_update_task , TASK_WOKEN_MSG ) ;
2022-12-20 05:11:12 -05:00
}
}
2014-06-16 12:36:30 -04:00
out :
2023-12-05 08:50:40 -05:00
if ( ret & & data - > ocsp_cid ) {
2022-12-20 05:11:08 -05:00
OCSP_CERTID_free ( data - > ocsp_cid ) ;
2023-12-05 08:50:40 -05:00
data - > ocsp_cid = NULL ;
}
2022-12-20 05:11:08 -05:00
if ( ! ret & & data - > ocsp_response ) {
ha_free ( & data - > ocsp_response - > area ) ;
ha_free ( & data - > ocsp_response ) ;
}
2014-06-16 12:36:30 -04:00
if ( ocsp )
2022-11-03 10:16:49 -04:00
ssl_sock_free_ocsp ( ocsp ) ;
2014-06-16 12:36:30 -04:00
if ( warn )
free ( warn ) ;
2023-03-19 11:07:47 -04:00
free ( err ) ;
2014-06-16 12:36:30 -04:00
return ret ;
}
2022-12-20 05:11:17 -05:00
2020-10-26 08:55:30 -04:00
# endif
# ifdef OPENSSL_IS_BORINGSSL
2023-03-01 10:11:50 -05:00
/* BoringSSL variant of the OCSP loading function: hand the raw DER-encoded
 * OCSP response stored in <data> straight to the SSL context.
 * <path> and <chain> are unused in this variant but kept for signature
 * compatibility with the OpenSSL implementation.
 * Returns the result of SSL_CTX_set_ocsp_response() (1 on success, 0 on
 * failure).
 */
static int ssl_sock_load_ocsp(const char *path, SSL_CTX *ctx, struct ckch_data *data, STACK_OF(X509) *chain)
{
	/* BUG fix: the parameter is named <data>, not <ckch>; the previous
	 * code dereferenced an undeclared identifier left over from the
	 * ckch -> data rename.
	 * NOTE(review): data->ocsp_response is presumed non-NULL here, as the
	 * original code did not check it either — confirm at call sites.
	 */
	return SSL_CTX_set_ocsp_response(ctx, (const uint8_t *)data->ocsp_response->area,
	                                 data->ocsp_response->data);
}
# endif
2019-10-14 08:51:41 -04:00
2021-02-06 08:55:27 -05:00
# ifdef HAVE_SSL_CTX_ADD_SERVER_CUSTOM_EXT
2015-03-07 17:03:59 -05:00
# define CT_EXTENSION_TYPE 18
REORG: ssl: move the ckch_store related functions to src/ssl_ckch.c
Move the cert_key_and_chain functions:
int ssl_sock_load_files_into_ckch(const char *path, struct cert_key_and_chain *ckch, char **err);
int ssl_sock_load_pem_into_ckch(const char *path, char *buf, struct cert_key_and_chain *ckch , char **err);
void ssl_sock_free_cert_key_and_chain_contents(struct cert_key_and_chain *ckch);
int ssl_sock_load_key_into_ckch(const char *path, char *buf, struct cert_key_and_chain *ckch , char **err);
int ssl_sock_load_ocsp_response_from_file(const char *ocsp_path, char *buf, struct cert_key_and_chain *ckch, char **err);
int ssl_sock_load_sctl_from_file(const char *sctl_path, char *buf, struct cert_key_and_chain *ckch, char **err);
int ssl_sock_load_issuer_file_into_ckch(const char *path, char *buf, struct cert_key_and_chain *ckch, char **err);
And the utility ckch_store functions:
void ckch_store_free(struct ckch_store *store)
struct ckch_store *ckch_store_new(const char *filename, int nmemb)
struct ckch_store *ckchs_dup(const struct ckch_store *src)
ckch_store *ckchs_lookup(char *path)
ckch_store *ckchs_load_cert_file(char *path, int multi, char **err)
2020-05-13 04:10:01 -04:00
/* SSL_CTX ex_data slot index used to attach the SCTL buffer to a context
 * (see the SSL_CTX_set_ex_data() call in ssl_sock_load_sctl() below);
 * -1 means the index has not been registered yet.
 */
int sctl_ex_index = - 1 ;
2015-03-07 17:03:59 -05:00
/* Add-side callback for the Certificate Transparency (SCT list) custom TLS
 * extension: exposes the pre-loaded SCTL buffer passed via <add_arg> as the
 * extension payload. Always succeeds (returns 1).
 */
int ssl_sock_sctl_add_cbk(SSL *ssl, unsigned ext_type, const unsigned char **out, size_t *outlen, int *al, void *add_arg)
{
	struct buffer *sctl_buf = add_arg;

	*out = (unsigned char *)sctl_buf->area;
	*outlen = sctl_buf->data;
	return 1;
}
/* Parse-side callback for the Certificate Transparency custom TLS extension.
 * The extension content is not examined here: it is accepted unconditionally
 * (returning 1 lets the handshake proceed).
 */
int ssl_sock_sctl_parse_cbk(SSL *s, unsigned int ext_type, const unsigned char *in, size_t inlen, int *al, void *parse_arg)
{
	return 1;
}
2019-10-10 09:16:44 -04:00
/* Registers the CT custom-extension callbacks on <ctx> and attaches the
 * <sctl> buffer to the context's ex_data so it can be served and freed
 * later. Returns 0 on success, -1 on failure.
 */
static int ssl_sock_load_sctl(SSL_CTX *ctx, struct buffer *sctl)
{
	int err = -1;

	if (!SSL_CTX_add_server_custom_ext(ctx, CT_EXTENSION_TYPE,
	                                   ssl_sock_sctl_add_cbk, NULL, sctl,
	                                   ssl_sock_sctl_parse_cbk, NULL))
		goto end;

	SSL_CTX_set_ex_data(ctx, sctl_ex_index, sctl);
	err = 0;
 end:
	return err;
}
# endif
2012-09-03 14:36:47 -04:00
void ssl_sock_infocbk ( const SSL * ssl , int where , int ret )
{
BUG/MAJOR: ssl: OpenSSL context is stored in non-reserved memory slot
We never saw unexplicated crash with SSL, so I suppose that we are
luck, or the slot 0 is always reserved. Anyway the usage of the macro
SSL_get_app_data() and SSL_set_app_data() seem wrong. This patch change
the deprecated functions SSL_get_app_data() and SSL_set_app_data()
by the new functions SSL_get_ex_data() and SSL_set_ex_data(), and
it reserves the slot in the SSL memory space.
For information, this is the two declaration which seems wrong or
incomplete in the OpenSSL ssl.h file. We can see the usage of the
slot 0 whoch is hardcoded, but never reserved.
#define SSL_set_app_data(s,arg) (SSL_set_ex_data(s,0,(char *)arg))
#define SSL_get_app_data(s) (SSL_get_ex_data(s,0))
This patch must be backported at least in 1.8, maybe in other versions.
2018-06-17 15:37:05 -04:00
struct connection * conn = SSL_get_ex_data ( ssl , ssl_app_data_index ) ;
2022-01-19 04:03:30 -05:00
# ifdef USE_QUIC
struct quic_conn * qc = SSL_get_ex_data ( ssl , ssl_qc_app_data_index ) ;
# endif /* USE_QUIC */
struct ssl_sock_ctx * ctx = NULL ;
2014-01-28 09:43:53 -05:00
BIO * write_bio ;
2015-02-27 10:36:16 -05:00
( void ) ret ; /* shut gcc stupid warning */
2012-09-03 14:36:47 -04:00
2022-01-19 04:03:30 -05:00
if ( conn )
2022-04-11 05:29:11 -04:00
ctx = conn_get_ssl_sock_ctx ( conn ) ;
2022-01-19 04:03:30 -05:00
# ifdef USE_QUIC
else if ( qc )
ctx = qc - > xprt_ctx ;
# endif /* USE_QUIC */
2022-01-24 05:04:05 -05:00
if ( ! ctx ) {
/* must never happen */
ABORT_NOW ( ) ;
return ;
}
2022-01-19 04:03:30 -05:00
2019-01-21 12:35:03 -05:00
# ifndef SSL_OP_NO_RENEGOTIATION
/* Please note that BoringSSL defines this macro to zero so don't
* change this to # if and do not assign a default value to this macro !
*/
2012-09-03 14:36:47 -04:00
if ( where & SSL_CB_HANDSHAKE_START ) {
/* Disable renegotiation (CVE-2009-3555) */
2022-01-19 04:03:30 -05:00
if ( conn & & ( conn - > flags & ( CO_FL_WAIT_L6_CONN | CO_FL_EARLY_SSL_HS | CO_FL_EARLY_DATA ) ) = = 0 ) {
2012-09-03 14:36:47 -04:00
conn - > flags | = CO_FL_ERROR ;
2012-12-03 10:32:10 -05:00
conn - > err_code = CO_ER_SSL_RENEG ;
}
2012-09-03 14:36:47 -04:00
}
2019-01-21 12:35:03 -05:00
# endif
2014-01-28 09:43:53 -05:00
if ( ( where & SSL_CB_ACCEPT_LOOP ) = = SSL_CB_ACCEPT_LOOP ) {
2019-02-28 12:10:45 -05:00
if ( ! ( ctx - > xprt_st & SSL_SOCK_ST_FL_16K_WBFSIZE ) ) {
2014-01-28 09:43:53 -05:00
/* Long certificate chains optimz
2020-03-10 03:06:11 -04:00
If write and read bios are different , we
2014-01-28 09:43:53 -05:00
consider that the buffering was activated ,
so we rise the output buffer size from 4 k
to 16 k */
write_bio = SSL_get_wbio ( ssl ) ;
if ( write_bio ! = SSL_get_rbio ( ssl ) ) {
BIO_set_write_buffer_size ( write_bio , 16384 ) ;
2019-02-28 12:10:45 -05:00
ctx - > xprt_st | = SSL_SOCK_ST_FL_16K_WBFSIZE ;
2014-01-28 09:43:53 -05:00
}
}
}
2012-09-03 14:36:47 -04:00
}
2012-09-21 07:15:06 -04:00
/* Callback is called for each certificate of the chain during a verify
ok is set to 1 if preverify detect no error on current certificate .
Returns 0 to break the handshake , 1 otherwise . */
2013-06-27 03:05:25 -04:00
int ssl_sock_bind_verifycbk ( int ok , X509_STORE_CTX * x_store )
2012-09-21 07:15:06 -04:00
{
SSL * ssl ;
struct connection * conn ;
2022-09-06 13:37:08 -04:00
struct ssl_sock_ctx * ctx = NULL ;
2012-09-21 08:31:21 -04:00
int err , depth ;
2021-08-19 12:06:30 -04:00
X509 * client_crt ;
STACK_OF ( X509 ) * certs ;
2022-10-14 03:34:00 -04:00
struct bind_conf * bind_conf = NULL ;
2022-09-06 13:37:08 -04:00
struct quic_conn * qc = NULL ;
2012-09-21 07:15:06 -04:00
ssl = X509_STORE_CTX_get_ex_data ( x_store , SSL_get_ex_data_X509_STORE_CTX_idx ( ) ) ;
BUG/MAJOR: ssl: OpenSSL context is stored in non-reserved memory slot
We never saw unexplicated crash with SSL, so I suppose that we are
luck, or the slot 0 is always reserved. Anyway the usage of the macro
SSL_get_app_data() and SSL_set_app_data() seem wrong. This patch change
the deprecated functions SSL_get_app_data() and SSL_set_app_data()
by the new functions SSL_get_ex_data() and SSL_set_ex_data(), and
it reserves the slot in the SSL memory space.
For information, this is the two declaration which seems wrong or
incomplete in the OpenSSL ssl.h file. We can see the usage of the
slot 0 whoch is hardcoded, but never reserved.
#define SSL_set_app_data(s,arg) (SSL_set_ex_data(s,0,(char *)arg))
#define SSL_get_app_data(s) (SSL_get_ex_data(s,0))
This patch must be backported at least in 1.8, maybe in other versions.
2018-06-17 15:37:05 -04:00
conn = SSL_get_ex_data ( ssl , ssl_app_data_index ) ;
2021-08-19 12:06:30 -04:00
client_crt = SSL_get_ex_data ( ssl , ssl_client_crt_ref_index ) ;
2012-09-21 07:15:06 -04:00
2022-09-06 13:37:08 -04:00
if ( conn ) {
bind_conf = __objt_listener ( conn - > target ) - > bind_conf ;
ctx = __conn_get_ssl_sock_ctx ( conn ) ;
}
# ifdef USE_QUIC
else {
qc = SSL_get_ex_data ( ssl , ssl_qc_app_data_index ) ;
2022-10-17 12:46:49 -04:00
BUG_ON ( ! qc ) ; /* Must never happen */
bind_conf = qc - > li - > bind_conf ;
ctx = qc - > xprt_ctx ;
2022-09-06 13:37:08 -04:00
}
# endif
2022-10-14 03:34:00 -04:00
BUG_ON ( ! ctx | | ! bind_conf ) ;
2022-11-23 03:27:13 -05:00
ALREADY_CHECKED ( ctx ) ;
ALREADY_CHECKED ( bind_conf ) ;
2022-09-06 13:37:08 -04:00
2019-02-28 12:10:45 -05:00
ctx - > xprt_st | = SSL_SOCK_ST_FL_VERIFY_DONE ;
2012-09-21 07:15:06 -04:00
2021-08-19 12:06:30 -04:00
depth = X509_STORE_CTX_get_error_depth ( x_store ) ;
err = X509_STORE_CTX_get_error ( x_store ) ;
2012-09-21 08:31:21 -04:00
if ( ok ) /* no errors */
return ok ;
2021-08-19 12:06:30 -04:00
/* Keep a reference to the client's certificate in order to be able to
* dump some fetches values in a log even when the verification process
* fails . */
if ( depth = = 0 ) {
X509_free ( client_crt ) ;
client_crt = X509_STORE_CTX_get0_cert ( x_store ) ;
if ( client_crt ) {
X509_up_ref ( client_crt ) ;
SSL_set_ex_data ( ssl , ssl_client_crt_ref_index , client_crt ) ;
}
}
else {
/* An error occurred on a CA certificate of the certificate
* chain , we might never call this verify callback on the client
* certificate ' s depth ( which is 0 ) so we try to store the
* reference right now . */
2021-08-20 03:51:23 -04:00
certs = X509_STORE_CTX_get1_chain ( x_store ) ;
if ( certs ) {
client_crt = sk_X509_value ( certs , 0 ) ;
if ( client_crt ) {
X509_up_ref ( client_crt ) ;
SSL_set_ex_data ( ssl , ssl_client_crt_ref_index , client_crt ) ;
2021-08-19 12:06:30 -04:00
}
sk_X509_pop_free ( certs , X509_free ) ;
}
}
2012-09-21 08:31:21 -04:00
/* check if CA error needs to be ignored */
if ( depth > 0 ) {
2019-02-28 12:10:45 -05:00
if ( ! SSL_SOCK_ST_TO_CA_ERROR ( ctx - > xprt_st ) ) {
ctx - > xprt_st | = SSL_SOCK_CA_ERROR_TO_ST ( err ) ;
ctx - > xprt_st | = SSL_SOCK_CAEDEPTH_TO_ST ( depth ) ;
2012-09-21 09:27:54 -04:00
}
2022-11-10 04:48:58 -05:00
if ( err < = SSL_MAX_VFY_ERROR_CODE & &
2022-11-10 10:45:24 -05:00
cert_ignerr_bitfield_get ( bind_conf - > ca_ignerr_bitfield , err ) )
2022-09-06 13:37:08 -04:00
goto err_ignored ;
2012-09-21 08:31:21 -04:00
2022-09-06 13:37:08 -04:00
/* TODO: for QUIC connection, this error code is lost */
if ( conn )
conn - > err_code = CO_ER_SSL_CA_FAIL ;
2012-09-21 08:31:21 -04:00
return 0 ;
}
2019-02-28 12:10:45 -05:00
if ( ! SSL_SOCK_ST_TO_CRTERROR ( ctx - > xprt_st ) )
ctx - > xprt_st | = SSL_SOCK_CRTERROR_TO_ST ( err ) ;
2012-09-21 09:27:54 -04:00
2012-09-21 08:31:21 -04:00
/* check if certificate error needs to be ignored */
2022-11-10 04:48:58 -05:00
if ( err < = SSL_MAX_VFY_ERROR_CODE & &
2022-11-10 10:45:24 -05:00
cert_ignerr_bitfield_get ( bind_conf - > crt_ignerr_bitfield , err ) )
2022-09-06 13:37:08 -04:00
goto err_ignored ;
2012-09-21 08:31:21 -04:00
2022-09-06 13:37:08 -04:00
/* TODO: for QUIC connection, this error code is lost */
if ( conn )
conn - > err_code = CO_ER_SSL_CRT_FAIL ;
2012-09-21 08:31:21 -04:00
return 0 ;
2022-09-06 13:37:08 -04:00
err_ignored :
ssl_sock_dump_errors ( conn , qc ) ;
ERR_clear_error ( ) ;
return 1 ;
2012-09-21 07:15:06 -04:00
}
2020-05-11 09:51:45 -04:00
#ifdef TLS1_RT_HEARTBEAT
/* Inspects received TLS heartbeat records and kills the connection when a
 * heartbleed (CVE-2014-0160) exploitation attempt is detected.
 */
static void ssl_sock_parse_heartbeat(struct connection *conn, int write_p, int version,
                                     int content_type, const void *buf, size_t len,
                                     SSL *ssl)
{
	struct ssl_sock_ctx *ctx;
	const unsigned char *rec;
	unsigned int payload;

	/* only consider heartbeat records received from the peer (write_p is
	 * set to 0 for a received record) */
	if (content_type != TLS1_RT_HEARTBEAT || write_p != 0)
		return;

	ctx = __conn_get_ssl_sock_ctx(conn);
	rec = buf;
	ctx->xprt_st |= SSL_SOCK_RECV_HEARTBEAT;

	/* Check if this is a CVE-2014-0160 exploitation attempt. */
	if (*rec != TLS1_HB_REQUEST)
		return;

	if (len < 1 + 2 + 16) /* 1 type + 2 size + 0 payload + 16 padding */
		goto kill_it;

	payload = (rec[1] * 256) + rec[2];
	if (3 + payload + 16 <= len)
		return; /* OK no problem */
 kill_it:
	/* We have a clear heartbleed attack (CVE-2014-0160): the advertised
	 * payload is larger than the advertised packet length, so we have
	 * garbage in the buffer between the payload and the end of the
	 * buffer (p+len). We can't know if the SSL stack is patched, and we
	 * don't know if we can safely wipe out the area between p+3+len and
	 * payload. So instead we prevent the response from being sent by
	 * setting max_send_fragment to 0 and we report an SSL error, which
	 * will kill this connection. It will be reported above as
	 * SSL_ERROR_SSL while another handshake failure with a heartbeat
	 * message will be reported as SSL_ERROR_SYSCALL.
	 */
	ssl->max_send_fragment = 0;
	SSLerr(SSL_F_TLS1_HEARTBEAT, SSL_R_SSL_HANDSHAKE_FAILURE);
}
#endif
/* Message callback invoked for every TLS record seen on a connection.
 * It parses a received ClientHello and, when a capture buffer is
 * configured (global_ssl.capture_buffer_size), stores the protocol
 * version, the raw ciphersuite list (and its xxh64 hash), the extension
 * ids, the supported groups (elliptic curves) and the EC point formats
 * into a struct ssl_capture attached to <ssl> via ssl_capture_ptr_index
 * ex_data. On any truncated/inconsistent field the parsing stops; if the
 * capture was already allocated it is still attached with whatever was
 * collected so far (store_capture label).
 */
static void ssl_sock_parse_clienthello(struct connection *conn, int write_p, int version,
                                       int content_type, const void *buf, size_t len,
                                       SSL *ssl)
{
	struct ssl_capture *capture;
	uchar *msg;
	uchar *end;
	uchar *extensions_end;
	uchar *ec_start = NULL;
	uchar *ec_formats_start = NULL;
	uchar *list_end;
	ushort protocol_version;
	ushort extension_id;
	ushort ec_len = 0;
	uchar ec_formats_len = 0;
	int offset = 0;
	int rec_len;

	/* This function is called for "from client" and "to server"
	 * connections. The combination of write_p == 0 and content_type == 22
	 * is only available during "from client" connection.
	 */

	/* "write_p" is set to 0 if the bytes are received messages,
	 * otherwise it is set to 1.
	 */
	if (write_p != 0)
		return;

	/* content_type contains the type of message received or sent
	 * according with the SSL/TLS protocol spec. This message is
	 * encoded with one byte. The value 256 (two bytes) is used
	 * for designing the SSL/TLS record layer. According with the
	 * rfc6101, the expected message (other than 256) are:
	 *  - change_cipher_spec(20)
	 *  - alert(21)
	 *  - handshake(22)
	 *  - application_data(23)
	 *  - (255)
	 * We are interested by the handshake and specially the client
	 * hello.
	 */
	if (content_type != 22)
		return;

	/* The message length is at least 4 bytes, containing the
	 * message type and the message length.
	 */
	if (len < 4)
		return;

	/* First byte of the handshake message is the type of
	 * message. The known types are:
	 *  - hello_request(0)
	 *  - client_hello(1)
	 *  - server_hello(2)
	 *  - certificate(11)
	 *  - server_key_exchange (12)
	 *  - certificate_request(13)
	 *  - server_hello_done(14)
	 * We are interested by the client hello.
	 */
	msg = (unsigned char *)buf;
	if (msg[0] != 1)
		return;

	/* Next three bytes are the length of the message. The total length
	 * must be this decoded length + 4. If the length given as argument
	 * is not the same, we abort the protocol dissector.
	 */
	rec_len = (msg[1] << 16) + (msg[2] << 8) + msg[3];
	if (len < rec_len + 4)
		return;
	msg += 4;
	end = msg + rec_len;
	if (end < msg) /* pointer wrap: bogus length */
		return;

	/* Expect 2 bytes for protocol version
	 * (1 byte for major and 1 byte for minor)
	 */
	if (msg + 2 > end)
		return;
	protocol_version = (msg[0] << 8) + msg[1];
	msg += 2;

	/* Expect the random, composed by 4 bytes for the unix time and
	 * 28 bytes for unix payload. So we jump 4 + 28.
	 */
	msg += 4 + 28;
	if (msg > end)
		return;

	/* Next, is session id:
	 * if present, we have to jump by length + 1 for the size information
	 * if not present, we have to jump by 1 only
	 * NOTE(review): msg[0] is read before the (msg > end) re-check below,
	 * so when msg == end this dereferences one byte past the record --
	 * worth confirming the record buffer always has trailing bytes.
	 */
	if (msg[0] > 0)
		msg += msg[0];
	msg += 1;
	if (msg > end)
		return;

	/* Next two bytes are the ciphersuite length. */
	if (msg + 2 > end)
		return;
	rec_len = (msg[0] << 8) + msg[1];
	msg += 2;
	if (msg + rec_len > end || msg + rec_len < msg)
		return;

	capture = pool_zalloc(pool_head_ssl_capture);
	if (!capture)
		return;

	/* Compute the xxh64 of the ciphersuite. */
	capture->xxh64 = XXH64(msg, rec_len, 0);

	/* Capture the ciphersuite, truncated to the configured buffer size. */
	capture->ciphersuite_len = MIN(global_ssl.capture_buffer_size, rec_len);
	capture->ciphersuite_offset = 0;
	memcpy(capture->data, msg, capture->ciphersuite_len);
	msg += rec_len;
	offset += capture->ciphersuite_len;

	/* Initialize other data */
	capture->protocol_version = protocol_version;

	/* Next, compression methods:
	 * if present, we have to jump by length + 1 for the size information
	 * if not present, we have to jump by 1 only
	 */
	if (msg[0] > 0)
		msg += msg[0];
	msg += 1;
	if (msg > end)
		goto store_capture;

	/* We reached extensions: 2 bytes of total extensions length. */
	if (msg + 2 > end)
		goto store_capture;
	rec_len = (msg[0] << 8) + msg[1];
	msg += 2;
	if (msg + rec_len > end || msg + rec_len < msg)
		goto store_capture;
	extensions_end = msg + rec_len;
	capture->extensions_offset = offset;

	/* Parse each extension: 2 bytes id + 2 bytes length + payload. */
	while (msg + 4 < extensions_end) {
		/* Add 2 bytes of extension_id to the capture buffer, stop
		 * collecting ids once the buffer is full.
		 */
		if (global_ssl.capture_buffer_size >= offset + 2) {
			capture->data[offset++] = msg[0];
			capture->data[offset++] = msg[1];
			capture->extensions_len += 2;
		}
		else
			break;
		extension_id = (msg[0] << 8) + msg[1];
		/* Length of the extension */
		rec_len = (msg[2] << 8) + msg[3];
		/* Expect 2 bytes extension id + 2 bytes extension size */
		msg += 2 + 2;
		if (msg + rec_len > extensions_end || msg + rec_len < msg)
			goto store_capture;
		/* TLS Extensions
		 * https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml */
		if (extension_id == 0x000a) {
			/* Elliptic Curves / supported groups:
			 * https://www.rfc-editor.org/rfc/rfc8422.html
			 * https://www.rfc-editor.org/rfc/rfc7919.html */
			list_end = msg + rec_len;
			if (msg + 2 > list_end)
				goto store_capture;
			rec_len = (msg[0] << 8) + msg[1];
			msg += 2;
			if (msg + rec_len > list_end || msg + rec_len < msg)
				goto store_capture;
			/* Store location/size of the list */
			ec_start = msg;
			ec_len = rec_len;
		}
		else if (extension_id == 0x000b) {
			/* Elliptic Curves Point Formats:
			 * https://www.rfc-editor.org/rfc/rfc8422.html */
			list_end = msg + rec_len;
			if (msg + 1 > list_end)
				goto store_capture;
			rec_len = msg[0];
			msg += 1;
			if (msg + rec_len > list_end || msg + rec_len < msg)
				goto store_capture;
			/* Store location/size of the list */
			ec_formats_start = msg;
			ec_formats_len = rec_len;
		}
		msg += rec_len;
	}

	/* Copy the supported-groups list, truncated to the remaining room. */
	if (ec_start) {
		rec_len = ec_len;
		if (offset + rec_len > global_ssl.capture_buffer_size)
			rec_len = global_ssl.capture_buffer_size - offset;
		memcpy(capture->data + offset, ec_start, rec_len);
		capture->ec_offset = offset;
		capture->ec_len = rec_len;
		offset += rec_len;
	}

	/* Copy the EC point formats list, truncated to the remaining room. */
	if (ec_formats_start) {
		rec_len = ec_formats_len;
		if (offset + rec_len > global_ssl.capture_buffer_size)
			rec_len = global_ssl.capture_buffer_size - offset;
		memcpy(capture->data + offset, ec_formats_start, rec_len);
		capture->ec_formats_offset = offset;
		capture->ec_formats_len = rec_len;
		offset += rec_len;
	}

 store_capture:
	/* <capture> is always allocated here: the gotos only occur after the
	 * pool_zalloc() succeeded.
	 */
	SSL_set_ex_data(ssl, ssl_capture_ptr_index, capture);
}
WIP/MINOR: ssl: add sample fetches for keylog in frontend
OpenSSL 1.1.1 provides a callback registering function
SSL_CTX_set_keylog_callback, which allows one to receive a string
containing the keys to deciphers TLSv1.3.
Unfortunately it is not possible to store this data in binary form and
we can only get this information using the callback. Which means that we
need to store it until the connection is closed.
This patch adds 2 pools: the first one, pool_head_ssl_keylog, is used to
store a struct ssl_keylog which will be inserted as a ex_data in a SSL *.
The second one is pool_head_ssl_keylog_str which will be used to store
the hexadecimal strings.
To enable the capture of the keys, you need to set "tune.ssl.keylog on"
in your configuration.
The following fetches were implemented:
ssl_fc_client_early_traffic_secret,
ssl_fc_client_handshake_traffic_secret,
ssl_fc_server_handshake_traffic_secret,
ssl_fc_client_traffic_secret_0,
ssl_fc_server_traffic_secret_0,
ssl_fc_exporter_secret,
ssl_fc_early_exporter_secret
2020-07-06 05:41:30 -04:00
2021-06-09 10:46:12 -04:00
# ifdef HAVE_SSL_KEYLOG
WIP/MINOR: ssl: add sample fetches for keylog in frontend
OpenSSL 1.1.1 provides a callback registering function
SSL_CTX_set_keylog_callback, which allows one to receive a string
containing the keys to deciphers TLSv1.3.
Unfortunately it is not possible to store this data in binary form and
we can only get this information using the callback. Which means that we
need to store it until the connection is closed.
This patches add 2 pools, the first one, pool_head_ssl_keylog is used to
store a struct ssl_keylog which will be inserted as a ex_data in a SSL *.
The second one is pool_head_ssl_keylog_str which will be used to store
the hexadecimal strings.
To enable the capture of the keys, you need to set "tune.ssl.keylog on"
in your configuration.
The following fetches were implemented:
ssl_fc_client_early_traffic_secret,
ssl_fc_client_handshake_traffic_secret,
ssl_fc_server_handshake_traffic_secret,
ssl_fc_client_traffic_secret_0,
ssl_fc_server_traffic_secret_0,
ssl_fc_exporter_secret,
ssl_fc_early_exporter_secret
2020-07-06 05:41:30 -04:00
/* Message callback which lazily attaches a zeroed struct ssl_keylog to
 * <ssl> (via ssl_keylog_index ex_data) so that the keylog callback has
 * somewhere to store the TLSv1.3 secrets. A structure is allocated at
 * most once per SSL session; allocation or attachment failure is simply
 * ignored (no secrets will be captured for that session).
 */
static void ssl_init_keylog(struct connection *conn, int write_p, int version,
                            int content_type, const void *buf, size_t len,
                            SSL *ssl)
{
	struct ssl_keylog *kl;

	/* already attached on a previous record: nothing to do */
	if (SSL_get_ex_data(ssl, ssl_keylog_index) != NULL)
		return;

	kl = pool_zalloc(pool_head_ssl_keylog);
	if (kl == NULL)
		return;

	/* on attachment failure, give the structure back to the pool */
	if (SSL_set_ex_data(ssl, ssl_keylog_index, kl) == 0)
		pool_free(pool_head_ssl_keylog, kl);
}
# endif
2014-04-25 13:05:36 -04:00
/* OpenSSL message callback used for SSL protocol analysis: it fans the
 * record out to every handler previously registered through
 * ssl_sock_register_msg_callback().
 */
void ssl_sock_msgcbk(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg)
{
	struct ssl_sock_msg_callback *msg_cbk;
	struct connection *conn;

	/* the connection was attached to the SSL object at init time */
	conn = SSL_get_ex_data(ssl, ssl_app_data_index);

	list_for_each_entry(msg_cbk, &ssl_sock_msg_callbacks, list)
		msg_cbk->func(conn, write_p, version, content_type, buf, len, ssl);
}
2018-11-20 17:33:50 -05:00
# if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
/* NPN selection callback for outgoing (server-side) connections: picks a
 * protocol from the server's configured npn_str list. Returns
 * SSL_TLSEXT_ERR_OK on a successful negotiation, SSL_TLSEXT_ERR_NOACK
 * otherwise. <arg> is the struct server set at callback registration.
 */
static int ssl_sock_srv_select_protos(SSL *s, unsigned char **out, unsigned char *outlen,
                                      const unsigned char *in, unsigned int inlen,
                                      void *arg)
{
	struct server *srv = arg;
	int status;

	status = SSL_select_next_proto(out, outlen, in, inlen,
	                               (unsigned char *)srv->ssl_ctx.npn_str,
	                               srv->ssl_ctx.npn_len);
	if (status != OPENSSL_NPN_NEGOTIATED)
		return SSL_TLSEXT_ERR_NOACK;

	return SSL_TLSEXT_ERR_OK;
}
# endif
2018-02-15 07:34:58 -05:00
# if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
2012-10-18 12:57:14 -04:00
/* This callback is used so that the server advertises the list of
 * negotiable protocols for NPN. It simply exposes the protocol list
 * configured on the bind line (<arg> is the struct ssl_bind_conf).
 */
static int ssl_sock_advertise_npn_protos(SSL *s, const unsigned char **data,
                                         unsigned int *len, void *arg)
{
	struct ssl_bind_conf *conf = arg;

	*len = conf->npn_len;
	*data = (const unsigned char *)conf->npn_str;
	return SSL_TLSEXT_ERR_OK;
}
# endif
2014-02-13 06:29:42 -05:00
# ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
2013-04-01 20:30:41 -04:00
/* This callback is used so that the server advertises the list of
 * negotiable protocols for ALPN. It selects a protocol among the ones
 * advertised by the client (<server>/<server_len>) against the list
 * configured on the bind line (<arg> is the struct ssl_bind_conf).
 * For QUIC connections, a failed negotiation additionally raises a
 * no_application_protocol TLS alert and a successful one installs the
 * matching application layer on the quic_conn.
 */
static int ssl_sock_advertise_alpn_protos(SSL *s, const unsigned char **out,
                                          unsigned char *outlen,
                                          const unsigned char *server,
                                          unsigned int server_len, void *arg)
{
	struct ssl_bind_conf *conf = arg;
#ifdef USE_QUIC
	/* NULL for non-QUIC connections */
	struct quic_conn *qc = SSL_get_ex_data(s, ssl_qc_app_data_index);
#endif

	if (SSL_select_next_proto((unsigned char **)out, outlen, (const unsigned char *)conf->alpn_str,
	                          conf->alpn_len, server, server_len) != OPENSSL_NPN_NEGOTIATED) {
#ifdef USE_QUIC
		if (qc)
			quic_set_tls_alert(qc, SSL_AD_NO_APPLICATION_PROTOCOL);
#endif
		return SSL_TLSEXT_ERR_NOACK;
	}

#ifdef USE_QUIC
	/* bind the selected protocol's application ops to the QUIC conn */
	if (qc && !quic_set_app_ops(qc, *out, *outlen)) {
		quic_set_tls_alert(qc, SSL_AD_NO_APPLICATION_PROTOCOL);
		return SSL_TLSEXT_ERR_NOACK;
	}
#endif

	return SSL_TLSEXT_ERR_OK;
}
# endif
2019-05-09 07:26:41 -04:00
#if (HA_OPENSSL_VERSION_NUMBER < 0x1010000fL)
/* OpenSSL < 1.1.0: protocol versions are selected by forcing a
 * version-specific method on the SSL_CTX. <c> tells whether the context
 * is used on the server or the client side. Versions compiled out of
 * the library (SSL_OP_NO_* undefined) are silently ignored.
 */
static void ctx_set_SSLv3_func(SSL_CTX *ctx, set_context_func c)
{
#if SSL_OP_NO_SSLv3
	c == SET_SERVER ? SSL_CTX_set_ssl_version(ctx, SSLv3_server_method())
		: SSL_CTX_set_ssl_version(ctx, SSLv3_client_method());
#endif
}
static void ctx_set_TLSv10_func(SSL_CTX *ctx, set_context_func c) {
	c == SET_SERVER ? SSL_CTX_set_ssl_version(ctx, TLSv1_server_method())
		: SSL_CTX_set_ssl_version(ctx, TLSv1_client_method());
}
static void ctx_set_TLSv11_func(SSL_CTX *ctx, set_context_func c) {
#if SSL_OP_NO_TLSv1_1
	c == SET_SERVER ? SSL_CTX_set_ssl_version(ctx, TLSv1_1_server_method())
		: SSL_CTX_set_ssl_version(ctx, TLSv1_1_client_method());
#endif
}
static void ctx_set_TLSv12_func(SSL_CTX *ctx, set_context_func c) {
#if SSL_OP_NO_TLSv1_2
	c == SET_SERVER ? SSL_CTX_set_ssl_version(ctx, TLSv1_2_server_method())
		: SSL_CTX_set_ssl_version(ctx, TLSv1_2_client_method());
#endif
}
/* TLSv1.2 is the last supported version in this context (no TLSv1.3
 * before OpenSSL 1.1.1), so this is a no-op.
 */
static void ctx_set_TLSv13_func(SSL_CTX *ctx, set_context_func c) { }
/* Per-SSL version selection is unusable in this context: no-ops. */
static void ssl_set_SSLv3_func(SSL *ssl, set_context_func c) { }
static void ssl_set_TLSv10_func(SSL *ssl, set_context_func c) { }
static void ssl_set_TLSv11_func(SSL *ssl, set_context_func c) { }
static void ssl_set_TLSv12_func(SSL *ssl, set_context_func c) { }
static void ssl_set_TLSv13_func(SSL *ssl, set_context_func c) { }
#else /* openssl >= 1.1.0 */
/* OpenSSL >= 1.1.0: protocol versions are selected via the min/max
 * proto version API, on either the SSL_CTX or an individual SSL. <c>
 * tells whether the version is used as the upper (SET_MAX) or lower
 * (SET_MIN) bound.
 */
static void ctx_set_SSLv3_func(SSL_CTX *ctx, set_context_func c) {
	c == SET_MAX ? SSL_CTX_set_max_proto_version(ctx, SSL3_VERSION)
		: SSL_CTX_set_min_proto_version(ctx, SSL3_VERSION);
}
static void ssl_set_SSLv3_func(SSL *ssl, set_context_func c) {
	c == SET_MAX ? SSL_set_max_proto_version(ssl, SSL3_VERSION)
		: SSL_set_min_proto_version(ssl, SSL3_VERSION);
}
static void ctx_set_TLSv10_func(SSL_CTX *ctx, set_context_func c) {
	c == SET_MAX ? SSL_CTX_set_max_proto_version(ctx, TLS1_VERSION)
		: SSL_CTX_set_min_proto_version(ctx, TLS1_VERSION);
}
static void ssl_set_TLSv10_func(SSL *ssl, set_context_func c) {
	c == SET_MAX ? SSL_set_max_proto_version(ssl, TLS1_VERSION)
		: SSL_set_min_proto_version(ssl, TLS1_VERSION);
}
static void ctx_set_TLSv11_func(SSL_CTX *ctx, set_context_func c) {
	c == SET_MAX ? SSL_CTX_set_max_proto_version(ctx, TLS1_1_VERSION)
		: SSL_CTX_set_min_proto_version(ctx, TLS1_1_VERSION);
}
static void ssl_set_TLSv11_func(SSL *ssl, set_context_func c) {
	c == SET_MAX ? SSL_set_max_proto_version(ssl, TLS1_1_VERSION)
		: SSL_set_min_proto_version(ssl, TLS1_1_VERSION);
}
static void ctx_set_TLSv12_func(SSL_CTX *ctx, set_context_func c) {
	c == SET_MAX ? SSL_CTX_set_max_proto_version(ctx, TLS1_2_VERSION)
		: SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION);
}
static void ssl_set_TLSv12_func(SSL *ssl, set_context_func c) {
	c == SET_MAX ? SSL_set_max_proto_version(ssl, TLS1_2_VERSION)
		: SSL_set_min_proto_version(ssl, TLS1_2_VERSION);
}
static void ctx_set_TLSv13_func(SSL_CTX *ctx, set_context_func c) {
/* TLSv1.3 only exists since OpenSSL 1.1.1: no-op before that */
#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
	c == SET_MAX ? SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION)
		: SSL_CTX_set_min_proto_version(ctx, TLS1_3_VERSION);
#endif
}
static void ssl_set_TLSv13_func(SSL *ssl, set_context_func c) {
#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
	c == SET_MAX ? SSL_set_max_proto_version(ssl, TLS1_3_VERSION)
		: SSL_set_min_proto_version(ssl, TLS1_3_VERSION);
#endif
}
#endif
/* no-op setters used for the CONF_TLSV_NONE entry of methodVersions[] */
static void ctx_set_None_func(SSL_CTX *ctx, set_context_func c) { }
static void ssl_set_None_func(SSL *ssl, set_context_func c) { }
2017-05-18 05:56:58 -04:00
2020-05-07 09:20:43 -04:00
/* Table mapping each CONF_* TLS version constant to its SSL_OP_NO_*
 * option flag, its check-protocol (MC_SSL_O_NO_*) flag, the SSL_CTX and
 * per-SSL setter functions defined above, and a human readable name.
 * Indexed by the CONF_TLSV_* / CONF_SSLV3 enum values (see comments).
 */
struct methodVersions methodVersions[] = {
	{0, 0, ctx_set_None_func, ssl_set_None_func, "NONE"},   /* CONF_TLSV_NONE */
	{SSL_OP_NO_SSLv3, MC_SSL_O_NO_SSLV3, ctx_set_SSLv3_func, ssl_set_SSLv3_func, "SSLv3"},    /* CONF_SSLV3 */
	{SSL_OP_NO_TLSv1, MC_SSL_O_NO_TLSV10, ctx_set_TLSv10_func, ssl_set_TLSv10_func, "TLSv1.0"}, /* CONF_TLSV10 */
	{SSL_OP_NO_TLSv1_1, MC_SSL_O_NO_TLSV11, ctx_set_TLSv11_func, ssl_set_TLSv11_func, "TLSv1.1"}, /* CONF_TLSV11 */
	{SSL_OP_NO_TLSv1_2, MC_SSL_O_NO_TLSV12, ctx_set_TLSv12_func, ssl_set_TLSv12_func, "TLSv1.2"}, /* CONF_TLSV12 */
	{SSL_OP_NO_TLSv1_3, MC_SSL_O_NO_TLSV13, ctx_set_TLSv13_func, ssl_set_TLSv13_func, "TLSv1.3"}, /* CONF_TLSV13 */
};
2017-03-01 12:54:56 -05:00
/* Switch the SSL session <ssl> to the context <ctx> selected by SNI:
 * copy the context's verify mode and client CA list onto the session
 * first, then attach the context itself. Order matters: the verify
 * settings must be applied before SSL_set_SSL_CTX().
 */
static void ssl_sock_switchctx_set(SSL *ssl, SSL_CTX *ctx)
{
	SSL_set_verify(ssl, SSL_CTX_get_verify_mode(ctx), ssl_sock_bind_verifycbk);
	SSL_set_client_CA_list(ssl, SSL_dup_CA_list(SSL_CTX_get_client_CA_list(ctx)));
	SSL_set_SSL_CTX(ssl, ctx);
}
2023-11-23 10:35:52 -05:00
/*
 * Return the right sni_ctx for a <bind_conf> and a chosen <servername> (must be in lowercase)
 * RSA <have_rsa_sig> and ECDSA <have_ecdsa_sig> capabilities of the client can also be used.
 *
 * This function does a lookup in the bind_conf sni tree so the caller should lock its tree.
 *
 * NOTE(review): the exact-name lookup uses trash.area, so the caller is
 * expected to have copied the lowercased servername into the trash
 * buffer beforehand -- confirm against callers.
 */
struct sni_ctx *ssl_sock_chose_sni_ctx(struct bind_conf *s, const char *servername,
                                       int have_rsa_sig, int have_ecdsa_sig)
{
	struct ebmb_node *node, *n, *node_ecdsa = NULL, *node_rsa = NULL, *node_anonymous = NULL;
	const char *wildp = NULL;
	int i;

	/* look for the first dot for wildcard search */
	for (i = 0; servername[i] != '\0'; i++) {
		if (servername[i] == '.') {
			wildp = &servername[i];
			break;
		}
	}

	/* if the servername is empty look for the default in the wildcard list */
	if (!*servername)
		wildp = servername;

	/* Look for an ECDSA, RSA and DSA certificate, first in the single
	 * name and if not found in the wildcard */
	for (i = 0; i < 2; i++) {
		if (i == 0) /* lookup in full qualified names */
			node = ebst_lookup(&s->sni_ctx, trash.area);
		else if (i == 1 && wildp) /* lookup in wildcards names */
			node = ebst_lookup(&s->sni_w_ctx, wildp);
		else
			break;

		/* walk all duplicates sharing the matched name */
		for (n = node; n; n = ebmb_next_dup(n)) {
			/* lookup a not neg filter */
			if (!container_of(n, struct sni_ctx, name)->neg) {
				struct sni_ctx *sni, *sni_tmp;
				int skip = 0;

				if (i == 1 && wildp) { /* wildcard */
					/* If this is a wildcard, look for an exclusion on the same crt-list line */
					sni = container_of(n, struct sni_ctx, name);
					list_for_each_entry(sni_tmp, &sni->ckch_inst->sni_ctx, by_ckch_inst) {
						if (sni_tmp->neg && (strcmp((const char *)sni_tmp->name.key, trash.area) == 0)) {
							skip = 1;
							break;
						}
					}
					if (skip)
						continue;
				}

				/* remember the first candidate of each key type */
				switch (container_of(n, struct sni_ctx, name)->kinfo.sig) {
				case TLSEXT_signature_ecdsa:
					if (!node_ecdsa)
						node_ecdsa = n;
					break;
				case TLSEXT_signature_rsa:
					if (!node_rsa)
						node_rsa = n;
					break;
				default: /* TLSEXT_signature_anonymous|dsa */
					if (!node_anonymous)
						node_anonymous = n;
					break;
				}
			}
		}
	}

	/* Once the certificates are found, select them depending on what is
	 * supported in the client and by key_signature priority order: ECDSA >
	 * RSA > DSA. The last fallback may leave node NULL when no RSA
	 * candidate exists, in which case NULL is returned. */
	if (have_ecdsa_sig && node_ecdsa)
		node = node_ecdsa;
	else if (have_rsa_sig && node_rsa)
		node = node_rsa;
	else if (node_anonymous)
		node = node_anonymous;
	else if (node_ecdsa)
		node = node_ecdsa; /* no ecdsa signature case (< TLSv1.2) */
	else
		node = node_rsa; /* no rsa signature case (far far away) */

	if (node)
		return container_of(node, struct sni_ctx, name);
	return NULL;
}
2021-01-22 14:09:14 -05:00
# ifdef HAVE_SSL_CLIENT_HELLO_CB
2017-02-20 10:11:50 -05:00
2020-11-23 05:19:04 -05:00
/* servername TLS extension error callback: NOACK the extension when the
 * client sent no servername and the listener does not generate
 * certificates on the fly; otherwise accept. <priv> is the bind_conf
 * set at callback registration.
 */
int ssl_sock_switchctx_err_cbk(SSL *ssl, int *al, void *priv)
{
	struct bind_conf *s = priv;

	(void)al; /* shut gcc stupid warning */

	if (!SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name) &&
	    !(s->options & BC_O_GENERATE_CERTS))
		return SSL_TLSEXT_ERR_NOACK;

	return SSL_TLSEXT_ERR_OK;
}
2017-08-16 05:33:17 -04:00
# ifdef OPENSSL_IS_BORINGSSL
2020-11-23 05:19:04 -05:00
int ssl_sock_switchctx_cbk ( const struct ssl_early_callback_ctx * ctx )
2017-02-20 10:11:50 -05:00
{
2017-08-16 05:28:44 -04:00
SSL * ssl = ctx - > ssl ;
2017-08-16 05:33:17 -04:00
# else
2020-11-23 05:19:04 -05:00
int ssl_sock_switchctx_cbk ( SSL * ssl , int * al , void * arg )
2017-08-16 05:33:17 -04:00
{
# endif
2022-01-19 04:03:30 -05:00
struct connection * conn = SSL_get_ex_data ( ssl , ssl_app_data_index ) ;
# ifdef USE_QUIC
struct quic_conn * qc = SSL_get_ex_data ( ssl , ssl_qc_app_data_index ) ;
# endif /* USE_QUIC */
struct bind_conf * s = NULL ;
2017-02-20 10:11:50 -05:00
const uint8_t * extension_data ;
size_t extension_len ;
2018-09-03 10:29:16 -04:00
int has_rsa_sig = 0 , has_ecdsa_sig = 0 ;
2023-11-23 11:54:47 -05:00
struct sni_ctx * sni_ctx ;
2023-11-24 13:20:28 -05:00
const char * servername ;
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
size_t servername_len = 0 ;
int default_lookup = 0 ; /* did we lookup for a default yet? */
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
int allow_early = 0 ;
2017-02-20 10:11:50 -05:00
int i ;
2022-01-19 04:03:30 -05:00
if ( conn )
s = __objt_listener ( conn - > target ) - > bind_conf ;
# ifdef USE_QUIC
else if ( qc )
s = qc - > li - > bind_conf ;
# endif /* USE_QUIC */
2022-01-24 05:04:05 -05:00
if ( ! s ) {
/* must never happen */
ABORT_NOW ( ) ;
return 0 ;
}
2017-02-20 10:11:50 -05:00
2020-11-23 09:37:11 -05:00
# ifdef USE_QUIC
2022-01-19 04:03:30 -05:00
if ( qc ) {
2020-11-23 09:37:11 -05:00
/* Look for the QUIC transport parameters. */
# ifdef OPENSSL_IS_BORINGSSL
2022-01-19 04:03:30 -05:00
if ( ! SSL_early_callback_ctx_extension_get ( ctx , qc - > tps_tls_ext ,
2020-11-23 09:37:11 -05:00
& extension_data , & extension_len ) )
# else
2022-01-19 04:03:30 -05:00
if ( ! SSL_client_hello_get0_ext ( ssl , qc - > tps_tls_ext ,
2020-11-23 09:37:11 -05:00
& extension_data , & extension_len ) )
# endif
2021-11-22 09:55:16 -05:00
{
/* This is not redundant. It we only return 0 without setting
* < * al > , this has as side effect to generate another TLS alert
* which would be set after calling quic_set_tls_alert ( ) .
*/
* al = SSL_AD_MISSING_EXTENSION ;
2022-01-19 04:03:30 -05:00
quic_set_tls_alert ( qc , SSL_AD_MISSING_EXTENSION ) ;
2021-11-22 09:55:16 -05:00
return 0 ;
}
2020-11-23 09:37:11 -05:00
2022-01-19 04:03:30 -05:00
if ( ! quic_transport_params_store ( qc , 0 , extension_data ,
2023-02-01 11:56:57 -05:00
extension_data + extension_len ) )
2020-11-23 09:37:11 -05:00
goto abort ;
2023-02-01 11:56:57 -05:00
qc - > flags | = QUIC_FL_CONN_TX_TP_RECEIVED ;
2020-11-23 09:37:11 -05:00
}
2022-01-19 04:03:30 -05:00
# endif /* USE_QUIC */
2020-11-23 09:37:11 -05:00
2017-10-27 08:58:08 -04:00
if ( s - > ssl_conf . early_data )
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
allow_early = 1 ;
2017-08-16 05:33:17 -04:00
# ifdef OPENSSL_IS_BORINGSSL
2017-02-20 10:11:50 -05:00
if ( SSL_early_callback_ctx_extension_get ( ctx , TLSEXT_TYPE_server_name ,
& extension_data , & extension_len ) ) {
2017-08-16 05:33:17 -04:00
# else
if ( SSL_client_hello_get0_ext ( ssl , TLSEXT_TYPE_server_name , & extension_data , & extension_len ) ) {
# endif
2017-08-16 05:28:44 -04:00
/*
* The server_name extension was given too much extensibility when it
* was written , so parsing the normal case is a bit complex .
*/
size_t len ;
if ( extension_len < = 2 )
2017-02-20 10:11:50 -05:00
goto abort ;
2017-08-16 05:28:44 -04:00
/* Extract the length of the supplied list of names. */
len = ( * extension_data + + ) < < 8 ;
len | = * extension_data + + ;
if ( len + 2 ! = extension_len )
goto abort ;
/*
* The list in practice only has a single element , so we only consider
* the first one .
*/
if ( len = = 0 | | * extension_data + + ! = TLSEXT_NAMETYPE_host_name )
goto abort ;
extension_len = len - 1 ;
/* Now we can finally pull out the byte array with the actual hostname. */
if ( extension_len < = 2 )
goto abort ;
len = ( * extension_data + + ) < < 8 ;
len | = * extension_data + + ;
if ( len = = 0 | | len + 2 > extension_len | | len > TLSEXT_MAXLEN_host_name
| | memchr ( extension_data , 0 , len ) ! = NULL )
goto abort ;
2023-11-24 13:20:28 -05:00
servername = ( char * ) extension_data ;
2017-08-16 05:28:44 -04:00
servername_len = len ;
2017-02-20 10:11:50 -05:00
} else {
2017-08-14 05:01:25 -04:00
# if (!defined SSL_NO_GENERATE_CERTIFICATES)
2022-05-20 10:03:18 -04:00
if ( s - > options & BC_O_GENERATE_CERTS & & ssl_sock_generate_certificate_from_conn ( s , ssl ) ) {
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
goto allow_early ;
2017-08-14 05:01:25 -04:00
}
# endif
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
choose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduced in the next patch. So the first certificate of a bind
line won't be used as a default anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
/* no servername field is not compatible with strict-sni */
if ( s - > strict_sni )
goto abort ;
/* without servername extension, look for the defaults which is
* defined by an empty servername string */
servername = " " ;
servername_len = 0 ;
default_lookup = 1 ;
2017-02-20 10:11:50 -05:00
}
2020-03-10 03:06:11 -04:00
/* extract/check clientHello information */
2017-08-16 05:33:17 -04:00
# ifdef OPENSSL_IS_BORINGSSL
2017-02-20 10:11:50 -05:00
if ( SSL_early_callback_ctx_extension_get ( ctx , TLSEXT_TYPE_signature_algorithms , & extension_data , & extension_len ) ) {
2017-08-16 05:33:17 -04:00
# else
if ( SSL_client_hello_get0_ext ( ssl , TLSEXT_TYPE_signature_algorithms , & extension_data , & extension_len ) ) {
# endif
2017-08-16 05:28:44 -04:00
uint8_t sign ;
size_t len ;
if ( extension_len < 2 )
2017-02-20 10:11:50 -05:00
goto abort ;
2017-08-16 05:28:44 -04:00
len = ( * extension_data + + ) < < 8 ;
len | = * extension_data + + ;
if ( len + 2 ! = extension_len )
2017-02-20 10:11:50 -05:00
goto abort ;
2017-08-16 05:28:44 -04:00
if ( len % 2 ! = 0 )
goto abort ;
for ( ; len > 0 ; len - = 2 ) {
extension_data + + ; /* hash */
sign = * extension_data + + ;
2017-02-20 10:11:50 -05:00
switch ( sign ) {
case TLSEXT_signature_rsa :
2018-09-03 10:29:16 -04:00
has_rsa_sig = 1 ;
2017-02-20 10:11:50 -05:00
break ;
case TLSEXT_signature_ecdsa :
has_ecdsa_sig = 1 ;
break ;
default :
continue ;
}
2018-09-03 10:29:16 -04:00
if ( has_ecdsa_sig & & has_rsa_sig )
2017-02-20 10:11:50 -05:00
break ;
}
} else {
2018-08-13 19:56:13 -04:00
/* without TLSEXT_TYPE_signature_algorithms extension (< TLSv1.2) */
2018-09-03 10:29:16 -04:00
has_rsa_sig = 1 ;
2017-02-20 10:11:50 -05:00
}
if ( has_ecdsa_sig ) { /* in very rare case: has ecdsa sign but not a ECDSA cipher */
2017-08-16 05:28:44 -04:00
const SSL_CIPHER * cipher ;
2023-10-24 17:58:02 -04:00
uint32_t cipher_id ;
2017-08-16 05:28:44 -04:00
size_t len ;
const uint8_t * cipher_suites ;
2018-09-03 10:29:16 -04:00
has_ecdsa_sig = 0 ;
2017-08-16 05:33:17 -04:00
# ifdef OPENSSL_IS_BORINGSSL
2017-08-16 05:28:44 -04:00
len = ctx - > cipher_suites_len ;
cipher_suites = ctx - > cipher_suites ;
2017-08-16 05:33:17 -04:00
# else
len = SSL_client_hello_get0_ciphers ( ssl , & cipher_suites ) ;
# endif
2017-08-16 05:28:44 -04:00
if ( len % 2 ! = 0 )
goto abort ;
for ( ; len ! = 0 ; len - = 2 , cipher_suites + = 2 ) {
2017-08-16 05:33:17 -04:00
# ifdef OPENSSL_IS_BORINGSSL
2017-08-16 05:28:44 -04:00
uint16_t cipher_suite = ( cipher_suites [ 0 ] < < 8 ) | cipher_suites [ 1 ] ;
2017-02-20 10:11:50 -05:00
cipher = SSL_get_cipher_by_value ( cipher_suite ) ;
2017-08-16 05:33:17 -04:00
# else
cipher = SSL_CIPHER_find ( ssl , cipher_suites ) ;
# endif
2023-10-30 13:08:16 -04:00
if ( ! cipher )
continue ;
2023-10-24 17:58:02 -04:00
cipher_id = SSL_CIPHER_get_id ( cipher ) ;
/* skip the SCSV "fake" signaling ciphersuites because they are NID_auth_any (RFC 7507) */
if ( cipher_id = = SSL3_CK_SCSV | | cipher_id = = SSL3_CK_FALLBACK_SCSV )
continue ;
2023-10-30 13:08:16 -04:00
if ( SSL_CIPHER_get_auth_nid ( cipher ) = = NID_auth_ecdsa
| | SSL_CIPHER_get_auth_nid ( cipher ) = = NID_auth_any ) {
2018-09-03 10:29:16 -04:00
has_ecdsa_sig = 1 ;
2017-02-20 10:11:50 -05:00
break ;
}
}
}
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
sni_lookup :
2023-11-23 11:54:47 -05:00
/* we need to transform this into a NULL-terminated string in lowercase */
for ( i = 0 ; i < trash . size & & i < servername_len ; i + + )
2018-07-13 04:54:26 -04:00
trash . area [ i ] = tolower ( servername [ i ] ) ;
trash . area [ i ] = 0 ;
2017-02-20 10:11:50 -05:00
2019-09-19 11:12:49 -04:00
HA_RWLOCK_RDLOCK ( SNI_LOCK , & s - > sni_lock ) ;
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
sni_ctx = ssl_sock_chose_sni_ctx ( s , trash . area , has_rsa_sig , has_ecdsa_sig ) ;
2023-11-23 11:54:47 -05:00
if ( sni_ctx ) {
2020-08-14 08:43:35 -04:00
/* switch ctx */
2023-11-23 11:54:47 -05:00
struct ssl_bind_conf * conf = sni_ctx - > conf ;
ssl_sock_switchctx_set ( ssl , sni_ctx - > ctx ) ;
2020-08-14 08:43:35 -04:00
if ( conf ) {
methodVersions [ conf - > ssl_methods . min ] . ssl_set_version ( ssl , SET_MIN ) ;
methodVersions [ conf - > ssl_methods . max ] . ssl_set_version ( ssl , SET_MAX ) ;
if ( conf - > early_data )
allow_early = 1 ;
2019-11-06 10:05:34 -05:00
}
2020-08-14 08:43:35 -04:00
HA_RWLOCK_RDUNLOCK ( SNI_LOCK , & s - > sni_lock ) ;
goto allow_early ;
2017-02-20 10:11:50 -05:00
}
2019-09-19 11:12:49 -04:00
2019-10-18 05:02:19 -04:00
HA_RWLOCK_RDUNLOCK ( SNI_LOCK , & s - > sni_lock ) ;
2017-08-14 05:01:25 -04:00
# if (!defined SSL_NO_GENERATE_CERTIFICATES)
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
if ( s - > options & BC_O_GENERATE_CERTS & & ssl_sock_generate_certificate ( trash . area , s , ssl ) ) {
2017-08-14 05:01:25 -04:00
/* switch ctx done in ssl_sock_generate_certificate */
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
goto allow_early ;
2017-08-14 05:01:25 -04:00
}
# endif
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
if ( ! s - > strict_sni & & ! default_lookup ) {
/* we didn't find a SNI, and we didn't look for a default
* look again to find a matching default cert */
servername = " " ;
servername_len = 0 ;
default_lookup = 1 ;
goto sni_lookup ;
2017-03-06 09:34:44 -05:00
}
2021-11-18 11:46:26 -05:00
2022-01-07 11:12:01 -05:00
/* We are about to raise an handshake error so the servername extension
* callback will never be called and the SNI will never be stored in the
* SSL context . In order for the ssl_fc_sni sample fetch to still work
* in such a case , we store the SNI ourselves as an ex_data information
* in the SSL context .
*/
{
char * client_sni = pool_alloc ( ssl_sock_client_sni_pool ) ;
if ( client_sni ) {
2023-11-24 13:20:28 -05:00
strncpy ( client_sni , servername , TLSEXT_MAXLEN_host_name ) ;
2022-01-07 11:12:01 -05:00
client_sni [ TLSEXT_MAXLEN_host_name ] = ' \0 ' ;
SSL_set_ex_data ( ssl , ssl_client_sni_index , client_sni ) ;
}
}
2021-11-18 11:46:26 -05:00
/* other cases fallback on abort, if strict-sni is set but no node was found */
2017-02-20 10:11:50 -05:00
abort :
/* abort handshake (was SSL_TLSEXT_ERR_ALERT_FATAL) */
2022-01-19 04:03:30 -05:00
if ( conn )
conn - > err_code = CO_ER_SSL_HANDSHAKE ;
2017-08-16 05:33:17 -04:00
# ifdef OPENSSL_IS_BORINGSSL
2017-08-16 05:28:44 -04:00
return ssl_select_cert_error ;
2017-08-16 05:33:17 -04:00
# else
* al = SSL_AD_UNRECOGNIZED_NAME ;
return 0 ;
# endif
2021-11-18 11:46:26 -05:00
allow_early :
# ifdef OPENSSL_IS_BORINGSSL
if ( allow_early )
SSL_set_early_data_enabled ( ssl , 1 ) ;
# else
if ( ! allow_early )
SSL_set_max_early_data ( ssl , 0 ) ;
# endif
return 1 ;
2017-02-20 10:11:50 -05:00
}
2021-11-18 09:25:16 -05:00
# else /* ! HAVE_SSL_CLIENT_HELLO_CB */
2017-02-20 10:11:50 -05:00
2012-09-07 11:30:07 -04:00
/* SNI callback: sets the SSL ctx of <ssl> to match the advertised server
 * name. Used when the ClientHello callback is not available (see the
 * enclosing #else of HAVE_SSL_CLIENT_HELLO_CB). Looks the lowercased name
 * up in the bind_conf's exact-match tree, then in the wildcard tree, and
 * retries once with an empty name to find a default certificate. Returns
 * SSL_TLSEXT_ERR_OK on a context switch, SSL_TLSEXT_ERR_ALERT_FATAL when
 * no match is found (or strict-sni rejects the missing extension), and
 * SSL_TLSEXT_ERR_NOACK on QUIC transport-parameter errors.
 */
2022-09-02 09:27:32 -04:00
int ssl_sock_switchctx_cbk ( SSL * ssl , int * al , void * priv )
2012-09-07 11:30:07 -04:00
{
const char * servername ;
const char * wildp = NULL ; /* position of the first '.' for the wildcard lookup */
2013-05-07 14:20:06 -04:00
struct ebmb_node * node , * n ;
2017-01-13 11:48:18 -05:00
struct bind_conf * s = priv ;
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
int default_lookup = 0 ; /* did we lookup for a default yet? */
2022-09-07 05:21:34 -04:00
# ifdef USE_QUIC
const uint8_t * extension_data ;
size_t extension_len ;
struct quic_conn * qc = SSL_get_ex_data ( ssl , ssl_qc_app_data_index ) ;
# endif /* USE_QUIC */
2012-09-07 11:30:07 -04:00
int i ;
( void ) al ; /* shut gcc stupid warning */
2022-09-07 05:21:34 -04:00
# ifdef USE_QUIC
if ( qc ) {
/* Look for the QUIC transport parameters. */
SSL_get_peer_quic_transport_params ( ssl , & extension_data , & extension_len ) ;
if ( extension_len = = 0 ) {
/* This is not redundant. If we only return 0 without setting
* < * al > , this has as side effect to generate another TLS alert
* which would be set after calling quic_set_tls_alert ( ) .
*/
* al = SSL_AD_MISSING_EXTENSION ;
quic_set_tls_alert ( qc , SSL_AD_MISSING_EXTENSION ) ;
return SSL_TLSEXT_ERR_NOACK ;
}
if ( ! quic_transport_params_store ( qc , 0 , extension_data ,
2023-02-01 11:56:57 -05:00
extension_data + extension_len ) )
2022-09-07 05:21:34 -04:00
return SSL_TLSEXT_ERR_NOACK ;
2023-02-01 11:56:57 -05:00
qc - > flags | = QUIC_FL_CONN_TX_TP_RECEIVED ;
2022-09-07 05:21:34 -04:00
}
# endif /* USE_QUIC */
2012-09-07 11:30:07 -04:00
servername = SSL_get_servername ( ssl , TLSEXT_NAMETYPE_host_name ) ;
2013-01-24 11:17:15 -05:00
if ( ! servername ) {
2017-01-13 11:48:18 -05:00
# if (!defined SSL_NO_GENERATE_CERTIFICATES)
2022-05-20 10:03:18 -04:00
if ( s - > options & BC_O_GENERATE_CERTS & & ssl_sock_generate_certificate_from_conn ( s , ssl ) )
2017-08-14 05:01:25 -04:00
return SSL_TLSEXT_ERR_OK ;
2017-01-13 11:48:18 -05:00
# endif
2017-03-06 09:34:44 -05:00
/* no servername extension at all: refuse under strict-sni */
if ( s - > strict_sni )
return SSL_TLSEXT_ERR_ALERT_FATAL ;
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
/* without servername extension, look for the defaults which is
* defined by an empty servername string */
servername = " " ;
default_lookup = 1 ;
2013-01-24 11:17:15 -05:00
}
2012-09-07 11:30:07 -04:00
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
/* lookup entry point; jumped back to with an empty servername when
* retrying for the default certificate */
sni_lookup :
2012-10-29 11:51:55 -04:00
/* copy the servername into trash, lowercased and NUL-terminated,
* remembering where the first dot is for the wildcard lookup */
for ( i = 0 ; i < trash . size ; i + + ) {
2012-09-07 11:30:07 -04:00
if ( ! servername [ i ] )
break ;
2020-07-05 15:46:32 -04:00
trash . area [ i ] = tolower ( ( unsigned char ) servername [ i ] ) ;
2018-07-13 04:54:26 -04:00
if ( ! wildp & & ( trash . area [ i ] = = ' . ' ) )
wildp = & trash . area [ i ] ;
2012-09-07 11:30:07 -04:00
}
2018-07-13 04:54:26 -04:00
trash . area [ i ] = 0 ;
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
if ( ! * trash . area ) /* handle the default, which is stored in the wildcard tree */
wildp = trash . area ;
2012-09-07 11:30:07 -04:00
2019-09-19 11:12:49 -04:00
HA_RWLOCK_RDLOCK ( SNI_LOCK , & s - > sni_lock ) ;
2019-11-04 09:49:46 -05:00
node = NULL ;
2012-09-07 11:30:07 -04:00
/* lookup in fully qualified names */
2019-11-04 09:49:46 -05:00
for ( n = ebst_lookup ( & s - > sni_ctx , trash . area ) ; n ; n = ebmb_next_dup ( n ) ) {
/* take the first filter that is not negated */
2013-05-07 14:20:06 -04:00
if ( ! container_of ( n , struct sni_ctx , name ) - > neg ) {
node = n ;
break ;
2013-01-24 11:17:15 -05:00
}
2013-05-07 14:20:06 -04:00
}
if ( ! node & & wildp ) {
/* lookup in wildcards names */
2019-11-04 09:49:46 -05:00
for ( n = ebst_lookup ( & s - > sni_w_ctx , wildp ) ; n ; n = ebmb_next_dup ( n ) ) {
/* take the first filter that is not negated */
if ( ! container_of ( n , struct sni_ctx , name ) - > neg ) {
node = n ;
break ;
}
}
2013-05-07 14:20:06 -04:00
}
2019-11-04 09:49:46 -05:00
if ( ! node ) {
2017-01-13 11:48:18 -05:00
# if (!defined SSL_NO_GENERATE_CERTIFICATES)
2022-05-20 10:03:18 -04:00
if ( s - > options & BC_O_GENERATE_CERTS & & ssl_sock_generate_certificate ( servername , s , ssl ) ) {
2017-08-14 05:01:25 -04:00
/* switch ctx done in ssl_sock_generate_certificate */
2019-09-19 11:12:49 -04:00
HA_RWLOCK_RDUNLOCK ( SNI_LOCK , & s - > sni_lock ) ;
2015-06-09 11:29:50 -04:00
return SSL_TLSEXT_ERR_OK ;
}
2017-01-13 11:48:18 -05:00
# endif
2019-11-04 11:56:13 -05:00
HA_RWLOCK_RDUNLOCK ( SNI_LOCK , & s - > sni_lock ) ;
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
if ( ! s - > strict_sni & & ! default_lookup ) {
/* we didn't find a SNI, and we didn't look for a default
* look again to find a matching default cert */
servername = " " ;
default_lookup = 1 ;
goto sni_lookup ;
}
return SSL_TLSEXT_ERR_ALERT_FATAL ;
2012-09-07 11:30:07 -04:00
}
2024-01-23 08:45:25 -05:00
# if defined(OPENSSL_IS_AWSLC)
/* Note that ssl_sock_switchctx_set() calls SSL_set_SSL_CTX() which propagates the
* " early data enabled " setting from the SSL_CTX object to the SSL objects .
* So enable early data for this SSL_CTX context if configured .
*/
if ( s - > ssl_conf . early_data )
SSL_CTX_set_early_data_enabled ( container_of ( node , struct sni_ctx , name ) - > ctx , 1 ) ;
# endif
2012-09-07 11:30:07 -04:00
/* switch ctx */
2017-03-01 12:54:56 -05:00
ssl_sock_switchctx_set ( ssl , container_of ( node , struct sni_ctx , name ) - > ctx ) ;
2019-09-19 11:12:49 -04:00
HA_RWLOCK_RDUNLOCK ( SNI_LOCK , & s - > sni_lock ) ;
2012-09-07 11:30:07 -04:00
return SSL_TLSEXT_ERR_OK ;
}
2017-02-20 10:11:50 -05:00
# endif /* (!) OPENSSL_IS_BORINGSSL */
2012-09-07 11:30:07 -04:00
2023-12-08 05:33:03 -05:00
# if defined(USE_OPENSSL_WOLFSSL)
2023-11-16 12:16:53 -05:00
/* This implement the equivalent of the clientHello Callback but using the cert_cb.
* WolfSSL is able to extract the sigalgs and ciphers of the client by using the API
* provided in https : //github.com/wolfSSL/wolfssl/pull/6963
*
* Not activated for now since the PR is not merged .
*/
static int ssl_sock_switchctx_wolfSSL_cbk ( WOLFSSL * ssl , void * arg )
{
struct bind_conf * s = arg ;
int has_rsa_sig = 0 , has_ecdsa_sig = 0 ;
const char * servername ;
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
int default_lookup = 0 ;
2023-11-23 10:35:52 -05:00
struct sni_ctx * sni_ctx ;
2023-11-16 12:16:53 -05:00
int i ;
if ( ! s ) {
/* must never happen */
ABORT_NOW ( ) ;
return 0 ;
}
servername = SSL_get_servername ( ssl , TLSEXT_NAMETYPE_host_name ) ;
if ( ! servername ) {
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
if ( s - > strict_sni )
goto abort ;
/* without servername extension, look for the defaults which is
* defined by an empty servername string */
servername = " " ;
default_lookup = 1 ;
2023-11-16 12:16:53 -05:00
}
/* extract sigalgs and ciphers */
{
const byte * suites = NULL ;
word16 suiteSz = 0 ;
const byte * hashSigAlgo = NULL ;
word16 hashSigAlgoSz = 0 ;
word16 idx = 0 ;
wolfSSL_get_client_suites_sigalgs ( ssl , & suites , & suiteSz , & hashSigAlgo , & hashSigAlgoSz ) ;
if ( suites = = NULL | | suiteSz = = 0 | | hashSigAlgo = = NULL | | hashSigAlgoSz = = 0 )
return 0 ;
if ( SSL_version ( ssl ) ! = TLS1_3_VERSION ) {
for ( idx = 0 ; idx < suiteSz ; idx + = 2 ) {
WOLFSSL_CIPHERSUITE_INFO info ;
info = wolfSSL_get_ciphersuite_info ( suites [ idx ] , suites [ idx + 1 ] ) ;
if ( info . rsaAuth )
has_rsa_sig = 1 ;
else if ( info . eccAuth )
has_ecdsa_sig = 1 ;
}
}
if ( hashSigAlgoSz > 0 ) {
/* sigalgs extension takes precedence over ciphersuites */
has_ecdsa_sig = 0 ;
has_rsa_sig = 0 ;
}
for ( idx = 0 ; idx < hashSigAlgoSz ; idx + = 2 ) {
2023-12-08 05:55:15 -05:00
int hashAlgo ;
int sigAlgo ;
2023-11-16 12:16:53 -05:00
wolfSSL_get_sigalg_info ( hashSigAlgo [ idx + 0 ] , hashSigAlgo [ idx + 1 ] , & hashAlgo , & sigAlgo ) ;
if ( sigAlgo = = RSAk | | sigAlgo = = RSAPSSk )
has_rsa_sig = 1 ;
else if ( sigAlgo = = ECDSAk )
has_ecdsa_sig = 1 ;
}
}
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
sni_lookup :
2023-11-23 10:35:52 -05:00
/* we need to transform this into a NULL-ended string in lowecase */
for ( i = 0 ; i < trash . size & & servername [ i ] ! = ' \0 ' ; i + + )
trash . area [ i ] = tolower ( servername [ i ] ) ;
2023-11-16 12:16:53 -05:00
trash . area [ i ] = 0 ;
2023-11-24 13:20:28 -05:00
servername = trash . area ;
2023-11-16 12:16:53 -05:00
HA_RWLOCK_RDLOCK ( SNI_LOCK , & s - > sni_lock ) ;
2023-11-23 10:35:52 -05:00
sni_ctx = ssl_sock_chose_sni_ctx ( s , servername , has_rsa_sig , has_ecdsa_sig ) ;
if ( sni_ctx ) {
2023-11-16 12:16:53 -05:00
/* switch ctx */
2023-11-23 10:35:52 -05:00
struct ssl_bind_conf * conf = sni_ctx - > conf ;
ssl_sock_switchctx_set ( ssl , sni_ctx - > ctx ) ;
2023-11-16 12:16:53 -05:00
if ( conf ) {
methodVersions [ conf - > ssl_methods . min ] . ssl_set_version ( ssl , SET_MIN ) ;
methodVersions [ conf - > ssl_methods . max ] . ssl_set_version ( ssl , SET_MAX ) ;
}
HA_RWLOCK_RDUNLOCK ( SNI_LOCK , & s - > sni_lock ) ;
goto allow_early ;
}
HA_RWLOCK_RDUNLOCK ( SNI_LOCK , & s - > sni_lock ) ;
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
if ( ! s - > strict_sni & & ! default_lookup ) {
/* we didn't find a SNI, and we didn't look for a default
* look again to find a matching default cert */
servername = " " ;
default_lookup = 1 ;
goto sni_lookup ;
2023-11-16 12:16:53 -05:00
}
/* We are about to raise an handshake error so the servername extension
* callback will never be called and the SNI will never be stored in the
* SSL context . In order for the ssl_fc_sni sample fetch to still work
* in such a case , we store the SNI ourselves as an ex_data information
* in the SSL context .
*/
{
char * client_sni = pool_alloc ( ssl_sock_client_sni_pool ) ;
if ( client_sni ) {
2023-11-24 13:20:28 -05:00
strncpy ( client_sni , servername , TLSEXT_MAXLEN_host_name ) ;
2023-11-16 12:16:53 -05:00
client_sni [ TLSEXT_MAXLEN_host_name ] = ' \0 ' ;
SSL_set_ex_data ( ssl , ssl_client_sni_index , client_sni ) ;
}
}
/* other cases fallback on abort, if strict-sni is set but no node was found */
abort :
/* abort handshake (was SSL_TLSEXT_ERR_ALERT_FATAL) */
return 0 ;
allow_early :
return 1 ;
}
# endif
2012-09-20 10:19:02 -04:00
# ifndef OPENSSL_NO_DH
2014-06-12 08:58:40 -04:00
2022-02-11 06:04:52 -05:00
/* Build a DH parameters object out of the prime <p> and generator <g>.
 * Ownership of <p> and <g> is transferred to this function in both branches:
 * with OpenSSL >= 3.0 they are freed here after the EVP_PKEY is built, while
 * with older versions DH_set0_pqg() consumes them.
 * Returns NULL on failure.
 */
static inline HASSL_DH *ssl_new_dh_fromdata(BIGNUM *p, BIGNUM *g)
{
#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
	OSSL_PARAM_BLD *bld = NULL;
	OSSL_PARAM *params = NULL;
	EVP_PKEY_CTX *pctx = NULL;
	EVP_PKEY *pkey = NULL;

	bld = OSSL_PARAM_BLD_new();
	if (bld == NULL)
		goto out;
	if (!OSSL_PARAM_BLD_push_BN(bld, OSSL_PKEY_PARAM_FFC_P, p) ||
	    !OSSL_PARAM_BLD_push_BN(bld, OSSL_PKEY_PARAM_FFC_G, g))
		goto out;
	params = OSSL_PARAM_BLD_to_param(bld);
	if (params == NULL)
		goto out;

	pctx = EVP_PKEY_CTX_new_from_name(NULL, "DH", NULL);
	if (pctx == NULL || !EVP_PKEY_fromdata_init(pctx))
		goto out;
	/* on failure <pkey> is left NULL, which is what we return */
	EVP_PKEY_fromdata(pctx, &pkey, EVP_PKEY_KEY_PARAMETERS, params);

out:
	EVP_PKEY_CTX_free(pctx);
	OSSL_PARAM_free(params);
	OSSL_PARAM_BLD_free(bld);
	BN_free(p);
	BN_free(g);
	return pkey;
#else
	HASSL_DH *dh = DH_new();

	if (!dh)
		return NULL;
	/* DH_set0_pqg() takes ownership of <p> and <g> */
	DH_set0_pqg(dh, p, NULL, g);
	return dh;
#endif
}
2022-04-20 12:30:17 -04:00
#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
/* Return DH parameters for the well-known group identified by <nid>
 * (e.g. NID_ffdhe2048), or NULL on failure.
 */
static inline HASSL_DH *ssl_get_dh_by_nid(int nid)
{
#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
	OSSL_PARAM params[2];
	EVP_PKEY *pkey = NULL;
	EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_from_name(NULL, "DH", NULL);
	const char *named_group = NULL;

	if (!pctx)
		goto end;

	/* translate the NID into the textual group name expected by OpenSSL 3 */
	named_group = OBJ_nid2ln(nid);
	if (!named_group)
		goto end;

	params[0] = OSSL_PARAM_construct_utf8_string("group", (char *)named_group, 0);
	params[1] = OSSL_PARAM_construct_end();

	if (EVP_PKEY_keygen_init(pctx) && EVP_PKEY_CTX_set_params(pctx, params))
		EVP_PKEY_generate(pctx, &pkey);

end:
	EVP_PKEY_CTX_free(pctx);
	return pkey;
#else
	return DH_new_by_nid(nid);
#endif
}
#endif
2022-04-12 05:31:54 -04:00
2022-02-11 06:04:52 -05:00
2022-02-11 06:04:55 -05:00
static HASSL_DH * ssl_get_dh_1024 ( void )
2014-06-12 08:58:40 -04:00
{
2015-05-29 10:26:17 -04:00
static unsigned char dh1024_p [ ] = {
0xFA , 0xF9 , 0x2A , 0x22 , 0x2A , 0xA7 , 0x7F , 0xE1 , 0x67 , 0x4E , 0x53 , 0xF7 ,
0x56 , 0x13 , 0xC3 , 0xB1 , 0xE3 , 0x29 , 0x6B , 0x66 , 0x31 , 0x6A , 0x7F , 0xB3 ,
0xC2 , 0x68 , 0x6B , 0xCB , 0x1D , 0x57 , 0x39 , 0x1D , 0x1F , 0xFF , 0x1C , 0xC9 ,
0xA6 , 0xA4 , 0x98 , 0x82 , 0x31 , 0x5D , 0x25 , 0xFF , 0x8A , 0xE0 , 0x73 , 0x96 ,
0x81 , 0xC8 , 0x83 , 0x79 , 0xC1 , 0x5A , 0x04 , 0xF8 , 0x37 , 0x0D , 0xA8 , 0x3D ,
0xAE , 0x74 , 0xBC , 0xDB , 0xB6 , 0xA4 , 0x75 , 0xD9 , 0x71 , 0x8A , 0xA0 , 0x17 ,
0x9E , 0x2D , 0xC8 , 0xA8 , 0xDF , 0x2C , 0x5F , 0x82 , 0x95 , 0xF8 , 0x92 , 0x9B ,
0xA7 , 0x33 , 0x5F , 0x89 , 0x71 , 0xC8 , 0x2D , 0x6B , 0x18 , 0x86 , 0xC4 , 0x94 ,
0x22 , 0xA5 , 0x52 , 0x8D , 0xF6 , 0xF6 , 0xD2 , 0x37 , 0x92 , 0x0F , 0xA5 , 0xCC ,
0xDB , 0x7B , 0x1D , 0x3D , 0xA1 , 0x31 , 0xB7 , 0x80 , 0x8F , 0x0B , 0x67 , 0x5E ,
0x36 , 0xA5 , 0x60 , 0x0C , 0xF1 , 0x95 , 0x33 , 0x8B ,
} ;
static unsigned char dh1024_g [ ] = {
0x02 ,
} ;
2016-08-29 07:26:37 -04:00
BIGNUM * p ;
BIGNUM * g ;
2022-02-11 06:04:55 -05:00
HASSL_DH * dh = NULL ;
p = BN_bin2bn ( dh1024_p , sizeof dh1024_p , NULL ) ;
g = BN_bin2bn ( dh1024_g , sizeof dh1024_g , NULL ) ;
if ( p & & g )
dh = ssl_new_dh_fromdata ( p , g ) ;
2014-06-12 08:58:40 -04:00
return dh ;
}
2022-02-11 06:04:55 -05:00
static HASSL_DH * ssl_get_dh_2048 ( void )
2014-06-12 08:58:40 -04:00
{
2022-04-12 05:31:54 -04:00
# if (HA_OPENSSL_VERSION_NUMBER < 0x10101000L)
2015-05-29 10:26:17 -04:00
static unsigned char dh2048_p [ ] = {
0xEC , 0x86 , 0xF8 , 0x70 , 0xA0 , 0x33 , 0x16 , 0xEC , 0x05 , 0x1A , 0x73 , 0x59 ,
0xCD , 0x1F , 0x8B , 0xF8 , 0x29 , 0xE4 , 0xD2 , 0xCF , 0x52 , 0xDD , 0xC2 , 0x24 ,
0x8D , 0xB5 , 0x38 , 0x9A , 0xFB , 0x5C , 0xA4 , 0xE4 , 0xB2 , 0xDA , 0xCE , 0x66 ,
0x50 , 0x74 , 0xA6 , 0x85 , 0x4D , 0x4B , 0x1D , 0x30 , 0xB8 , 0x2B , 0xF3 , 0x10 ,
0xE9 , 0xA7 , 0x2D , 0x05 , 0x71 , 0xE7 , 0x81 , 0xDF , 0x8B , 0x59 , 0x52 , 0x3B ,
0x5F , 0x43 , 0x0B , 0x68 , 0xF1 , 0xDB , 0x07 , 0xBE , 0x08 , 0x6B , 0x1B , 0x23 ,
0xEE , 0x4D , 0xCC , 0x9E , 0x0E , 0x43 , 0xA0 , 0x1E , 0xDF , 0x43 , 0x8C , 0xEC ,
0xBE , 0xBE , 0x90 , 0xB4 , 0x51 , 0x54 , 0xB9 , 0x2F , 0x7B , 0x64 , 0x76 , 0x4E ,
0x5D , 0xD4 , 0x2E , 0xAE , 0xC2 , 0x9E , 0xAE , 0x51 , 0x43 , 0x59 , 0xC7 , 0x77 ,
0x9C , 0x50 , 0x3C , 0x0E , 0xED , 0x73 , 0x04 , 0x5F , 0xF1 , 0x4C , 0x76 , 0x2A ,
0xD8 , 0xF8 , 0xCF , 0xFC , 0x34 , 0x40 , 0xD1 , 0xB4 , 0x42 , 0x61 , 0x84 , 0x66 ,
0x42 , 0x39 , 0x04 , 0xF8 , 0x68 , 0xB2 , 0x62 , 0xD7 , 0x55 , 0xED , 0x1B , 0x74 ,
0x75 , 0x91 , 0xE0 , 0xC5 , 0x69 , 0xC1 , 0x31 , 0x5C , 0xDB , 0x7B , 0x44 , 0x2E ,
0xCE , 0x84 , 0x58 , 0x0D , 0x1E , 0x66 , 0x0C , 0xC8 , 0x44 , 0x9E , 0xFD , 0x40 ,
0x08 , 0x67 , 0x5D , 0xFB , 0xA7 , 0x76 , 0x8F , 0x00 , 0x11 , 0x87 , 0xE9 , 0x93 ,
0xF9 , 0x7D , 0xC4 , 0xBC , 0x74 , 0x55 , 0x20 , 0xD4 , 0x4A , 0x41 , 0x2F , 0x43 ,
0x42 , 0x1A , 0xC1 , 0xF2 , 0x97 , 0x17 , 0x49 , 0x27 , 0x37 , 0x6B , 0x2F , 0x88 ,
0x7E , 0x1C , 0xA0 , 0xA1 , 0x89 , 0x92 , 0x27 , 0xD9 , 0x56 , 0x5A , 0x71 , 0xC1 ,
0x56 , 0x37 , 0x7E , 0x3A , 0x9D , 0x05 , 0xE7 , 0xEE , 0x5D , 0x8F , 0x82 , 0x17 ,
0xBC , 0xE9 , 0xC2 , 0x93 , 0x30 , 0x82 , 0xF9 , 0xF4 , 0xC9 , 0xAE , 0x49 , 0xDB ,
0xD0 , 0x54 , 0xB4 , 0xD9 , 0x75 , 0x4D , 0xFA , 0x06 , 0xB8 , 0xD6 , 0x38 , 0x41 ,
0xB7 , 0x1F , 0x77 , 0xF3 ,
} ;
static unsigned char dh2048_g [ ] = {
0x02 ,
} ;
2016-08-29 07:26:37 -04:00
BIGNUM * p ;
BIGNUM * g ;
2022-02-11 06:04:55 -05:00
HASSL_DH * dh = NULL ;
p = BN_bin2bn ( dh2048_p , sizeof dh2048_p , NULL ) ;
g = BN_bin2bn ( dh2048_g , sizeof dh2048_g , NULL ) ;
if ( p & & g )
dh = ssl_new_dh_fromdata ( p , g ) ;
2014-06-12 08:58:40 -04:00
return dh ;
2022-04-12 05:31:54 -04:00
# else
return ssl_get_dh_by_nid ( NID_ffdhe2048 ) ;
# endif
2014-06-12 08:58:40 -04:00
}
2022-02-11 06:04:55 -05:00
static HASSL_DH * ssl_get_dh_4096 ( void )
2014-06-12 08:58:40 -04:00
{
2022-04-12 05:31:54 -04:00
# if (HA_OPENSSL_VERSION_NUMBER < 0x10101000L)
2015-05-29 10:26:17 -04:00
static unsigned char dh4096_p [ ] = {
0xDE , 0x16 , 0x94 , 0xCD , 0x99 , 0x58 , 0x07 , 0xF1 , 0xF7 , 0x32 , 0x96 , 0x11 ,
0x04 , 0x82 , 0xD4 , 0x84 , 0x72 , 0x80 , 0x99 , 0x06 , 0xCA , 0xF0 , 0xA3 , 0x68 ,
0x07 , 0xCE , 0x64 , 0x50 , 0xE7 , 0x74 , 0x45 , 0x20 , 0x80 , 0x5E , 0x4D , 0xAD ,
0xA5 , 0xB6 , 0xED , 0xFA , 0x80 , 0x6C , 0x3B , 0x35 , 0xC4 , 0x9A , 0x14 , 0x6B ,
0x32 , 0xBB , 0xFD , 0x1F , 0x17 , 0x8E , 0xB7 , 0x1F , 0xD6 , 0xFA , 0x3F , 0x7B ,
0xEE , 0x16 , 0xA5 , 0x62 , 0x33 , 0x0D , 0xED , 0xBC , 0x4E , 0x58 , 0xE5 , 0x47 ,
0x4D , 0xE9 , 0xAB , 0x8E , 0x38 , 0xD3 , 0x6E , 0x90 , 0x57 , 0xE3 , 0x22 , 0x15 ,
0x33 , 0xBD , 0xF6 , 0x43 , 0x45 , 0xB5 , 0x10 , 0x0A , 0xBE , 0x2C , 0xB4 , 0x35 ,
0xB8 , 0x53 , 0x8D , 0xAD , 0xFB , 0xA7 , 0x1F , 0x85 , 0x58 , 0x41 , 0x7A , 0x79 ,
0x20 , 0x68 , 0xB3 , 0xE1 , 0x3D , 0x08 , 0x76 , 0xBF , 0x86 , 0x0D , 0x49 , 0xE3 ,
0x82 , 0x71 , 0x8C , 0xB4 , 0x8D , 0x81 , 0x84 , 0xD4 , 0xE7 , 0xBE , 0x91 , 0xDC ,
0x26 , 0x39 , 0x48 , 0x0F , 0x35 , 0xC4 , 0xCA , 0x65 , 0xE3 , 0x40 , 0x93 , 0x52 ,
0x76 , 0x58 , 0x7D , 0xDD , 0x51 , 0x75 , 0xDC , 0x69 , 0x61 , 0xBF , 0x47 , 0x2C ,
0x16 , 0x68 , 0x2D , 0xC9 , 0x29 , 0xD3 , 0xE6 , 0xC0 , 0x99 , 0x48 , 0xA0 , 0x9A ,
0xC8 , 0x78 , 0xC0 , 0x6D , 0x81 , 0x67 , 0x12 , 0x61 , 0x3F , 0x71 , 0xBA , 0x41 ,
0x1F , 0x6C , 0x89 , 0x44 , 0x03 , 0xBA , 0x3B , 0x39 , 0x60 , 0xAA , 0x28 , 0x55 ,
0x59 , 0xAE , 0xB8 , 0xFA , 0xCB , 0x6F , 0xA5 , 0x1A , 0xF7 , 0x2B , 0xDD , 0x52 ,
0x8A , 0x8B , 0xE2 , 0x71 , 0xA6 , 0x5E , 0x7E , 0xD8 , 0x2E , 0x18 , 0xE0 , 0x66 ,
0xDF , 0xDD , 0x22 , 0x21 , 0x99 , 0x52 , 0x73 , 0xA6 , 0x33 , 0x20 , 0x65 , 0x0E ,
0x53 , 0xE7 , 0x6B , 0x9B , 0xC5 , 0xA3 , 0x2F , 0x97 , 0x65 , 0x76 , 0xD3 , 0x47 ,
0x23 , 0x77 , 0x12 , 0xB6 , 0x11 , 0x7B , 0x24 , 0xED , 0xF1 , 0xEF , 0xC0 , 0xE2 ,
0xA3 , 0x7E , 0x67 , 0x05 , 0x3E , 0x96 , 0x4D , 0x45 , 0xC2 , 0x18 , 0xD1 , 0x73 ,
0x9E , 0x07 , 0xF3 , 0x81 , 0x6E , 0x52 , 0x63 , 0xF6 , 0x20 , 0x76 , 0xB9 , 0x13 ,
0xD2 , 0x65 , 0x30 , 0x18 , 0x16 , 0x09 , 0x16 , 0x9E , 0x8F , 0xF1 , 0xD2 , 0x10 ,
0x5A , 0xD3 , 0xD4 , 0xAF , 0x16 , 0x61 , 0xDA , 0x55 , 0x2E , 0x18 , 0x5E , 0x14 ,
0x08 , 0x54 , 0x2E , 0x2A , 0x25 , 0xA2 , 0x1A , 0x9B , 0x8B , 0x32 , 0xA9 , 0xFD ,
0xC2 , 0x48 , 0x96 , 0xE1 , 0x80 , 0xCA , 0xE9 , 0x22 , 0x17 , 0xBB , 0xCE , 0x3E ,
0x9E , 0xED , 0xC7 , 0xF1 , 0x1F , 0xEC , 0x17 , 0x21 , 0xDC , 0x7B , 0x82 , 0x48 ,
0x8E , 0xBB , 0x4B , 0x9D , 0x5B , 0x04 , 0x04 , 0xDA , 0xDB , 0x39 , 0xDF , 0x01 ,
0x40 , 0xC3 , 0xAA , 0x26 , 0x23 , 0x89 , 0x75 , 0xC6 , 0x0B , 0xD0 , 0xA2 , 0x60 ,
0x6A , 0xF1 , 0xCC , 0x65 , 0x18 , 0x98 , 0x1B , 0x52 , 0xD2 , 0x74 , 0x61 , 0xCC ,
0xBD , 0x60 , 0xAE , 0xA3 , 0xA0 , 0x66 , 0x6A , 0x16 , 0x34 , 0x92 , 0x3F , 0x41 ,
0x40 , 0x31 , 0x29 , 0xC0 , 0x2C , 0x63 , 0xB2 , 0x07 , 0x8D , 0xEB , 0x94 , 0xB8 ,
0xE8 , 0x47 , 0x92 , 0x52 , 0x93 , 0x6A , 0x1B , 0x7E , 0x1A , 0x61 , 0xB3 , 0x1B ,
0xF0 , 0xD6 , 0x72 , 0x9B , 0xF1 , 0xB0 , 0xAF , 0xBF , 0x3E , 0x65 , 0xEF , 0x23 ,
0x1D , 0x6F , 0xFF , 0x70 , 0xCD , 0x8A , 0x4C , 0x8A , 0xA0 , 0x72 , 0x9D , 0xBE ,
0xD4 , 0xBB , 0x24 , 0x47 , 0x4A , 0x68 , 0xB5 , 0xF5 , 0xC6 , 0xD5 , 0x7A , 0xCD ,
0xCA , 0x06 , 0x41 , 0x07 , 0xAD , 0xC2 , 0x1E , 0xE6 , 0x54 , 0xA7 , 0xAD , 0x03 ,
0xD9 , 0x12 , 0xC1 , 0x9C , 0x13 , 0xB1 , 0xC9 , 0x0A , 0x43 , 0x8E , 0x1E , 0x08 ,
0xCE , 0x50 , 0x82 , 0x73 , 0x5F , 0xA7 , 0x55 , 0x1D , 0xD9 , 0x59 , 0xAC , 0xB5 ,
0xEA , 0x02 , 0x7F , 0x6C , 0x5B , 0x74 , 0x96 , 0x98 , 0x67 , 0x24 , 0xA3 , 0x0F ,
0x15 , 0xFC , 0xA9 , 0x7D , 0x3E , 0x67 , 0xD1 , 0x70 , 0xF8 , 0x97 , 0xF3 , 0x67 ,
0xC5 , 0x8C , 0x88 , 0x44 , 0x08 , 0x02 , 0xC7 , 0x2B ,
2014-06-12 08:58:40 -04:00
} ;
2015-05-29 10:26:17 -04:00
static unsigned char dh4096_g [ ] = {
0x02 ,
} ;
2014-06-12 08:58:40 -04:00
2016-08-29 07:26:37 -04:00
BIGNUM * p ;
BIGNUM * g ;
2022-02-11 06:04:55 -05:00
HASSL_DH * dh = NULL ;
p = BN_bin2bn ( dh4096_p , sizeof dh4096_p , NULL ) ;
g = BN_bin2bn ( dh4096_g , sizeof dh4096_g , NULL ) ;
if ( p & & g )
dh = ssl_new_dh_fromdata ( p , g ) ;
2014-06-12 08:58:40 -04:00
return dh ;
2022-04-12 05:31:54 -04:00
# else
return ssl_get_dh_by_nid ( NID_ffdhe4096 ) ;
# endif
2014-06-12 08:58:40 -04:00
}
2022-02-11 06:04:55 -05:00
/* Select the DH parameters whose size matches the private key <pkey>, capped
 * by the tune.ssl.default-dh-param setting. The returned object is one of the
 * shared lazily-built singletons (local_dh_1024/2048/4096); the caller must
 * not free it. May return NULL if the lazy creation failed.
 */
static HASSL_DH *ssl_get_tmp_dh(EVP_PKEY *pkey)
{
	int type = pkey ? EVP_PKEY_base_id(pkey) : EVP_PKEY_NONE;
	int keylen = 0;

	if (type == EVP_PKEY_EC)
		keylen = global_ssl.default_dh_param;

	/* The keylen supplied by OpenSSL can only be 512 or 1024.
	   See ssl3_send_server_key_exchange() in ssl/s3_srvr.c
	 */
	if (type == EVP_PKEY_RSA || type == EVP_PKEY_DSA)
		keylen = EVP_PKEY_bits(pkey);

	if (keylen > global_ssl.default_dh_param)
		keylen = global_ssl.default_dh_param;

	if (keylen >= 4096) {
		if (!local_dh_4096)
			local_dh_4096 = ssl_get_dh_4096();
		return local_dh_4096;
	}

	if (keylen >= 2048) {
		if (!local_dh_2048)
			local_dh_2048 = ssl_get_dh_2048();
		return local_dh_2048;
	}

	if (!local_dh_1024)
		local_dh_1024 = ssl_get_dh_1024();
	return local_dh_1024;
}
2022-02-11 06:04:56 -05:00
# if (HA_OPENSSL_VERSION_NUMBER < 0x3000000fL)
2022-02-11 06:04:49 -05:00
/* Returns Diffie-Hellman parameters matching the private key length
but not exceeding global_ssl . default_dh_param */
2024-01-12 09:23:49 -05:00
HASSL_DH * ssl_get_tmp_dh_cbk ( SSL * ssl , int export , int keylen )
2022-02-11 06:04:49 -05:00
{
EVP_PKEY * pkey = SSL_get_privatekey ( ssl ) ;
return ssl_get_tmp_dh ( pkey ) ;
}
2022-02-11 06:04:56 -05:00
# endif
2022-02-11 06:04:49 -05:00
2022-02-11 06:04:50 -05:00
/* Install the DH parameters <dh> on <ctx>. With OpenSSL >= 3.0,
 * SSL_CTX_set0_tmp_dh_pkey() takes ownership on success, so a reference is
 * taken beforehand and dropped again on failure. Returns non-zero on success.
 */
static int ssl_sock_set_tmp_dh(SSL_CTX *ctx, HASSL_DH *dh)
{
#if (HA_OPENSSL_VERSION_NUMBER < 0x3000000fL)
	return SSL_CTX_set_tmp_dh(ctx, dh);
#else
	int ret;

	HASSL_DH_up_ref(dh);
	ret = SSL_CTX_set0_tmp_dh_pkey(ctx, dh);
	if (!ret)
		HASSL_DH_free(dh);
	return ret;
#endif
}
2022-02-11 06:04:51 -05:00
#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
/* Install on <ctx> the shared DH parameters matching the size of <pkey>.
 * Silently does nothing when <pkey> is NULL or no parameters are available.
 */
void ssl_sock_set_tmp_dh_from_pkey(SSL_CTX *ctx, EVP_PKEY *pkey)
{
	HASSL_DH *dh;

	if (!pkey)
		return;

	dh = ssl_get_tmp_dh(pkey);
	if (!dh)
		return;

	/* SSL_CTX_set0_tmp_dh_pkey() takes ownership on success, so grab a
	 * reference first and release it if the call fails */
	HASSL_DH_up_ref(dh);
	if (!SSL_CTX_set0_tmp_dh_pkey(ctx, dh))
		HASSL_DH_free(dh);
}
#endif
2022-02-11 06:04:48 -05:00
/* Read PEM-encoded DH parameters from <bio>. Returns a new object the caller
 * must free, or NULL when none could be decoded.
 */
HASSL_DH *ssl_sock_get_dh_from_bio(BIO *bio)
{
#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
	HASSL_DH *dh = NULL;
	OSSL_DECODER_CTX *dctx;

	dctx = OSSL_DECODER_CTX_new_for_pkey(&dh, "PEM", NULL, "DH",
	                                     OSSL_KEYMGMT_SELECT_DOMAIN_PARAMETERS,
	                                     NULL, NULL);
	if (dctx == NULL || OSSL_DECODER_CTX_get_num_decoders(dctx) == 0)
		goto end;

	/* The DH parameters might not be the first section found in the PEM
	 * file so we need to iterate over all of them until we find the right
	 * one.
	 */
	while (!BIO_eof(bio) && !dh)
		OSSL_DECODER_from_bio(dctx, bio);

end:
	OSSL_DECODER_CTX_free(dctx);
	return dh;
#else
	return PEM_read_bio_DHparams(bio, NULL, NULL, NULL);
#endif
}
2022-02-11 06:04:55 -05:00
static HASSL_DH * ssl_sock_get_dh_from_file ( const char * filename )
2012-09-20 10:19:02 -04:00
{
2022-02-11 06:04:55 -05:00
HASSL_DH * dh = NULL ;
2015-05-29 09:53:22 -04:00
BIO * in = BIO_new ( BIO_s_file ( ) ) ;
2012-09-20 10:19:02 -04:00
if ( in = = NULL )
goto end ;
2015-05-29 09:53:22 -04:00
if ( BIO_read_filename ( in , filename ) < = 0 )
2012-09-20 10:19:02 -04:00
goto end ;
2022-02-11 06:04:55 -05:00
dh = ssl_sock_get_dh_from_bio ( in ) ;
2015-05-29 09:53:22 -04:00
end :
if ( in )
BIO_free ( in ) ;
2018-08-16 09:14:12 -04:00
ERR_clear_error ( ) ;
2015-05-29 09:53:22 -04:00
return dh ;
}
/* Load the global DH parameters from <filename> into <global_dh>.
 * Returns 0 on success, -1 on failure.
 */
int ssl_sock_load_global_dh_param_from_file(const char *filename)
{
	global_dh = ssl_sock_get_dh_from_file(filename);
	return global_dh ? 0 : -1;
}
2012-09-20 10:19:02 -04:00
# endif
2019-10-03 18:29:42 -04:00
/* This function allocates a sni_ctx and adds it to the ckch_inst */
2019-10-03 18:53:29 -04:00
static int ckch_inst_add_cert_sni ( SSL_CTX * ctx , struct ckch_inst * ckch_inst ,
2019-10-03 18:29:42 -04:00
struct bind_conf * s , struct ssl_bind_conf * conf ,
struct pkey_info kinfo , char * name , int order )
2013-01-22 09:31:15 -05:00
{
struct sni_ctx * sc ;
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
int wild = 0 , neg = 0 , default_crt = 0 ;
2013-05-07 14:20:06 -04:00
if ( * name = = ' ! ' ) {
neg = 1 ;
name + + ;
}
if ( * name = = ' * ' ) {
wild = 1 ;
name + + ;
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
/* if this was only a '*' filter, this is a default cert */
if ( ! * name )
default_crt = 1 ;
2013-05-07 14:20:06 -04:00
}
/* !* filter is a nop */
if ( neg & & wild )
return order ;
MEDIUM: ssl: allow multiple fallback certificate to allow ECDSA/RSA selection
This patch changes the default certificate mechanism.
Since the beginning of SSL in HAProxy, the default certificate was the first
certificate of a bind line. This allowed to fallback on this certificate
when no servername extension was sent by the server, or when no SAN nor
CN was available in the certificate.
When using a multi-certificate bundle (ecdsa+rsa), it was possible to
have both certificates as the fallback one, leting openssl chose the
right one. This was possible because a multi-certificate bundle
was generating a unique SSL_CTX for both certificates.
When the haproxy and openssl architecture evolved, we decided to
use multiple SSL_CTX for a multi-cert bundle, in order to simplify the
code and allow updates over the CLI.
However only one default_ctx was allowed, so we lost the ability to
chose between ECDSA and RSA for the default certificate.
This patch allows to use a '*' filter for a certificate, which allow to
lookup between multiple '*' filter, and have one in RSA and another one
in ECDSA. It replaces the default_ctx mechanism in the ClientHello
callback and use the standard algorithm to look for a default cert and
chose between ECDSA and RSA.
/!\ This patch breaks the automatic setting of the default certificate, which
will be introduce in the next patch. So the first certificate of a bind
line won't be used as a defaullt anymore.
To use this feature, one could use crt-list with '*' filters:
$ cat foo.crtlist
foobar.pem.rsa *
foobar.pem.ecdsa *
In order to test the feature, it's easy to send a request without
the servername extension and use ECDSA or RSA compatible ciphers:
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384
$ openssl s_client -connect localhost:8443 -tls1_2 -cipher ECDHE-ECDSA-AES256-GCM-SHA384
2024-01-10 08:05:59 -05:00
if ( * name | | default_crt ) {
2013-05-07 14:20:06 -04:00
int j , len ;
len = strlen ( name ) ;
2016-10-06 04:56:48 -04:00
for ( j = 0 ; j < len & & j < trash . size ; j + + )
2020-07-05 15:46:32 -04:00
trash . area [ j ] = tolower ( ( unsigned char ) name [ j ] ) ;
2016-10-06 04:56:48 -04:00
if ( j > = trash . size )
2019-10-03 17:46:33 -04:00
return - 1 ;
2018-07-13 04:54:26 -04:00
trash . area [ j ] = 0 ;
2016-10-06 04:56:48 -04:00
2013-01-22 09:31:15 -05:00
sc = malloc ( sizeof ( struct sni_ctx ) + len + 1 ) ;
2016-10-06 04:35:29 -04:00
if ( ! sc )
2019-10-03 17:46:33 -04:00
return - 1 ;
2018-07-13 04:54:26 -04:00
memcpy ( sc - > name . key , trash . area , len + 1 ) ;
2020-04-08 10:11:26 -04:00
SSL_CTX_up_ref ( ctx ) ;
2013-01-22 09:31:15 -05:00
sc - > ctx = ctx ;
2016-12-29 12:26:15 -05:00
sc - > conf = conf ;
2017-10-27 12:43:29 -04:00
sc - > kinfo = kinfo ;
2013-05-07 14:20:06 -04:00
sc - > order = order + + ;
sc - > neg = neg ;
2019-10-03 18:53:29 -04:00
sc - > wild = wild ;
sc - > name . node . leaf_p = NULL ;
2020-03-05 04:17:47 -05:00
sc - > ckch_inst = ckch_inst ;
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & ckch_inst - > sni_ctx , & sc - > by_ckch_inst ) ;
2013-01-22 09:31:15 -05:00
}
return order ;
}
2019-10-03 18:53:29 -04:00
/*
 * Insert the sni_ctxs that are listed in the ckch_inst, in the bind_conf's sni_ctx tree
 * This function can't return an error.
 *
 * A sni_ctx that is a strict duplicate of one already present in the tree
 * (same ctx, conf, neg and wild values for the same key) is removed from the
 * instance's list and freed instead of being inserted.
 *
 * *CAUTION*: The caller must lock the sni tree if called in multithreading mode
 */
void ssl_sock_load_cert_sni(struct ckch_inst *ckch_inst, struct bind_conf *bind_conf)
{
	struct sni_ctx *sc0, *sc0b, *sc1;
	struct ebmb_node *node;

	list_for_each_entry_safe(sc0, sc0b, &ckch_inst->sni_ctx, by_ckch_inst) {

		/* ignore if sc0 was already inserted in a tree */
		if (sc0->name.node.leaf_p)
			continue;

		/* Check for duplicates: wildcard names are kept in a separate tree */
		if (sc0->wild)
			node = ebst_lookup(&bind_conf->sni_w_ctx, (char *)sc0->name.key);
		else
			node = ebst_lookup(&bind_conf->sni_ctx, (char *)sc0->name.key);

		/* walk all the tree entries sharing the same key */
		for (; node; node = ebmb_next_dup(node)) {
			sc1 = ebmb_entry(node, struct sni_ctx, name);
			if (sc1->ctx == sc0->ctx && sc1->conf == sc0->conf
			    && sc1->neg == sc0->neg && sc1->wild == sc0->wild) {
				/* it's a duplicate, we should remove and free it */
				LIST_DELETE(&sc0->by_ckch_inst);
				SSL_CTX_free(sc0->ctx);
				ha_free(&sc0); /* also sets sc0 to NULL, tested below */
				break;
			}
		}

		/* if duplicate, ignore the insertion */
		if (!sc0)
			continue;

		if (sc0->wild)
			ebst_insert(&bind_conf->sni_w_ctx, &sc0->name);
		else
			ebst_insert(&bind_conf->sni_ctx, &sc0->name);
	}
}
2019-07-23 09:00:54 -04:00
/*
 * tree used to store the ckchs ordered by filename/bundle name
 */
struct eb_root ckchs_tree = EB_ROOT_UNIQUE;

/* tree of crtlist (crt-list/directory), also keyed by name */
struct eb_root crtlists_tree = EB_ROOT_UNIQUE;
2015-12-02 13:01:29 -05:00
2019-10-17 07:27:40 -04:00
/* Loads Diffie-Hellman parameter from a ckchs to an SSL_CTX.
 * If there is no DH parameter available in the ckchs, the global
 * DH parameter is loaded into the SSL_CTX and if there is no
 * DH parameter available in ckchs nor in global, the default
 * DH parameters are applied on the SSL_CTX.
 * Returns a bitfield containing the flags:
 *     ERR_FATAL in any fatal error case
 *     ERR_ALERT if a reason of the error is available in err
 *     ERR_WARN if a warning is available into err
 * The value 0 means there is no error nor warning and
 * the operation succeed.
 */
#ifndef OPENSSL_NO_DH
static int ssl_sock_load_dh_params(SSL_CTX *ctx, const struct ckch_data *data,
                                   const char *path, char **err)
{
	int ret = 0;
	HASSL_DH *dh = NULL;

	if (data && data->dh) {
		/* DH params were found in the certificate's data: prefer them */
		dh = data->dh;
		if (!ssl_sock_set_tmp_dh(ctx, dh)) {
			/* the second memprintf() appends to the message built by the first */
			memprintf(err, "%sunable to load the DH parameter specified in '%s'",
			          err && *err ? *err : "", path);
			memprintf(err, "%s, DH ciphers won't be available.\n",
			          err && *err ? *err : "");
			ret |= ERR_WARN;
			goto end;
		}

		if (ssl_dh_ptr_index >= 0) {
			/* store a pointer to the DH params to avoid complaining about
			   ssl-default-dh-param not being set for this SSL_CTX */
			SSL_CTX_set_ex_data(ctx, ssl_dh_ptr_index, dh);
		}
	}
	else if (global_dh) {
		/* fall back to the globally configured DH parameters */
		if (!ssl_sock_set_tmp_dh(ctx, global_dh)) {
			memprintf(err, "%sunable to use the global DH parameter for certificate '%s'",
			          err && *err ? *err : "", path);
			memprintf(err, "%s, DH ciphers won't be available.\n",
			          err && *err ? *err : "");
			ret |= ERR_WARN;
			goto end;
		}
	}
	else {
		/* Clear openssl global errors stack */
		ERR_clear_error();

		/* We do not want DHE ciphers to be added to the cipher list
		 * unless there is an explicit global dh option in the conf.
		 */
		if (global_ssl.default_dh_param) {
			if (global_ssl.default_dh_param <= 1024) {
				/* we are limited to DH parameter of 1024 bits anyway */
				if (local_dh_1024 == NULL)
					local_dh_1024 = ssl_get_dh_1024();

				if (local_dh_1024 == NULL) {
					memprintf(err, "%sunable to load default 1024 bits DH parameter for certificate '%s'.\n",
					          err && *err ? *err : "", path);
					ret |= ERR_ALERT | ERR_FATAL;
					goto end;
				}

				if (!ssl_sock_set_tmp_dh(ctx, local_dh_1024)) {
					memprintf(err, "%sunable to load default 1024 bits DH parameter for certificate '%s'.\n",
					          err && *err ? *err : "", path);
					memprintf(err, "%s, DH ciphers won't be available.\n",
					          err && *err ? *err : "");
					ret |= ERR_WARN;
					goto end;
				}
			}
			else {
#if (HA_OPENSSL_VERSION_NUMBER < 0x3000000fL)
				/* pre-3.0: let openssl call us back to pick DH params */
				SSL_CTX_set_tmp_dh_callback(ctx, ssl_get_tmp_dh_cbk);
#else
				/* 3.0+: derive the DH params from the key's size up front */
				ssl_sock_set_tmp_dh_from_pkey(ctx, data ? data->key : NULL);
#endif
			}
		}
	}

 end:
	ERR_clear_error();
	return ret;
}
#endif
2021-01-25 11:19:42 -05:00
/* Load a certificate chain into an SSL context.
 * Returns a bitfield containing the flags:
 *     ERR_FATAL in any fatal error case
 *     ERR_ALERT if the reason of the error is available in err
 *     ERR_WARN if a warning is available into err
 * The caller is responsible of freeing the newly built or newly refcounted
 * find_chain element.
 * The value 0 means there is no error nor warning and
 * the operation succeed.
 */
static int ssl_sock_load_cert_chain(const char *path, const struct ckch_data *data,
                                    SSL_CTX *ctx, STACK_OF(X509) **find_chain, char **err)
{
	int errcode = 0;
	int ret;

	ERR_clear_error();

	if (find_chain == NULL) {
		errcode |= ERR_FATAL;
		goto end;
	}

	/* set the leaf certificate first */
	if (!SSL_CTX_use_certificate(ctx, data->cert)) {
		ret = ERR_get_error();
		memprintf(err, "%sunable to load SSL certificate into SSL Context '%s': %s.\n",
		          err && *err ? *err : "", path, ERR_reason_error_string(ret));
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}

	if (data->chain) {
		/* a chain was provided with the certificate: take a reference on it */
		*find_chain = X509_chain_up_ref(data->chain);
	} else {
		/* Find Certificate Chain in global */
		struct issuer_chain *issuer;
		issuer = ssl_get0_issuer_chain(data->cert);
		if (issuer)
			*find_chain = X509_chain_up_ref(issuer->chain);
	}

	if (!*find_chain) {
		/* always put a null chain stack in the SSL_CTX so it does not
		 * try to build the chain from the verify store */
		*find_chain = sk_X509_new_null();
	}

	/* Load all certs in the data into the ctx_chain for the ssl_ctx */
#ifdef SSL_CTX_set1_chain
	if (!SSL_CTX_set1_chain(ctx, *find_chain)) {
		ret = ERR_get_error();
		memprintf(err, "%sunable to load chain certificate into SSL Context '%s': %s.\n",
		          err && *err ? *err : "", path, ERR_reason_error_string(ret));
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}
#else
	{ /* legacy compat (< openssl 1.0.2) */
		X509 *ca;
		/* sk_X509_shift() transfers ownership of each cert to the ctx on
		 * success; on failure we must free the one we still hold */
		while ((ca = sk_X509_shift(*find_chain)))
			if (!SSL_CTX_add_extra_chain_cert(ctx, ca)) {
				memprintf(err, "%sunable to load chain certificate into SSL Context '%s'.\n",
				          err && *err ? *err : "", path);
				X509_free(ca);
				errcode |= ERR_ALERT | ERR_FATAL;
				goto end;
			}
	}
#endif

#ifdef SSL_CTX_build_cert_chain
	/* remove the Root CA from the SSL_CTX if the option is activated */
	if (global_ssl.skip_self_issued_ca) {
		if (!SSL_CTX_build_cert_chain(ctx, SSL_BUILD_CHAIN_FLAG_NO_ROOT | SSL_BUILD_CHAIN_FLAG_UNTRUSTED | SSL_BUILD_CHAIN_FLAG_IGNORE_ERROR)) {
			memprintf(err, "%sunable to load chain certificate into SSL Context '%s'.\n",
			          err && *err ? *err : "", path);
			errcode |= ERR_ALERT | ERR_FATAL;
			goto end;
		}
	}
#endif

 end:
	return errcode;
}
/* Loads the info in ckch into ctx
 * (private key, certificate chain, DH parameters, SCT list and OCSP info,
 * depending on build options)
 * Returns a bitfield containing the flags:
 *     ERR_FATAL in any fatal error case
 *     ERR_ALERT if the reason of the error is available in err
 *     ERR_WARN if a warning is available into err
 * The value 0 means there is no error nor warning and
 * the operation succeed.
 */
static int ssl_sock_put_ckch_into_ctx(const char *path, struct ckch_data *data, SSL_CTX *ctx, char **err)
{
	int errcode = 0;
	STACK_OF(X509) *find_chain = NULL;

	ERR_clear_error();

	/* load the private key first; nothing to release yet so we can return
	 * directly on failure */
	if (SSL_CTX_use_PrivateKey(ctx, data->key) <= 0) {
		int ret;

		ret = ERR_get_error();
		memprintf(err, "%sunable to load SSL private key into SSL Context '%s': %s.\n",
		          err && *err ? *err : "", path, ERR_reason_error_string(ret));
		errcode |= ERR_ALERT | ERR_FATAL;
		return errcode;
	}

	/* Load certificate chain */
	errcode |= ssl_sock_load_cert_chain(path, data, ctx, &find_chain, err);
	if (errcode & ERR_CODE)
		goto end;

#ifndef OPENSSL_NO_DH
	/* store a NULL pointer to indicate we have not yet loaded
	   a custom DH param file */
	if (ssl_dh_ptr_index >= 0) {
		SSL_CTX_set_ex_data(ctx, ssl_dh_ptr_index, NULL);
	}

	errcode |= ssl_sock_load_dh_params(ctx, data, path, err);
	if (errcode & ERR_CODE) {
		memprintf(err, "%sunable to load DH parameters from file '%s'.\n",
		          err && *err ? *err : "", path);
		goto end;
	}
#endif

#ifdef HAVE_SSL_CTX_ADD_SERVER_CUSTOM_EXT
	/* Signed Certificate Timestamp list, if one was loaded with the cert */
	if (sctl_ex_index >= 0 && data->sctl) {
		if (ssl_sock_load_sctl(ctx, data->sctl) < 0) {
			memprintf(err, "%s '%s.sctl' is present but cannot be read or parsed'.\n",
			          err && *err ? *err : "", path);
			errcode |= ERR_ALERT | ERR_FATAL;
			goto end;
		}
	}
#endif

#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) || defined OPENSSL_IS_BORINGSSL)
	/* Load OCSP Info into context
	 * If OCSP update mode is set to 'on', an entry will be created in the
	 * ocsp tree even if no ocsp_response was known during init, unless the
	 * frontend's conf disables ocsp update explicitly.
	 */
	if (ssl_sock_load_ocsp(path, ctx, data, find_chain) < 0) {
		if (data->ocsp_response)
			memprintf(err, "%s '%s.ocsp' is present and activates OCSP but it is impossible to compute the OCSP certificate ID (maybe the issuer could not be found)'.\n",
			          err && *err ? *err : "", path);
		else
			memprintf(err, "%s '%s' has an OCSP auto-update set to 'on' but an error occurred (maybe the OCSP URI or the issuer could not be found)'.\n",
			          err && *err ? *err : "", path);
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}
#endif

 end:
	/* release our reference on the chain in every case (NULL-safe) */
	sk_X509_pop_free(find_chain, X509_free);
	return errcode;
}
2021-01-25 11:19:43 -05:00
/* Loads the info of a ckch built out of a backend certificate into an SSL ctx
 * Returns a bitfield containing the flags:
 *     ERR_FATAL in any fatal error case
 *     ERR_ALERT if the reason of the error is available in err
 *     ERR_WARN if a warning is available into err
 * The value 0 means there is no error nor warning and
 * the operation succeed.
 */
static int ssl_sock_put_srv_ckch_into_ctx(const char *path, const struct ckch_data *data,
                                          SSL_CTX *ctx, char **err)
{
	int errcode = 0;
	STACK_OF(X509) *find_chain = NULL;

	/* Load the private key */
	if (SSL_CTX_use_PrivateKey(ctx, data->key) <= 0) {
		memprintf(err, "%sunable to load SSL private key into SSL Context '%s'.\n",
		          err && *err ? *err : "", path);
		errcode |= ERR_ALERT | ERR_FATAL;
		/* NOTE(review): no early exit here, the chain is still loaded
		 * below; the failure is caught by the ERR_CODE check afterwards */
	}

	/* Load certificate chain */
	errcode |= ssl_sock_load_cert_chain(path, data, ctx, &find_chain, err);
	if (errcode & ERR_CODE)
		goto end;

	/* make sure the loaded key matches the loaded certificate */
	if (SSL_CTX_check_private_key(ctx) <= 0) {
		memprintf(err, "%sinconsistencies between private key and certificate loaded from PEM file '%s'.\n",
		          err && *err ? *err : "", path);
		errcode |= ERR_ALERT | ERR_FATAL;
	}

 end:
	/* release our reference on the chain in every case (NULL-safe) */
	sk_X509_pop_free(find_chain, X509_free);
	return errcode;
}
2019-10-07 07:52:11 -04:00
/*
 * This function allocate a ckch_inst and create its snis
 * (one sni_ctx per explicit filter, or per SAN/CN of the certificate when no
 * filter is given; an extra "*" sni is added when the instance is the default)
 *
 * Returns a bitfield containing the flags:
 *     ERR_FATAL in any fatal error case
 *     ERR_ALERT if the reason of the error is available in err
 *     ERR_WARN if a warning is available into err
 */
int ckch_inst_new_load_store(const char *path, struct ckch_store *ckchs, struct bind_conf *bind_conf,
                             struct ssl_bind_conf *ssl_conf, char **sni_filter, int fcount, int is_default, struct ckch_inst **ckchi, char **err)
{
	SSL_CTX *ctx;
	int i;
	int order = 0;
	X509_NAME *xname;
	char *str;
	EVP_PKEY *pkey;
	struct pkey_info kinfo = { .sig = TLSEXT_signature_anonymous, .bits = 0 };
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
	STACK_OF(GENERAL_NAME) *names;
#endif
	struct ckch_data *data;
	struct ckch_inst *ckch_inst = NULL;
	int errcode = 0;

	*ckchi = NULL;

	if (!ckchs || !ckchs->data)
		return ERR_FATAL;

	data = ckchs->data;

	ctx = SSL_CTX_new(SSLv23_server_method());
	if (!ctx) {
		memprintf(err, "%sunable to allocate SSL context for cert '%s'.\n",
		          err && *err ? *err : "", path);
		errcode |= ERR_ALERT | ERR_FATAL;
		goto error;
	}

	if (global_ssl.security_level > -1)
		SSL_CTX_set_security_level(ctx, global_ssl.security_level);

	/* load key/cert/chain/DH/sctl/ocsp into the new ctx */
	errcode |= ssl_sock_put_ckch_into_ctx(path, data, ctx, err);
	if (errcode & ERR_CODE)
		goto error;

	ckch_inst = ckch_inst_new();
	if (!ckch_inst) {
		memprintf(err, "%sunable to allocate SSL context for cert '%s'.\n",
		          err && *err ? *err : "", path);
		errcode |= ERR_ALERT | ERR_FATAL;
		goto error;
	}

	/* collect the key type and size, stored with each sni_ctx */
	pkey = X509_get_pubkey(data->cert);
	if (pkey) {
		kinfo.bits = EVP_PKEY_bits(pkey);
		switch (EVP_PKEY_base_id(pkey)) {
		case EVP_PKEY_RSA:
			kinfo.sig = TLSEXT_signature_rsa;
			break;
		case EVP_PKEY_EC:
			kinfo.sig = TLSEXT_signature_ecdsa;
			break;
		case EVP_PKEY_DSA:
			kinfo.sig = TLSEXT_signature_dsa;
			break;
		}
		EVP_PKEY_free(pkey);
	}

	if (fcount) {
		/* explicit SNI filters were given (crt-list): use only them */
		while (fcount--) {
			order = ckch_inst_add_cert_sni(ctx, ckch_inst, bind_conf, ssl_conf, kinfo, sni_filter[fcount], order);
			if (order < 0) {
				memprintf(err, "%sunable to create a sni context.\n", err && *err ? *err : "");
				errcode |= ERR_ALERT | ERR_FATAL;
				goto error;
			}
		}
	}
	else {
		/* no filter: derive the SNIs from the certificate itself */
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
		/* one sni_ctx per DNS entry of the subjectAltName extension */
		names = X509_get_ext_d2i(data->cert, NID_subject_alt_name, NULL, NULL);
		if (names) {
			for (i = 0; i < sk_GENERAL_NAME_num(names); i++) {
				GENERAL_NAME *name = sk_GENERAL_NAME_value(names, i);
				if (name->type == GEN_DNS) {
					if (ASN1_STRING_to_UTF8((unsigned char **)&str, name->d.dNSName) >= 0) {
						order = ckch_inst_add_cert_sni(ctx, ckch_inst, bind_conf, ssl_conf, kinfo, str, order);
						OPENSSL_free(str);
						if (order < 0) {
							memprintf(err, "%sunable to create a sni context.\n", err && *err ? *err : "");
							errcode |= ERR_ALERT | ERR_FATAL;
							goto error;
						}
					}
				}
			}
			sk_GENERAL_NAME_pop_free(names, GENERAL_NAME_free);
		}
#endif /* SSL_CTRL_SET_TLSEXT_HOSTNAME */
		/* one sni_ctx per commonName entry of the subject */
		xname = X509_get_subject_name(data->cert);
		i = -1;
		while ((i = X509_NAME_get_index_by_NID(xname, NID_commonName, i)) != -1) {
			X509_NAME_ENTRY *entry = X509_NAME_get_entry(xname, i);
			ASN1_STRING *value;

			value = X509_NAME_ENTRY_get_data(entry);
			if (ASN1_STRING_to_UTF8((unsigned char **)&str, value) >= 0) {
				order = ckch_inst_add_cert_sni(ctx, ckch_inst, bind_conf, ssl_conf, kinfo, str, order);
				OPENSSL_free(str);
				if (order < 0) {
					memprintf(err, "%sunable to create a sni context.\n", err && *err ? *err : "");
					errcode |= ERR_ALERT | ERR_FATAL;
					goto error;
				}
			}
		}
	}
	/* we must not free the SSL_CTX anymore below, since it's already in
	 * the tree, so it will be discovered and cleaned in time.
	 */

	if (is_default) {
		ckch_inst->is_default = 1;

		/* insert an empty SNI which will be used to lookup default certificate */
		order = ckch_inst_add_cert_sni(ctx, ckch_inst, bind_conf, ssl_conf, kinfo, "*", order);
		if (order < 0) {
			memprintf(err, "%sunable to create a sni context.\n", err && *err ? *err : "");
			errcode |= ERR_ALERT | ERR_FATAL;
			goto error;
		}
	}

	/* Always keep a reference to the newly constructed SSL_CTX in the
	 * instance. This way if the instance has no SNIs, the SSL_CTX will
	 * still be linked. */
	SSL_CTX_up_ref(ctx);
	ckch_inst->ctx = ctx;

	/* everything succeed, the ckch instance can be used */
	ckch_inst->bind_conf = bind_conf;
	ckch_inst->ssl_conf = ssl_conf;
	ckch_inst->ckch_store = ckchs;

	SSL_CTX_free(ctx); /* we need to free the ctx since we incremented the refcount where it's used */

	*ckchi = ckch_inst;
	return errcode;

error:
	/* free the allocated sni_ctxs */
	if (ckch_inst) {
		ckch_inst_free(ckch_inst);
		ckch_inst = NULL;
	}
	SSL_CTX_free(ctx);

	return errcode;
}
2021-01-25 11:19:43 -05:00
/*
* This function allocate a ckch_inst that will be used on the backend side
* ( server line )
*
* Returns a bitfield containing the flags :
* ERR_FATAL in any fatal error case
* ERR_ALERT if the reason of the error is available in err
* ERR_WARN if a warning is available into err
*/
int ckch_inst_new_load_srv_store ( const char * path , struct ckch_store * ckchs ,
2021-01-26 05:27:42 -05:00
struct ckch_inst * * ckchi , char * * err )
2021-01-25 11:19:43 -05:00
{
SSL_CTX * ctx ;
2022-11-22 05:51:53 -05:00
struct ckch_data * data ;
2021-01-25 11:19:43 -05:00
struct ckch_inst * ckch_inst = NULL ;
int errcode = 0 ;
* ckchi = NULL ;
2022-11-22 05:51:53 -05:00
if ( ! ckchs | | ! ckchs - > data )
2021-01-25 11:19:43 -05:00
return ERR_FATAL ;
2022-11-22 05:51:53 -05:00
data = ckchs - > data ;
2021-01-25 11:19:43 -05:00
ctx = SSL_CTX_new ( SSLv23_client_method ( ) ) ;
if ( ! ctx ) {
memprintf ( err , " %sunable to allocate SSL context for cert '%s'. \n " ,
err & & * err ? * err : " " , path ) ;
errcode | = ERR_ALERT | ERR_FATAL ;
goto error ;
}
2024-03-12 11:22:34 -04:00
if ( global_ssl . security_level > - 1 )
SSL_CTX_set_security_level ( ctx , global_ssl . security_level ) ;
2022-11-22 05:51:53 -05:00
errcode | = ssl_sock_put_srv_ckch_into_ctx ( path , data , ctx , err ) ;
2021-01-25 11:19:43 -05:00
if ( errcode & ERR_CODE )
goto error ;
ckch_inst = ckch_inst_new ( ) ;
if ( ! ckch_inst ) {
memprintf ( err , " %sunable to allocate SSL context for cert '%s'. \n " ,
err & & * err ? * err : " " , path ) ;
errcode | = ERR_ALERT | ERR_FATAL ;
goto error ;
}
/* everything succeed, the ckch instance can be used */
ckch_inst - > bind_conf = NULL ;
ckch_inst - > ssl_conf = NULL ;
ckch_inst - > ckch_store = ckchs ;
2021-01-26 05:27:42 -05:00
ckch_inst - > ctx = ctx ;
2021-01-26 06:01:46 -05:00
ckch_inst - > is_server_instance = 1 ;
2021-01-25 11:19:43 -05:00
* ckchi = ckch_inst ;
return errcode ;
error :
SSL_CTX_free ( ctx ) ;
return errcode ;
}
2019-10-16 11:06:25 -04:00
/* Returns a set of ERR_* flags possibly with an error in <err>. */
2019-10-07 07:52:11 -04:00
static int ssl_sock_load_ckchs ( const char * path , struct ckch_store * ckchs ,
struct bind_conf * bind_conf , struct ssl_bind_conf * ssl_conf ,
2024-01-10 10:07:17 -05:00
char * * sni_filter , int fcount ,
int is_default ,
struct ckch_inst * * ckch_inst , char * * err )
2019-10-07 07:52:11 -04:00
{
2019-10-17 07:16:58 -04:00
int errcode = 0 ;
2019-10-07 07:52:11 -04:00
/* we found the ckchs in the tree, we can use it directly */
2024-01-10 10:07:17 -05:00
errcode | = ckch_inst_new_load_store ( path , ckchs , bind_conf , ssl_conf , sni_filter , fcount , is_default , ckch_inst , err ) ;
2019-10-07 07:52:11 -04:00
2019-10-17 07:16:58 -04:00
if ( errcode & ERR_CODE )
return errcode ;
2019-10-07 07:52:11 -04:00
2020-03-09 11:48:43 -04:00
ssl_sock_load_cert_sni ( * ckch_inst , bind_conf ) ;
2019-10-07 07:52:11 -04:00
/* succeed, add the instance to the ckch_store's list of instance */
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & ckchs - > ckch_inst , & ( ( * ckch_inst ) - > by_ckchs ) ) ;
2019-10-17 07:16:58 -04:00
return errcode ;
2012-09-07 11:30:07 -04:00
}
2021-01-26 06:01:46 -05:00
/* This function generates a <struct ckch_inst *> for a <struct server *>, and
* fill the SSL_CTX of the server .
*
* Returns a set of ERR_ * flags possibly with an error in < err > . */
2021-01-25 11:19:43 -05:00
static int ssl_sock_load_srv_ckchs ( const char * path , struct ckch_store * ckchs ,
2021-01-26 06:01:46 -05:00
struct server * server , struct ckch_inst * * ckch_inst , char * * err )
2021-01-25 11:19:43 -05:00
{
int errcode = 0 ;
/* we found the ckchs in the tree, we can use it directly */
2021-01-26 05:27:42 -05:00
errcode | = ckch_inst_new_load_srv_store ( path , ckchs , ckch_inst , err ) ;
2021-01-25 11:19:43 -05:00
if ( errcode & ERR_CODE )
return errcode ;
2021-01-26 06:01:46 -05:00
( * ckch_inst ) - > server = server ;
/* Keep the reference to the SSL_CTX in the server. */
SSL_CTX_up_ref ( ( * ckch_inst ) - > ctx ) ;
server - > ssl_ctx . ctx = ( * ckch_inst ) - > ctx ;
2021-01-25 11:19:43 -05:00
/* succeed, add the instance to the ckch_store's list of instance */
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & ckchs - > ckch_inst , & ( ( * ckch_inst ) - > by_ckchs ) ) ;
2021-01-25 11:19:43 -05:00
return errcode ;
}
2020-03-06 16:26:32 -05:00
2020-03-30 12:45:10 -04:00
/* Make sure openssl opens /dev/urandom before the chroot. The work is only
 * done once. Zero is returned if the operation fails. No error is returned
 * if the random is said as not implemented, because we expect that openssl
 * will use another method once needed.
 */
int ssl_initialize_random(void)
{
	static int seeded = 0;

	if (!seeded) {
		unsigned char one_byte;

		/* pulling a single byte forces the RNG to open its source now */
		if (RAND_bytes(&one_byte, 1) != 0)
			seeded = 1;
	}
	return seeded;
}
2020-03-06 15:54:13 -05:00
/* Load a crt-list file, this is done in 2 parts:
 *  - store the content of the file in a crtlist structure with crtlist_entry structures
 *  - generate the instances by iterating on entries in the crtlist struct
 *
 * Nothing is locked there, this function is used in the configuration parser.
 *
 * Returns a set of ERR_* flags possibly with an error in <err>.
 */
int ssl_sock_load_cert_list_file(char *file, int dir, struct bind_conf *bind_conf, struct proxy *curproxy, char **err)
{
	struct crtlist *crtlist = NULL;
	struct ebmb_node *eb;
	struct crtlist_entry *entry = NULL;
	struct bind_conf_list *bind_conf_node = NULL;
	int cfgerr = 0;
	char *end;

	/* allocated up front so a late failure cannot prevent registering the
	 * bind_conf on the crtlist */
	bind_conf_node = malloc(sizeof(*bind_conf_node));
	if (!bind_conf_node) {
		memprintf(err, "%sCan't alloc memory!\n", err && *err ? *err : "");
		cfgerr |= ERR_FATAL | ERR_ALERT;
		goto error;
	}
	bind_conf_node->next = NULL;
	bind_conf_node->bind_conf = bind_conf;

	/* strip trailing slashes, including first one */
	for (end = file + strlen(file) - 1; end >= file && *end == '/'; end--)
		*end = 0;

	/* look for an existing crtlist or create one */
	eb = ebst_lookup(&crtlists_tree, file);
	if (eb) {
		crtlist = ebmb_entry(eb, struct crtlist, node);
	} else {
		/* load a crt-list OR a directory */
		if (dir)
			cfgerr |= crtlist_load_cert_dir(file, bind_conf, &crtlist, err);
		else
			cfgerr |= crtlist_parse_file(file, bind_conf, curproxy, &crtlist, err);

		if (!(cfgerr & ERR_CODE))
			ebst_insert(&crtlists_tree, &crtlist->node);
	}

	if (cfgerr & ERR_CODE) {
		cfgerr |= ERR_FATAL | ERR_ALERT;
		goto error;
	}

	/* generates ckch instance from the crtlist_entry */
	list_for_each_entry(entry, &crtlist->ord_entries, by_crtlist) {
		struct ckch_store *store;
		struct ckch_inst *ckch_inst = NULL;
		int is_default = 0;

		store = entry->node.key;

		/* if the SNI trees were empty the first "crt" become a default certificate,
		 * it can be applied on multiple certificates if it's a bundle */
		if (eb_is_empty(&bind_conf->sni_ctx) && eb_is_empty(&bind_conf->sni_w_ctx))
			is_default = 1;

		cfgerr |= ssl_sock_load_ckchs(store->path, store, bind_conf, entry->ssl_conf, entry->filters, entry->fcount, is_default, &ckch_inst, err);
		if (cfgerr & ERR_CODE) {
			memprintf(err, "error processing line %d in file '%s' : %s", entry->linenum, file, *err);
			goto error;
		}
		LIST_APPEND(&entry->ckch_inst, &ckch_inst->by_crtlist_entry);
		ckch_inst->crtlist_entry = entry;
	}

	/* add the bind_conf to the list */
	bind_conf_node->next = crtlist->bind_conf;
	crtlist->bind_conf = bind_conf_node;

	return cfgerr;
error:
	{
		/* undo the instances generated for this bind_conf before the
		 * entry that failed */
		struct crtlist_entry *lastentry;
		struct ckch_inst *inst, *s_inst;

		lastentry = entry; /* which entry we tried to generate last */
		if (lastentry) {
			list_for_each_entry(entry, &crtlist->ord_entries, by_crtlist) {
				if (entry == lastentry) /* last entry we tried to generate, no need to go further */
					break;

				list_for_each_entry_safe(inst, s_inst, &entry->ckch_inst, by_crtlist_entry) {

					/* this was not generated for this bind_conf, skip */
					if (inst->bind_conf != bind_conf)
						continue;

					/* free the sni_ctx and instance */
					ckch_inst_free(inst);
				}
			}
		}
		free(bind_conf_node);
	}
	return cfgerr;
}
2020-03-16 09:45:55 -04:00
/* Returns a set of ERR_* flags possibly with an error in <err>. */
2024-01-12 11:32:48 -05:00
int ssl_sock_load_cert ( char * path , struct bind_conf * bind_conf , int is_default , char * * err )
2020-03-16 09:45:55 -04:00
{
struct stat buf ;
int cfgerr = 0 ;
struct ckch_store * ckchs ;
2020-03-09 11:48:43 -04:00
struct ckch_inst * ckch_inst = NULL ;
2020-11-20 09:36:13 -05:00
int found = 0 ; /* did we found a file to load ? */
2024-01-10 10:07:17 -05:00
/* if the SNI trees were empty the first "crt" become a default certificate,
* it can be applied on multiple certificates if it ' s a bundle */
2024-01-12 11:32:48 -05:00
if ( is_default = = 0 ) {
if ( eb_is_empty ( & bind_conf - > sni_ctx ) & & eb_is_empty ( & bind_conf - > sni_w_ctx ) )
is_default = 1 ;
}
2020-03-16 09:45:55 -04:00
if ( ( ckchs = ckchs_lookup ( path ) ) ) {
/* we found the ckchs in the tree, we can use it directly */
2024-01-10 10:07:17 -05:00
cfgerr | = ssl_sock_load_ckchs ( path , ckchs , bind_conf , NULL , NULL , 0 , is_default , & ckch_inst , err ) ;
2024-03-25 11:50:24 -04:00
2024-03-25 11:50:25 -04:00
/* The ckch_store might have been created through a crt-list
* line so we must check that the ocsp - update modes are still
* compatible between the global mode and the explicit one from
* the crt - list . */
cfgerr | = ocsp_update_check_cfg_consistency ( ckchs , NULL , path , err ) ;
2024-03-25 11:50:24 -04:00
2020-11-20 09:36:13 -05:00
found + + ;
} else if ( stat ( path , & buf ) = = 0 ) {
found + + ;
2020-03-16 09:45:55 -04:00
if ( S_ISDIR ( buf . st_mode ) = = 0 ) {
2020-09-16 10:08:08 -04:00
ckchs = ckchs_load_cert_file ( path , err ) ;
2020-03-16 09:45:55 -04:00
if ( ! ckchs )
2020-11-20 09:36:13 -05:00
cfgerr | = ERR_ALERT | ERR_FATAL ;
2024-01-10 10:07:17 -05:00
cfgerr | = ssl_sock_load_ckchs ( path , ckchs , bind_conf , NULL , NULL , 0 , is_default , & ckch_inst , err ) ;
2020-03-16 09:45:55 -04:00
} else {
2020-11-20 09:36:13 -05:00
cfgerr | = ssl_sock_load_cert_list_file ( path , 1 , bind_conf , bind_conf - > frontend , err ) ;
2020-03-16 09:45:55 -04:00
}
} else {
/* stat failed, could be a bundle */
if ( global_ssl . extra_files & SSL_GF_BUNDLE ) {
2020-09-16 08:48:52 -04:00
char fp [ MAXPATHLEN + 1 ] = { 0 } ;
int n = 0 ;
/* Load all possible certs and keys in separate ckch_store */
for ( n = 0 ; n < SSL_SOCK_NUM_KEYTYPES ; n + + ) {
struct stat buf ;
int ret ;
ret = snprintf ( fp , sizeof ( fp ) , " %s.%s " , path , SSL_SOCK_KEYTYPE_NAMES [ n ] ) ;
if ( ret > sizeof ( fp ) )
continue ;
if ( ( ckchs = ckchs_lookup ( fp ) ) ) {
2024-01-10 10:07:17 -05:00
cfgerr | = ssl_sock_load_ckchs ( fp , ckchs , bind_conf , NULL , NULL , 0 , is_default , & ckch_inst , err ) ;
2020-11-20 09:36:13 -05:00
found + + ;
2020-09-16 08:48:52 -04:00
} else {
if ( stat ( fp , & buf ) = = 0 ) {
2020-11-20 09:36:13 -05:00
found + + ;
2020-09-16 10:08:08 -04:00
ckchs = ckchs_load_cert_file ( fp , err ) ;
2020-09-16 08:48:52 -04:00
if ( ! ckchs )
2020-11-20 09:36:13 -05:00
cfgerr | = ERR_ALERT | ERR_FATAL ;
2024-01-10 10:07:17 -05:00
cfgerr | = ssl_sock_load_ckchs ( fp , ckchs , bind_conf , NULL , NULL , 0 , is_default , & ckch_inst , err ) ;
2020-09-16 08:48:52 -04:00
}
}
}
2020-12-04 09:45:02 -05:00
# if HA_OPENSSL_VERSION_NUMBER < 0x10101000L
if ( found ) {
memprintf ( err , " %sCan't load '%s'. Loading a multi certificates bundle requires OpenSSL >= 1.1.1 \n " ,
err & & * err ? * err : " " , path ) ;
cfgerr | = ERR_ALERT | ERR_FATAL ;
}
# endif
2020-03-16 09:45:55 -04:00
}
}
2020-11-20 09:36:13 -05:00
if ( ! found ) {
memprintf ( err , " %sunable to stat SSL certificate from file '%s' : %s. \n " ,
err & & * err ? * err : " " , path , strerror ( errno ) ) ;
cfgerr | = ERR_ALERT | ERR_FATAL ;
}
2020-03-16 09:45:55 -04:00
return cfgerr ;
}
2021-01-25 11:19:43 -05:00
/* Create a full ssl context and ckch instance that will be used for a specific
* backend server ( server configuration line ) .
* Returns a set of ERR_ * flags possibly with an error in < err > .
*/
2021-05-21 10:22:11 -04:00
int ssl_sock_load_srv_cert ( char * path , struct server * server , int create_if_none , char * * err )
2021-01-25 11:19:43 -05:00
{
struct stat buf ;
int cfgerr = 0 ;
struct ckch_store * ckchs ;
int found = 0 ; /* did we found a file to load ? */
if ( ( ckchs = ckchs_lookup ( path ) ) ) {
/* we found the ckchs in the tree, we can use it directly */
2021-01-26 06:01:46 -05:00
cfgerr | = ssl_sock_load_srv_ckchs ( path , ckchs , server , & server - > ssl_ctx . inst , err ) ;
2021-01-25 11:19:43 -05:00
found + + ;
2021-05-21 10:22:11 -04:00
} else {
if ( ! create_if_none ) {
memprintf ( err , " %sunable to stat SSL certificate '%s'. \n " ,
err & & * err ? * err : " " , path ) ;
cfgerr | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
if ( stat ( path , & buf ) = = 0 ) {
/* We do not manage directories on backend side. */
if ( S_ISDIR ( buf . st_mode ) = = 0 ) {
+ + found ;
ckchs = ckchs_load_cert_file ( path , err ) ;
if ( ! ckchs )
cfgerr | = ERR_ALERT | ERR_FATAL ;
cfgerr | = ssl_sock_load_srv_ckchs ( path , ckchs , server , & server - > ssl_ctx . inst , err ) ;
}
2021-01-25 11:19:43 -05:00
}
}
if ( ! found ) {
memprintf ( err , " %sunable to stat SSL certificate from file '%s' : %s. \n " ,
err & & * err ? * err : " " , path , strerror ( errno ) ) ;
cfgerr | = ERR_ALERT | ERR_FATAL ;
}
2021-05-21 10:22:11 -04:00
out :
2021-01-25 11:19:43 -05:00
return cfgerr ;
}
2017-03-06 09:34:44 -05:00
/* Create an initial CTX used to start the SSL connection before switchctx.
 *
 * Builds the SSL_CTX stored in bind_conf->initial_ctx: sets the base option
 * and mode bitmasks, resolves the configured ssl-min-ver/ssl-max-ver range
 * against the versions this OpenSSL build supports (warning on non-contiguous
 * ranges), and installs the ClientHello/SNI switching callbacks appropriate
 * for the detected SSL library flavor.
 * Returns the number of fatal configuration errors (0 on success).
 */
static int
ssl_sock_initial_ctx(struct bind_conf *bind_conf)
{
	SSL_CTX *ctx = NULL;
	long options =
		SSL_OP_ALL | /* all known workarounds for bugs */
		SSL_OP_NO_SSLv2 |
		SSL_OP_NO_COMPRESSION |
		SSL_OP_SINGLE_DH_USE |
		SSL_OP_SINGLE_ECDH_USE |
		SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION |
		SSL_OP_PRIORITIZE_CHACHA |
		SSL_OP_CIPHER_SERVER_PREFERENCE;
	long mode =
		SSL_MODE_ENABLE_PARTIAL_WRITE |
		SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |
		SSL_MODE_RELEASE_BUFFERS |
		SSL_MODE_SMALL_BUFFERS;
	struct tls_version_filter *conf_ssl_methods = &bind_conf->ssl_conf.ssl_methods;
	int i, min, max, hole;
	/* one bit per TLS version; a set bit means "disabled" */
	int flags = MC_SSL_O_ALL;
	int cfgerr = 0;
	const int default_min_ver = CONF_TLSV12;

	ctx = SSL_CTX_new(SSLv23_server_method());
	bind_conf->initial_ctx = ctx;
	if (global_ssl.security_level > -1)
		SSL_CTX_set_security_level(ctx, global_ssl.security_level);

	/* the legacy no-sslv3/no-tlsv1x flags are mutually exclusive with the
	 * explicit ssl-min-ver/ssl-max-ver range; the range wins */
	if (conf_ssl_methods->flags && (conf_ssl_methods->min || conf_ssl_methods->max))
		ha_warning("Proxy '%s': no-sslv3/no-tlsv1x are ignored for bind '%s' at [%s:%d]. "
			   "Use only 'ssl-min-ver' and 'ssl-max-ver' to fix.\n",
			   bind_conf->frontend->id, bind_conf->arg, bind_conf->file, bind_conf->line);
	else
		flags = conf_ssl_methods->flags;

	min = conf_ssl_methods->min;
	max = conf_ssl_methods->max;

	/* default minimum is TLSV12, */
	if (!min) {
		if (!max || (max >= default_min_ver)) {
			min = default_min_ver;
		} else {
			ha_warning("Proxy '%s': Ambiguous configuration for bind '%s' at [%s:%d]: the ssl-min-ver value is not configured and the ssl-max-ver value is lower than the default ssl-min-ver value (%s). "
				   "Setting the ssl-min-ver to %s. Use 'ssl-min-ver' to fix this.\n",
				   bind_conf->frontend->id, bind_conf->arg, bind_conf->file, bind_conf->line, methodVersions[default_min_ver].name, methodVersions[max].name);
			min = max;
		}
	}
	/* Real min and max should be determinate with configuration and openssl's capabilities */
	/* mark everything below min and above max as disabled in <flags> */
	if (min)
		flags |= (methodVersions[min].flag - 1);
	if (max)
		flags |= ~((methodVersions[max].flag << 1) - 1);

	/* find min, max and holes */
	min = max = CONF_TLSV_NONE;
	hole = 0;
	for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++)
		/* version is in openssl && version not disable in configuration */
		if (methodVersions[i].option && !(flags & methodVersions[i].flag)) {
			if (min) {
				if (hole) {
					/* an enabled version follows a disabled one:
					 * the range is not contiguous */
					ha_warning("Proxy '%s': SSL/TLS versions range not contiguous for bind '%s' at [%s:%d]. "
						   "Hole find for %s. Use only 'ssl-min-ver' and 'ssl-max-ver' to fix.\n",
						   bind_conf->frontend->id, bind_conf->arg, bind_conf->file, bind_conf->line,
						   methodVersions[hole].name);
					hole = 0;
				}
				max = i;
			}
			else {
				min = max = i;
			}
		}
		else {
			if (min)
				hole = i;
		}
	if (!min) {
		ha_alert("Proxy '%s': all SSL/TLS versions are disabled for bind '%s' at [%s:%d].\n",
			 bind_conf->frontend->id, bind_conf->arg, bind_conf->file, bind_conf->line);
		cfgerr += 1;
	}
	/* save real min/max in bind_conf */
	conf_ssl_methods->min = min;
	conf_ssl_methods->max = max;

#if (HA_OPENSSL_VERSION_NUMBER < 0x1010000fL)
	/* Keep force-xxx implementation as it is in older haproxy. It's a
	   precautionary measure to avoid any surprise with older openssl version. */
	if (min == max)
		methodVersions[min].ctx_set_version(ctx, SET_SERVER);
	else
		for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++) {
			/* clear every version flags in case SSL_CTX_new()
			 * returns an SSL_CTX with disabled versions */
			SSL_CTX_clear_options(ctx, methodVersions[i].option);

			if (flags & methodVersions[i].flag)
				options |= methodVersions[i].option;

		}
#else   /* openssl >= 1.1.0 */
	/* set the max_version is required to cap TLS version or activate new TLS (v1.3) */
	methodVersions[min].ctx_set_version(ctx, SET_MIN);
	methodVersions[max].ctx_set_version(ctx, SET_MAX);
#endif
	if (bind_conf->ssl_options & BC_SSL_O_NO_TLS_TICKETS)
		options |= SSL_OP_NO_TICKET;
	if (bind_conf->ssl_options & BC_SSL_O_PREF_CLIE_CIPH)
		options &= ~SSL_OP_CIPHER_SERVER_PREFERENCE;

#ifdef SSL_OP_NO_RENEGOTIATION
	options |= SSL_OP_NO_RENEGOTIATION;
#endif

	SSL_CTX_set_options(ctx, options);

#ifdef SSL_MODE_ASYNC
	if (global_ssl.async)
		mode |= SSL_MODE_ASYNC;
#endif
	SSL_CTX_set_mode(ctx, mode);
	if (global_ssl.life_time)
		SSL_CTX_set_timeout(ctx, global_ssl.life_time);

	/* install the certificate-switching callback matching the SSL library:
	 * BoringSSL, OpenSSL >= 1.1.1 (client hello cb), wolfSSL, or the legacy
	 * servername callback */
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
#ifdef OPENSSL_IS_BORINGSSL
	SSL_CTX_set_select_certificate_cb(ctx, ssl_sock_switchctx_cbk);
	SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_err_cbk);
#elif defined(HAVE_SSL_CLIENT_HELLO_CB)
#if defined(SSL_OP_NO_ANTI_REPLAY)
	/* 0-RTT requires the anti-replay protection to be disabled */
	if (bind_conf->ssl_conf.early_data)
		SSL_CTX_set_options(ctx, SSL_OP_NO_ANTI_REPLAY);
#endif /* !SSL_OP_NO_ANTI_REPLAY */
	SSL_CTX_set_client_hello_cb(ctx, ssl_sock_switchctx_cbk, NULL);
	SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_err_cbk);
#elif defined(USE_OPENSSL_WOLFSSL)
	SSL_CTX_set_cert_cb(ctx, ssl_sock_switchctx_wolfSSL_cbk, bind_conf);
#else
	/* ! OPENSSL_IS_BORINGSSL && ! HAVE_SSL_CLIENT_HELLO_CB */
	SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_cbk);
#endif
	SSL_CTX_set_tlsext_servername_arg(ctx, bind_conf);
#endif /* !SSL_CTRL_SET_TLSEXT_HOSTNAME */
	return cfgerr;
}
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
2023-11-16 11:38:26 -05:00
static inline void sh_ssl_sess_free_blocks ( struct shared_block * first , void * data )
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
{
2023-11-16 11:38:26 -05:00
struct sh_ssl_sess_hdr * sh_ssl_sess = ( struct sh_ssl_sess_hdr * ) first - > data ;
if ( first - > len > 0 )
sh_ssl_sess_tree_delete ( sh_ssl_sess ) ;
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
}
/* return first block from sh_ssl_sess */
static inline struct shared_block * sh_ssl_sess_first_block ( struct sh_ssl_sess_hdr * sh_ssl_sess )
{
2023-04-15 17:39:43 -04:00
return ( struct shared_block * ) ( ( unsigned char * ) sh_ssl_sess - offsetof ( struct shared_block , data ) ) ;
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
}
/* store a session into the cache
* s_id : session id padded with zero to SSL_MAX_SSL_SESSION_ID_LENGTH
* data : asn1 encoded session
* data_len : asn1 encoded session length
* Returns 1 id session was stored ( else 0 )
*/
static int sh_ssl_sess_store ( unsigned char * s_id , unsigned char * data , int data_len )
{
struct shared_block * first ;
struct sh_ssl_sess_hdr * sh_ssl_sess , * oldsh_ssl_sess ;
2018-10-22 11:55:57 -04:00
first = shctx_row_reserve_hot ( ssl_shctx , NULL , data_len + sizeof ( struct sh_ssl_sess_hdr ) ) ;
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
if ( ! first ) {
/* Could not retrieve enough free blocks to store that session */
return 0 ;
}
2023-11-16 11:38:23 -05:00
shctx_wrlock ( ssl_shctx ) ;
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
/* STORE the key in the first elem */
sh_ssl_sess = ( struct sh_ssl_sess_hdr * ) first - > data ;
memcpy ( sh_ssl_sess - > key_data , s_id , SSL_MAX_SSL_SESSION_ID_LENGTH ) ;
first - > len = sizeof ( struct sh_ssl_sess_hdr ) ;
/* it returns the already existing node
or current node if none , never returns null */
oldsh_ssl_sess = sh_ssl_sess_tree_insert ( sh_ssl_sess ) ;
if ( oldsh_ssl_sess ! = sh_ssl_sess ) {
/* NOTE: Row couldn't be in use because we lock read & write function */
/* release the reserved row */
2023-01-31 08:12:28 -05:00
first - > len = 0 ; /* the len must be liberated in order not to call the release callback on it */
2023-11-16 11:38:19 -05:00
shctx_row_reattach ( ssl_shctx , first ) ;
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
/* replace the previous session already in the tree */
sh_ssl_sess = oldsh_ssl_sess ;
/* ignore the previous session data, only use the header */
first = sh_ssl_sess_first_block ( sh_ssl_sess ) ;
2023-11-16 11:38:19 -05:00
shctx_row_detach ( ssl_shctx , first ) ;
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
first - > len = sizeof ( struct sh_ssl_sess_hdr ) ;
}
2023-11-16 11:38:14 -05:00
if ( shctx_row_data_append ( ssl_shctx , first , data , data_len ) < 0 ) {
2023-11-16 11:38:19 -05:00
shctx_row_reattach ( ssl_shctx , first ) ;
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
return 0 ;
2018-01-03 13:15:51 -05:00
}
2023-11-16 11:38:19 -05:00
shctx_row_reattach ( ssl_shctx , first ) ;
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
2023-11-16 11:38:23 -05:00
shctx_wrunlock ( ssl_shctx ) ;
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
return 1 ;
}
2017-10-30 14:36:36 -04:00
2017-11-03 08:43:35 -04:00
/* SSL callback used when a new session is created while connecting to a server */
static int ssl_sess_new_srv_cb(SSL *ssl, SSL_SESSION *sess)
{
	struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
	struct server *s;
	uint prev_tid;

	s = __objt_server(conn->target);

	/* RWLOCK: only read-lock the SSL cache even when writing in it, because
	 * there is one cache slot per thread and the lock only prevents the CLI
	 * from flushing it from another thread. However, we also write-lock our
	 * own session element while updating it so that no other thread reads
	 * it while we're copying or releasing it.
	 */

	if (!(s->ssl_ctx.options & SRV_SSL_O_NO_REUSE)) {
		int ser_len;
		unsigned char *buf;
		const char *name;

		/* storage required for the ASN1-serialized session */
		ser_len = i2d_SSL_SESSION(sess, NULL);
		name = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);

		HA_RWLOCK_RDLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.lock);

		buf = s->ssl_ctx.reused_sess[tid].ptr;

		/* we're about to update the possibly shared session entry */
		HA_RWLOCK_WRLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.reused_sess[tid].sess_lock);

		if (!buf || s->ssl_ctx.reused_sess[tid].allocated_size < ser_len) {
			/* missing or too small: (re)allocate the storage */
			ser_len = (ser_len + 7) & -8; /* round up to 8 bytes */
			buf = realloc(buf, ser_len);
			if (!buf)
				free(s->ssl_ctx.reused_sess[tid].ptr);
			s->ssl_ctx.reused_sess[tid].ptr = buf;
			s->ssl_ctx.reused_sess[tid].allocated_size = ser_len;
		}

		if (buf) {
			/* serialize the new session into buf (which advances)
			 * and record the resulting size; it is guaranteed to
			 * equal the length returned above.
			 */
			s->ssl_ctx.reused_sess[tid].size = i2d_SSL_SESSION(sess, &buf);
		}

		/* done updating the session.
		 * Now advertise or retract this entry as a valid one:
		 *  - if no entry is advertised and we have one, share it
		 *  - if ours was advertised and we lost it, clear it
		 */
		prev_tid = HA_ATOMIC_LOAD(&s->ssl_ctx.last_ssl_sess_tid); // 0=none, >0 = tid + 1
		if (!s->ssl_ctx.reused_sess[tid].ptr && prev_tid == tid + 1)
			HA_ATOMIC_CAS(&s->ssl_ctx.last_ssl_sess_tid, &prev_tid, 0); // no more valid
		else if (s->ssl_ctx.reused_sess[tid].ptr && !prev_tid)
			HA_ATOMIC_CAS(&s->ssl_ctx.last_ssl_sess_tid, &prev_tid, tid + 1);

		/* refresh the stored SNI: drop the old one when the new one
		 * differs or disappeared, then store the new one if any.
		 */
		if (s->ssl_ctx.reused_sess[tid].sni &&
		    (!name || strcmp(s->ssl_ctx.reused_sess[tid].sni, name) != 0))
			ha_free(&s->ssl_ctx.reused_sess[tid].sni);

		if (name && !s->ssl_ctx.reused_sess[tid].sni)
			s->ssl_ctx.reused_sess[tid].sni = strdup(name);

		HA_RWLOCK_WRUNLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.reused_sess[tid].sess_lock);
		HA_RWLOCK_RDUNLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.lock);
	} else {
		/* reuse disabled: make sure no stale session remains stored */
		HA_RWLOCK_RDLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.lock);

		if (s->ssl_ctx.reused_sess[tid].ptr) {
			HA_RWLOCK_WRLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.reused_sess[tid].sess_lock);
			ha_free(&s->ssl_ctx.reused_sess[tid].ptr);
			HA_RWLOCK_WRUNLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.reused_sess[tid].sess_lock);
		}

		prev_tid = HA_ATOMIC_LOAD(&s->ssl_ctx.last_ssl_sess_tid); // 0=none, >0 = tid + 1
		if (prev_tid == tid + 1)
			HA_ATOMIC_CAS(&s->ssl_ctx.last_ssl_sess_tid, &prev_tid, 0); // no more valid

		HA_RWLOCK_RDUNLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.lock);
	}

	return 0;
}
2017-11-16 11:42:52 -05:00
2017-10-30 14:36:36 -04:00
/* SSL callback used on new session creation */
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
int sh_ssl_sess_new_cb ( SSL * ssl , SSL_SESSION * sess )
2017-10-30 14:36:36 -04:00
{
unsigned char encsess [ SHSESS_MAX_DATA_LEN ] ; /* encoded session */
unsigned char encid [ SSL_MAX_SSL_SESSION_ID_LENGTH ] ; /* encoded id */
unsigned char * p ;
int data_len ;
2019-10-08 12:27:37 -04:00
unsigned int sid_length ;
2017-10-30 14:36:36 -04:00
const unsigned char * sid_data ;
/* Session id is already stored in to key and session id is known
2020-03-10 03:06:11 -04:00
* so we don ' t store it to keep size .
2019-10-08 12:27:37 -04:00
* note : SSL_SESSION_set1_id is using
* a memcpy so we need to use a different pointer
* than sid_data or sid_ctx_data to avoid valgrind
* complaining .
2017-10-30 14:36:36 -04:00
*/
sid_data = SSL_SESSION_get_id ( sess , & sid_length ) ;
2019-10-08 12:27:37 -04:00
/* copy value in an other buffer */
memcpy ( encid , sid_data , sid_length ) ;
/* pad with 0 */
if ( sid_length < SSL_MAX_SSL_SESSION_ID_LENGTH )
memset ( encid + sid_length , 0 , SSL_MAX_SSL_SESSION_ID_LENGTH - sid_length ) ;
/* force length to zero to avoid ASN1 encoding */
SSL_SESSION_set1_id ( sess , encid , 0 ) ;
/* force length to zero to avoid ASN1 encoding */
SSL_SESSION_set1_id_context ( sess , ( const unsigned char * ) SHCTX_APPNAME , 0 ) ;
2017-10-30 14:36:36 -04:00
/* check if buffer is large enough for the ASN1 encoded session */
data_len = i2d_SSL_SESSION ( sess , NULL ) ;
if ( data_len > SHSESS_MAX_DATA_LEN )
goto err ;
p = encsess ;
/* process ASN1 session encoding before the lock */
i2d_SSL_SESSION ( sess , & p ) ;
/* store to cache */
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
sh_ssl_sess_store ( encid , encsess , data_len ) ;
2017-10-30 14:36:36 -04:00
err :
/* reset original length values */
2019-10-08 12:27:37 -04:00
SSL_SESSION_set1_id ( sess , encid , sid_length ) ;
SSL_SESSION_set1_id_context ( sess , ( const unsigned char * ) SHCTX_APPNAME , strlen ( SHCTX_APPNAME ) ) ;
2017-10-30 14:36:36 -04:00
return 0 ; /* do not increment session reference count */
}
/* SSL callback used on lookup an existing session cause none found in internal cache */
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
SSL_SESSION * sh_ssl_sess_get_cb ( SSL * ssl , __OPENSSL_110_CONST__ unsigned char * key , int key_len , int * do_copy )
2017-10-30 14:36:36 -04:00
{
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
struct sh_ssl_sess_hdr * sh_ssl_sess ;
2017-10-30 14:36:36 -04:00
unsigned char data [ SHSESS_MAX_DATA_LEN ] , * p ;
unsigned char tmpkey [ SSL_MAX_SSL_SESSION_ID_LENGTH ] ;
SSL_SESSION * sess ;
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
struct shared_block * first ;
2017-10-30 14:36:36 -04:00
2021-06-15 10:39:22 -04:00
_HA_ATOMIC_INC ( & global . shctx_lookups ) ;
2017-10-30 14:36:36 -04:00
/* allow the session to be freed automatically by openssl */
* do_copy = 0 ;
/* tree key is zeros padded sessionid */
if ( key_len < SSL_MAX_SSL_SESSION_ID_LENGTH ) {
memcpy ( tmpkey , key , key_len ) ;
memset ( tmpkey + key_len , 0 , SSL_MAX_SSL_SESSION_ID_LENGTH - key_len ) ;
key = tmpkey ;
}
/* lock cache */
2023-11-16 11:38:21 -05:00
shctx_wrlock ( ssl_shctx ) ;
2017-10-30 14:36:36 -04:00
/* lookup for session */
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
sh_ssl_sess = sh_ssl_sess_tree_lookup ( key ) ;
if ( ! sh_ssl_sess ) {
2017-10-30 14:36:36 -04:00
/* no session found: unlock cache and exit */
2023-11-16 11:38:21 -05:00
shctx_wrunlock ( ssl_shctx ) ;
2021-06-15 10:39:22 -04:00
_HA_ATOMIC_INC ( & global . shctx_misses ) ;
2017-10-30 14:36:36 -04:00
return NULL ;
}
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
/* sh_ssl_sess (shared_block->data) is at the end of shared_block */
first = sh_ssl_sess_first_block ( sh_ssl_sess ) ;
shctx_row_data_get ( ssl_shctx , first , data , sizeof ( struct sh_ssl_sess_hdr ) , first - > len - sizeof ( struct sh_ssl_sess_hdr ) ) ;
2017-10-30 14:36:36 -04:00
2023-11-16 11:38:21 -05:00
shctx_wrunlock ( ssl_shctx ) ;
2017-10-30 14:36:36 -04:00
/* decode ASN1 session */
p = data ;
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
sess = d2i_SSL_SESSION ( NULL , ( const unsigned char * * ) & p , first - > len - sizeof ( struct sh_ssl_sess_hdr ) ) ;
2017-10-30 14:36:36 -04:00
/* Reset session id and session id contenxt */
if ( sess ) {
SSL_SESSION_set1_id ( sess , key , key_len ) ;
SSL_SESSION_set1_id_context ( sess , ( const unsigned char * ) SHCTX_APPNAME , strlen ( SHCTX_APPNAME ) ) ;
}
return sess ;
}
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
2017-10-30 14:36:36 -04:00
/* SSL callback used to signal session is no more used in internal cache */
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
void sh_ssl_sess_remove_cb ( SSL_CTX * ctx , SSL_SESSION * sess )
2017-10-30 14:36:36 -04:00
{
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
struct sh_ssl_sess_hdr * sh_ssl_sess ;
2017-10-30 14:36:36 -04:00
unsigned char tmpkey [ SSL_MAX_SSL_SESSION_ID_LENGTH ] ;
unsigned int sid_length ;
const unsigned char * sid_data ;
( void ) ctx ;
sid_data = SSL_SESSION_get_id ( sess , & sid_length ) ;
/* tree key is zeros padded sessionid */
if ( sid_length < SSL_MAX_SSL_SESSION_ID_LENGTH ) {
memcpy ( tmpkey , sid_data , sid_length ) ;
memset ( tmpkey + sid_length , 0 , SSL_MAX_SSL_SESSION_ID_LENGTH - sid_length ) ;
sid_data = tmpkey ;
}
2023-11-16 11:38:21 -05:00
shctx_wrlock ( ssl_shctx ) ;
2017-10-30 14:36:36 -04:00
/* lookup for session */
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
sh_ssl_sess = sh_ssl_sess_tree_lookup ( sid_data ) ;
if ( sh_ssl_sess ) {
2017-10-30 14:36:36 -04:00
/* free session */
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
sh_ssl_sess_tree_delete ( sh_ssl_sess ) ;
2017-10-30 14:36:36 -04:00
}
/* unlock cache */
2023-11-16 11:38:21 -05:00
shctx_wrunlock ( ssl_shctx ) ;
2017-10-30 14:36:36 -04:00
}
/* Set session cache mode to server and disable openssl internal cache.
* Set shared cache callbacks on an ssl context .
* Shared context MUST be firstly initialized */
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
void ssl_set_shctx ( SSL_CTX * ctx )
2017-10-30 14:36:36 -04:00
{
SSL_CTX_set_session_id_context ( ctx , ( const unsigned char * ) SHCTX_APPNAME , strlen ( SHCTX_APPNAME ) ) ;
if ( ! ssl_shctx ) {
SSL_CTX_set_session_cache_mode ( ctx , SSL_SESS_CACHE_OFF ) ;
return ;
}
SSL_CTX_set_session_cache_mode ( ctx , SSL_SESS_CACHE_SERVER |
SSL_SESS_CACHE_NO_INTERNAL |
SSL_SESS_CACHE_NO_AUTO_CLEAR ) ;
/* Set callbacks */
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
SSL_CTX_sess_set_new_cb ( ctx , sh_ssl_sess_new_cb ) ;
SSL_CTX_sess_set_get_cb ( ctx , sh_ssl_sess_get_cb ) ;
SSL_CTX_sess_set_remove_cb ( ctx , sh_ssl_sess_remove_cb ) ;
2017-10-30 14:36:36 -04:00
}
WIP/MINOR: ssl: add sample fetches for keylog in frontend
OpenSSL 1.1.1 provides a callback registering function
SSL_CTX_set_keylog_callback, which allows one to receive a string
containing the keys to deciphers TLSv1.3.
Unfortunately it is not possible to store this data in binary form and
we can only get this information using the callback. Which means that we
need to store it until the connection is closed.
This patches add 2 pools, the first one, pool_head_ssl_keylog is used to
store a struct ssl_keylog which will be inserted as a ex_data in a SSL *.
The second one is pool_head_ssl_keylog_str which will be used to store
the hexadecimal strings.
To enable the capture of the keys, you need to set "tune.ssl.keylog on"
in your configuration.
The following fetches were implemented:
ssl_fc_client_early_traffic_secret,
ssl_fc_client_handshake_traffic_secret,
ssl_fc_server_handshake_traffic_secret,
ssl_fc_client_traffic_secret_0,
ssl_fc_server_traffic_secret_0,
ssl_fc_exporter_secret,
ssl_fc_early_exporter_secret
2020-07-06 05:41:30 -04:00
/*
 * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
 *
 * Each keylog line has the format:
 *   <Label> <space> <ClientRandom> <space> <Secret>
 * We only need to copy the secret as there is a sample fetch for the
 * ClientRandom.
 */
#ifdef HAVE_SSL_KEYLOG
/* OpenSSL keylog callback: stores the hexadecimal secret of the current line
 * into the ssl_keylog structure attached to <ssl>, keyed by the line's label.
 * Each field is only filled once; a duplicate label or an unknown one frees
 * the freshly allocated string and ignores the line.
 */
void SSL_CTX_keylog(const SSL *ssl, const char *line)
{
	struct ssl_keylog *keylog;
	char *lastarg = NULL;
	char *dst = NULL;

#ifdef USE_QUIC_OPENSSL_COMPAT
	/* the QUIC compatibility layer also needs the keylog events */
	quic_tls_compat_keylog_callback(ssl, line);
#endif
	keylog = SSL_get_ex_data(ssl, ssl_keylog_index);
	if (!keylog)
		return;

	/* the secret is the last space-separated field of the line; reject
	 * lines without a space or with nothing after the last space.
	 * (Previous check "++lastarg == NULL" could never be true.)
	 */
	lastarg = strrchr(line, ' ');
	if (lastarg == NULL || *++lastarg == '\0')
		return;

	dst = pool_alloc(pool_head_ssl_keylog_str);
	if (!dst)
		return;

	strncpy(dst, lastarg, SSL_KEYLOG_MAX_SECRET_SIZE-1);
	dst[SSL_KEYLOG_MAX_SECRET_SIZE-1] = '\0';

	/* dispatch on the line's label; each destination is write-once */
	if (strncmp(line, "CLIENT_RANDOM", strlen("CLIENT_RANDOM")) == 0) {
		if (keylog->client_random)
			goto error;
		keylog->client_random = dst;

	} else if (strncmp(line, "CLIENT_EARLY_TRAFFIC_SECRET", strlen("CLIENT_EARLY_TRAFFIC_SECRET")) == 0) {
		if (keylog->client_early_traffic_secret)
			goto error;
		keylog->client_early_traffic_secret = dst;

	} else if (strncmp(line, "CLIENT_HANDSHAKE_TRAFFIC_SECRET", strlen("CLIENT_HANDSHAKE_TRAFFIC_SECRET")) == 0) {
		if (keylog->client_handshake_traffic_secret)
			goto error;
		keylog->client_handshake_traffic_secret = dst;

	} else if (strncmp(line, "SERVER_HANDSHAKE_TRAFFIC_SECRET", strlen("SERVER_HANDSHAKE_TRAFFIC_SECRET")) == 0) {
		if (keylog->server_handshake_traffic_secret)
			goto error;
		keylog->server_handshake_traffic_secret = dst;

	} else if (strncmp(line, "CLIENT_TRAFFIC_SECRET_0", strlen("CLIENT_TRAFFIC_SECRET_0")) == 0) {
		if (keylog->client_traffic_secret_0)
			goto error;
		keylog->client_traffic_secret_0 = dst;

	} else if (strncmp(line, "SERVER_TRAFFIC_SECRET_0", strlen("SERVER_TRAFFIC_SECRET_0")) == 0) {
		if (keylog->server_traffic_secret_0)
			goto error;
		keylog->server_traffic_secret_0 = dst;

	} else if (strncmp(line, "EARLY_EXPORTER_SECRET", strlen("EARLY_EXPORTER_SECRET")) == 0) {
		if (keylog->early_exporter_secret)
			goto error;
		keylog->early_exporter_secret = dst;

	} else if (strncmp(line, "EXPORTER_SECRET", strlen("EXPORTER_SECRET")) == 0) {
		if (keylog->exporter_secret)
			goto error;
		keylog->exporter_secret = dst;

	} else {
		goto error;
	}

	return;

error:
	/* duplicate or unknown label: drop the copied secret */
	pool_free(pool_head_ssl_keylog_str, dst);

	return;
}
#endif
2019-11-21 09:48:10 -05:00
/*
* This function applies the SSL configuration on a SSL_CTX
* It returns an error code and fills the < err > buffer
*/
2021-02-19 11:41:55 -05:00
static int ssl_sock_prepare_ctx ( struct bind_conf * bind_conf , struct ssl_bind_conf * ssl_conf , SSL_CTX * ctx , char * * err )
2017-03-03 06:21:32 -05:00
{
struct proxy * curproxy = bind_conf - > frontend ;
int cfgerr = 0 ;
int verify = SSL_VERIFY_NONE ;
2018-01-04 12:55:19 -05:00
struct ssl_bind_conf __maybe_unused * ssl_conf_cur ;
2016-12-29 12:26:15 -05:00
const char * conf_ciphers ;
2020-11-21 04:37:34 -05:00
# ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
2018-09-14 05:14:21 -04:00
const char * conf_ciphersuites ;
# endif
2017-01-09 10:15:54 -05:00
const char * conf_curves = NULL ;
2023-05-02 12:26:46 -04:00
X509_STORE * store = SSL_CTX_get_cert_store ( ctx ) ;
2023-05-04 09:33:55 -04:00
# if defined(SSL_CTX_set1_sigalgs_list)
const char * conf_sigalgs = NULL ;
# endif
2023-05-04 18:05:46 -04:00
# if defined(SSL_CTX_set1_client_sigalgs_list)
const char * conf_client_sigalgs = NULL ;
# endif
2012-09-07 11:30:07 -04:00
2017-05-18 06:46:50 -04:00
if ( ssl_conf ) {
struct tls_version_filter * conf_ssl_methods = & ssl_conf - > ssl_methods ;
int i , min , max ;
int flags = MC_SSL_O_ALL ;
/* Real min and max should be determinate with configuration and openssl's capabilities */
2017-08-09 12:26:20 -04:00
min = conf_ssl_methods - > min ? conf_ssl_methods - > min : bind_conf - > ssl_conf . ssl_methods . min ;
max = conf_ssl_methods - > max ? conf_ssl_methods - > max : bind_conf - > ssl_conf . ssl_methods . max ;
2017-05-18 06:46:50 -04:00
if ( min )
flags | = ( methodVersions [ min ] . flag - 1 ) ;
if ( max )
flags | = ~ ( ( methodVersions [ max ] . flag < < 1 ) - 1 ) ;
min = max = CONF_TLSV_NONE ;
for ( i = CONF_TLSV_MIN ; i < = CONF_TLSV_MAX ; i + + )
if ( methodVersions [ i ] . option & & ! ( flags & methodVersions [ i ] . flag ) ) {
if ( min )
max = i ;
else
min = max = i ;
}
/* save real min/max */
conf_ssl_methods - > min = min ;
conf_ssl_methods - > max = max ;
if ( ! min ) {
2019-11-23 17:45:10 -05:00
memprintf ( err , " %sProxy '%s': all SSL/TLS versions are disabled for bind '%s' at [%s:%d]. \n " ,
err & & * err ? * err : " " , bind_conf - > frontend - > id , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
2019-11-21 09:48:10 -05:00
cfgerr | = ERR_ALERT | ERR_FATAL ;
2017-05-18 06:46:50 -04:00
}
}
2016-12-29 12:26:15 -05:00
switch ( ( ssl_conf & & ssl_conf - > verify ) ? ssl_conf - > verify : bind_conf - > ssl_conf . verify ) {
2014-01-29 06:24:34 -05:00
case SSL_SOCK_VERIFY_NONE :
verify = SSL_VERIFY_NONE ;
break ;
case SSL_SOCK_VERIFY_OPTIONAL :
verify = SSL_VERIFY_PEER ;
break ;
case SSL_SOCK_VERIFY_REQUIRED :
verify = SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT ;
break ;
}
SSL_CTX_set_verify ( ctx , verify , ssl_sock_bind_verifycbk ) ;
if ( verify & SSL_VERIFY_PEER ) {
2016-12-29 12:26:15 -05:00
char * ca_file = ( ssl_conf & & ssl_conf - > ca_file ) ? ssl_conf - > ca_file : bind_conf - > ssl_conf . ca_file ;
2019-12-16 10:39:17 -05:00
char * ca_verify_file = ( ssl_conf & & ssl_conf - > ca_verify_file ) ? ssl_conf - > ca_verify_file : bind_conf - > ssl_conf . ca_verify_file ;
2016-12-29 12:26:15 -05:00
char * crl_file = ( ssl_conf & & ssl_conf - > crl_file ) ? ssl_conf - > crl_file : bind_conf - > ssl_conf . crl_file ;
2019-12-16 10:39:17 -05:00
if ( ca_file | | ca_verify_file ) {
2019-10-24 05:32:47 -04:00
/* set CAfile to verify */
2019-12-16 10:39:17 -05:00
if ( ca_file & & ! ssl_set_verify_locations_file ( ctx , ca_file ) ) {
2019-10-24 05:32:47 -04:00
memprintf ( err , " %sProxy '%s': unable to set CA file '%s' for bind '%s' at [%s:%d]. \n " ,
2019-11-23 17:45:10 -05:00
err & & * err ? * err : " " , curproxy - > id , ca_file , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
2019-11-21 09:48:10 -05:00
cfgerr | = ERR_ALERT | ERR_FATAL ;
2012-09-20 12:23:56 -04:00
}
2019-12-16 10:39:17 -05:00
if ( ca_verify_file & & ! ssl_set_verify_locations_file ( ctx , ca_verify_file ) ) {
memprintf ( err , " %sProxy '%s': unable to set CA-no-names file '%s' for bind '%s' at [%s:%d]. \n " ,
err & & * err ? * err : " " , curproxy - > id , ca_verify_file , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
cfgerr | = ERR_ALERT | ERR_FATAL ;
}
if ( ca_file & & ! ( ( ssl_conf & & ssl_conf - > no_ca_names ) | | bind_conf - > ssl_conf . no_ca_names ) ) {
2017-07-28 09:01:05 -04:00
/* set CA names for client cert request, function returns void */
2019-10-24 12:08:51 -04:00
SSL_CTX_set_client_CA_list ( ctx , SSL_dup_CA_list ( ssl_get_client_ca_file ( ca_file ) ) ) ;
2017-07-28 09:01:05 -04:00
}
2023-05-02 12:26:46 -04:00
# ifdef USE_OPENSSL_WOLFSSL
/* WolfSSL activates CRL checks by default so we need to disable it */
X509_STORE_set_flags ( store , 0 ) ;
# endif
2012-09-20 12:23:56 -04:00
}
2014-01-29 06:24:34 -05:00
else {
2019-11-23 17:45:10 -05:00
memprintf ( err , " %sProxy '%s': verify is enabled but no CA file specified for bind '%s' at [%s:%d]. \n " ,
err & & * err ? * err : " " , curproxy - > id , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
2019-11-21 09:48:10 -05:00
cfgerr | = ERR_ALERT | ERR_FATAL ;
2014-01-29 06:24:34 -05:00
}
2012-10-02 13:25:50 -04:00
# ifdef X509_V_FLAG_CRL_CHECK
2016-12-29 12:26:15 -05:00
if ( crl_file ) {
2012-09-20 12:23:56 -04:00
2019-11-21 13:09:31 -05:00
if ( ! ssl_set_cert_crl_file ( store , crl_file ) ) {
2019-11-23 17:45:10 -05:00
memprintf ( err , " %sProxy '%s': unable to configure CRL file '%s' for bind '%s' at [%s:%d]. \n " ,
err & & * err ? * err : " " , curproxy - > id , crl_file , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
2019-11-21 09:48:10 -05:00
cfgerr | = ERR_ALERT | ERR_FATAL ;
2012-09-20 12:23:56 -04:00
}
2012-10-02 09:20:55 -04:00
else {
X509_STORE_set_flags ( store , X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL ) ;
}
2012-09-20 12:23:56 -04:00
}
2012-10-02 13:25:50 -04:00
# endif
2012-12-14 05:21:13 -05:00
ERR_clear_error ( ) ;
2012-09-20 12:23:56 -04:00
}
2015-02-27 13:56:49 -05:00
# if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
2015-05-09 02:46:00 -04:00
if ( bind_conf - > keys_ref ) {
2022-02-08 11:45:58 -05:00
if ( ! SSL_CTX_set_tlsext_ticket_key_evp_cb ( ctx , ssl_tlsext_ticket_key_cb ) ) {
2019-11-23 17:45:10 -05:00
memprintf ( err , " %sProxy '%s': unable to set callback for TLS ticket validation for bind '%s' at [%s:%d]. \n " ,
err & & * err ? * err : " " , curproxy - > id , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
2019-11-21 09:48:10 -05:00
cfgerr | = ERR_ALERT | ERR_FATAL ;
2015-02-27 13:56:49 -05:00
}
}
# endif
MEDIUM: shctx: separate ssl and shctx
This patch reorganize the shctx API in a generic storage API, separating
the shared SSL session handling from its core.
The shctx API only handles the generic data part, it does not know what
kind of data you use with it.
A shared_context is a storage structure allocated in a shared memory,
allowing its usage in a multithread or a multiprocess context.
The structure use 2 linked list, one containing the available blocks,
and another for the hot locked blocks. At initialization the available
list is filled with <maxblocks> blocks of size <blocksize>. An <extra>
space is initialized outside the list in case you need some specific
storage.
+-----------------------+--------+--------+--------+--------+----
| struct shared_context | extra | block1 | block2 | block3 | ...
+-----------------------+--------+--------+--------+--------+----
<-------- maxblocks --------->
* blocksize
The API allows to store content on several linked blocks. For example,
if you allocated blocks of 16 bytes, and you want to store an object of
60 bytes, the object will be allocated in a row of 4 blocks.
The API was made for LRU usage, each time you get an object, it pushes
the object at the end of the list. When it needs more space, it discards
The functions name have been renamed in a more logical way, the part
regarding shctx have been prefixed by shctx_ and the functions for the
shared ssl session cache have been prefixed by sh_ssl_sess_.
2017-10-30 15:08:51 -04:00
ssl_set_shctx ( ctx ) ;
2016-12-29 12:26:15 -05:00
conf_ciphers = ( ssl_conf & & ssl_conf - > ciphers ) ? ssl_conf - > ciphers : bind_conf - > ssl_conf . ciphers ;
if ( conf_ciphers & &
! SSL_CTX_set_cipher_list ( ctx , conf_ciphers ) ) {
2019-11-23 17:45:10 -05:00
memprintf ( err , " %sProxy '%s': unable to set SSL cipher list to '%s' for bind '%s' at [%s:%d]. \n " ,
err & & * err ? * err : " " , curproxy - > id , conf_ciphers , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
2019-11-21 09:48:10 -05:00
cfgerr | = ERR_ALERT | ERR_FATAL ;
2012-09-07 11:30:07 -04:00
}
2020-11-21 04:37:34 -05:00
# ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
2018-09-14 05:14:21 -04:00
conf_ciphersuites = ( ssl_conf & & ssl_conf - > ciphersuites ) ? ssl_conf - > ciphersuites : bind_conf - > ssl_conf . ciphersuites ;
if ( conf_ciphersuites & &
! SSL_CTX_set_ciphersuites ( ctx , conf_ciphersuites ) ) {
2019-11-23 17:45:10 -05:00
memprintf ( err , " %sProxy '%s': unable to set TLS 1.3 cipher suites to '%s' for bind '%s' at [%s:%d]. \n " ,
err & & * err ? * err : " " , curproxy - > id , conf_ciphersuites , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
2019-11-21 09:48:10 -05:00
cfgerr | = ERR_ALERT | ERR_FATAL ;
2018-09-14 05:14:21 -04:00
}
# endif
2017-03-03 11:04:14 -05:00
# ifndef OPENSSL_NO_DH
2022-04-12 05:31:55 -04:00
if ( ! local_dh_1024 )
local_dh_1024 = ssl_get_dh_1024 ( ) ;
if ( ! local_dh_2048 )
local_dh_2048 = ssl_get_dh_2048 ( ) ;
if ( ! local_dh_4096 )
local_dh_4096 = ssl_get_dh_4096 ( ) ;
2014-07-15 05:36:40 -04:00
# endif /* OPENSSL_NO_DH */
2012-09-07 11:30:07 -04:00
SSL_CTX_set_info_callback ( ctx , ssl_sock_infocbk ) ;
2021-02-08 06:55:06 -05:00
# ifdef SSL_CTRL_SET_MSG_CALLBACK
2014-04-25 13:05:36 -04:00
SSL_CTX_set_msg_callback ( ctx , ssl_sock_msgcbk ) ;
2014-05-08 16:45:11 -04:00
# endif
2021-06-09 10:46:12 -04:00
# ifdef HAVE_SSL_KEYLOG
2022-11-18 09:00:15 -05:00
/* only activate the keylog callback if it was required to prevent performance loss */
if ( global_ssl . keylog > 0 )
SSL_CTX_set_keylog_callback ( ctx , SSL_CTX_keylog ) ;
WIP/MINOR: ssl: add sample fetches for keylog in frontend
OpenSSL 1.1.1 provides a callback registering function
SSL_CTX_set_keylog_callback, which allows one to receive a string
containing the keys to deciphers TLSv1.3.
Unfortunately it is not possible to store this data in binary form and
we can only get this information using the callback. Which means that we
need to store it until the connection is closed.
This patches add 2 pools, the first one, pool_head_ssl_keylog is used to
store a struct ssl_keylog which will be inserted as a ex_data in a SSL *.
The second one is pool_head_ssl_keylog_str which will be used to store
the hexadecimal strings.
To enable the capture of the keys, you need to set "tune.ssl.keylog on"
in your configuration.
The following fetches were implemented:
ssl_fc_client_early_traffic_secret,
ssl_fc_client_handshake_traffic_secret,
ssl_fc_server_handshake_traffic_secret,
ssl_fc_client_traffic_secret_0,
ssl_fc_server_traffic_secret_0,
ssl_fc_exporter_secret,
ssl_fc_early_exporter_secret
2020-07-06 05:41:30 -04:00
# endif
2014-04-25 13:05:36 -04:00
2018-02-15 07:34:58 -05:00
# if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
2016-12-29 12:26:15 -05:00
ssl_conf_cur = NULL ;
if ( ssl_conf & & ssl_conf - > npn_str )
ssl_conf_cur = ssl_conf ;
else if ( bind_conf - > ssl_conf . npn_str )
ssl_conf_cur = & bind_conf - > ssl_conf ;
if ( ssl_conf_cur )
SSL_CTX_set_next_protos_advertised_cb ( ctx , ssl_sock_advertise_npn_protos , ssl_conf_cur ) ;
2012-10-18 12:57:14 -04:00
# endif
2014-02-13 06:29:42 -05:00
# ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
2016-12-29 12:26:15 -05:00
ssl_conf_cur = NULL ;
if ( ssl_conf & & ssl_conf - > alpn_str )
ssl_conf_cur = ssl_conf ;
else if ( bind_conf - > ssl_conf . alpn_str )
ssl_conf_cur = & bind_conf - > ssl_conf ;
2023-04-19 03:05:49 -04:00
if ( ssl_conf_cur & & ssl_conf_cur - > alpn_len )
2016-12-29 12:26:15 -05:00
SSL_CTX_set_alpn_select_cb ( ctx , ssl_sock_advertise_alpn_protos , ssl_conf_cur ) ;
2013-04-01 20:30:41 -04:00
# endif
2020-11-03 14:39:07 -05:00
# if defined(SSL_CTX_set1_curves_list)
2017-01-09 10:15:54 -05:00
conf_curves = ( ssl_conf & & ssl_conf - > curves ) ? ssl_conf - > curves : bind_conf - > ssl_conf . curves ;
if ( conf_curves ) {
if ( ! SSL_CTX_set1_curves_list ( ctx , conf_curves ) ) {
2019-11-23 17:45:10 -05:00
memprintf ( err , " %sProxy '%s': unable to set SSL curves list to '%s' for bind '%s' at [%s:%d]. \n " ,
err & & * err ? * err : " " , curproxy - > id , conf_curves , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
2019-11-21 09:48:10 -05:00
cfgerr | = ERR_ALERT | ERR_FATAL ;
2017-01-09 10:15:54 -05:00
}
2017-03-20 06:11:49 -04:00
( void ) SSL_CTX_set_ecdh_auto ( ctx , 1 ) ;
2017-01-09 10:15:54 -05:00
}
2022-02-08 11:45:54 -05:00
# endif /* defined(SSL_CTX_set1_curves_list) */
2017-01-09 10:15:54 -05:00
if ( ! conf_curves ) {
2019-05-09 07:26:41 -04:00
# if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
2022-02-08 11:45:54 -05:00
# if defined(SSL_CTX_set1_curves_list)
2016-12-29 12:26:15 -05:00
const char * ecdhe = ( ssl_conf & & ssl_conf - > ecdhe ) ? ssl_conf - > ecdhe :
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
( bind_conf - > ssl_conf . ecdhe ? bind_conf - > ssl_conf . ecdhe :
NULL ) ;
2022-02-08 11:45:54 -05:00
if ( ecdhe & & SSL_CTX_set1_curves_list ( ctx , ecdhe ) = = 0 ) {
memprintf ( err , " %sProxy '%s': unable to set elliptic named curve to '%s' for bind '%s' at [%s:%d]. \n " ,
err & & * err ? * err : " " , curproxy - > id , ecdhe , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
cfgerr | = ERR_ALERT | ERR_FATAL ;
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
}
2022-02-08 11:45:54 -05:00
# endif /* defined(SSL_CTX_set1_curves_list) */
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
# else
2022-02-08 11:45:54 -05:00
# if defined(SSL_CTX_set_tmp_ecdh) && !defined(OPENSSL_NO_ECDH)
int i ;
EC_KEY * ecdh ;
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
const char * ecdhe = ( ssl_conf & & ssl_conf - > ecdhe ) ? ssl_conf - > ecdhe :
( bind_conf - > ssl_conf . ecdhe ? bind_conf - > ssl_conf . ecdhe :
ECDHE_DEFAULT_CURVE ) ;
2012-09-20 11:10:03 -04:00
2016-12-29 12:26:15 -05:00
i = OBJ_sn2nid ( ecdhe ) ;
2012-09-20 11:10:03 -04:00
if ( ! i | | ( ( ecdh = EC_KEY_new_by_curve_name ( i ) ) = = NULL ) ) {
2019-11-23 17:45:10 -05:00
memprintf ( err , " %sProxy '%s': unable to set elliptic named curve to '%s' for bind '%s' at [%s:%d]. \n " ,
2022-02-08 11:45:54 -05:00
err & & * err ? * err : " " , curproxy - > id , ecdhe , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
2019-11-21 09:48:10 -05:00
cfgerr | = ERR_ALERT | ERR_FATAL ;
2012-09-20 11:10:03 -04:00
}
else {
SSL_CTX_set_tmp_ecdh ( ctx , ecdh ) ;
EC_KEY_free ( ecdh ) ;
}
2022-02-08 11:45:54 -05:00
# endif /* defined(SSL_CTX_set_tmp_ecdh) && !defined(OPENSSL_NO_ECDH) */
# endif /* HA_OPENSSL_VERSION_NUMBER >= 0x10101000L */
2012-09-20 11:10:03 -04:00
}
2023-05-04 09:33:55 -04:00
# if defined(SSL_CTX_set1_sigalgs_list)
conf_sigalgs = ( ssl_conf & & ssl_conf - > sigalgs ) ? ssl_conf - > sigalgs : bind_conf - > ssl_conf . sigalgs ;
if ( conf_sigalgs ) {
if ( ! SSL_CTX_set1_sigalgs_list ( ctx , conf_sigalgs ) ) {
memprintf ( err , " %sProxy '%s': unable to set SSL Signature Algorithm list to '%s' for bind '%s' at [%s:%d]. \n " ,
err & & * err ? * err : " " , curproxy - > id , conf_sigalgs , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
cfgerr | = ERR_ALERT | ERR_FATAL ;
}
}
# endif
2023-05-04 18:05:46 -04:00
# if defined(SSL_CTX_set1_client_sigalgs_list)
conf_client_sigalgs = ( ssl_conf & & ssl_conf - > client_sigalgs ) ? ssl_conf - > client_sigalgs : bind_conf - > ssl_conf . client_sigalgs ;
if ( conf_client_sigalgs ) {
if ( ! SSL_CTX_set1_client_sigalgs_list ( ctx , conf_client_sigalgs ) ) {
memprintf ( err , " %sProxy '%s': unable to set SSL Signature Algorithm list to '%s' for bind '%s' at [%s:%d]. \n " ,
err & & * err ? * err : " " , curproxy - > id , conf_client_sigalgs , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
cfgerr | = ERR_ALERT | ERR_FATAL ;
}
}
# endif
2023-06-07 05:19:51 -04:00
# ifdef USE_QUIC_OPENSSL_COMPAT
if ( ! quic_tls_compat_init ( bind_conf , ctx ) )
cfgerr | = ERR_ALERT | ERR_FATAL ;
# endif
2012-09-07 11:30:07 -04:00
return cfgerr ;
}
2021-02-19 11:41:55 -05:00
/*
 * Prepare the SSL_CTX based on the bind line configuration.
 * Since the CA file loading is made depending on the verify option of the
 * bind line, the link between the SSL_CTX and the CA file tree entry is made
 * here. If we want to create a link between the CA file entry and the
 * corresponding ckch instance (for CA file hot update), it needs to be done
 * after ssl_sock_prepare_ctx.
 * Returns 0 in case of success.
 */
int ssl_sock_prep_ctx_and_inst(struct bind_conf *bind_conf, struct ssl_bind_conf *ssl_conf,
                               SSL_CTX *ctx, struct ckch_inst *ckch_inst, char **err)
{
	int ret = ssl_sock_prepare_ctx(bind_conf, ssl_conf, ctx, err);

	/* only link the CA file once the context was fully prepared */
	if (ckch_inst && !ret)
		ckch_inst_add_cafile_link(ckch_inst, bind_conf, ssl_conf, NULL);

	return ret;
}
2013-06-27 03:05:25 -04:00
/* Check whether <hostname> matches the certificate name <pattern>, either
 * exactly (case-insensitive) or through a single '*' wildcard restricted to
 * the leftmost label, following RFC 6125 section 6.4.3
 * (http://tools.ietf.org/html/rfc6125#section-6.4.3).
 * Returns 1 on match, 0 otherwise.
 */
static int ssl_sock_srv_hostcheck(const char *pattern, const char *hostname)
{
	const char *wildcard = NULL;
	const char *pat_dot;
	const char *host_dot;
	size_t head_len, tail_len;

	/* Trivial case: exact (case-insensitive) match */
	if (strcasecmp(pattern, hostname) == 0)
		return 1;

	/* Scan the leftmost label of the pattern, remembering a single '*'.
	 * A pattern without any dot, or with more than one wildcard in its
	 * first label, cannot match.
	 */
	for (pat_dot = pattern; *pat_dot != '.'; pat_dot++) {
		if (*pat_dot == '\0')
			return 0; /* end of label not found */
		if (*pat_dot == '*') {
			if (wildcard)
				return 0; /* more than one wildcard */
			wildcard = pat_dot;
		}
	}

	/* Not trivial and no wildcard: it can't match */
	if (!wildcard)
		return 0;

	/* All labels after the first must match verbatim */
	host_dot = strchr(hostname, '.');
	if (!host_dot || strcasecmp(pat_dot, host_dot) != 0)
		return 0;

	/* The hostname's leftmost label must be long enough for the wildcard
	 * to match at least zero characters of it.
	 */
	if (host_dot - hostname < (pat_dot - pattern) - 1)
		return 0;

	/* Finally compare the fixed text on either side of the wildcard */
	head_len = wildcard - pattern;
	tail_len = pat_dot - (wildcard + 1);
	if (head_len && strncasecmp(pattern, hostname, head_len) != 0)
		return 0;
	if (tail_len && strncasecmp(wildcard + 1, host_dot - tail_len, tail_len) != 0)
		return 0;

	return 1;
}
/* Verification callback installed by ssl_sock_prepare_srv_ssl_ctx() for
 * outgoing (server-side) connections. OpenSSL calls it during the handshake
 * with <ok> set to the result of its own verification of the certificate at
 * the current depth of <ctx>. On top of those checks, this callback matches
 * the leaf certificate's SAN/CN entries against the expected name (the SNI
 * we presented, or the "verifyhost" directive). Returns non-zero to accept
 * the certificate, 0 to reject it.
 */
static int ssl_sock_srv_verifycbk(int ok, X509_STORE_CTX *ctx)
{
	SSL *ssl;
	struct connection *conn;
	struct ssl_sock_ctx *ssl_ctx;
	const char *servername;
	const char *sni;

	int depth;
	X509 *cert;
	STACK_OF(GENERAL_NAME) *alt_names;
	int i;
	X509_NAME *cert_subject;
	char *str;

	/* a certificate already rejected by OpenSSL's own checks stays rejected */
	if (ok == 0)
		return ok;

	/* retrieve the connection owning this handshake through the SSL object
	 * attached to the X509 store context */
	ssl = X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx());
	conn = SSL_get_ex_data(ssl, ssl_app_data_index);
	ssl_ctx = __conn_get_ssl_sock_ctx(conn);

	/* We're checking if the provided hostnames match the desired one. The
	 * desired hostname comes from the SNI we presented if any, or if not
	 * provided then it may have been explicitly stated using a "verifyhost"
	 * directive. If neither is set, we don't care about the name so the
	 * verification is OK.
	 */
	servername = SSL_get_servername(ssl_ctx->ssl, TLSEXT_NAMETYPE_host_name);
	sni = servername;
	if (!servername) {
		servername = __objt_server(conn->target)->ssl_ctx.verify_host;
		if (!servername)
			return ok;
	}

	/* We only need to verify the CN on the actual server cert,
	 * not the indirect CAs */
	depth = X509_STORE_CTX_get_error_depth(ctx);
	if (depth != 0)
		return ok;

	/* At this point, the cert is *not* OK unless we can find a
	 * hostname match */
	ok = 0;

	cert = X509_STORE_CTX_get_current_cert(ctx);
	/* It seems like this might happen if verify peer isn't set */
	if (!cert)
		return ok;

	/* first check the subjectAltName extension entries of type DNS */
	alt_names = X509_get_ext_d2i(cert, NID_subject_alt_name, NULL, NULL);
	if (alt_names) {
		for (i = 0; !ok && i < sk_GENERAL_NAME_num(alt_names); i++) {
			GENERAL_NAME *name = sk_GENERAL_NAME_value(alt_names, i);
			if (name->type == GEN_DNS) {
#if HA_OPENSSL_VERSION_NUMBER < 0x00907000L
				/* very old OpenSSL versions only expose the generic ia5 field */
				if (ASN1_STRING_to_UTF8((unsigned char **)&str, name->d.ia5) >= 0) {
#else
				if (ASN1_STRING_to_UTF8((unsigned char **)&str, name->d.dNSName) >= 0) {
#endif
					ok = ssl_sock_srv_hostcheck(str, servername);
					OPENSSL_free(str);
				}
			}
		}
		sk_GENERAL_NAME_pop_free(alt_names, GENERAL_NAME_free);
	}

	/* then fall back to the commonName entries of the subject */
	cert_subject = X509_get_subject_name(cert);
	i = -1;
	while (!ok && (i = X509_NAME_get_index_by_NID(cert_subject, NID_commonName, i)) != -1) {
		X509_NAME_ENTRY *entry = X509_NAME_get_entry(cert_subject, i);
		ASN1_STRING *value;
		value = X509_NAME_ENTRY_get_data(entry);
		if (ASN1_STRING_to_UTF8((unsigned char **)&str, value) >= 0) {
			ok = ssl_sock_srv_hostcheck(str, servername);
			OPENSSL_free(str);
		}
	}

	/* report the mismatch and indicate if SNI was used or not */
	if (!ok && !conn->err_code)
		conn->err_code = sni ? CO_ER_SSL_MISMATCH_SNI : CO_ER_SSL_MISMATCH;
	return ok;
}
2012-10-11 08:00:19 -04:00
/* prepare ssl context from servers options. Returns an error count.
 * Allocates the per-thread session reuse array, switches the server to the
 * SSL transport when "ssl" is set, loads the client certificate if a "crt"
 * keyword was present (which creates the SSL_CTX), creates a default SSL_CTX
 * otherwise, then finishes the setup through
 * ssl_sock_prep_srv_ctx_and_inst().
 */
int ssl_sock_prepare_srv_ctx(struct server *srv)
{
	int cfgerr = 0;
	SSL_CTX *ctx;

	/* Automatic memory computations need to know we use SSL there
	 * If this is an internal proxy, don't use it for the computation */
	if (!(srv->proxy->cap & PR_CAP_INT))
		global.ssl_used_backend = 1;

	/* Initiate SSL context for current server */
	if (!srv->ssl_ctx.reused_sess) {
		/* one reused_sess slot per thread */
		if ((srv->ssl_ctx.reused_sess = calloc(1, global.nbthread*sizeof(*srv->ssl_ctx.reused_sess))) == NULL) {
			ha_alert("out of memory.\n");
			cfgerr++;
			return cfgerr;
		}
	}
	if (srv->use_ssl == 1)
		srv->xprt = &ssl_sock;

	if (srv->ssl_ctx.client_crt) {
		/* dynamic servers reuse an already existing cert entry instead
		 * of creating a new one */
		const int create_if_none = srv->flags & SRV_F_DYNAMIC ? 0 : 1;
		char *err = NULL;
		int err_code = 0;

		/* If there is a crt keyword there, the SSL_CTX will be created here. */
		err_code = ssl_sock_load_srv_cert(srv->ssl_ctx.client_crt, srv, create_if_none, &err);
		if (err_code != ERR_NONE) {
			if ((err_code & ERR_WARN) && !(err_code & ERR_ALERT))
				ha_warning("%s", err);
			else
				ha_alert("%s", err);

			if (err_code & (ERR_FATAL|ERR_ABORT))
				cfgerr++;
		}
		ha_free(&err);
	}

	ctx = srv->ssl_ctx.ctx;

	/* The context will be uninitialized if there wasn't any "cert" option
	 * in the server line. */
	if (!ctx) {
		ctx = SSL_CTX_new(SSLv23_client_method());
		if (!ctx) {
			ha_alert("unable to allocate ssl context.\n");
			cfgerr++;
			return cfgerr;
		}

		if (global_ssl.security_level > -1)
			SSL_CTX_set_security_level(ctx, global_ssl.security_level);

		srv->ssl_ctx.ctx = ctx;
	}

	cfgerr += ssl_sock_prep_srv_ctx_and_inst(srv, srv->ssl_ctx.ctx, srv->ssl_ctx.inst);

	return cfgerr;
}
/* Initialize an SSL context that will be used on the backend side.
 * Configures the protocol version range, options/modes, peer verification
 * (CA and CRL files), session cache callbacks, ciphers, NPN/ALPN, signature
 * algorithms and curves from the "server" line settings.
 * Returns an error count.
 */
static int ssl_sock_prepare_srv_ssl_ctx(const struct server *srv, SSL_CTX *ctx)
{
	struct proxy *curproxy = srv->proxy;
	int cfgerr = 0;
	long options =
		SSL_OP_ALL | /* all known workarounds for bugs */
		SSL_OP_NO_SSLv2 |
		SSL_OP_NO_COMPRESSION;
	long mode =
		SSL_MODE_ENABLE_PARTIAL_WRITE |
		SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |
		SSL_MODE_RELEASE_BUFFERS |
		SSL_MODE_SMALL_BUFFERS;
	int verify = SSL_VERIFY_NONE;
	const struct tls_version_filter *conf_ssl_methods = &srv->ssl_ctx.methods;
	int i, min, max, hole;
	/* <flags> holds one "disabled" bit per TLS version */
	int flags = MC_SSL_O_ALL;
#if defined(SSL_CTX_set1_sigalgs_list)
	const char *conf_sigalgs = NULL;
#endif
#if defined(SSL_CTX_set1_client_sigalgs_list)
	const char *conf_client_sigalgs = NULL;
#endif
#if defined(SSL_CTX_set1_curves_list)
	const char *conf_curves = NULL;
#endif

	/* the legacy no-xxx flags are mutually exclusive with ssl-min-ver/ssl-max-ver */
	if (conf_ssl_methods->flags && (conf_ssl_methods->min || conf_ssl_methods->max))
		ha_warning("no-sslv3/no-tlsv1x are ignored for this server. "
			   "Use only 'ssl-min-ver' and 'ssl-max-ver' to fix.\n");
	else
		flags = conf_ssl_methods->flags;

	/* Real min and max should be determinate with configuration and openssl's capabilities */
	if (conf_ssl_methods->min)
		/* mark all versions strictly below the minimum as disabled
		 * (all bits lower than the min version's flag) */
		flags |= (methodVersions[conf_ssl_methods->min].flag - 1);
	if (conf_ssl_methods->max)
		/* mark all versions strictly above the maximum as disabled
		 * (all bits higher than the max version's flag) */
		flags |= ~((methodVersions[conf_ssl_methods->max].flag << 1) - 1);

	/* find min, max and holes */
	min = max = CONF_TLSV_NONE;
	hole = 0;
	for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++)
		/* version is in openssl && version not disable in configuration */
		if (methodVersions[i].option && !(flags & methodVersions[i].flag)) {
			if (min) {
				if (hole) {
					/* an enabled version follows a disabled one: the range is not contiguous */
					ha_warning("%s '%s': SSL/TLS versions range not contiguous for server '%s'. "
						   "Hole find for %s. Use only 'ssl-min-ver' and 'ssl-max-ver' to fix.\n",
						   proxy_type_str(curproxy), curproxy->id, srv->id,
						   methodVersions[hole].name);
					hole = 0;
				}
				max = i;
			}
			else {
				min = max = i;
			}
		}
		else {
			if (min)
				hole = i;
		}
	if (!min) {
		ha_alert("%s '%s': all SSL/TLS versions are disabled for server '%s'.\n",
			 proxy_type_str(curproxy), curproxy->id, srv->id);
		cfgerr += 1;
	}

#if (HA_OPENSSL_VERSION_NUMBER < 0x1010000fL)
	/* Keep force-xxx implementation as it is in older haproxy. It's a
	   precautionary measure to avoid any surprise with older openssl version. */
	if (min == max)
		methodVersions[min].ctx_set_version(ctx, SET_CLIENT);
	else
		/* disable every version still flagged as disabled via SSL_OP_NO_* */
		for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++)
			if (flags & methodVersions[i].flag)
				options |= methodVersions[i].option;
#else   /* openssl >= 1.1.0 */
	/* set the max_version is required to cap TLS version or activate new TLS (v1.3) */
	methodVersions[min].ctx_set_version(ctx, SET_MIN);
	methodVersions[max].ctx_set_version(ctx, SET_MAX);
#endif

	if (srv->ssl_ctx.options & SRV_SSL_O_NO_TLS_TICKETS)
		options |= SSL_OP_NO_TICKET;
	SSL_CTX_set_options(ctx, options);

#ifdef SSL_MODE_ASYNC
	if (global_ssl.async)
		mode |= SSL_MODE_ASYNC;
#endif
	SSL_CTX_set_mode(ctx, mode);

	/* the global default may be overridden by a per-server "verify" setting */
	if (global.ssl_server_verify == SSL_SERVER_VERIFY_REQUIRED)
		verify = SSL_VERIFY_PEER;
	switch (srv->ssl_ctx.verify) {
	case SSL_SOCK_VERIFY_NONE:
		verify = SSL_VERIFY_NONE;
		break;
	case SSL_SOCK_VERIFY_REQUIRED:
		verify = SSL_VERIFY_PEER;
		break;
	}
	/* the hostname-check callback is also needed when only verifyhost is set */
	SSL_CTX_set_verify(ctx, verify,
	                   (srv->ssl_ctx.verify_host || (verify & SSL_VERIFY_PEER)) ? ssl_sock_srv_verifycbk : NULL);
	if (verify & SSL_VERIFY_PEER) {
		if (srv->ssl_ctx.ca_file) {
			/* set CAfile to verify */
			if (!ssl_set_verify_locations_file(ctx, srv->ssl_ctx.ca_file)) {
				ha_alert("unable to set CA file '%s'.\n",
					 srv->ssl_ctx.ca_file);
				cfgerr++;
			}
		}
		else {
			/* verification requested but no CA available: this is a config error */
			if (global.ssl_server_verify == SSL_SERVER_VERIFY_REQUIRED)
				ha_alert("verify is enabled by default but no CA file specified. If you're running on a LAN where you're certain to trust the server's certificate, please set an explicit 'verify none' statement on the 'server' line, or use 'ssl-server-verify none' in the global section to disable server-side verifications by default.\n");
			else
				ha_alert("verify is enabled but no CA file specified.\n");
			cfgerr++;
		}
#ifdef X509_V_FLAG_CRL_CHECK
		if (srv->ssl_ctx.crl_file) {
			X509_STORE *store = SSL_CTX_get_cert_store(ctx);

			if (!ssl_set_cert_crl_file(store, srv->ssl_ctx.crl_file)) {
				ha_alert("unable to configure CRL file '%s'.\n",
					 srv->ssl_ctx.crl_file);
				cfgerr++;
			}
			else {
				/* check CRLs on the whole chain, not only the leaf */
				X509_STORE_set_flags(store, X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL);
			}
		}
#endif
	}

	/* sessions are cached by haproxy itself, not by OpenSSL's internal store */
	SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_CLIENT | SSL_SESS_CACHE_NO_INTERNAL_STORE);
	SSL_CTX_sess_set_new_cb(ctx, ssl_sess_new_srv_cb);
	if (srv->ssl_ctx.ciphers &&
		!SSL_CTX_set_cipher_list(ctx, srv->ssl_ctx.ciphers)) {
		ha_alert("unable to set SSL cipher list to '%s'.\n",
			 srv->ssl_ctx.ciphers);
		cfgerr++;
	}

#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
	if (srv->ssl_ctx.ciphersuites &&
	    !SSL_CTX_set_ciphersuites(ctx, srv->ssl_ctx.ciphersuites)) {
		ha_alert("unable to set TLS 1.3 cipher suites to '%s'.\n",
			 srv->ssl_ctx.ciphersuites);
		cfgerr++;
	}
#endif
#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
	if (srv->ssl_ctx.npn_str)
		SSL_CTX_set_next_proto_select_cb(ctx, ssl_sock_srv_select_protos, (struct server*)srv);
#endif
#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
	if (srv->ssl_ctx.alpn_str && srv->ssl_ctx.alpn_len)
		SSL_CTX_set_alpn_protos(ctx, (unsigned char *)srv->ssl_ctx.alpn_str, srv->ssl_ctx.alpn_len);
#endif

#if defined(SSL_CTX_set1_sigalgs_list)
	conf_sigalgs = srv->ssl_ctx.sigalgs;
	if (conf_sigalgs) {
		if (!SSL_CTX_set1_sigalgs_list(ctx, conf_sigalgs)) {
			ha_alert("Proxy '%s': unable to set SSL Signature Algorithm list to '%s' for server '%s'.\n",
				 curproxy->id, conf_sigalgs, srv->id);
			cfgerr++;
		}
	}
#endif
#if defined(SSL_CTX_set1_client_sigalgs_list)
	conf_client_sigalgs = srv->ssl_ctx.client_sigalgs;
	if (conf_client_sigalgs) {
		if (!SSL_CTX_set1_client_sigalgs_list(ctx, conf_client_sigalgs)) {
			ha_alert("Proxy '%s': unable to set SSL Client Signature Algorithm list to '%s' for server '%s'.\n",
				 curproxy->id, conf_client_sigalgs, srv->id);
			cfgerr++;
		}
	}
#endif

#if defined(SSL_CTX_set1_curves_list)
	conf_curves = srv->ssl_ctx.curves;
	if (conf_curves) {
		if (!SSL_CTX_set1_curves_list(ctx, conf_curves)) {
			ha_alert("Proxy '%s': unable to set SSL curves list to '%s' for server '%s'.\n",
				 curproxy->id, conf_curves, srv->id);
			cfgerr++;
		}
	}
#endif /* defined(SSL_CTX_set1_curves_list) */

	return cfgerr;
}
2021-02-19 11:41:55 -05:00
/*
 * Prepare <ctx>, the SSL_CTX used to reach a server, from the server line
 * configuration of <srv>. Since the CA file loading is made depending on the
 * verify option of the server line, the link between the SSL_CTX and the CA
 * file tree entry is made here. If we want to create a link between the CA
 * file entry and the corresponding ckch instance (for CA file hot update),
 * it needs to be done after ssl_sock_prepare_srv_ssl_ctx.
 * Returns an error count.
 */
int ssl_sock_prep_srv_ctx_and_inst(const struct server *srv, SSL_CTX *ctx,
                                   struct ckch_inst *ckch_inst)
{
	int errors = ssl_sock_prepare_srv_ssl_ctx(srv, ctx);

	/* only register the CA file link when the context was prepared
	 * without any error and an instance actually exists */
	if (errors == 0 && ckch_inst != NULL)
		ckch_inst_add_cafile_link(ckch_inst, NULL, NULL, srv);

	return errors;
}
2020-11-23 08:33:30 -05:00
/*
 * Create an initial CTX used to start the SSL connections.
 * May be used by QUIC xprt which makes usage of SSL sessions initialized from SSL_CTXs.
 * Returns 0 if succeeded, or something >0 if not.
 */
static int ssl_initial_ctx(struct bind_conf *bind_conf)
{
#ifdef USE_QUIC
	/* QUIC listeners build their initial context differently */
	if (bind_conf->xprt == xprt_get(XPRT_QUIC))
		return ssl_quic_initial_ctx(bind_conf);
#endif
	return ssl_sock_initial_ctx(bind_conf);
}
2012-09-13 11:54:29 -04:00
/* Walks down the two trees in bind_conf and prepares all certs. The pointer may
 * be NULL, in which case nothing is done. Returns the number of errors
 * encountered.
 */
int ssl_sock_prepare_all_ctx(struct bind_conf *bind_conf)
{
	struct ebmb_node *node;
	struct sni_ctx *sni;
	int err = 0;
	/* ERR_* bit field accumulated across all contexts; turned into a
	 * single warning/alert at the end */
	int errcode = 0;
	char *errmsg = NULL;

	/* Automatic memory computations need to know we use SSL there */
	global.ssl_used_frontend = 1;

	/* Create initial_ctx used to start the ssl connection before do switchctx */
	if (!bind_conf->initial_ctx) {
		err += ssl_initial_ctx(bind_conf);
		/* It should not be necessary to call this function, but it's
		   necessary first to check and move all initialisation related
		   to initial_ctx in ssl_initial_ctx. */
		errcode |= ssl_sock_prep_ctx_and_inst(bind_conf, NULL, bind_conf->initial_ctx, NULL, &errmsg);
	}

	/* prepare the contexts of the exact-name SNI tree */
	node = ebmb_first(&bind_conf->sni_ctx);
	while (node) {
		sni = ebmb_entry(node, struct sni_ctx, name);
		if (!sni->order) {
			/* only initialize the CTX on its first occurrence */
			errcode |= ssl_sock_prep_ctx_and_inst(bind_conf, sni->conf, sni->ctx, sni->ckch_inst, &errmsg);
		}
		node = ebmb_next(node);
	}

	/* same for the wildcard SNI tree */
	node = ebmb_first(&bind_conf->sni_w_ctx);
	while (node) {
		sni = ebmb_entry(node, struct sni_ctx, name);
		if (!sni->order) {
			/* only initialize the CTX on its first occurrence */
			errcode |= ssl_sock_prep_ctx_and_inst(bind_conf, sni->conf, sni->ctx, sni->ckch_inst, &errmsg);
		}
		node = ebmb_next(node);
	}

	/* only alerts (not warnings) count as errors in the return value */
	if (errcode & ERR_WARN) {
		ha_warning("%s", errmsg);
	} else if (errcode & ERR_CODE) {
		ha_alert("%s", errmsg);
		err++;
	}

	free(errmsg);
	return err;
}
2016-12-21 17:38:39 -05:00
/* Prepares all the contexts for a bind_conf and allocates the shared SSL
 * context if needed. Returns < 0 on error, 0 on success. The warnings and
 * alerts are directly emitted since the rest of the stack does it below.
 */
int ssl_sock_prepare_bind_conf(struct bind_conf *bind_conf)
{
	struct proxy *px = bind_conf->frontend;
	int alloc_ctx;
	int err;

	/* check if some certificates were loaded but no ssl keyword is used */
	if (!(bind_conf->options & BC_O_USE_SSL)) {
		if (!eb_is_empty(&bind_conf->sni_ctx) || !eb_is_empty(&bind_conf->sni_w_ctx)) {
			ha_warning("Proxy '%s': A certificate was specified but SSL was not enabled on bind '%s' at [%s:%d] (use 'ssl').\n",
				   px->id, bind_conf->arg, bind_conf->file, bind_conf->line);
		}
		return 0;
	}

	/* check if we have certificates */
	if (eb_is_empty(&bind_conf->sni_ctx) && eb_is_empty(&bind_conf->sni_w_ctx)) {
		/* with strict-sni (and no generated certs) a missing cert is
		 * only a warning: handshakes without a matching SNI fail anyway */
		if (bind_conf->strict_sni && !(bind_conf->options & BC_O_GENERATE_CERTS)) {
			ha_warning("Proxy '%s': no SSL certificate specified for bind '%s' at [%s:%d], ssl connections will fail (use 'crt').\n",
				   px->id, bind_conf->arg, bind_conf->file, bind_conf->line);
		}
		else {
			ha_alert("Proxy '%s': no SSL certificate specified for bind '%s' at [%s:%d] (use 'crt').\n",
				 px->id, bind_conf->arg, bind_conf->file, bind_conf->line);
			return -1;
		}
	}

	if ((bind_conf->options & BC_O_GENERATE_CERTS)) {
		struct sni_ctx *sni_ctx;

		/* if we use the generate-certificates option, look for the first default cert available */
		sni_ctx = ssl_sock_chose_sni_ctx(bind_conf, "", 1, 1);
		if (!sni_ctx) {
			ha_alert("Proxy '%s': no SSL certificate specified for bind '%s' and 'generate-certificates' option at [%s:%d] (use 'crt').\n",
				 px->id, bind_conf->arg, bind_conf->file, bind_conf->line);
			return -1;
		}
	}

	/* allocate the shared session cache once, on first use */
	if (!ssl_shctx && global.tune.sslcachesize) {
		alloc_ctx = shctx_init(&ssl_shctx, global.tune.sslcachesize,
				       sizeof(struct sh_ssl_sess_hdr) + SHSESS_BLOCK_MIN_SIZE, -1,
				       sizeof(*sh_ssl_sess_tree));
		if (alloc_ctx <= 0) {
			if (alloc_ctx == SHCTX_E_INIT_LOCK)
				ha_alert("Unable to initialize the lock for the shared SSL session cache. You can retry using the global statement 'tune.ssl.force-private-cache' but it could increase CPU usage due to renegotiations if nbproc > 1.\n");
			else
				ha_alert("Unable to allocate SSL session cache.\n");
			return -1;
		}
		/* free block callback */
		ssl_shctx->free_block = sh_ssl_sess_free_blocks;
		/* init the root tree within the extra space */
		sh_ssl_sess_tree = (void *)ssl_shctx + sizeof(struct shared_context);
		*sh_ssl_sess_tree = EB_ROOT_UNIQUE;
	}
	err = 0;
	/* initialize all certificate contexts */
	err += ssl_sock_prepare_all_ctx(bind_conf);

	/* initialize CA variables if the certificates generation is enabled */
	err += ssl_sock_load_ca(bind_conf);

	return -err;
}
2015-07-29 07:02:40 -04:00
2021-12-30 05:25:43 -05:00
/* release ssl context allocated for servers. Most of the field free here
 * must also be allocated in srv_ssl_settings_cpy() */
void ssl_sock_free_srv_ctx(struct server *srv)
{
#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
	ha_free(&srv->ssl_ctx.alpn_str);
#endif
#ifdef OPENSSL_NPN_NEGOTIATED
	ha_free(&srv->ssl_ctx.npn_str);
#endif
	if (srv->ssl_ctx.reused_sess) {
		int i;

		/* one cached session (and its SNI) per thread */
		for (i = 0; i < global.nbthread; i++) {
			ha_free(&srv->ssl_ctx.reused_sess[i].ptr);
			ha_free(&srv->ssl_ctx.reused_sess[i].sni);
		}
		ha_free(&srv->ssl_ctx.reused_sess);
	}

	if (srv->ssl_ctx.ctx) {
		SSL_CTX_free(srv->ssl_ctx.ctx);
		srv->ssl_ctx.ctx = NULL;
	}

	ha_free(&srv->ssl_ctx.ca_file);
	ha_free(&srv->ssl_ctx.crl_file);
	ha_free(&srv->ssl_ctx.client_crt);
	ha_free(&srv->ssl_ctx.verify_host);
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
	ha_free(&srv->sni_expr);
	release_sample_expr(srv->ssl_ctx.sni);
	srv->ssl_ctx.sni = NULL;
#endif
	ha_free(&srv->ssl_ctx.ciphers);
#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
	ha_free(&srv->ssl_ctx.ciphersuites);
#endif
	/* If there is a certificate we must unlink the ckch instance */
	ckch_inst_free(srv->ssl_ctx.inst);
}
2012-09-13 11:54:29 -04:00
/* Walks down the two trees in bind_conf and frees all the certs. The pointer may
2012-09-07 11:30:07 -04:00
* be NULL , in which case nothing is done . The default_ctx is nullified too .
*/
2012-09-13 11:54:29 -04:00
void ssl_sock_free_all_ctx ( struct bind_conf * bind_conf )
2012-09-07 11:30:07 -04:00
{
struct ebmb_node * node , * back ;
struct sni_ctx * sni ;
2012-09-13 11:54:29 -04:00
node = ebmb_first ( & bind_conf - > sni_ctx ) ;
2012-09-07 11:30:07 -04:00
while ( node ) {
sni = ebmb_entry ( node , struct sni_ctx , name ) ;
back = ebmb_next ( node ) ;
ebmb_delete ( node ) ;
2020-04-08 10:11:26 -04:00
SSL_CTX_free ( sni - > ctx ) ;
2021-04-21 01:32:39 -04:00
LIST_DELETE ( & sni - > by_ckch_inst ) ;
2012-09-07 11:30:07 -04:00
free ( sni ) ;
node = back ;
}
2012-09-13 11:54:29 -04:00
node = ebmb_first ( & bind_conf - > sni_w_ctx ) ;
2012-09-07 11:30:07 -04:00
while ( node ) {
sni = ebmb_entry ( node , struct sni_ctx , name ) ;
back = ebmb_next ( node ) ;
ebmb_delete ( node ) ;
2020-04-08 10:11:26 -04:00
SSL_CTX_free ( sni - > ctx ) ;
2021-04-21 01:32:39 -04:00
LIST_DELETE ( & sni - > by_ckch_inst ) ;
2012-09-07 11:30:07 -04:00
free ( sni ) ;
node = back ;
}
2020-06-24 03:54:29 -04:00
2017-03-06 09:34:44 -05:00
SSL_CTX_free ( bind_conf - > initial_ctx ) ;
bind_conf - > initial_ctx = NULL ;
2012-09-07 11:30:07 -04:00
}
2020-06-24 03:54:29 -04:00
void ssl_sock_deinit()
{
	/* crtlists reference the ckch stores, so release them first */
	crtlist_deinit();
	ckch_deinit();
}
REGISTER_POST_DEINIT(ssl_sock_deinit);
2016-12-22 11:30:54 -05:00
/* Destroys all the contexts for a bind_conf. This is used during deinit(). */
void ssl_sock_destroy_bind_conf ( struct bind_conf * bind_conf )
{
ssl_sock_free_ca ( bind_conf ) ;
ssl_sock_free_all_ctx ( bind_conf ) ;
2016-12-29 12:26:15 -05:00
ssl_sock_free_ssl_conf ( & bind_conf - > ssl_conf ) ;
2016-12-22 11:30:54 -05:00
free ( bind_conf - > ca_sign_file ) ;
free ( bind_conf - > ca_sign_pass ) ;
2018-07-17 04:05:32 -04:00
if ( bind_conf - > keys_ref & & ! - - bind_conf - > keys_ref - > refcount ) {
2016-12-22 11:30:54 -05:00
free ( bind_conf - > keys_ref - > filename ) ;
free ( bind_conf - > keys_ref - > tlskeys ) ;
2021-04-21 01:32:39 -04:00
LIST_DELETE ( & bind_conf - > keys_ref - > list ) ;
2016-12-22 11:30:54 -05:00
free ( bind_conf - > keys_ref ) ;
}
bind_conf - > keys_ref = NULL ;
bind_conf - > ca_sign_pass = NULL ;
bind_conf - > ca_sign_file = NULL ;
}
2020-11-09 09:59:23 -05:00
/*
 * Try to allocate the BIO and SSL session objects of <conn> connection with <bio> and
 * <ssl> as addresses, <bio_meth> as BIO method and <ssl_ctx> as SSL context inherited settings.
 * Connect the allocated BIO to the allocated SSL session. Also set <ctx> as address of custom
 * data for the BIO and store <conn> as user data of the SSL session object.
 * This is the responsibility of the caller to check the validity of all the pointers passed
 * as parameters to this function.
 * Return 0 if succeeded, -1 if not. If failed, sets the ->err_code member of <conn> to
 * CO_ER_SSL_NO_MEM.
 */
int ssl_bio_and_sess_init(struct connection *conn, SSL_CTX *ssl_ctx,
                          SSL **ssl, BIO **bio, BIO_METHOD *bio_meth, void *ctx)
{
	int attempt;

	/* two passes at most: if the first allocation sequence fails, flush
	 * the pools and retry the whole sequence once before giving up.
	 */
	for (attempt = 0; attempt < 2; attempt++) {
		if (attempt)
			pool_gc(NULL);

		/* Alloc a new SSL session. */
		*ssl = SSL_new(ssl_ctx);
		if (!*ssl)
			continue;

		*bio = BIO_new(bio_meth);
		if (!*bio) {
			SSL_free(*ssl);
			*ssl = NULL;
			continue;
		}

		BIO_set_data(*bio, ctx);
		SSL_set_bio(*ssl, *bio, *bio);

		/* set connection pointer */
		if (!SSL_set_ex_data(*ssl, ssl_app_data_index, conn)) {
			/* the BIO now belongs to the SSL object and goes away with it */
			SSL_free(*ssl);
			*ssl = NULL;
			continue;
		}

		return 0;
	}

	conn->err_code = CO_ER_SSL_NO_MEM;
	return -1;
}
2021-03-05 17:47:00 -05:00
/* This function is called when all the XPRT have been initialized. We can
* now attempt to start the SSL handshake .
*/
static int ssl_sock_start ( struct connection * conn , void * xprt_ctx )
{
struct ssl_sock_ctx * ctx = xprt_ctx ;
if ( ctx - > xprt - > start ) {
int ret ;
ret = ctx - > xprt - > start ( conn , ctx - > xprt_ctx ) ;
if ( ret < 0 )
return ret ;
}
tasklet_wakeup ( ctx - > wait_event . tasklet ) ;
return 0 ;
}
2023-10-25 09:38:04 -04:00
/* Similar to increment_actconn() but for SSL connections. Atomically
 * reserves one more SSL connection slot and returns the new count, or 0
 * when the global.maxsslconn limit is already reached.
 */
int increment_sslconn()
{
	unsigned int curr, next;

	while (1) {
		curr = global.sslconns;
		if (global.maxsslconn && curr >= global.maxsslconn) {
			/* maxconn reached */
			return 0;
		}
		/* try to increment sslconns */
		next = curr + 1;
		if (_HA_ATOMIC_CAS(&global.sslconns, &curr, next))
			return next;
		__ha_cpu_relax();
	}
}
2012-05-18 09:47:34 -04:00
/*
* This function is called if SSL * context is not yet allocated . The function
* is designed to be called before any other data - layer operation and sets the
* handshake flag on the connection . It is safe to call it multiple times .
* It returns 0 on success and - 1 in error case .
*/
2019-03-21 13:27:17 -04:00
static int ssl_sock_init ( struct connection * conn , void * * xprt_ctx )
2012-05-18 09:47:34 -04:00
{
2019-02-26 12:37:15 -05:00
struct ssl_sock_ctx * ctx ;
2023-10-25 09:38:04 -04:00
int next_sslconn = 0 ;
2012-05-18 09:47:34 -04:00
/* already initialized */
2019-03-21 13:27:17 -04:00
if ( * xprt_ctx )
2012-05-18 09:47:34 -04:00
return 0 ;
2019-02-26 12:37:15 -05:00
ctx = pool_alloc ( ssl_sock_ctx_pool ) ;
if ( ! ctx ) {
conn - > err_code = CO_ER_SSL_NO_MEM ;
return - 1 ;
}
2019-06-14 08:42:29 -04:00
ctx - > wait_event . tasklet = tasklet_new ( ) ;
if ( ! ctx - > wait_event . tasklet ) {
2019-05-20 08:02:16 -04:00
conn - > err_code = CO_ER_SSL_NO_MEM ;
pool_free ( ssl_sock_ctx_pool , ctx ) ;
return - 1 ;
}
2019-06-14 08:42:29 -04:00
ctx - > wait_event . tasklet - > process = ssl_sock_io_cb ;
ctx - > wait_event . tasklet - > context = ctx ;
MINOR: ssl: mark the SSL handshake tasklet as heavy
There's a fairness issue between SSL and clear text. A full end-to-end
cleartext connection can require up to ~7.7 wakeups on average, plus 3.3
for the SSL tasklet, one of which is particularly expensive. So if we
accept to process many handshakes taking 1ms each, we significantly
increase the processing time of regular tasks just by adding an extra
delay between their calls. Ideally in order to be fair we should have a
1:18 call ratio, but this requires a bit more accounting. With very little
effort we can mark the SSL handshake tasklet as TASK_HEAVY until the
handshake completes, and remove it once done.
Doing so reduces from 14 to 3.0 ms the total response time experienced
by HTTP clients running in parallel to 1000 SSL clients doing full
handshakes in loops. Better, when tune.sched.low-latency is set to "on",
the latency further drops to 1.8 ms.
The tasks latency distribution explain pretty well what is happening:
Without the patch:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 2785375 19.35m 416.9us 5.401h 6.980ms
h1_io_cb 1868949 9.853s 5.271us 4.829h 9.302ms
process_stream 1864066 7.582s 4.067us 2.058h 3.974ms
si_cs_io_cb 1733808 1.932s 1.114us 26.83m 928.5us
h1_timeout_task 935760 - - 1.033h 3.975ms
accept_queue_process 303606 4.627s 15.24us 16.65m 3.291ms
srv_cleanup_toremove_connections452 64.31ms 142.3us 2.447s 5.415ms
task_run_applet 47 5.149ms 109.6us 57.09ms 1.215ms
srv_cleanup_idle_connections 34 2.210ms 65.00us 87.49ms 2.573ms
With the patch:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 3000365 21.08m 421.6us 20.30h 24.36ms
h1_io_cb 2031932 9.278s 4.565us 46.70m 1.379ms
process_stream 2010682 7.391s 3.675us 22.83m 681.2us
si_cs_io_cb 1702070 1.571s 922.0ns 8.732m 307.8us
h1_timeout_task 1009594 - - 17.63m 1.048ms
accept_queue_process 339595 4.792s 14.11us 3.714m 656.2us
srv_cleanup_toremove_connections 779 75.42ms 96.81us 438.3ms 562.6us
srv_cleanup_idle_connections 48 2.498ms 52.05us 178.1us 3.709us
task_run_applet 17 1.738ms 102.3us 11.29ms 663.9us
other 1 947.8us 947.8us 202.6us 202.6us
=> h1_io_cb() and process_stream() are divided by 6 while ssl_sock_io_cb() is
multiplied by 4
And with low-latency on:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 3000565 20.96m 419.1us 20.74h 24.89ms
h1_io_cb 2019702 9.294s 4.601us 49.22m 1.462ms
process_stream 2009755 6.570s 3.269us 1.493m 44.57us
si_cs_io_cb 1997820 1.566s 783.0ns 2.985m 89.66us
h1_timeout_task 1009742 - - 1.647m 97.86us
accept_queue_process 494509 4.697s 9.498us 1.240m 150.4us
srv_cleanup_toremove_connections 1120 92.32ms 82.43us 463.0ms 413.4us
srv_cleanup_idle_connections 70 2.703ms 38.61us 204.5us 2.921us
task_run_applet 13 1.303ms 100.3us 85.12us 6.548us
=> process_stream() is divided by 100 while ssl_sock_io_cb() is
multiplied by 4
Interestingly, the total HTTPS response time doesn't increase and even very
slightly decreases, with an overall ~1% higher request rate. The net effect
here is a redistribution of the CPU resources between internal tasks, and
in the case of SSL, handshakes wait bit more but everything after completes
faster.
This was made simple enough to be backportable if it helps some users
suffering from high latencies in mixed traffic.
2021-02-25 09:31:00 -05:00
ctx - > wait_event . tasklet - > state | = TASK_HEAVY ; // assign it to the bulk queue during handshake
2019-05-20 08:02:16 -04:00
ctx - > wait_event . events = 0 ;
2019-02-28 12:10:45 -05:00
ctx - > sent_early_data = 0 ;
2019-12-19 09:02:39 -05:00
ctx - > early_buf = BUF_NULL ;
2019-04-07 16:00:38 -04:00
ctx - > conn = conn ;
2020-01-10 03:20:26 -05:00
ctx - > subs = NULL ;
2019-09-06 09:36:02 -04:00
ctx - > xprt_st = 0 ;
ctx - > xprt_ctx = NULL ;
2021-09-29 12:56:52 -04:00
ctx - > error_code = 0 ;
2019-04-07 16:00:38 -04:00
2023-10-25 09:38:04 -04:00
next_sslconn = increment_sslconn ( ) ;
if ( ! next_sslconn ) {
conn - > err_code = CO_ER_SSL_TOO_MANY ;
goto err ;
}
2019-04-07 16:00:38 -04:00
/* Only work with sockets for now, this should be adapted when we'll
* add QUIC support .
*/
ctx - > xprt = xprt_get ( XPRT_RAW ) ;
2019-05-23 12:24:07 -04:00
if ( ctx - > xprt - > init ) {
2019-05-20 08:02:16 -04:00
if ( ctx - > xprt - > init ( conn , & ctx - > xprt_ctx ) ! = 0 )
goto err ;
2019-05-23 12:24:07 -04:00
}
2019-02-26 12:37:15 -05:00
2012-05-18 09:47:34 -04:00
/* If it is in client mode initiate SSL session
in connect state otherwise accept state */
2012-11-11 18:42:33 -05:00
if ( objt_server ( conn - > target ) ) {
2023-08-30 06:00:29 -04:00
struct server * srv = __objt_server ( conn - > target ) ;
if ( ssl_bio_and_sess_init ( conn , srv - > ssl_ctx . ctx ,
2020-11-09 09:59:23 -05:00
& ctx - > ssl , & ctx - > bio , ha_meth , ctx ) = = - 1 )
2019-02-26 12:37:15 -05:00
goto err ;
2014-11-12 11:35:37 -05:00
2019-02-26 12:37:15 -05:00
SSL_set_connect_state ( ctx - > ssl ) ;
2023-08-30 06:00:29 -04:00
HA_RWLOCK_RDLOCK ( SSL_SERVER_LOCK , & srv - > ssl_ctx . lock ) ;
if ( srv - > ssl_ctx . reused_sess [ tid ] . ptr ) {
2023-08-21 05:17:10 -04:00
/* let's recreate a session from (ptr,size) and assign
* it to ctx - > ssl . Its refcount will be updated by the
* creation and by the assignment , so after assigning
* it or failing to , we must always free it to decrement
* the refcount .
*/
2023-08-30 06:00:29 -04:00
const unsigned char * ptr = srv - > ssl_ctx . reused_sess [ tid ] . ptr ;
SSL_SESSION * sess = d2i_SSL_SESSION ( NULL , & ptr , srv - > ssl_ctx . reused_sess [ tid ] . size ) ;
2019-02-26 12:37:15 -05:00
if ( sess & & ! SSL_set_session ( ctx - > ssl , sess ) ) {
2023-08-21 06:04:01 -04:00
uint old_tid = HA_ATOMIC_LOAD ( & srv - > ssl_ctx . last_ssl_sess_tid ) ; // 0=none, >0 = tid + 1
if ( old_tid = = tid + 1 )
HA_ATOMIC_CAS ( & srv - > ssl_ctx . last_ssl_sess_tid , & old_tid , 0 ) ; // no more valid
2017-11-16 11:42:52 -05:00
SSL_SESSION_free ( sess ) ;
2023-08-21 02:41:49 -04:00
HA_RWLOCK_WRLOCK ( SSL_SERVER_LOCK , & srv - > ssl_ctx . reused_sess [ tid ] . sess_lock ) ;
2023-08-30 06:00:29 -04:00
ha_free ( & srv - > ssl_ctx . reused_sess [ tid ] . ptr ) ;
2023-08-21 02:41:49 -04:00
HA_RWLOCK_WRTORD ( SSL_SERVER_LOCK , & srv - > ssl_ctx . reused_sess [ tid ] . sess_lock ) ;
2023-08-30 06:02:33 -04:00
if ( srv - > ssl_ctx . reused_sess [ tid ] . sni )
SSL_set_tlsext_host_name ( ctx - > ssl , srv - > ssl_ctx . reused_sess [ tid ] . sni ) ;
2023-08-21 02:41:49 -04:00
HA_RWLOCK_RDUNLOCK ( SSL_SERVER_LOCK , & srv - > ssl_ctx . reused_sess [ tid ] . sess_lock ) ;
2017-11-16 11:42:52 -05:00
} else if ( sess ) {
2023-08-21 05:17:10 -04:00
/* already assigned, not needed anymore */
2017-11-16 11:42:52 -05:00
SSL_SESSION_free ( sess ) ;
2023-08-21 02:41:49 -04:00
HA_RWLOCK_RDLOCK ( SSL_SERVER_LOCK , & srv - > ssl_ctx . reused_sess [ tid ] . sess_lock ) ;
2023-08-30 06:02:33 -04:00
if ( srv - > ssl_ctx . reused_sess [ tid ] . sni )
SSL_set_tlsext_host_name ( ctx - > ssl , srv - > ssl_ctx . reused_sess [ tid ] . sni ) ;
2023-08-21 02:41:49 -04:00
HA_RWLOCK_RDUNLOCK ( SSL_SERVER_LOCK , & srv - > ssl_ctx . reused_sess [ tid ] . sess_lock ) ;
2014-11-12 11:35:37 -05:00
}
MEDIUM: server/ssl: pick another thread's session when we have none yet
The per-thread SSL context in servers causes a burst of connection
renegotiations on startup, both for the forwarded traffic and for the
health checks. Health checks have been seen to continue to cause SSL
rekeying for several minutes after a restart on large thread-count
machines. The reason is that the context is exclusively per-thread
and that the more threads there are, the more likely it is for a new
connection to start on a thread that doesn't have such a context yet.
In order to improve this situation, this commit ensures that a thread
starting an SSL connection to a server without a session will first
look at the last session that was updated by another thread, and will
try to use it. In order to minimize the contention, we're using a read
lock here to protect the data, and the first-level index is an integer
containing the thread number, that is always valid and may always be
dereferenced. This way the session retrieval algorithm becomes quite
simple:
- if the last thread index is valid, then try to use the same session
under a read lock ;
- if any error happens, then atomically nuke the index so that other
threads don't use it and the next one to update a connection updates
it again
And for the ssl_sess_new_srv_cb(), we have this:
- update the entry under a write lock if the new session is valid,
otherwise kill it if the session is not valid;
- atomically update the index if it was 0 and the new one is valid,
otherwise atomically nuke it if the session failed.
Note that even if only the pointer is destroyed, the element will be
re-allocated by the next thread during the ssl_sess_new_srv_cb().
Right now a session is picked even if the SNI doesn't match, because
we don't know the SNI yet during ssl_sock_init(), but that's essentially
a matter of API, since connect_server() figures the SNI very early, then
calls conn_prepare() which calls ssl_sock_init(). Thus in the future we
could easily imagine storing a number of SNI-based contexts instead of
storing contexts per thread.
It could be worth backporting this to one LTS version after some
observation, though this is not strictly necessary. the current commit
depends on the following ones:
BUG/MINOR: ssl_sock: fix possible memory leak on OOM
MINOR: ssl_sock: avoid iterating realloc(+1) on stored context
DOC: ssl: add some comments about the non-obvious session allocation stuff
CLEANUP: ssl: keep a pointer to the server in ssl_sock_init()
MEDIUM: ssl_sock: always use the SSL's server name, not the one from the tid
MEDIUM: server/ssl: place an rwlock in the per-thread ssl server session
MINOR: server/ssl: maintain an index of the last known valid SSL session
MINOR: server/ssl: clear the shared good session index on failure
MEDIUM: server/ssl: pick another thread's session when we have none yet
2023-08-21 06:12:12 -04:00
} else {
/* No session available yet, let's see if we can pick one
* from another thread . If old_tid is non - null , it designates
* the index of a recently updated thread that might still have
* a usable session . All threads are collectively responsible
* for resetting the index if it fails .
*/
const unsigned char * ptr ;
SSL_SESSION * sess ;
uint old_tid = HA_ATOMIC_LOAD ( & srv - > ssl_ctx . last_ssl_sess_tid ) ; // 0=none, >0 = tid + 1
if ( old_tid ) {
HA_RWLOCK_RDLOCK ( SSL_SERVER_LOCK , & srv - > ssl_ctx . reused_sess [ old_tid - 1 ] . sess_lock ) ;
ptr = srv - > ssl_ctx . reused_sess [ old_tid - 1 ] . ptr ;
if ( ptr ) {
sess = d2i_SSL_SESSION ( NULL , & ptr , srv - > ssl_ctx . reused_sess [ old_tid - 1 ] . size ) ;
if ( sess ) {
if ( ! SSL_set_session ( ctx - > ssl , sess ) )
HA_ATOMIC_CAS ( & srv - > ssl_ctx . last_ssl_sess_tid , & old_tid , 0 ) ; // no more valid
SSL_SESSION_free ( sess ) ;
}
}
if ( srv - > ssl_ctx . reused_sess [ old_tid - 1 ] . sni )
SSL_set_tlsext_host_name ( ctx - > ssl , srv - > ssl_ctx . reused_sess [ old_tid - 1 ] . sni ) ;
HA_RWLOCK_RDUNLOCK ( SSL_SERVER_LOCK , & srv - > ssl_ctx . reused_sess [ old_tid - 1 ] . sess_lock ) ;
}
2014-11-12 11:35:37 -05:00
}
2023-08-30 06:00:29 -04:00
HA_RWLOCK_RDUNLOCK ( SSL_SERVER_LOCK , & srv - > ssl_ctx . lock ) ;
2013-06-27 03:05:25 -04:00
2012-05-18 09:47:34 -04:00
/* leave init state and start handshake */
2012-09-04 02:03:39 -04:00
conn - > flags | = CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN ;
2012-09-06 05:58:37 -04:00
2021-10-06 06:15:18 -04:00
_HA_ATOMIC_INC ( & global . totalsslconns ) ;
2019-03-21 13:27:17 -04:00
* xprt_ctx = ctx ;
2012-05-18 09:47:34 -04:00
return 0 ;
}
2012-11-11 18:42:33 -05:00
else if ( objt_listener ( conn - > target ) ) {
2020-11-09 09:59:23 -05:00
struct bind_conf * bc = __objt_listener ( conn - > target ) - > bind_conf ;
if ( ssl_bio_and_sess_init ( conn , bc - > initial_ctx ,
& ctx - > ssl , & ctx - > bio , ha_meth , ctx ) = = - 1 )
2019-02-26 12:37:15 -05:00
goto err ;
2014-11-12 11:35:37 -05:00
2020-10-24 14:42:30 -04:00
# ifdef SSL_READ_EARLY_DATA_SUCCESS
2020-11-09 09:59:23 -05:00
if ( bc - > ssl_conf . early_data ) {
2020-01-24 08:56:18 -05:00
b_alloc ( & ctx - > early_buf ) ;
SSL_set_max_early_data ( ctx - > ssl ,
/* Only allow early data if we managed to allocate
* a buffer .
*/
( ! b_is_null ( & ctx - > early_buf ) ) ?
global . tune . bufsize - global . tune . maxrewrite : 0 ) ;
}
# endif
2019-02-26 12:37:15 -05:00
SSL_set_accept_state ( ctx - > ssl ) ;
2012-09-03 14:36:47 -04:00
2012-05-18 09:47:34 -04:00
/* leave init state and start handshake */
2012-09-04 02:03:39 -04:00
conn - > flags | = CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN ;
2020-10-24 14:42:30 -04:00
# ifdef SSL_READ_EARLY_DATA_SUCCESS
2021-02-03 05:21:38 -05:00
if ( bc - > ssl_conf . early_data )
conn - > flags | = CO_FL_EARLY_SSL_HS ;
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
# endif
2012-09-06 05:58:37 -04:00
2021-10-06 06:15:18 -04:00
_HA_ATOMIC_INC ( & global . totalsslconns ) ;
2019-03-21 13:27:17 -04:00
* xprt_ctx = ctx ;
2012-05-18 09:47:34 -04:00
return 0 ;
}
/* don't know how to handle such a target */
2012-12-03 10:32:10 -05:00
conn - > err_code = CO_ER_SSL_NO_TARGET ;
2019-02-26 12:37:15 -05:00
err :
2023-10-25 09:38:04 -04:00
if ( next_sslconn )
_HA_ATOMIC_DEC ( & global . sslconns ) ;
2019-06-14 08:42:29 -04:00
if ( ctx & & ctx - > wait_event . tasklet )
tasklet_free ( ctx - > wait_event . tasklet ) ;
2019-02-26 12:37:15 -05:00
pool_free ( ssl_sock_ctx_pool , ctx ) ;
2012-05-18 09:47:34 -04:00
return - 1 ;
}
/* This is the callback which is used when an SSL handshake is pending. It
* updates the FD status if it wants some polling before being called again .
* It returns 0 if it fails in a fatal way or needs to poll to go further ,
* otherwise it returns non - zero and removes itself from the connection ' s
* flags ( the bit is provided in < flag > by the caller ) .
*/
2019-05-23 08:45:12 -04:00
static int ssl_sock_handshake ( struct connection * conn , unsigned int flag )
2012-05-18 09:47:34 -04:00
{
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2012-05-18 09:47:34 -04:00
int ret ;
2020-11-06 07:19:18 -05:00
struct ssl_counters * counters = NULL ;
struct ssl_counters * counters_px = NULL ;
2020-11-03 11:10:02 -05:00
struct listener * li ;
struct server * srv ;
BUG/MEDIUM: ssl: check a connection's status before computing a handshake
As spotted in issue #822, we're having a problem with error detection in
the SSL layer. The problem is that on an overwhelmed machine, accepted
connections can start to pile up, each of them requiring a slow handshake,
and during all this time if the client aborts, the handshake will still be
calculated.
The error controls are properly placed, it's just that the SSL layer
reads records exactly of the advertised size, without having the ability
to encounter a pending connection error. As such if injecting many TLS
connections to a listener with a huge backlog, it's fairly possible to
meet this situation:
12:50:48.236056 accept4(8, {sa_family=AF_INET, sin_port=htons(62794), sin_addr=inet_addr("127.0.0.1")}, [128->16], SOCK_NONBLOCK) = 1109
12:50:48.236071 setsockopt(1109, SOL_TCP, TCP_NODELAY, [1], 4) = 0
(process other connections' handshakes)
12:50:48.257270 getsockopt(1109, SOL_SOCKET, SO_ERROR, [ECONNRESET], [4]) = 0
(proof that error was detectable there but this code was added for the PoC)
12:50:48.257297 recvfrom(1109, "\26\3\1\2\0", 5, 0, NULL, NULL) = 5
12:50:48.257310 recvfrom(1109, "\1\0\1\3"..., 512, 0, NULL, NULL) = 512
(handshake calculation taking 700us)
12:50:48.258004 sendto(1109, "\26\3\3\0z"..., 1421, MSG_DONTWAIT|MSG_NOSIGNAL, NULL, 0) = -1 EPIPE (Broken pipe)
12:50:48.258036 close(1109) = 0
The situation was amplified by the multi-queue accept code, as it resulted
in many incoming connections to be accepted long before they could be
handled. Prior to this they would have been accepted and the handshake
immediately started, which would have resulted in most of the connections
waiting in the system's accept queue, and dying there when the client
aborted, thus the error would have been detected before even trying to
pass them to the handshake code.
As a result, with a listener running on a very large backlog, it's possible
to quickly accept tens of thousands of connections and waste time slowly
running their handshakes while they get replaced by other ones.
This patch adds an SO_ERROR check on the connection's FD before starting
the handshake. This is not pretty as it requires to access the FD, but it
does the job.
Some improvements should be made over the long term so that the transport
layers can report extra information with their ->rcv_buf() call, or at the
very least, implement a ->get_conn_status() function to report various
flags such as shutr, shutw, error at various stages, allowing an upper
layer to inquire for the relevance of engaging into a long operation if
it's known the connection is not usable anymore. An even simpler step
could probably consist in implementing this in the control layer.
This patch is simple enough to be backported as far as 2.0.
Many thanks to @ngaugler for his numerous tests with detailed feedback.
2021-02-02 09:42:25 -05:00
socklen_t lskerr ;
int skerr ;
2012-05-18 09:47:34 -04:00
2014-01-23 07:50:42 -05:00
if ( ! conn_ctrl_ready ( conn ) )
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the control layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous : it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independently. After that we're still having an issue :
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_ctrl_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
return 0 ;
2020-11-13 10:05:00 -05:00
/* get counters */
switch ( obj_type ( conn - > target ) ) {
case OBJ_TYPE_LISTENER :
2021-07-26 03:59:06 -04:00
li = __objt_listener ( conn - > target ) ;
2020-11-13 10:05:00 -05:00
counters = EXTRA_COUNTERS_GET ( li - > extra_counters , & ssl_stats_module ) ;
counters_px = EXTRA_COUNTERS_GET ( li - > bind_conf - > frontend - > extra_counters_fe ,
& ssl_stats_module ) ;
break ;
case OBJ_TYPE_SERVER :
2021-07-26 03:59:06 -04:00
srv = __objt_server ( conn - > target ) ;
2020-11-13 10:05:00 -05:00
counters = EXTRA_COUNTERS_GET ( srv - > extra_counters , & ssl_stats_module ) ;
counters_px = EXTRA_COUNTERS_GET ( srv - > proxy - > extra_counters_be ,
& ssl_stats_module ) ;
break ;
default :
break ;
}
2022-04-11 05:29:11 -04:00
if ( ! ctx )
2012-05-18 09:47:34 -04:00
goto out_error ;
BUG/MEDIUM: ssl: check a connection's status before computing a handshake
As spotted in issue #822, we're having a problem with error detection in
the SSL layer. The problem is that on an overwhelmed machine, accepted
connections can start to pile up, each of them requiring a slow handshake,
and during all this time if the client aborts, the handshake will still be
calculated.
The error controls are properly placed, it's just that the SSL layer
reads records exactly of the advertised size, without having the ability
to encounter a pending connection error. As such if injecting many TLS
connections to a listener with a huge backlog, it's fairly possible to
meet this situation:
12:50:48.236056 accept4(8, {sa_family=AF_INET, sin_port=htons(62794), sin_addr=inet_addr("127.0.0.1")}, [128->16], SOCK_NONBLOCK) = 1109
12:50:48.236071 setsockopt(1109, SOL_TCP, TCP_NODELAY, [1], 4) = 0
(process other connections' handshakes)
12:50:48.257270 getsockopt(1109, SOL_SOCKET, SO_ERROR, [ECONNRESET], [4]) = 0
(proof that error was detectable there but this code was added for the PoC)
12:50:48.257297 recvfrom(1109, "\26\3\1\2\0", 5, 0, NULL, NULL) = 5
12:50:48.257310 recvfrom(1109, "\1\0\1\3"..., 512, 0, NULL, NULL) = 512
(handshake calculation taking 700us)
12:50:48.258004 sendto(1109, "\26\3\3\0z"..., 1421, MSG_DONTWAIT|MSG_NOSIGNAL, NULL, 0) = -1 EPIPE (Broken pipe)
12:50:48.258036 close(1109) = 0
The situation was amplified by the multi-queue accept code, as it resulted
in many incoming connections to be accepted long before they could be
handled. Prior to this they would have been accepted and the handshake
immediately started, which would have resulted in most of the connections
waiting in the system's accept queue, and dying there when the client
aborted, thus the error would have been detected before even trying to
pass them to the handshake code.
As a result, with a listener running on a very large backlog, it's possible
to quickly accept tens of thousands of connections and waste time slowly
running their handshakes while they get replaced by other ones.
This patch adds an SO_ERROR check on the connection's FD before starting
the handshake. This is not pretty as it requires to access the FD, but it
does the job.
Some improvements should be made over the long term so that the transport
layers can report extra information with their ->rcv_buf() call, or at the
very least, implement a ->get_conn_status() function to report various
flags such as shutr, shutw, error at various stages, allowing an upper
layer to inquire for the relevance of engaging into a long operation if
it's known the connection is not usable anymore. An even simpler step
could probably consist in implementing this in the control layer.
This patch is simple enough to be backported as far as 2.0.
Many thanks to @ngaugler for his numerous tests with detailed feedback.
2021-02-02 09:42:25 -05:00
/* don't start calculating a handshake on a dead connection */
if ( conn - > flags & ( CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH ) )
goto out_error ;
/* FIXME/WT: for now we don't have a clear way to inspect the connection
* status from the lower layers , so let ' s check the FD directly . Ideally
* the xprt layers should provide some status indicating their knowledge
* of shutdowns or error .
*/
2022-04-11 12:07:03 -04:00
BUG_ON ( conn - > flags & CO_FL_FDLESS ) ;
BUG/MEDIUM: ssl: check a connection's status before computing a handshake
As spotted in issue #822, we're having a problem with error detection in
the SSL layer. The problem is that on an overwhelmed machine, accepted
connections can start to pile up, each of them requiring a slow handshake,
and during all this time if the client aborts, the handshake will still be
calculated.
The error controls are properly placed, it's just that the SSL layer
reads records exactly of the advertised size, without having the ability
to encounter a pending connection error. As such if injecting many TLS
connections to a listener with a huge backlog, it's fairly possible to
meet this situation:
12:50:48.236056 accept4(8, {sa_family=AF_INET, sin_port=htons(62794), sin_addr=inet_addr("127.0.0.1")}, [128->16], SOCK_NONBLOCK) = 1109
12:50:48.236071 setsockopt(1109, SOL_TCP, TCP_NODELAY, [1], 4) = 0
(process other connections' handshakes)
12:50:48.257270 getsockopt(1109, SOL_SOCKET, SO_ERROR, [ECONNRESET], [4]) = 0
(proof that error was detectable there but this code was added for the PoC)
12:50:48.257297 recvfrom(1109, "\26\3\1\2\0", 5, 0, NULL, NULL) = 5
12:50:48.257310 recvfrom(1109, "\1\0\1\3"..., 512, 0, NULL, NULL) = 512
(handshake calculation taking 700us)
12:50:48.258004 sendto(1109, "\26\3\3\0z"..., 1421, MSG_DONTWAIT|MSG_NOSIGNAL, NULL, 0) = -1 EPIPE (Broken pipe)
12:50:48.258036 close(1109) = 0
The situation was amplified by the multi-queue accept code, as it resulted
in many incoming connections to be accepted long before they could be
handled. Prior to this they would have been accepted and the handshake
immediately started, which would have resulted in most of the connections
waiting in the the system's accept queue, and dying there when the client
aborted, thus the error would have been detected before even trying to
pass them to the handshake code.
As a result, with a listener running on a very large backlog, it's possible
to quickly accept tens of thousands of connections and waste time slowly
running their handshakes while they get replaced by other ones.
This patch adds an SO_ERROR check on the connection's FD before starting
the handshake. This is not pretty as it requires to access the FD, but it
does the job.
Some improvements should be made over the long term so that the transport
layers can report extra information with their ->rcv_buf() call, or at the
very least, implement a ->get_conn_status() function to report various
flags such as shutr, shutw, error at various stages, allowing an upper
layer to inquire for the relevance of engaging into a long operation if
it's known the connection is not usable anymore. An even simpler step
could probably consist in implementing this in the control layer.
This patch is simple enough to be backported as far as 2.0.
Many thanks to @ngaugler for his numerous tests with detailed feedback.
2021-02-02 09:42:25 -05:00
skerr = 0 ;
lskerr = sizeof ( skerr ) ;
if ( ( getsockopt ( conn - > handle . fd , SOL_SOCKET , SO_ERROR , & skerr , & lskerr ) < 0 ) | |
skerr ! = 0 )
goto out_error ;
2020-10-24 14:42:30 -04:00
# ifdef SSL_READ_EARLY_DATA_SUCCESS
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
/*
* Check if we have early data . If we do , we have to read them
* before SSL_do_handshake ( ) is called , And there ' s no way to
* detect early data , except to try to read them
*/
if ( conn - > flags & CO_FL_EARLY_SSL_HS ) {
2019-12-19 09:02:39 -05:00
size_t read_data = 0 ;
while ( 1 ) {
ret = SSL_read_early_data ( ctx - > ssl ,
b_tail ( & ctx - > early_buf ) , b_room ( & ctx - > early_buf ) ,
& read_data ) ;
if ( ret = = SSL_READ_EARLY_DATA_ERROR )
goto check_error ;
if ( read_data > 0 ) {
conn - > flags | = CO_FL_EARLY_DATA ;
b_add ( & ctx - > early_buf , read_data ) ;
}
if ( ret = = SSL_READ_EARLY_DATA_FINISH ) {
conn - > flags & = ~ CO_FL_EARLY_SSL_HS ;
if ( ! b_data ( & ctx - > early_buf ) )
b_free ( & ctx - > early_buf ) ;
break ;
}
}
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
}
# endif
2012-11-08 13:21:55 -05:00
/* If we use SSL_do_handshake to process a reneg initiated by
* the remote peer , it sometimes returns SSL_ERROR_SSL .
* Usually SSL_write and SSL_read are used and process implicitly
* the reneg handshake .
* Here we use SSL_peek as a workaround for reneg .
*/
MEDIUM: connection: remove CO_FL_CONNECTED and only rely on CO_FL_WAIT_*
Commit 477902bd2e ("MEDIUM: connections: Get ride of the xprt_done
callback.") broke the master CLI for a very obscure reason. It happens
that short requests immediately terminated by a shutdown are properly
received, CS_FL_EOS is correctly set, but in si_cs_recv(), we refrain
from setting CF_SHUTR on the channel because CO_FL_CONNECTED was not
yet set on the connection since we've not passed again through
conn_fd_handler() and it was not done in conn_complete_session(). While
commit a8a415d31a ("BUG/MEDIUM: connections: Set CO_FL_CONNECTED in
conn_complete_session()") fixed the issue, such accident may happen
again as the root cause is deeper and actually comes down to the fact
that CO_FL_CONNECTED is lazily set at various check points in the code
but not every time we drop one wait bit. It is not the first time we
face this situation.
Originally this flag was used to detect the transition between WAIT_*
and CONNECTED in order to call ->wake() from the FD handler. But since
at least 1.8-dev1 with commit 7bf3fa3c23 ("BUG/MAJOR: connection: update
CO_FL_CONNECTED before calling the data layer"), CO_FL_CONNECTED is
always synchronized against the two others before being checked. Moreover,
with the I/Os moved to tasklets, the decision to call the ->wake() function
is performed after the I/Os in si_cs_process() and equivalent, which don't
care about this transition either.
So in essence, checking for CO_FL_CONNECTED has become a lazy wait to
check for (CO_FL_WAIT_L4_CONN | CO_FL_WAIT_L6_CONN), but that always
relies on someone else having synchronized it.
This patch addresses it once for all by killing this flag and only checking
the two others (for which a composite mask CO_FL_WAIT_L4L6 was added). This
revealed a number of inconsistencies that were purposely not addressed here
for the sake of bisectability:
- while most places do check both L4+L6 and HANDSHAKE at the same time,
some places like assign_server() or back_handle_st_con() and a few
sample fetches looking for proxy protocol do check for L4+L6 but
don't care about HANDSHAKE ; these ones will probably fail on TCP
request session rules if the handshake is not complete.
- some handshake handlers do validate that a connection is established
at L4 but didn't clear CO_FL_WAIT_L4_CONN
- the ->ctl method of mux_fcgi, mux_pt and mux_h1 only checks for L4+L6
before declaring the mux ready while the snd_buf function also checks
for the handshake's completion. Likely the former should validate the
handshake as well and we should get rid of these extra tests in snd_buf.
- raw_sock_from_buf() would directly set CO_FL_CONNECTED and would only
later clear CO_FL_WAIT_L4_CONN.
- xprt_handshake would set CO_FL_CONNECTED itself without actually
clearing CO_FL_WAIT_L4_CONN, which could apparently happen only if
waiting for a pure Rx handshake.
- most places in ssl_sock that were checking CO_FL_CONNECTED don't need
to include the L4 check as an L6 check is enough to decide whether to
wait for more info or not.
It also becomes obvious when reading the test in si_cs_recv() that caused
the failure mentioned above that once converted it doesn't make any sense
anymore: having CS_FL_EOS set while still waiting for L4 and L6 to complete
cannot happen since for CS_FL_EOS to be set, the other ones must have been
validated.
Some of these parts will still deserve further cleanup, and some of the
observations above may induce some backports of potential bug fixes once
totally analyzed in their context. The risk of breaking existing stuff
is too high to blindly backport everything.
2020-01-23 03:11:58 -05:00
if ( ! ( conn - > flags & CO_FL_WAIT_L6_CONN ) & & SSL_renegotiate_pending ( ctx - > ssl ) ) {
2012-11-08 13:21:55 -05:00
char c ;
2019-02-26 12:37:15 -05:00
ret = SSL_peek ( ctx - > ssl , & c , 1 ) ;
2012-11-08 13:21:55 -05:00
if ( ret < = 0 ) {
/* handshake may have not been completed, let's find why */
2019-02-26 12:37:15 -05:00
ret = SSL_get_error ( ctx - > ssl , ret ) ;
2017-01-13 20:42:15 -05:00
2012-11-08 13:21:55 -05:00
if ( ret = = SSL_ERROR_WANT_WRITE ) {
/* SSL handshake needs to write, L4 connection may not be ready */
2019-05-28 04:12:02 -04:00
if ( ! ( ctx - > wait_event . events & SUB_RETRY_SEND ) )
2019-05-20 08:02:16 -04:00
ctx - > xprt - > subscribe ( conn , ctx - > xprt_ctx , SUB_RETRY_SEND , & ctx - > wait_event ) ;
2012-11-08 13:21:55 -05:00
return 0 ;
}
else if ( ret = = SSL_ERROR_WANT_READ ) {
/* handshake may have been completed but we have
* no more data to read .
*/
2019-02-26 12:37:15 -05:00
if ( ! SSL_renegotiate_pending ( ctx - > ssl ) ) {
2012-11-08 13:21:55 -05:00
ret = 1 ;
goto reneg_ok ;
}
/* SSL handshake needs to read, L4 connection is ready */
2019-05-28 04:12:02 -04:00
if ( ! ( ctx - > wait_event . events & SUB_RETRY_RECV ) )
2019-05-20 08:02:16 -04:00
ctx - > xprt - > subscribe ( conn , ctx - > xprt_ctx , SUB_RETRY_RECV , & ctx - > wait_event ) ;
2012-11-08 13:21:55 -05:00
return 0 ;
}
2020-11-13 15:56:34 -05:00
# ifdef SSL_MODE_ASYNC
2017-01-13 20:42:15 -05:00
else if ( ret = = SSL_ERROR_WANT_ASYNC ) {
2019-05-20 08:02:16 -04:00
ssl_async_process_fds ( ctx ) ;
2017-01-13 20:42:15 -05:00
return 0 ;
}
# endif
2012-11-08 13:21:55 -05:00
else if ( ret = = SSL_ERROR_SYSCALL ) {
/* if errno is null, then connection was successfully established */
if ( ! errno & & conn - > flags & CO_FL_WAIT_L4_CONN )
conn - > flags & = ~ CO_FL_WAIT_L4_CONN ;
2012-12-03 10:32:10 -05:00
if ( ! conn - > err_code ) {
2019-07-08 08:29:15 -04:00
# if defined(OPENSSL_IS_BORINGSSL) || defined(LIBRESSL_VERSION_NUMBER)
/* do not handle empty handshakes in BoringSSL or LibreSSL */
2017-01-13 11:48:18 -05:00
conn - > err_code = CO_ER_SSL_HANDSHAKE ;
# else
2016-08-29 07:26:37 -04:00
int empty_handshake ;
2019-05-09 08:13:35 -04:00
# if (HA_OPENSSL_VERSION_NUMBER >= 0x1010000fL)
2019-07-08 08:29:15 -04:00
/* use SSL_get_state() in OpenSSL >= 1.1.0; SSL_state() is broken */
2019-02-26 12:37:15 -05:00
OSSL_HANDSHAKE_STATE state = SSL_get_state ( ( SSL * ) ctx - > ssl ) ;
2016-08-29 07:26:37 -04:00
empty_handshake = state = = TLS_ST_BEFORE ;
# else
2019-07-08 08:29:15 -04:00
/* access packet_length directly in OpenSSL <= 1.0.2; SSL_state() is broken */
empty_handshake = ! ctx - > ssl - > packet_length ;
2016-08-29 07:26:37 -04:00
# endif
if ( empty_handshake ) {
2014-04-25 13:05:36 -04:00
if ( ! errno ) {
2019-02-28 12:10:45 -05:00
if ( ctx - > xprt_st & SSL_SOCK_RECV_HEARTBEAT )
2014-04-25 13:05:36 -04:00
conn - > err_code = CO_ER_SSL_HANDSHAKE_HB ;
else
conn - > err_code = CO_ER_SSL_EMPTY ;
}
else {
2019-02-28 12:10:45 -05:00
if ( ctx - > xprt_st & SSL_SOCK_RECV_HEARTBEAT )
2014-04-25 13:05:36 -04:00
conn - > err_code = CO_ER_SSL_HANDSHAKE_HB ;
else
conn - > err_code = CO_ER_SSL_ABORT ;
}
}
else {
2019-02-28 12:10:45 -05:00
if ( ctx - > xprt_st & SSL_SOCK_RECV_HEARTBEAT )
2014-04-25 13:05:36 -04:00
conn - > err_code = CO_ER_SSL_HANDSHAKE_HB ;
2012-12-03 10:32:10 -05:00
else
2014-04-25 13:05:36 -04:00
conn - > err_code = CO_ER_SSL_HANDSHAKE ;
}
2019-07-08 08:29:15 -04:00
# endif /* BoringSSL or LibreSSL */
2012-12-03 10:32:10 -05:00
}
2012-11-08 13:21:55 -05:00
goto out_error ;
}
else {
/* Fail on all other handshake errors */
/* Note: OpenSSL may leave unread bytes in the socket's
* buffer , causing an RST to be emitted upon close ( ) on
* TCP sockets . We first try to drain possibly pending
* data to avoid this as much as possible .
*/
2020-12-11 10:20:34 -05:00
conn_ctrl_drain ( conn ) ;
2012-12-03 10:32:10 -05:00
if ( ! conn - > err_code )
2019-02-28 12:10:45 -05:00
conn - > err_code = ( ctx - > xprt_st & SSL_SOCK_RECV_HEARTBEAT ) ?
2014-04-25 14:02:39 -04:00
CO_ER_SSL_KILLED_HB : CO_ER_SSL_HANDSHAKE ;
2012-11-08 13:21:55 -05:00
goto out_error ;
}
}
/* read some data: consider handshake completed */
goto reneg_ok ;
}
2019-02-26 12:37:15 -05:00
ret = SSL_do_handshake ( ctx - > ssl ) ;
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
check_error :
2012-05-18 09:47:34 -04:00
if ( ret ! = 1 ) {
/* handshake did not complete, let's find why */
2019-02-26 12:37:15 -05:00
ret = SSL_get_error ( ctx - > ssl , ret ) ;
2012-05-18 09:47:34 -04:00
2021-09-29 12:56:52 -04:00
if ( ! ctx - > error_code )
ctx - > error_code = ERR_peek_error ( ) ;
2021-07-29 03:45:51 -04:00
2012-05-18 09:47:34 -04:00
if ( ret = = SSL_ERROR_WANT_WRITE ) {
/* SSL handshake needs to write, L4 connection may not be ready */
2019-05-28 04:12:02 -04:00
if ( ! ( ctx - > wait_event . events & SUB_RETRY_SEND ) )
2019-05-20 08:02:16 -04:00
ctx - > xprt - > subscribe ( conn , ctx - > xprt_ctx , SUB_RETRY_SEND , & ctx - > wait_event ) ;
2012-05-18 09:47:34 -04:00
return 0 ;
}
else if ( ret = = SSL_ERROR_WANT_READ ) {
/* SSL handshake needs to read, L4 connection is ready */
2019-05-20 08:02:16 -04:00
if ( ! ( ctx - > wait_event . events & SUB_RETRY_RECV ) )
ctx - > xprt - > subscribe ( conn , ctx - > xprt_ctx ,
SUB_RETRY_RECV , & ctx - > wait_event ) ;
2012-05-18 09:47:34 -04:00
return 0 ;
}
2020-11-13 15:56:34 -05:00
# ifdef SSL_MODE_ASYNC
2017-01-13 20:42:15 -05:00
else if ( ret = = SSL_ERROR_WANT_ASYNC ) {
2019-05-20 08:02:16 -04:00
ssl_async_process_fds ( ctx ) ;
2017-01-13 20:42:15 -05:00
return 0 ;
}
# endif
2012-09-28 14:22:13 -04:00
else if ( ret = = SSL_ERROR_SYSCALL ) {
/* if errno is null, then connection was successfully established */
if ( ! errno & & conn - > flags & CO_FL_WAIT_L4_CONN )
conn - > flags & = ~ CO_FL_WAIT_L4_CONN ;
2017-01-13 11:48:18 -05:00
if ( ! conn - > err_code ) {
2019-07-08 08:29:15 -04:00
# if defined(OPENSSL_IS_BORINGSSL) || defined(LIBRESSL_VERSION_NUMBER)
/* do not handle empty handshakes in BoringSSL or LibreSSL */
2017-01-13 11:48:18 -05:00
conn - > err_code = CO_ER_SSL_HANDSHAKE ;
# else
int empty_handshake ;
2019-05-09 08:13:35 -04:00
# if (HA_OPENSSL_VERSION_NUMBER >= 0x1010000fL)
2019-07-08 08:29:15 -04:00
/* use SSL_get_state() in OpenSSL >= 1.1.0; SSL_state() is broken */
2019-02-26 12:37:15 -05:00
OSSL_HANDSHAKE_STATE state = SSL_get_state ( ctx - > ssl ) ;
2017-01-13 11:48:18 -05:00
empty_handshake = state = = TLS_ST_BEFORE ;
2016-08-29 07:26:37 -04:00
# else
2019-07-08 08:29:15 -04:00
/* access packet_length directly in OpenSSL <= 1.0.2; SSL_state() is broken */
empty_handshake = ! ctx - > ssl - > packet_length ;
2016-08-29 07:26:37 -04:00
# endif
2017-01-13 11:48:18 -05:00
if ( empty_handshake ) {
if ( ! errno ) {
2019-02-28 12:10:45 -05:00
if ( ctx - > xprt_st & SSL_SOCK_RECV_HEARTBEAT )
2017-01-13 11:48:18 -05:00
conn - > err_code = CO_ER_SSL_HANDSHAKE_HB ;
else
conn - > err_code = CO_ER_SSL_EMPTY ;
}
else {
2019-02-28 12:10:45 -05:00
if ( ctx - > xprt_st & SSL_SOCK_RECV_HEARTBEAT )
2017-01-13 11:48:18 -05:00
conn - > err_code = CO_ER_SSL_HANDSHAKE_HB ;
else
conn - > err_code = CO_ER_SSL_ABORT ;
}
2014-04-25 13:05:36 -04:00
}
else {
2019-02-28 12:10:45 -05:00
if ( ctx - > xprt_st & SSL_SOCK_RECV_HEARTBEAT )
2014-04-25 13:05:36 -04:00
conn - > err_code = CO_ER_SSL_HANDSHAKE_HB ;
else
2017-01-13 11:48:18 -05:00
conn - > err_code = CO_ER_SSL_HANDSHAKE ;
2014-04-25 13:05:36 -04:00
}
2019-07-08 08:29:15 -04:00
# endif /* BoringSSL or LibreSSL */
2014-04-25 13:05:36 -04:00
}
2012-09-28 14:22:13 -04:00
goto out_error ;
2023-06-26 11:42:09 -04:00
} else if ( ret = = SSL_ERROR_ZERO_RETURN ) {
/* The peer has closed the SSL session for writing by
* sending a close_notify alert */
conn_ctrl_drain ( conn ) ;
2023-06-26 13:08:00 -04:00
conn - > err_code = CO_ER_SSL_EMPTY ;
2023-06-26 11:42:09 -04:00
goto out_error ;
2012-09-28 14:22:13 -04:00
}
2012-05-18 09:47:34 -04:00
else {
/* Fail on all other handshake errors */
2012-10-19 14:52:18 -04:00
/* Note: OpenSSL may leave unread bytes in the socket's
* buffer , causing an RST to be emitted upon close ( ) on
* TCP sockets . We first try to drain possibly pending
* data to avoid this as much as possible .
*/
2020-12-11 10:20:34 -05:00
conn_ctrl_drain ( conn ) ;
2012-12-03 10:32:10 -05:00
if ( ! conn - > err_code )
2019-02-28 12:10:45 -05:00
conn - > err_code = ( ctx - > xprt_st & SSL_SOCK_RECV_HEARTBEAT ) ?
2014-04-25 14:02:39 -04:00
CO_ER_SSL_KILLED_HB : CO_ER_SSL_HANDSHAKE ;
2012-05-18 09:47:34 -04:00
goto out_error ;
}
}
2020-10-24 14:42:30 -04:00
# ifdef SSL_READ_EARLY_DATA_SUCCESS
2017-11-03 11:27:47 -04:00
else {
/*
* If the server refused the early data , we have to send a
* 425 to the client , as we no longer have the data to sent
* them again .
*/
if ( ( conn - > flags & CO_FL_EARLY_DATA ) & & ( objt_server ( conn - > target ) ) ) {
2019-02-26 12:37:15 -05:00
if ( SSL_get_early_data_status ( ctx - > ssl ) = = SSL_EARLY_DATA_REJECTED ) {
2017-11-03 11:27:47 -04:00
conn - > err_code = CO_ER_SSL_EARLY_FAILED ;
goto out_error ;
}
}
}
# endif
2012-05-18 09:47:34 -04:00
2012-11-08 13:21:55 -05:00
reneg_ok :
2017-06-06 08:35:14 -04:00
2020-11-13 15:56:34 -05:00
# ifdef SSL_MODE_ASYNC
2017-06-06 08:35:14 -04:00
/* ASYNC engine API doesn't support moving read/write
* buffers . So we disable ASYNC mode right after
2020-03-10 03:06:11 -04:00
* the handshake to avoid buffer overflow .
2017-06-06 08:35:14 -04:00
*/
if ( global_ssl . async )
2019-02-26 12:37:15 -05:00
SSL_clear_mode ( ctx - > ssl , SSL_MODE_ASYNC ) ;
2017-06-06 08:35:14 -04:00
# endif
2012-05-18 09:47:34 -04:00
/* Handshake succeeded */
2019-02-26 12:37:15 -05:00
if ( ! SSL_session_reused ( ctx - > ssl ) ) {
2014-05-28 06:28:58 -04:00
if ( objt_server ( conn - > target ) ) {
update_freq_ctr ( & global . ssl_be_keys_per_sec , 1 ) ;
if ( global . ssl_be_keys_per_sec . curr_ctr > global . ssl_be_keys_max )
global . ssl_be_keys_max = global . ssl_be_keys_per_sec . curr_ctr ;
2012-05-18 09:47:34 -04:00
}
2014-05-28 06:28:58 -04:00
else {
update_freq_ctr ( & global . ssl_fe_keys_per_sec , 1 ) ;
if ( global . ssl_fe_keys_per_sec . curr_ctr > global . ssl_fe_keys_max )
global . ssl_fe_keys_max = global . ssl_fe_keys_per_sec . curr_ctr ;
}
2020-11-03 11:10:02 -05:00
2020-11-06 07:19:18 -05:00
if ( counters ) {
2021-11-22 11:46:13 -05:00
HA_ATOMIC_INC ( & counters - > sess ) ;
HA_ATOMIC_INC ( & counters_px - > sess ) ;
2020-11-06 07:19:18 -05:00
}
2020-11-03 11:10:02 -05:00
}
2020-11-06 07:19:18 -05:00
else if ( counters ) {
2021-11-22 11:46:13 -05:00
HA_ATOMIC_INC ( & counters - > reused_sess ) ;
HA_ATOMIC_INC ( & counters_px - > reused_sess ) ;
2012-05-18 09:47:34 -04:00
}
/* The connection is now established at both layers, it's time to leave */
conn - > flags & = ~ ( flag | CO_FL_WAIT_L4_CONN | CO_FL_WAIT_L6_CONN ) ;
return 1 ;
out_error :
2012-12-14 05:21:13 -05:00
/* Clear openssl global errors stack */
2022-09-06 13:37:08 -04:00
ssl_sock_dump_errors ( conn , NULL ) ;
2012-12-14 05:21:13 -05:00
ERR_clear_error ( ) ;
2012-10-04 11:09:56 -04:00
/* free resumed session if exists */
2021-02-08 04:43:44 -05:00
if ( objt_server ( conn - > target ) ) {
struct server * s = __objt_server ( conn - > target ) ;
/* RWLOCK: only rdlock the SSL cache even when writing in it because there is
* one cache per thread , it only prevents to flush it from the CLI in
* another thread */
HA_RWLOCK_RDLOCK ( SSL_SERVER_LOCK , & s - > ssl_ctx . lock ) ;
2021-02-20 04:46:51 -05:00
if ( s - > ssl_ctx . reused_sess [ tid ] . ptr )
ha_free ( & s - > ssl_ctx . reused_sess [ tid ] . ptr ) ;
2021-02-08 04:43:44 -05:00
HA_RWLOCK_RDUNLOCK ( SSL_SERVER_LOCK , & s - > ssl_ctx . lock ) ;
2012-10-04 11:09:56 -04:00
}
2020-11-13 10:05:00 -05:00
if ( counters ) {
2021-11-22 11:46:13 -05:00
HA_ATOMIC_INC ( & counters - > failed_handshake ) ;
HA_ATOMIC_INC ( & counters_px - > failed_handshake ) ;
2020-11-13 10:05:00 -05:00
}
2012-05-18 09:47:34 -04:00
/* Fail on all other handshake errors */
conn - > flags | = CO_FL_ERROR ;
2012-12-03 10:32:10 -05:00
if ( ! conn - > err_code )
conn - > err_code = CO_ER_SSL_HANDSHAKE ;
2012-05-18 09:47:34 -04:00
return 0 ;
}
2020-01-17 01:52:13 -05:00
/* Called from the upper layer, to subscribe <es> to events <event_type>. The
* event subscriber < es > is not allowed to change from a previous call as long
* as at least one event is still subscribed . The < event_type > must only be a
* combination of SUB_RETRY_RECV and SUB_RETRY_SEND . It always returns 0 ,
* unless the transport layer was already released .
*/
static int ssl_subscribe ( struct connection * conn , void * xprt_ctx , int event_type , struct wait_event * es )
2019-03-21 11:30:07 -04:00
{
2019-05-20 08:02:16 -04:00
struct ssl_sock_ctx * ctx = xprt_ctx ;
2019-03-21 13:27:17 -04:00
2019-06-24 12:57:39 -04:00
if ( ! ctx )
return - 1 ;
2020-01-10 03:20:26 -05:00
BUG_ON ( event_type & ~ ( SUB_RETRY_SEND | SUB_RETRY_RECV ) ) ;
2020-01-17 01:52:13 -05:00
BUG_ON ( ctx - > subs & & ctx - > subs ! = es ) ;
2020-01-10 03:20:26 -05:00
2020-01-17 01:52:13 -05:00
ctx - > subs = es ;
es - > events | = event_type ;
2020-01-10 03:20:26 -05:00
/* we may have to subscribe to lower layers for new events */
event_type & = ~ ctx - > wait_event . events ;
if ( event_type & & ! ( conn - > flags & CO_FL_SSL_WAIT_HS ) )
ctx - > xprt - > subscribe ( conn , ctx - > xprt_ctx , event_type , & ctx - > wait_event ) ;
2019-05-20 08:02:16 -04:00
return 0 ;
2019-03-21 11:30:07 -04:00
}
2020-01-17 01:52:13 -05:00
/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
* The < es > pointer is not allowed to differ from the one passed to the
* subscribe ( ) call . It always returns zero .
*/
static int ssl_unsubscribe ( struct connection * conn , void * xprt_ctx , int event_type , struct wait_event * es )
2019-03-21 11:30:07 -04:00
{
2019-05-20 08:02:16 -04:00
struct ssl_sock_ctx * ctx = xprt_ctx ;
2020-01-10 03:20:26 -05:00
BUG_ON ( event_type & ~ ( SUB_RETRY_SEND | SUB_RETRY_RECV ) ) ;
2020-01-17 01:52:13 -05:00
BUG_ON ( ctx - > subs & & ctx - > subs ! = es ) ;
2019-03-21 13:27:17 -04:00
2020-01-17 01:52:13 -05:00
es - > events & = ~ event_type ;
if ( ! es - > events )
2020-01-10 03:20:26 -05:00
ctx - > subs = NULL ;
/* If we subscribed, and we're not doing the handshake,
* then we subscribed because the upper layer asked for it ,
* as the upper layer is no longer interested , we can
* unsubscribe too .
*/
event_type & = ctx - > wait_event . events ;
if ( event_type & & ! ( ctx - > conn - > flags & CO_FL_SSL_WAIT_HS ) )
conn_unsubscribe ( conn , ctx - > xprt_ctx , event_type , & ctx - > wait_event ) ;
2019-05-20 08:02:16 -04:00
return 0 ;
}
2020-07-03 08:01:21 -04:00
/* The connection has been taken over, so destroy the old tasklet and create
* a new one . The original thread ID must be passed into orig_tid
* It should be called with the takeover lock for the old thread held .
* Returns 0 on success , and - 1 on failure
*/
2024-03-15 10:36:33 -04:00
static int ssl_takeover ( struct connection * conn , void * xprt_ctx , int orig_tid , int release )
2020-07-03 08:01:21 -04:00
{
struct ssl_sock_ctx * ctx = xprt_ctx ;
2024-03-15 10:36:33 -04:00
struct tasklet * tl = NULL ;
2020-07-03 08:01:21 -04:00
2024-03-15 10:36:33 -04:00
if ( ! release ) {
tl = tasklet_new ( ) ;
if ( ! tl )
return - 1 ;
}
2020-07-03 08:01:21 -04:00
ctx - > wait_event . tasklet - > context = NULL ;
tasklet_wakeup_on ( ctx - > wait_event . tasklet , orig_tid ) ;
2024-03-15 10:36:33 -04:00
2020-07-03 08:01:21 -04:00
ctx - > wait_event . tasklet = tl ;
2024-03-15 10:36:33 -04:00
if ( ! release ) {
ctx - > wait_event . tasklet - > process = ssl_sock_io_cb ;
ctx - > wait_event . tasklet - > context = ctx ;
}
2020-07-03 08:01:21 -04:00
return 0 ;
}
2021-03-02 11:29:56 -05:00
/* notify the next xprt that the connection is about to become idle and that it
* may be stolen at any time after the function returns and that any tasklet in
* the chain must be careful before dereferencing its context .
*/
static void ssl_set_idle ( struct connection * conn , void * xprt_ctx )
{
struct ssl_sock_ctx * ctx = xprt_ctx ;
if ( ! ctx | | ! ctx - > wait_event . tasklet )
return ;
HA_ATOMIC_OR ( & ctx - > wait_event . tasklet - > state , TASK_F_USR1 ) ;
if ( ctx - > xprt )
xprt_set_idle ( conn , ctx - > xprt , ctx - > xprt_ctx ) ;
}
/* notify the next xprt that the connection is not idle anymore and that it may
* not be stolen before the next xprt_set_idle ( ) .
*/
static void ssl_set_used ( struct connection * conn , void * xprt_ctx )
{
struct ssl_sock_ctx * ctx = xprt_ctx ;
if ( ! ctx | | ! ctx - > wait_event . tasklet )
return ;
HA_ATOMIC_OR ( & ctx - > wait_event . tasklet - > state , TASK_F_USR1 ) ;
if ( ctx - > xprt )
xprt_set_used ( conn , ctx - > xprt , ctx - > xprt_ctx ) ;
}
2019-05-27 13:50:12 -04:00
/* Use the provided XPRT as an underlying XPRT, and provide the old one.
* Returns 0 on success , and non - zero on failure .
*/
static int ssl_add_xprt ( struct connection * conn , void * xprt_ctx , void * toadd_ctx , const struct xprt_ops * toadd_ops , void * * oldxprt_ctx , const struct xprt_ops * * oldxprt_ops )
{
struct ssl_sock_ctx * ctx = xprt_ctx ;
if ( oldxprt_ops ! = NULL )
* oldxprt_ops = ctx - > xprt ;
if ( oldxprt_ctx ! = NULL )
* oldxprt_ctx = ctx - > xprt_ctx ;
ctx - > xprt = toadd_ops ;
ctx - > xprt_ctx = toadd_ctx ;
return 0 ;
}
2019-05-23 11:47:36 -04:00
/* Remove the specified xprt. If if it our underlying XPRT, remove it and
* return 0 , otherwise just call the remove_xprt method from the underlying
* XPRT .
*/
static int ssl_remove_xprt ( struct connection * conn , void * xprt_ctx , void * toremove_ctx , const struct xprt_ops * newops , void * newctx )
{
struct ssl_sock_ctx * ctx = xprt_ctx ;
if ( ctx - > xprt_ctx = = toremove_ctx ) {
ctx - > xprt_ctx = newctx ;
ctx - > xprt = newops ;
return 0 ;
}
return ( ctx - > xprt - > remove_xprt ( conn , ctx - > xprt_ctx , toremove_ctx , newops , newctx ) ) ;
}
2021-03-02 10:09:26 -05:00
struct task * ssl_sock_io_cb ( struct task * t , void * context , unsigned int state )
2019-05-20 08:02:16 -04:00
{
2020-07-03 08:01:21 -04:00
struct tasklet * tl = ( struct tasklet * ) t ;
2019-05-20 08:02:16 -04:00
struct ssl_sock_ctx * ctx = context ;
2020-07-03 08:01:21 -04:00
struct connection * conn ;
int conn_in_list ;
int ret = 0 ;
2019-05-20 08:02:16 -04:00
2021-03-02 11:29:56 -05:00
if ( state & TASK_F_USR1 ) {
/* the tasklet was idling on an idle connection, it might have
* been stolen , let ' s be careful !
*/
HA_SPIN_LOCK ( IDLE_CONNS_LOCK , & idle_conns [ tid ] . idle_conns_lock ) ;
if ( tl - > context = = NULL ) {
HA_SPIN_UNLOCK ( IDLE_CONNS_LOCK , & idle_conns [ tid ] . idle_conns_lock ) ;
tasklet_free ( tl ) ;
return NULL ;
}
conn = ctx - > conn ;
2023-10-12 08:01:49 -04:00
conn_in_list = conn - > flags & CO_FL_LIST_MASK ;
2021-03-02 11:29:56 -05:00
if ( conn_in_list )
2023-08-21 08:24:17 -04:00
conn_delete_from_tree ( conn ) ;
2021-01-11 03:21:52 -05:00
HA_SPIN_UNLOCK ( IDLE_CONNS_LOCK , & idle_conns [ tid ] . idle_conns_lock ) ;
2021-03-02 11:29:56 -05:00
} else {
conn = ctx - > conn ;
conn_in_list = 0 ;
2020-07-03 08:01:21 -04:00
}
2021-03-02 11:29:56 -05:00
2019-05-20 08:02:16 -04:00
/* First if we're doing an handshake, try that */
MINOR: ssl: mark the SSL handshake tasklet as heavy
There's a fairness issue between SSL and clear text. A full end-to-end
cleartext connection can require up to ~7.7 wakeups on average, plus 3.3
for the SSL tasklet, one of which is particularly expensive. So if we
accept to process many handshakes taking 1ms each, we significantly
increase the processing time of regular tasks just by adding an extra
delay between their calls. Ideally in order to be fair we should have a
1:18 call ratio, but this requires a bit more accounting. With very little
effort we can mark the SSL handshake tasklet as TASK_HEAVY until the
handshake completes, and remove it once done.
Doing so reduces from 14 to 3.0 ms the total response time experienced
by HTTP clients running in parallel to 1000 SSL clients doing full
handshakes in loops. Better, when tune.sched.low-latency is set to "on",
the latency further drops to 1.8 ms.
The tasks latency distribution explain pretty well what is happening:
Without the patch:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 2785375 19.35m 416.9us 5.401h 6.980ms
h1_io_cb 1868949 9.853s 5.271us 4.829h 9.302ms
process_stream 1864066 7.582s 4.067us 2.058h 3.974ms
si_cs_io_cb 1733808 1.932s 1.114us 26.83m 928.5us
h1_timeout_task 935760 - - 1.033h 3.975ms
accept_queue_process 303606 4.627s 15.24us 16.65m 3.291ms
srv_cleanup_toremove_connections452 64.31ms 142.3us 2.447s 5.415ms
task_run_applet 47 5.149ms 109.6us 57.09ms 1.215ms
srv_cleanup_idle_connections 34 2.210ms 65.00us 87.49ms 2.573ms
With the patch:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 3000365 21.08m 421.6us 20.30h 24.36ms
h1_io_cb 2031932 9.278s 4.565us 46.70m 1.379ms
process_stream 2010682 7.391s 3.675us 22.83m 681.2us
si_cs_io_cb 1702070 1.571s 922.0ns 8.732m 307.8us
h1_timeout_task 1009594 - - 17.63m 1.048ms
accept_queue_process 339595 4.792s 14.11us 3.714m 656.2us
srv_cleanup_toremove_connections779 75.42ms 96.81us 438.3ms 562.6us
srv_cleanup_idle_connections 48 2.498ms 52.05us 178.1us 3.709us
task_run_applet 17 1.738ms 102.3us 11.29ms 663.9us
other 1 947.8us 947.8us 202.6us 202.6us
=> h1_io_cb() and process_stream() are divided by 6 while ssl_sock_io_cb() is
multipled by 4
And with low-latency on:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 3000565 20.96m 419.1us 20.74h 24.89ms
h1_io_cb 2019702 9.294s 4.601us 49.22m 1.462ms
process_stream 2009755 6.570s 3.269us 1.493m 44.57us
si_cs_io_cb 1997820 1.566s 783.0ns 2.985m 89.66us
h1_timeout_task 1009742 - - 1.647m 97.86us
accept_queue_process 494509 4.697s 9.498us 1.240m 150.4us
srv_cleanup_toremove_connections1120 92.32ms 82.43us 463.0ms 413.4us
srv_cleanup_idle_connections 70 2.703ms 38.61us 204.5us 2.921us
task_run_applet 13 1.303ms 100.3us 85.12us 6.548us
=> process_stream() is divided by 100 while ssl_sock_io_cb() is
multipled by 4
Interestingly, the total HTTPS response time doesn't increase and even very
slightly decreases, with an overall ~1% higher request rate. The net effect
here is a redistribution of the CPU resources between internal tasks, and
in the case of SSL, handshakes wait bit more but everything after completes
faster.
This was made simple enough to be backportable if it helps some users
suffering from high latencies in mixed traffic.
2021-02-25 09:31:00 -05:00
if ( ctx - > conn - > flags & CO_FL_SSL_WAIT_HS ) {
2019-05-20 08:02:16 -04:00
ssl_sock_handshake ( ctx - > conn , CO_FL_SSL_WAIT_HS ) ;
MINOR: ssl: mark the SSL handshake tasklet as heavy
There's a fairness issue between SSL and clear text. A full end-to-end
cleartext connection can require up to ~7.7 wakeups on average, plus 3.3
for the SSL tasklet, one of which is particularly expensive. So if we
accept to process many handshakes taking 1ms each, we significantly
increase the processing time of regular tasks just by adding an extra
delay between their calls. Ideally in order to be fair we should have a
1:18 call ratio, but this requires a bit more accounting. With very little
effort we can mark the SSL handshake tasklet as TASK_HEAVY until the
handshake completes, and remove it once done.
Doing so reduces from 14 to 3.0 ms the total response time experienced
by HTTP clients running in parallel to 1000 SSL clients doing full
handshakes in loops. Better, when tune.sched.low-latency is set to "on",
the latency further drops to 1.8 ms.
The tasks latency distribution explain pretty well what is happening:
Without the patch:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 2785375 19.35m 416.9us 5.401h 6.980ms
h1_io_cb 1868949 9.853s 5.271us 4.829h 9.302ms
process_stream 1864066 7.582s 4.067us 2.058h 3.974ms
si_cs_io_cb 1733808 1.932s 1.114us 26.83m 928.5us
h1_timeout_task 935760 - - 1.033h 3.975ms
accept_queue_process 303606 4.627s 15.24us 16.65m 3.291ms
srv_cleanup_toremove_connections452 64.31ms 142.3us 2.447s 5.415ms
task_run_applet 47 5.149ms 109.6us 57.09ms 1.215ms
srv_cleanup_idle_connections 34 2.210ms 65.00us 87.49ms 2.573ms
With the patch:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 3000365 21.08m 421.6us 20.30h 24.36ms
h1_io_cb 2031932 9.278s 4.565us 46.70m 1.379ms
process_stream 2010682 7.391s 3.675us 22.83m 681.2us
si_cs_io_cb 1702070 1.571s 922.0ns 8.732m 307.8us
h1_timeout_task 1009594 - - 17.63m 1.048ms
accept_queue_process 339595 4.792s 14.11us 3.714m 656.2us
srv_cleanup_toremove_connections779 75.42ms 96.81us 438.3ms 562.6us
srv_cleanup_idle_connections 48 2.498ms 52.05us 178.1us 3.709us
task_run_applet 17 1.738ms 102.3us 11.29ms 663.9us
other 1 947.8us 947.8us 202.6us 202.6us
=> h1_io_cb() and process_stream() are divided by 6 while ssl_sock_io_cb() is
multipled by 4
And with low-latency on:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 3000565 20.96m 419.1us 20.74h 24.89ms
h1_io_cb 2019702 9.294s 4.601us 49.22m 1.462ms
process_stream 2009755 6.570s 3.269us 1.493m 44.57us
si_cs_io_cb 1997820 1.566s 783.0ns 2.985m 89.66us
h1_timeout_task 1009742 - - 1.647m 97.86us
accept_queue_process 494509 4.697s 9.498us 1.240m 150.4us
srv_cleanup_toremove_connections1120 92.32ms 82.43us 463.0ms 413.4us
srv_cleanup_idle_connections 70 2.703ms 38.61us 204.5us 2.921us
task_run_applet 13 1.303ms 100.3us 85.12us 6.548us
=> process_stream() is divided by 100 while ssl_sock_io_cb() is
multipled by 4
Interestingly, the total HTTPS response time doesn't increase and even very
slightly decreases, with an overall ~1% higher request rate. The net effect
here is a redistribution of the CPU resources between internal tasks, and
in the case of SSL, handshakes wait bit more but everything after completes
faster.
This was made simple enough to be backportable if it helps some users
suffering from high latencies in mixed traffic.
2021-02-25 09:31:00 -05:00
if ( ! ( ctx - > conn - > flags & CO_FL_SSL_WAIT_HS ) ) {
/* handshake completed, leave the bulk queue */
2021-03-09 11:58:02 -05:00
_HA_ATOMIC_AND ( & tl - > state , ~ TASK_HEAVY ) ;
MINOR: ssl: mark the SSL handshake tasklet as heavy
There's a fairness issue between SSL and clear text. A full end-to-end
cleartext connection can require up to ~7.7 wakeups on average, plus 3.3
for the SSL tasklet, one of which is particularly expensive. So if we
accept to process many handshakes taking 1ms each, we significantly
increase the processing time of regular tasks just by adding an extra
delay between their calls. Ideally in order to be fair we should have a
1:18 call ratio, but this requires a bit more accounting. With very little
effort we can mark the SSL handshake tasklet as TASK_HEAVY until the
handshake completes, and remove it once done.
Doing so reduces from 14 to 3.0 ms the total response time experienced
by HTTP clients running in parallel to 1000 SSL clients doing full
handshakes in loops. Better, when tune.sched.low-latency is set to "on",
the latency further drops to 1.8 ms.
The tasks latency distribution explain pretty well what is happening:
Without the patch:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 2785375 19.35m 416.9us 5.401h 6.980ms
h1_io_cb 1868949 9.853s 5.271us 4.829h 9.302ms
process_stream 1864066 7.582s 4.067us 2.058h 3.974ms
si_cs_io_cb 1733808 1.932s 1.114us 26.83m 928.5us
h1_timeout_task 935760 - - 1.033h 3.975ms
accept_queue_process 303606 4.627s 15.24us 16.65m 3.291ms
srv_cleanup_toremove_connections452 64.31ms 142.3us 2.447s 5.415ms
task_run_applet 47 5.149ms 109.6us 57.09ms 1.215ms
srv_cleanup_idle_connections 34 2.210ms 65.00us 87.49ms 2.573ms
With the patch:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 3000365 21.08m 421.6us 20.30h 24.36ms
h1_io_cb 2031932 9.278s 4.565us 46.70m 1.379ms
process_stream 2010682 7.391s 3.675us 22.83m 681.2us
si_cs_io_cb 1702070 1.571s 922.0ns 8.732m 307.8us
h1_timeout_task 1009594 - - 17.63m 1.048ms
accept_queue_process 339595 4.792s 14.11us 3.714m 656.2us
srv_cleanup_toremove_connections779 75.42ms 96.81us 438.3ms 562.6us
srv_cleanup_idle_connections 48 2.498ms 52.05us 178.1us 3.709us
task_run_applet 17 1.738ms 102.3us 11.29ms 663.9us
other 1 947.8us 947.8us 202.6us 202.6us
=> h1_io_cb() and process_stream() are divided by 6 while ssl_sock_io_cb() is
multipled by 4
And with low-latency on:
$ socat - /tmp/sock1 <<< "show profiling"
Per-task CPU profiling : on # set profiling tasks {on|auto|off}
Tasks activity:
function calls cpu_tot cpu_avg lat_tot lat_avg
ssl_sock_io_cb 3000565 20.96m 419.1us 20.74h 24.89ms
h1_io_cb 2019702 9.294s 4.601us 49.22m 1.462ms
process_stream 2009755 6.570s 3.269us 1.493m 44.57us
si_cs_io_cb 1997820 1.566s 783.0ns 2.985m 89.66us
h1_timeout_task 1009742 - - 1.647m 97.86us
accept_queue_process 494509 4.697s 9.498us 1.240m 150.4us
srv_cleanup_toremove_connections1120 92.32ms 82.43us 463.0ms 413.4us
srv_cleanup_idle_connections 70 2.703ms 38.61us 204.5us 2.921us
task_run_applet 13 1.303ms 100.3us 85.12us 6.548us
=> process_stream() is divided by 100 while ssl_sock_io_cb() is
multipled by 4
Interestingly, the total HTTPS response time doesn't increase and even very
slightly decreases, with an overall ~1% higher request rate. The net effect
here is a redistribution of the CPU resources between internal tasks, and
in the case of SSL, handshakes wait bit more but everything after completes
faster.
This was made simple enough to be backportable if it helps some users
suffering from high latencies in mixed traffic.
2021-02-25 09:31:00 -05:00
}
}
2019-05-20 08:02:16 -04:00
/* If we had an error, or the handshake is done and I/O is available,
* let the upper layer know .
2020-01-22 12:08:48 -05:00
* If no mux was set up yet , then call conn_create_mux ( )
2019-05-20 08:02:16 -04:00
* we can ' t be sure conn_fd_handler ( ) will be called again .
*/
if ( ( ctx - > conn - > flags & CO_FL_ERROR ) | |
! ( ctx - > conn - > flags & CO_FL_SSL_WAIT_HS ) ) {
int woke = 0 ;
/* On error, wake any waiter */
2020-01-10 03:20:26 -05:00
if ( ctx - > subs ) {
tasklet_wakeup ( ctx - > subs - > tasklet ) ;
ctx - > subs - > events = 0 ;
2019-05-20 08:02:16 -04:00
woke = 1 ;
2020-01-10 03:20:26 -05:00
ctx - > subs = NULL ;
2019-05-20 08:02:16 -04:00
}
2020-01-10 03:20:26 -05:00
2019-05-20 08:02:16 -04:00
/* If we're the first xprt for the connection, let the
2020-01-22 12:08:48 -05:00
* upper layers know . If we have no mux , create it ,
* and once we have a mux , call its wake method if we didn ' t
* woke a tasklet already .
2019-05-20 08:02:16 -04:00
*/
if ( ctx - > conn - > xprt_ctx = = ctx ) {
2020-01-22 12:08:48 -05:00
if ( ! ctx - > conn - > mux )
ret = conn_create_mux ( ctx - > conn ) ;
2019-05-20 08:02:16 -04:00
if ( ret > = 0 & & ! woke & & ctx - > conn - > mux & & ctx - > conn - > mux - > wake )
2020-07-03 08:01:21 -04:00
ret = ctx - > conn - > mux - > wake ( ctx - > conn ) ;
goto leave ;
2019-05-20 08:02:16 -04:00
}
}
2021-01-07 01:59:58 -05:00
# ifdef SSL_READ_EARLY_DATA_SUCCESS
2019-12-19 09:02:39 -05:00
/* If we have early data and somebody wants to receive, let them */
2020-01-10 03:20:26 -05:00
else if ( b_data ( & ctx - > early_buf ) & & ctx - > subs & &
ctx - > subs - > events & SUB_RETRY_RECV ) {
tasklet_wakeup ( ctx - > subs - > tasklet ) ;
ctx - > subs - > events & = ~ SUB_RETRY_RECV ;
if ( ! ctx - > subs - > events )
ctx - > subs = NULL ;
2019-12-19 09:02:39 -05:00
}
# endif
2020-07-03 08:01:21 -04:00
leave :
if ( ! ret & & conn_in_list ) {
struct server * srv = objt_server ( conn - > target ) ;
2021-01-11 03:21:52 -05:00
HA_SPIN_LOCK ( IDLE_CONNS_LOCK , & idle_conns [ tid ] . idle_conns_lock ) ;
2023-08-25 09:48:39 -04:00
_srv_add_idle ( srv , conn , conn_in_list = = CO_FL_SAFE_LIST ) ;
2021-01-11 03:21:52 -05:00
HA_SPIN_UNLOCK ( IDLE_CONNS_LOCK , & idle_conns [ tid ] . idle_conns_lock ) ;
2020-07-03 08:01:21 -04:00
}
2021-03-13 05:30:19 -05:00
return t ;
2019-03-21 11:30:07 -04:00
}
2012-05-18 09:47:34 -04:00
/* Receive up to <count> bytes from connection <conn>'s socket and store them
2014-01-14 05:31:27 -05:00
* into buffer < buf > . Only one call to recv ( ) is performed , unless the
2012-05-18 09:47:34 -04:00
* buffer wraps , in which case a second call may be performed . The connection ' s
* flags are updated with whatever special event is detected ( error , read0 ,
* empty ) . The caller is responsible for taking care of those events and
* avoiding the call if inappropriate . The function does not call the
* connection ' s polling update function , so the caller is responsible for this .
*/
2019-03-21 13:27:17 -04:00
static size_t ssl_sock_to_buf ( struct connection * conn , void * xprt_ctx , struct buffer * buf , size_t count , int flags )
2012-05-18 09:47:34 -04:00
{
2019-03-21 13:27:17 -04:00
struct ssl_sock_ctx * ctx = xprt_ctx ;
2018-07-18 05:22:03 -04:00
ssize_t ret ;
size_t try , done = 0 ;
2012-05-18 09:47:34 -04:00
2019-03-21 13:27:17 -04:00
if ( ! ctx )
2012-05-18 09:47:34 -04:00
goto out_error ;
2021-01-07 01:59:58 -05:00
# ifdef SSL_READ_EARLY_DATA_SUCCESS
2019-12-19 09:02:39 -05:00
if ( b_data ( & ctx - > early_buf ) ) {
try = b_contig_space ( buf ) ;
if ( try > b_data ( & ctx - > early_buf ) )
try = b_data ( & ctx - > early_buf ) ;
memcpy ( b_tail ( buf ) , b_head ( & ctx - > early_buf ) , try ) ;
b_add ( buf , try ) ;
b_del ( & ctx - > early_buf , try ) ;
if ( b_data ( & ctx - > early_buf ) = = 0 )
b_free ( & ctx - > early_buf ) ;
return try ;
}
# endif
2020-01-23 10:27:54 -05:00
if ( conn - > flags & ( CO_FL_WAIT_XPRT | CO_FL_SSL_WAIT_HS ) )
2012-05-18 09:47:34 -04:00
/* a handshake was requested */
return 0 ;
/* read the largest possible block. For this, we perform only one call
* to recv ( ) unless the buffer wraps and we exactly fill the first hunk ,
* in which case we accept to do it once again . A new attempt is made on
* EINTR too .
*/
2014-01-17 05:09:40 -05:00
while ( count > 0 ) {
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
2018-06-15 11:21:00 -04:00
try = b_contig_space ( buf ) ;
if ( ! try )
break ;
2014-01-14 05:31:27 -05:00
if ( try > count )
try = count ;
2018-06-15 11:21:00 -04:00
2019-02-26 12:37:15 -05:00
ret = SSL_read ( ctx - > ssl , b_tail ( buf ) , try ) ;
2019-08-05 12:04:16 -04:00
2012-09-03 14:36:47 -04:00
if ( conn - > flags & CO_FL_ERROR ) {
/* CO_FL_ERROR may be set by ssl_sock_infocbk */
2012-12-14 05:21:13 -05:00
goto out_error ;
2012-09-03 14:36:47 -04:00
}
2012-05-18 09:47:34 -04:00
if ( ret > 0 ) {
2018-06-28 12:17:23 -04:00
b_add ( buf , ret ) ;
2012-05-18 09:47:34 -04:00
done + = ret ;
count - = ret ;
}
else {
2019-02-26 12:37:15 -05:00
ret = SSL_get_error ( ctx - > ssl , ret ) ;
2012-05-18 09:47:34 -04:00
if ( ret = = SSL_ERROR_WANT_WRITE ) {
2012-11-08 11:56:20 -05:00
/* handshake is running, and it needs to enable write */
2012-05-18 09:47:34 -04:00
conn - > flags | = CO_FL_SSL_WAIT_HS ;
2019-05-20 08:02:16 -04:00
ctx - > xprt - > subscribe ( conn , ctx - > xprt_ctx , SUB_RETRY_SEND , & ctx - > wait_event ) ;
2020-11-13 15:56:34 -05:00
# ifdef SSL_MODE_ASYNC
2017-06-06 08:35:14 -04:00
/* Async mode can be re-enabled, because we're leaving data state.*/
if ( global_ssl . async )
2019-02-26 12:37:15 -05:00
SSL_set_mode ( ctx - > ssl , SSL_MODE_ASYNC ) ;
2017-06-06 08:35:14 -04:00
# endif
2012-05-18 09:47:34 -04:00
break ;
}
else if ( ret = = SSL_ERROR_WANT_READ ) {
2019-02-26 12:37:15 -05:00
if ( SSL_renegotiate_pending ( ctx - > ssl ) ) {
2019-05-20 08:02:16 -04:00
ctx - > xprt - > subscribe ( conn , ctx - > xprt_ctx ,
SUB_RETRY_RECV ,
& ctx - > wait_event ) ;
2012-11-08 12:02:56 -05:00
/* handshake is running, and it may need to re-enable read */
conn - > flags | = CO_FL_SSL_WAIT_HS ;
2020-11-13 15:56:34 -05:00
# ifdef SSL_MODE_ASYNC
2017-06-06 08:35:14 -04:00
/* Async mode can be re-enabled, because we're leaving data state.*/
if ( global_ssl . async )
2019-02-26 12:37:15 -05:00
SSL_set_mode ( ctx - > ssl , SSL_MODE_ASYNC ) ;
2017-06-06 08:35:14 -04:00
# endif
2012-11-08 12:02:56 -05:00
break ;
}
2012-05-18 09:47:34 -04:00
break ;
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
} else if ( ret = = SSL_ERROR_ZERO_RETURN )
goto read0 ;
2021-09-29 12:56:51 -04:00
else if ( ret = = SSL_ERROR_SSL ) {
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
if ( ctx & & ! ctx - > error_code )
2021-09-29 12:56:53 -04:00
ctx - > error_code = ERR_peek_error ( ) ;
2021-09-29 12:56:51 -04:00
conn - > err_code = CO_ERR_SSL_FATAL ;
}
2018-02-19 08:25:15 -05:00
/* For SSL_ERROR_SYSCALL, make sure to clear the error
* stack before shutting down the connection for
* reading . */
2022-04-25 14:32:15 -04:00
if ( ret = = SSL_ERROR_SYSCALL & & ( ! errno | | errno = = EAGAIN | | errno = = EWOULDBLOCK ) )
2018-02-13 09:17:23 -05:00
goto clear_ssl_error ;
2012-05-18 09:47:34 -04:00
/* otherwise it's a real error */
goto out_error ;
}
}
2017-10-25 03:32:15 -04:00
leave :
2012-05-18 09:47:34 -04:00
return done ;
2018-02-19 08:25:15 -05:00
clear_ssl_error :
/* Clear openssl global errors stack */
2022-09-06 13:37:08 -04:00
ssl_sock_dump_errors ( conn , NULL ) ;
2018-02-19 08:25:15 -05:00
ERR_clear_error ( ) ;
2012-05-18 09:47:34 -04:00
read0 :
conn_sock_read0 ( conn ) ;
2017-10-25 03:32:15 -04:00
goto leave ;
2018-02-19 08:25:15 -05:00
2012-05-18 09:47:34 -04:00
out_error :
2018-02-13 09:17:23 -05:00
conn - > flags | = CO_FL_ERROR ;
2012-12-14 05:21:13 -05:00
/* Clear openssl global errors stack */
2022-09-06 13:37:08 -04:00
ssl_sock_dump_errors ( conn , NULL ) ;
2012-12-14 05:21:13 -05:00
ERR_clear_error ( ) ;
2017-10-25 03:32:15 -04:00
goto leave ;
2012-05-18 09:47:34 -04:00
}
2018-06-14 12:31:46 -04:00
/* Send up to <count> pending bytes from buffer <buf> to connection <conn>'s
* socket . < flags > may contain some CO_SFL_ * flags to hint the system about
* other pending data for example , but this flag is ignored at the moment .
2012-05-18 09:47:34 -04:00
* Only one call to send ( ) is performed , unless the buffer wraps , in which case
* a second call may be performed . The connection ' s flags are updated with
* whatever special event is detected ( error , empty ) . The caller is responsible
* for taking care of those events and avoiding the call if inappropriate . The
* function does not call the connection ' s polling update function , so the caller
2018-06-14 12:31:46 -04:00
* is responsible for this . The buffer ' s output is not adjusted , it ' s up to the
* caller to take care of this . It ' s up to the caller to update the buffer ' s
* contents based on the return value .
2012-05-18 09:47:34 -04:00
*/
2019-03-21 13:27:17 -04:00
static size_t ssl_sock_from_buf ( struct connection * conn , void * xprt_ctx , const struct buffer * buf , size_t count , int flags )
2012-05-18 09:47:34 -04:00
{
2019-03-21 13:27:17 -04:00
struct ssl_sock_ctx * ctx = xprt_ctx ;
2018-06-14 12:31:46 -04:00
ssize_t ret ;
size_t try , done ;
2012-05-18 09:47:34 -04:00
done = 0 ;
2019-03-21 13:27:17 -04:00
if ( ! ctx )
2012-05-18 09:47:34 -04:00
goto out_error ;
2020-01-23 10:27:54 -05:00
if ( conn - > flags & ( CO_FL_WAIT_XPRT | CO_FL_SSL_WAIT_HS | CO_FL_EARLY_SSL_HS ) )
2012-05-18 09:47:34 -04:00
/* a handshake was requested */
return 0 ;
/* send the largest possible block. For this we perform only one call
* to send ( ) unless the buffer wraps and we exactly fill the first hunk ,
* in which case we accept to do it once again .
*/
2018-06-14 12:31:46 -04:00
while ( count ) {
2020-10-24 14:42:30 -04:00
# ifdef SSL_READ_EARLY_DATA_SUCCESS
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
size_t written_data ;
# endif
2018-06-14 12:31:46 -04:00
try = b_contig_data ( buf , done ) ;
if ( try > count )
try = count ;
2013-02-21 01:46:09 -05:00
2022-04-27 07:04:54 -04:00
if ( global_ssl . hard_max_record & & try > global_ssl . hard_max_record )
try = global_ssl . hard_max_record ;
2014-02-01 20:00:24 -05:00
if ( ! ( flags & CO_SFL_STREAMER ) & &
2019-02-28 12:10:45 -05:00
! ( ctx - > xprt_st & SSL_SOCK_SEND_UNLIMITED ) & &
2016-12-22 17:12:01 -05:00
global_ssl . max_record & & try > global_ssl . max_record ) {
try = global_ssl . max_record ;
2014-02-17 09:43:01 -05:00
}
else {
/* we need to keep the information about the fact that
* we ' re not limiting the upcoming send ( ) , because if it
* fails , we ' ll have to retry with at least as many data .
*/
2019-02-28 12:10:45 -05:00
ctx - > xprt_st | = SSL_SOCK_SEND_UNLIMITED ;
2014-02-17 09:43:01 -05:00
}
2013-02-21 01:46:09 -05:00
2023-03-17 11:13:05 -04:00
if ( try < count | | flags & CO_SFL_MSG_MORE )
ctx - > xprt_st | = SSL_SOCK_SEND_MORE ;
else
ctx - > xprt_st & = ~ SSL_SOCK_SEND_MORE ;
2020-10-24 14:42:30 -04:00
# ifdef SSL_READ_EARLY_DATA_SUCCESS
2019-05-03 14:56:19 -04:00
if ( ! SSL_is_init_finished ( ctx - > ssl ) & & conn_is_back ( conn ) ) {
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
unsigned int max_early ;
2017-11-03 11:27:47 -04:00
if ( objt_listener ( conn - > target ) )
2019-02-26 12:37:15 -05:00
max_early = SSL_get_max_early_data ( ctx - > ssl ) ;
2017-11-03 11:27:47 -04:00
else {
2019-02-26 12:37:15 -05:00
if ( SSL_get0_session ( ctx - > ssl ) )
max_early = SSL_SESSION_get_max_early_data ( SSL_get0_session ( ctx - > ssl ) ) ;
2017-11-03 11:27:47 -04:00
else
max_early = 0 ;
}
2019-02-28 12:10:45 -05:00
if ( try + ctx - > sent_early_data > max_early ) {
try - = ( try + ctx - > sent_early_data ) - max_early ;
2017-11-03 11:27:47 -04:00
if ( try < = 0 ) {
2019-05-03 14:56:19 -04:00
conn - > flags | = CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN ;
2019-06-15 14:59:30 -04:00
tasklet_wakeup ( ctx - > wait_event . tasklet ) ;
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
break ;
2017-11-03 11:27:47 -04:00
}
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
}
2019-02-26 12:37:15 -05:00
ret = SSL_write_early_data ( ctx - > ssl , b_peek ( buf , done ) , try , & written_data ) ;
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
if ( ret = = 1 ) {
ret = written_data ;
2019-02-28 12:10:45 -05:00
ctx - > sent_early_data + = ret ;
2019-06-15 14:59:30 -04:00
if ( objt_server ( conn - > target ) ) {
2017-11-03 11:27:47 -04:00
conn - > flags | = CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN | CO_FL_EARLY_DATA ;
2019-06-15 14:59:30 -04:00
/* Initiate the handshake, now */
tasklet_wakeup ( ctx - > wait_event . tasklet ) ;
}
2017-11-03 11:27:47 -04:00
MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
When compiled with Openssl >= 1.1.1, before attempting to do the handshake,
try to read any early data. If any early data is present, then we'll create
the session, read the data, and handle the request before we're doing the
handshake.
For this, we add a new connection flag, CO_FL_EARLY_SSL_HS, which is not
part of the CO_FL_HANDSHAKE set, allowing to proceed with a session even
before an SSL handshake is completed.
As early data do have security implication, we let the origin server know
the request comes from early data by adding the "Early-Data" header, as
specified in this draft from the HTTP working group :
https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-replay
2017-09-22 12:26:28 -04:00
}
} else
# endif
2019-02-26 12:37:15 -05:00
ret = SSL_write ( ctx - > ssl , b_peek ( buf , done ) , try ) ;
2014-02-17 09:43:01 -05:00
2012-09-03 14:36:47 -04:00
if ( conn - > flags & CO_FL_ERROR ) {
/* CO_FL_ERROR may be set by ssl_sock_infocbk */
2012-12-14 05:21:13 -05:00
goto out_error ;
2012-09-03 14:36:47 -04:00
}
2012-05-18 09:47:34 -04:00
if ( ret > 0 ) {
MEDIUM: connection: remove CO_FL_CONNECTED and only rely on CO_FL_WAIT_*
Commit 477902bd2e ("MEDIUM: connections: Get ride of the xprt_done
callback.") broke the master CLI for a very obscure reason. It happens
that short requests immediately terminated by a shutdown are properly
received, CS_FL_EOS is correctly set, but in si_cs_recv(), we refrain
from setting CF_SHUTR on the channel because CO_FL_CONNECTED was not
yet set on the connection since we've not passed again through
conn_fd_handler() and it was not done in conn_complete_session(). While
commit a8a415d31a ("BUG/MEDIUM: connections: Set CO_FL_CONNECTED in
conn_complete_session()") fixed the issue, such accident may happen
again as the root cause is deeper and actually comes down to the fact
that CO_FL_CONNECTED is lazily set at various check points in the code
but not every time we drop one wait bit. It is not the first time we
face this situation.
Originally this flag was used to detect the transition between WAIT_*
and CONNECTED in order to call ->wake() from the FD handler. But since
at least 1.8-dev1 with commit 7bf3fa3c23 ("BUG/MAJOR: connection: update
CO_FL_CONNECTED before calling the data layer"), CO_FL_CONNECTED is
always synchronized against the two others before being checked. Moreover,
with the I/Os moved to tasklets, the decision to call the ->wake() function
is performed after the I/Os in si_cs_process() and equivalent, which don't
care about this transition either.
So in essence, checking for CO_FL_CONNECTED has become a lazy wait to
check for (CO_FL_WAIT_L4_CONN | CO_FL_WAIT_L6_CONN), but that always
relies on someone else having synchronized it.
This patch addresses it once for all by killing this flag and only checking
the two others (for which a composite mask CO_FL_WAIT_L4L6 was added). This
revealed a number of inconsistencies that were purposely not addressed here
for the sake of bisectability:
- while most places do check both L4+L6 and HANDSHAKE at the same time,
some places like assign_server() or back_handle_st_con() and a few
sample fetches looking for proxy protocol do check for L4+L6 but
don't care about HANDSHAKE ; these ones will probably fail on TCP
request session rules if the handshake is not complete.
- some handshake handlers do validate that a connection is established
at L4 but didn't clear CO_FL_WAIT_L4_CONN
- the ->ctl method of mux_fcgi, mux_pt and mux_h1 only checks for L4+L6
before declaring the mux ready while the snd_buf function also checks
for the handshake's completion. Likely the former should validate the
handshake as well and we should get rid of these extra tests in snd_buf.
- raw_sock_from_buf() would directly set CO_FL_CONNECTED and would only
later clear CO_FL_WAIT_L4_CONN.
- xprt_handshake would set CO_FL_CONNECTED itself without actually
clearing CO_FL_WAIT_L4_CONN, which could apparently happen only if
waiting for a pure Rx handshake.
- most places in ssl_sock that were checking CO_FL_CONNECTED don't need
to include the L4 check as an L6 check is enough to decide whether to
wait for more info or not.
It also becomes obvious when reading the test in si_cs_recv() that caused
the failure mentioned above that once converted it doesn't make any sense
anymore: having CS_FL_EOS set while still waiting for L4 and L6 to complete
cannot happen since for CS_FL_EOS to be set, the other ones must have been
validated.
Some of these parts will still deserve further cleanup, and some of the
observations above may induce some backports of potential bug fixes once
totally analyzed in their context. The risk of breaking existing stuff
is too high to blindly backport everything.
2020-01-23 03:11:58 -05:00
/* A send succeeded, so we can consider ourself connected */
conn - > flags & = ~ CO_FL_WAIT_L4L6 ;
2019-02-28 12:10:45 -05:00
ctx - > xprt_st & = ~ SSL_SOCK_SEND_UNLIMITED ;
2018-06-14 12:31:46 -04:00
count - = ret ;
2012-05-18 09:47:34 -04:00
done + = ret ;
}
else {
2019-02-26 12:37:15 -05:00
ret = SSL_get_error ( ctx - > ssl , ret ) ;
2017-01-13 20:42:15 -05:00
2012-05-18 09:47:34 -04:00
if ( ret = = SSL_ERROR_WANT_WRITE ) {
2019-02-26 12:37:15 -05:00
if ( SSL_renegotiate_pending ( ctx - > ssl ) ) {
2012-11-08 12:02:56 -05:00
/* handshake is running, and it may need to re-enable write */
conn - > flags | = CO_FL_SSL_WAIT_HS ;
2019-05-20 08:02:16 -04:00
ctx - > xprt - > subscribe ( conn , ctx - > xprt_ctx , SUB_RETRY_SEND , & ctx - > wait_event ) ;
2020-11-13 15:56:34 -05:00
# ifdef SSL_MODE_ASYNC
2017-06-06 08:35:14 -04:00
/* Async mode can be re-enabled, because we're leaving data state.*/
if ( global_ssl . async )
2019-02-26 12:37:15 -05:00
SSL_set_mode ( ctx - > ssl , SSL_MODE_ASYNC ) ;
2017-06-06 08:35:14 -04:00
# endif
2012-11-08 12:02:56 -05:00
break ;
}
2019-05-20 08:02:16 -04:00
2012-05-18 09:47:34 -04:00
break ;
}
else if ( ret = = SSL_ERROR_WANT_READ ) {
2012-11-08 11:56:20 -05:00
/* handshake is running, and it needs to enable read */
2012-05-18 09:47:34 -04:00
conn - > flags | = CO_FL_SSL_WAIT_HS ;
2019-05-20 08:02:16 -04:00
ctx - > xprt - > subscribe ( conn , ctx - > xprt_ctx ,
SUB_RETRY_RECV ,
& ctx - > wait_event ) ;
2020-11-13 15:56:34 -05:00
# ifdef SSL_MODE_ASYNC
2017-06-06 08:35:14 -04:00
/* Async mode can be re-enabled, because we're leaving data state.*/
if ( global_ssl . async )
2019-02-26 12:37:15 -05:00
SSL_set_mode ( ctx - > ssl , SSL_MODE_ASYNC ) ;
2017-06-06 08:35:14 -04:00
# endif
2017-01-13 20:42:15 -05:00
break ;
}
2021-09-29 12:56:51 -04:00
else if ( ret = = SSL_ERROR_SSL | | ret = = SSL_ERROR_SYSCALL ) {
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2022-04-12 01:31:06 -04:00
if ( ctx & & ! ctx - > error_code )
2021-09-29 12:56:53 -04:00
ctx - > error_code = ERR_peek_error ( ) ;
2021-09-29 12:56:51 -04:00
conn - > err_code = CO_ERR_SSL_FATAL ;
}
2012-05-18 09:47:34 -04:00
goto out_error ;
}
}
2017-10-25 03:32:15 -04:00
leave :
2012-05-18 09:47:34 -04:00
return done ;
out_error :
2012-12-14 05:21:13 -05:00
/* Clear openssl global errors stack */
2022-09-06 13:37:08 -04:00
ssl_sock_dump_errors ( conn , NULL ) ;
2012-12-14 05:21:13 -05:00
ERR_clear_error ( ) ;
2012-05-18 09:47:34 -04:00
conn - > flags | = CO_FL_ERROR ;
2017-10-25 03:32:15 -04:00
goto leave ;
2012-05-18 09:47:34 -04:00
}
2021-05-13 04:11:03 -04:00
void ssl_sock_close ( struct connection * conn , void * xprt_ctx ) {
2012-05-18 09:47:34 -04:00
2019-03-21 13:27:17 -04:00
struct ssl_sock_ctx * ctx = xprt_ctx ;
2019-02-26 12:37:15 -05:00
2019-05-20 08:02:16 -04:00
2019-03-21 13:27:17 -04:00
if ( ctx ) {
2019-05-20 08:02:16 -04:00
if ( ctx - > wait_event . events ! = 0 )
ctx - > xprt - > unsubscribe ( ctx - > conn , ctx - > xprt_ctx ,
ctx - > wait_event . events ,
& ctx - > wait_event ) ;
2020-01-10 03:20:26 -05:00
if ( ctx - > subs ) {
ctx - > subs - > events = 0 ;
tasklet_wakeup ( ctx - > subs - > tasklet ) ;
2019-05-20 08:02:16 -04:00
}
2020-01-10 03:20:26 -05:00
2019-05-23 12:41:47 -04:00
if ( ctx - > xprt - > close )
ctx - > xprt - > close ( conn , ctx - > xprt_ctx ) ;
2020-11-13 15:56:34 -05:00
# ifdef SSL_MODE_ASYNC
2017-05-17 14:42:48 -04:00
if ( global_ssl . async ) {
OSSL_ASYNC_FD all_fd [ 32 ] , afd ;
size_t num_all_fds = 0 ;
int i ;
2019-02-26 12:37:15 -05:00
SSL_get_all_async_fds ( ctx - > ssl , NULL , & num_all_fds ) ;
2017-05-17 14:42:48 -04:00
if ( num_all_fds > 32 ) {
send_log ( NULL , LOG_EMERG , " haproxy: openssl returns too many async fds. It seems a bug. Process may crash \n " ) ;
return ;
}
2019-02-26 12:37:15 -05:00
SSL_get_all_async_fds ( ctx - > ssl , all_fd , & num_all_fds ) ;
2017-05-17 14:42:48 -04:00
/* If an async job is pending, we must try to
to catch the end using polling before calling
SSL_free */
2019-02-26 12:37:15 -05:00
if ( num_all_fds & & SSL_waiting_for_async ( ctx - > ssl ) ) {
2017-05-17 14:42:48 -04:00
for ( i = 0 ; i < num_all_fds ; i + + ) {
/* switch on an handler designed to
* handle the SSL_free
*/
afd = all_fd [ i ] ;
fdtab [ afd ] . iocb = ssl_async_fd_free ;
2019-02-26 12:37:15 -05:00
fdtab [ afd ] . owner = ctx - > ssl ;
2017-05-17 14:42:48 -04:00
fd_want_recv ( afd ) ;
2017-05-31 06:02:53 -04:00
/* To ensure that the fd cache won't be used
* and we ' ll catch a real RD event .
*/
fd_cant_recv ( afd ) ;
2017-05-17 14:42:48 -04:00
}
2019-06-14 08:42:29 -04:00
tasklet_free ( ctx - > wait_event . tasklet ) ;
2019-02-26 12:37:15 -05:00
pool_free ( ssl_sock_ctx_pool , ctx ) ;
2021-04-06 07:53:36 -04:00
_HA_ATOMIC_INC ( & jobs ) ;
2017-01-13 20:42:15 -05:00
return ;
}
2017-05-17 14:42:48 -04:00
/* Else we can remove the fds from the fdtab
* and call SSL_free .
2020-08-26 05:44:17 -04:00
* note : we do a fd_stop_both and not a delete
2017-05-17 14:42:48 -04:00
* because the fd is owned by the engine .
* the engine is responsible to close
*/
2022-07-01 11:36:50 -04:00
for ( i = 0 ; i < num_all_fds ; i + + ) {
/* We want to remove the fd from the fdtab
* but we flag it to disown because the
* close is performed by the engine itself
*/
fdtab [ all_fd [ i ] ] . state | = FD_DISOWN ;
fd_delete ( all_fd [ i ] ) ;
}
2017-01-13 20:42:15 -05:00
}
# endif
2019-02-26 12:37:15 -05:00
SSL_free ( ctx - > ssl ) ;
2019-12-19 09:02:39 -05:00
b_free ( & ctx - > early_buf ) ;
2019-06-14 08:42:29 -04:00
tasklet_free ( ctx - > wait_event . tasklet ) ;
2019-02-26 12:37:15 -05:00
pool_free ( ssl_sock_ctx_pool , ctx ) ;
2021-10-06 06:15:18 -04:00
_HA_ATOMIC_DEC ( & global . sslconns ) ;
2012-05-18 09:47:34 -04:00
}
}
/* This function tries to perform a clean shutdown on an SSL connection, and in
* any case , flags the connection as reusable if no handshake was in progress .
*/
2019-03-21 13:27:17 -04:00
static void ssl_sock_shutw ( struct connection * conn , void * xprt_ctx , int clean )
2012-05-18 09:47:34 -04:00
{
2019-03-21 13:27:17 -04:00
struct ssl_sock_ctx * ctx = xprt_ctx ;
2019-02-26 12:37:15 -05:00
2020-01-23 10:27:54 -05:00
if ( conn - > flags & ( CO_FL_WAIT_XPRT | CO_FL_SSL_WAIT_HS ) )
2012-05-18 09:47:34 -04:00
return ;
2017-01-08 08:07:39 -05:00
if ( ! clean )
/* don't sent notify on SSL_shutdown */
2019-02-26 12:37:15 -05:00
SSL_set_quiet_shutdown ( ctx - > ssl , 1 ) ;
2012-05-18 09:47:34 -04:00
/* no handshake was in progress, try a clean ssl shutdown */
2019-02-26 12:37:15 -05:00
if ( SSL_shutdown ( ctx - > ssl ) < = 0 ) {
2012-12-14 05:21:13 -05:00
/* Clear openssl global errors stack */
2022-09-06 13:37:08 -04:00
ssl_sock_dump_errors ( conn , NULL ) ;
2012-12-14 05:21:13 -05:00
ERR_clear_error ( ) ;
}
2012-05-18 09:47:34 -04:00
}
2017-10-31 10:46:07 -04:00
2020-03-10 03:06:11 -04:00
/* used for ppv2 pkey algo (can be used for logging) */
2019-12-05 04:26:40 -05:00
int ssl_sock_get_pkey_algo ( struct connection * conn , struct buffer * out )
{
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2019-12-05 04:26:40 -05:00
X509 * crt ;
2022-04-11 05:29:11 -04:00
if ( ! ctx )
2019-12-05 04:26:40 -05:00
return 0 ;
crt = SSL_get_certificate ( ctx - > ssl ) ;
if ( ! crt )
return 0 ;
return cert_get_pkey_algo ( crt , out ) ;
}
2017-11-02 09:05:23 -04:00
/* used for ppv2 cert signature (can be used for logging) */
const char * ssl_sock_get_cert_sig ( struct connection * conn )
{
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2019-02-26 12:37:15 -05:00
2017-11-02 09:05:23 -04:00
__OPENSSL_110_CONST__ ASN1_OBJECT * algorithm ;
X509 * crt ;
2022-04-11 05:29:11 -04:00
if ( ! ctx )
2017-11-02 09:05:23 -04:00
return NULL ;
2019-02-26 12:37:15 -05:00
crt = SSL_get_certificate ( ctx - > ssl ) ;
2017-11-02 09:05:23 -04:00
if ( ! crt )
return NULL ;
X509_ALGOR_get0 ( & algorithm , NULL , NULL , X509_get0_tbs_sigalg ( crt ) ) ;
return OBJ_nid2sn ( OBJ_obj2nid ( algorithm ) ) ;
}
2018-02-01 12:29:59 -05:00
/* used for ppv2 authority */
const char * ssl_sock_get_sni ( struct connection * conn )
{
# ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2019-02-26 12:37:15 -05:00
2022-04-11 05:29:11 -04:00
if ( ! ctx )
2018-02-01 12:29:59 -05:00
return NULL ;
2019-02-26 12:37:15 -05:00
return SSL_get_servername ( ctx - > ssl , TLSEXT_NAMETYPE_host_name ) ;
2018-02-01 12:29:59 -05:00
# else
2019-02-26 12:37:15 -05:00
return NULL ;
2018-02-01 12:29:59 -05:00
# endif
}
2017-10-13 10:59:49 -04:00
/* used for logging/ppv2, may be changed for a sample fetch later */
2012-10-12 14:17:54 -04:00
const char * ssl_sock_get_cipher_name ( struct connection * conn )
{
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2019-02-26 12:37:15 -05:00
2022-04-11 05:29:11 -04:00
if ( ! ctx )
2012-10-12 14:17:54 -04:00
return NULL ;
2019-02-26 12:37:15 -05:00
return SSL_get_cipher_name ( ctx - > ssl ) ;
2012-10-12 14:17:54 -04:00
}
2017-10-13 10:59:49 -04:00
/* used for logging/ppv2, may be changed for a sample fetch later */
2012-10-12 14:17:54 -04:00
const char * ssl_sock_get_proto_version ( struct connection * conn )
{
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2019-02-26 12:37:15 -05:00
2022-04-11 05:29:11 -04:00
if ( ! ctx )
2012-10-12 14:17:54 -04:00
return NULL ;
2019-02-26 12:37:15 -05:00
return SSL_get_version ( ctx - > ssl ) ;
2012-10-12 14:17:54 -04:00
}
2018-12-21 13:45:40 -05:00
/* Advertises the ALPN protocol list <alpn> (wire format, <len> bytes) on the
 * connection's SSL. A no-op when ALPN is unsupported or no SSL context exists.
 */
void ssl_sock_set_alpn(struct connection *conn, const unsigned char *alpn, int len)
{
#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
	struct ssl_sock_ctx *sctx = conn_get_ssl_sock_ctx(conn);

	if (sctx != NULL)
		SSL_set_alpn_protos(sctx->ssl, alpn, len);
#endif
}
2016-12-22 15:58:38 -05:00
/* Sets advertised SNI for outgoing connections. Please set <hostname> to NULL
 * to disable SNI.
 */
void ssl_sock_set_servername(struct connection *conn, const char *hostname)
{
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
	struct ssl_sock_ctx *sctx = conn_get_ssl_sock_ctx(conn);
	const char *known_name;

	if (!sctx)
		return;

	/* the SNI may only be set before the handshake completes */
	BUG_ON(!(conn->flags & CO_FL_WAIT_L6_CONN));
	BUG_ON(!(conn->flags & CO_FL_SSL_WAIT_HS));

	/* if the SNI changes, we must destroy the reusable context so that a
	 * new connection will present a new SNI. Compare with the SNI
	 * previously stored in the reused_sess. If the session was reused,
	 * the associated SNI (if any) has already been assigned to the SSL
	 * during ssl_sock_init() so SSL_get_servername() will properly
	 * retrieve the currently known hostname for the SSL.
	 */
	known_name = SSL_get_servername(sctx->ssl, TLSEXT_NAMETYPE_host_name);

	/* unchanged non-NULL SNI: keep the reusable session as-is */
	if (hostname && known_name && strcmp(hostname, known_name) == 0)
		return;

	SSL_set_session(sctx->ssl, NULL);
	SSL_set_tlsext_host_name(sctx->ssl, hostname);
#endif
}
2014-06-24 12:26:41 -04:00
/* Extract peer certificate's common name into the chunk dest
* Returns
* the len of the extracted common name
* or 0 if no CN found in DN
* or - 1 on error case ( i . e . no peer certificate )
*/
2018-07-13 05:56:34 -04:00
int ssl_sock_get_remote_common_name ( struct connection * conn ,
struct buffer * dest )
2014-05-08 23:42:08 -04:00
{
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2014-05-08 23:42:08 -04:00
X509 * crt = NULL ;
X509_NAME * name ;
const char find_cn [ ] = " CN " ;
2018-07-13 05:56:34 -04:00
const struct buffer find_cn_chunk = {
2018-07-13 04:54:26 -04:00
. area = ( char * ) & find_cn ,
. data = sizeof ( find_cn ) - 1
2014-05-08 23:42:08 -04:00
} ;
2014-06-24 12:26:41 -04:00
int result = - 1 ;
2014-05-08 23:42:08 -04:00
2022-04-11 05:29:11 -04:00
if ( ! ctx )
2014-06-24 12:26:41 -04:00
goto out ;
2014-05-08 23:42:08 -04:00
/* SSL_get_peer_certificate, it increase X509 * ref count */
2019-02-26 12:37:15 -05:00
crt = SSL_get_peer_certificate ( ctx - > ssl ) ;
2014-05-08 23:42:08 -04:00
if ( ! crt )
goto out ;
name = X509_get_subject_name ( crt ) ;
if ( ! name )
goto out ;
2014-06-24 12:26:41 -04:00
result = ssl_sock_get_dn_entry ( name , & find_cn_chunk , 1 , dest ) ;
out :
2014-05-08 23:42:08 -04:00
if ( crt )
X509_free ( crt ) ;
return result ;
}
2014-07-30 10:39:13 -04:00
/* returns 1 if client passed a certificate for this session, 0 if not */
int ssl_sock_get_cert_used_sess ( struct connection * conn )
{
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2014-07-30 10:39:13 -04:00
X509 * crt = NULL ;
2022-04-11 05:29:11 -04:00
if ( ! ctx )
2014-07-30 10:39:13 -04:00
return 0 ;
/* SSL_get_peer_certificate, it increase X509 * ref count */
2019-02-26 12:37:15 -05:00
crt = SSL_get_peer_certificate ( ctx - > ssl ) ;
2014-07-30 10:39:13 -04:00
if ( ! crt )
return 0 ;
X509_free ( crt ) ;
return 1 ;
}
/* returns 1 if client passed a certificate for this connection, 0 if not */
int ssl_sock_get_cert_used_conn ( struct connection * conn )
2014-05-08 23:42:08 -04:00
{
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2019-02-28 12:10:45 -05:00
2022-04-11 05:29:11 -04:00
if ( ! ctx )
2014-05-08 23:42:08 -04:00
return 0 ;
2019-02-28 12:10:45 -05:00
return SSL_SOCK_ST_FL_VERIFY_DONE & ctx - > xprt_st ? 1 : 0 ;
2014-05-08 23:42:08 -04:00
}
/* returns result from SSL verify */
unsigned int ssl_sock_get_verify_result ( struct connection * conn )
{
2022-04-11 05:29:11 -04:00
struct ssl_sock_ctx * ctx = conn_get_ssl_sock_ctx ( conn ) ;
2019-02-26 12:37:15 -05:00
2022-04-11 05:29:11 -04:00
if ( ! ctx )
2014-05-08 23:42:08 -04:00
return ( unsigned int ) X509_V_ERR_APPLICATION_VERIFICATION ;
2019-02-26 12:37:15 -05:00
return ( unsigned int ) SSL_get_verify_result ( ctx - > ssl ) ;
2014-05-08 23:42:08 -04:00
}
2016-12-04 12:44:29 -05:00
/* Returns the application layer protocol name in <str> and <len> when known.
 * Zero is returned if the protocol name was not found, otherwise non-zero is
 * returned. The string is allocated in the SSL context and doesn't have to be
 * freed by the caller. NPN is also checked if available since older versions
 * of openssl (1.0.1) which are more common in field only support this one.
 * Note: when zero is returned, *str/*len may be left untouched (no ALPN/NPN
 * support compiled in, or NULL <xprt_ctx>), so callers must rely on the
 * return value only.
 */
int ssl_sock_get_alpn(const struct connection *conn, void *xprt_ctx, const char **str, int *len)
{
#if defined(TLSEXT_TYPE_application_layer_protocol_negotiation) || \
	defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
	struct ssl_sock_ctx *ctx = xprt_ctx;
	if (!ctx)
		return 0;

	*str = NULL;

#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
	/* ALPN first: this is the standardized mechanism */
	SSL_get0_alpn_selected(ctx->ssl, (const unsigned char **)str, (unsigned *)len);
	if (*str)
		return 1;
#endif
#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
	/* fall back to the legacy NPN extension */
	SSL_get0_next_proto_negotiated(ctx->ssl, (const unsigned char **)str, (unsigned *)len);
	if (*str)
		return 1;
#endif
#endif
	return 0;
}
2019-01-04 05:08:20 -05:00
/* "issuers-chain-path" load chain certificate in global.
 * Reads every PEM certificate from <in> into a single chain, keyed in the
 * global <cert_issuer_tree> by a hash of the first certificate's
 * SubjectKeyIdentifier. <fp> is the source file path, used for error
 * reporting and stored as the chain's path. Returns 1 on success, 0 on
 * failure; *err is updated on parse/duplicate errors.
 * NOTE(review): allocation failures (calloc/strdup) return 0 without
 * setting *err — callers get no message in that case; confirm intended.
 */
int ssl_load_global_issuer_from_BIO(BIO *in, char *fp, char **err)
{
	X509 *ca;
	X509_NAME *name = NULL;
	ASN1_OCTET_STRING *skid = NULL;
	STACK_OF(X509) *chain = NULL;
	struct issuer_chain *issuer;
	struct eb64_node *node;
	char *path;
	u64 key;
	int ret = 0;

	while ((ca = PEM_read_bio_X509(in, NULL, NULL, NULL))) {
		/* the first certificate provides the chain's identity:
		 * its SubjectKeyIdentifier and subject name.
		 */
		if (chain == NULL) {
			chain = sk_X509_new_null();
			skid = X509_get_ext_d2i(ca, NID_subject_key_identifier, NULL, NULL);
			name = X509_get_subject_name(ca);
		}
		if (!sk_X509_push(chain, ca)) {
			/* push failed: <ca> is not owned by the chain, free it */
			X509_free(ca);
			goto end;
		}
	}
	if (!chain) {
		memprintf(err, "unable to load issuers-chain %s : pem certificate not found.\n", fp);
		goto end;
	}
	if (!skid) {
		memprintf(err, "unable to load issuers-chain %s : SubjectKeyIdentifier not found.\n", fp);
		goto end;
	}
	if (!name) {
		memprintf(err, "unable to load issuers-chain %s : SubjectName not found.\n", fp);
		goto end;
	}

	/* index the chain by a 64-bit hash of the SKID */
	key = XXH3(ASN1_STRING_get0_data(skid), ASN1_STRING_length(skid), 0);

	/* reject duplicates: scan nodes from the first key match and compare
	 * the actual subject names (hash collisions are possible).
	 */
	for (node = eb64_lookup(&cert_issuer_tree, key); node; node = eb64_next(node)) {
		issuer = container_of(node, typeof(*issuer), node);
		if (!X509_NAME_cmp(name, X509_get_subject_name(sk_X509_value(issuer->chain, 0)))) {
			memprintf(err, "duplicate issuers-chain %s: %s already in store\n", fp, issuer->path);
			goto end;
		}
	}

	issuer = calloc(1, sizeof *issuer);
	path = strdup(fp);
	if (!issuer || !path) {
		free(issuer);
		free(path);
		goto end;
	}
	issuer->node.key = key;
	issuer->path = path;

	/* ownership of <chain> moves to the tree entry */
	issuer->chain = chain;
	chain = NULL;
	eb64_insert(&cert_issuer_tree, &issuer->node);
	ret = 1;

 end:
	if (skid)
		ASN1_OCTET_STRING_free(skid);
	if (chain)
		sk_X509_pop_free(chain, X509_free);
	return ret;
}
2020-05-14 04:14:37 -04:00
/* Looks up in the global <cert_issuer_tree> the issuer chain whose first
 * certificate issued <cert>, using the certificate's AuthorityKeyIdentifier
 * as the lookup key. Returns the chain (not refcounted) or NULL.
 */
struct issuer_chain *ssl_get0_issuer_chain(X509 *cert)
{
	AUTHORITY_KEYID *akid = X509_get_ext_d2i(cert, NID_authority_key_identifier, NULL, NULL);
	struct issuer_chain *found = NULL;

	if (akid && akid->keyid) {
		u64 key = XXH3(ASN1_STRING_get0_data(akid->keyid), ASN1_STRING_length(akid->keyid), 0);
		struct eb64_node *node;

		/* scan candidates starting at the first key match and keep the
		 * first one whose leading certificate really issued <cert>.
		 */
		for (node = eb64_lookup(&cert_issuer_tree, key); node; node = eb64_next(node)) {
			struct issuer_chain *cand = container_of(node, typeof(*cand), node);

			if (X509_check_issued(sk_X509_value(cand->chain, 0), cert) == X509_V_OK) {
				found = cand;
				break;
			}
		}
	}
	/* AUTHORITY_KEYID_free() accepts NULL */
	AUTHORITY_KEYID_free(akid);
	return found;
}
2020-05-14 11:47:32 -04:00
void ssl_free_global_issuers ( void )
2019-01-04 05:08:20 -05:00
{
struct eb64_node * node , * back ;
struct issuer_chain * issuer ;
2020-02-25 08:53:06 -05:00
node = eb64_first ( & cert_issuer_tree ) ;
2019-01-04 05:08:20 -05:00
while ( node ) {
issuer = container_of ( node , typeof ( * issuer ) , node ) ;
back = eb64_next ( node ) ;
eb64_delete ( node ) ;
free ( issuer - > path ) ;
sk_X509_pop_free ( issuer - > chain , X509_free ) ;
free ( issuer ) ;
node = back ;
}
}
2022-04-11 12:41:24 -04:00
#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
/* Config-time check: ssl-mode-async cannot be combined with more than 32
 * initialized engines. Returns ERR_NONE when OK, ERR_ABORT otherwise.
 */
static int ssl_check_async_engine_count(void) {
	if (global_ssl.async && (openssl_engines_initialized > 32)) {
		ha_alert("ssl-mode-async only supports a maximum of 32 engines.\n");
		return ERR_ABORT;
	}
	return ERR_NONE;
}
#endif
2021-01-20 08:41:29 -05:00
/* "show fd" helper to dump ssl internals. Warning: the output buffer is often
 * the common trash! It returns non-zero if the connection entry looks suspicious.
 * <buf> is the nominal output buffer, <conn> the connection owning the fd and
 * <ctx> the transport context (an ssl_sock_ctx), which may be NULL.
 */
static int ssl_sock_show_fd(struct buffer *buf, const struct connection *conn, const void *ctx)
{
	const struct ssl_sock_ctx *sctx = ctx;
	int ret = 0;

	if (!sctx)
		return ret;

	/* a ctx whose back-pointer doesn't match the connection is corrupted */
	if (sctx->conn != conn) {
		chunk_appendf(&trash, " xctx.conn=%p(BOGUS)", sctx->conn);
		ret = 1;
	}
	chunk_appendf(&trash, " xctx.st=%d .err=%ld", sctx->xprt_st, sctx->error_code);
	if (sctx->xprt) {
		chunk_appendf(&trash, " .xprt=%s", sctx->xprt->name);
		if (sctx->xprt_ctx)
			chunk_appendf(&trash, " .xctx=%p", sctx->xprt_ctx);
	}
	chunk_appendf(&trash, " .wait.ev=%d", sctx->wait_event.events);

	/* as soon as a shutdown is reported the lower layer unregisters its
	 * subscriber, so the situations below are transient and rare enough to
	 * be reported as suspicious. In any case they shouldn't last.
	 */
	if ((sctx->wait_event.events & 1) && (conn->flags & (CO_FL_SOCK_RD_SH|CO_FL_ERROR)))
		ret = 1;
	if ((sctx->wait_event.events & 2) && (conn->flags & (CO_FL_SOCK_WR_SH|CO_FL_ERROR)))
		ret = 1;

	chunk_appendf(&trash, " .subs=%p", sctx->subs);
	if (sctx->subs) {
		chunk_appendf(&trash, "(ev=%d tl=%p", sctx->subs->events, sctx->subs->tasklet);
		/* an extreme call count suggests a tasklet wakeup loop */
		if (sctx->subs->tasklet->calls >= 1000000)
			ret = 1;
		chunk_appendf(&trash, " tl.calls=%d tl.ctx=%p tl.fct=",
		              sctx->subs->tasklet->calls,
		              sctx->subs->tasklet->context);
		resolve_sym_name(&trash, NULL, sctx->subs->tasklet->process);
		chunk_appendf(&trash, ")");
	}
	chunk_appendf(&trash, " .sent_early=%d", sctx->sent_early_data);
	chunk_appendf(&trash, " .early_in=%d", (int)sctx->early_buf.data);
	return ret;
}
MINOR: ssl: make tlskeys_list_get_next() take a list element
As reported in issue #1010, gcc-11 as of 2021-01-05 is overzealous in
its -Warray-bounds check as it considers that a cast of a global struct
accesses the entire struct even if only one specific element is accessed.
This instantly breaks all lists making use of container_of() to build
their iterators as soon as the starting point is known if the next
element is retrieved from the list head in a way that is visible to the
compiler's optimizer, because it decides that accessing the list's next
element dereferences the list as a larger struct (which it does not).
The temporary workaround consisted in disabling -Warray-bounds, but this
warning is traditionally quite effective at spotting real bugs, and we
actually have is a single occurrence of this issue in the whole code.
By changing the tlskeys_list_get_next() function to take a list element
as the starting point instead of the current element, we can avoid
the starting point issue but this requires to change all call places
to write hideous casts made of &((struct blah*)ref)->list. At the
moment we only have two such call places, the first one being used to
initialize the list (which is the one causing the warning) and which
is thus easy to simplify, and the second one for which we already have
an aliased pointer to the reference that is still valid at the call
place, and given the original pointer also remained unchanged, we can
safely use this alias, and this is safer than leaving a cast there.
Let's make this change now while it's still easy.
The generated code only changed in function cli_io_handler_tlskeys_files()
due to register allocation and the change of variable scope between the
old one and the new one.
2021-01-05 04:44:30 -05:00
#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
/* This function is used with TLS ticket keys management. It permits to browse
 * each reference. The variable <ref> must point to the current node's list
 * element (which starts by the root), and <end> must point to the root node.
 */
static inline
struct tls_keys_ref *tlskeys_list_get_next(struct list *ref, struct list *end)
{
	struct list *next = ref->n;

	/* coming back to the root means the whole list has been visited */
	return (next == end) ? NULL : LIST_ELEM(next, struct tls_keys_ref *, list);
}
/* Resolves a CLI tls-keys reference: either "#<id>" (numeric unique id) or a
 * plain filename. Returns the matching tls_keys_ref or NULL when not found
 * (or when the numeric part doesn't parse entirely).
 */
static inline
struct tls_keys_ref *tlskeys_ref_lookup_ref(const char *reference)
{
	int id;
	char *endptr;

	/* a plain name goes straight to the string lookup */
	if (reference[0] != '#')
		return tlskeys_ref_lookup(reference);

	/* "#<id>" form: the whole suffix must be a valid decimal number */
	id = strtol(reference + 1, &endptr, 10);
	if (*endptr != '\0')
		return NULL;
	return tlskeys_ref_lookupid(id);
}
#endif
#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
/* dumps all tls keys. Relies on the show_keys_ctx context from the appctx.
 * The dump is resumable: whenever the output buffer fills up, the function
 * returns 0 and is called again later, restarting from the position saved in
 * ctx (state / next_ref / next_index). Returns 1 when the dump is complete.
 */
static int cli_io_handler_tlskeys_files(struct appctx *appctx)
{
	struct show_keys_ctx *ctx = appctx->svcctx;

	switch (ctx->state) {
	case SHOW_KEYS_INIT:
		/* Display the column headers. If the message cannot be sent,
		 * quit the function with returning 0. The function is called
		 * later and restart at the state "SHOW_KEYS_INIT".
		 */
		chunk_reset(&trash);

		if (ctx->dump_entries)
			chunk_appendf(&trash, "# id secret\n");
		else
			chunk_appendf(&trash, "# id (file)\n");

		if (applet_putchk(appctx, &trash) == -1)
			return 0;

		/* Now, we start the browsing of the references lists.
		 * Note that the following call to LIST_ELEM return bad pointer. The only
		 * available field of this pointer is <list>. It is used with the function
		 * tlskeys_list_get_next() for returning the first available entry
		 */
		if (ctx->next_ref == NULL)
			ctx->next_ref = tlskeys_list_get_next(&tlskeys_reference, &tlskeys_reference);

		ctx->state = SHOW_KEYS_LIST;
		__fallthrough;

	case SHOW_KEYS_LIST:
		while (ctx->next_ref) {
			struct tls_keys_ref *ref = ctx->next_ref;

			chunk_reset(&trash);
			/* next_index == 0 means we start dumping this file */
			if (ctx->dump_entries && ctx->next_index == 0)
				chunk_appendf(&trash, "# ");

			if (ctx->next_index == 0)
				chunk_appendf(&trash, "%d (%s)\n", ref->unique_id, ref->filename);

			if (ctx->dump_entries) {
				int head;

				/* hold the read lock while walking the ticket keys of this file */
				HA_RWLOCK_RDLOCK(TLSKEYS_REF_LOCK, &ref->lock);
				head = ref->tls_ticket_enc_index;
				while (ctx->next_index < TLS_TICKETS_NO) {
					struct buffer *t2 = get_trash_chunk();

					chunk_reset(t2);
					/* should never fail here because we dump only a key in the t2 buffer */
					if (ref->key_size_bits == 128) {
						t2->data = a2base64((char *)(ref->tlskeys + (head + 2 + ctx->next_index) % TLS_TICKETS_NO),
						                   sizeof(struct tls_sess_key_128),
						                   t2->area, t2->size);
						chunk_appendf(&trash, "%d.%d %s\n", ref->unique_id, ctx->next_index,
							      t2->area);
					}
					else if (ref->key_size_bits == 256) {
						t2->data = a2base64((char *)(ref->tlskeys + (head + 2 + ctx->next_index) % TLS_TICKETS_NO),
						                   sizeof(struct tls_sess_key_256),
						                   t2->area, t2->size);
						chunk_appendf(&trash, "%d.%d %s\n", ref->unique_id, ctx->next_index,
							      t2->area);
					}
					else {
						/* This case should never happen */
						chunk_appendf(&trash, "%d.%d <unknown>\n", ref->unique_id, ctx->next_index);
					}

					if (applet_putchk(appctx, &trash) == -1) {
						/* let's try again later from this stream. We add ourselves into
						 * this stream's users so that it can remove us upon termination.
						 */
						HA_RWLOCK_RDUNLOCK(TLSKEYS_REF_LOCK, &ref->lock);
						return 0;
					}
					ctx->next_index++;
				}
				HA_RWLOCK_RDUNLOCK(TLSKEYS_REF_LOCK, &ref->lock);
				ctx->next_index = 0;
			}
			if (applet_putchk(appctx, &trash) == -1) {
				/* let's try again later from this stream. We add ourselves into
				 * this stream's users so that it can remove us upon termination.
				 */
				return 0;
			}

			if (ctx->names_only == 0) /* don't display everything if not necessary */
				break;

			/* get next list entry and check the end of the list */
			ctx->next_ref = tlskeys_list_get_next(&ref->list, &tlskeys_reference);
		}
		ctx->state = SHOW_KEYS_DONE;
		__fallthrough;

	default:
		return 1;
	}
	return 0;
}
2022-05-05 02:50:17 -04:00
/* Prepares a "show_keys_ctx" and sets the appropriate io_handler if needed */
2018-04-18 07:26:46 -04:00
static int cli_parse_show_tlskeys ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-10-29 12:09:35 -04:00
{
2022-05-05 02:50:17 -04:00
struct show_keys_ctx * ctx = applet_reserve_svcctx ( appctx , sizeof ( * ctx ) ) ;
2016-10-29 12:09:35 -04:00
/* no parameter, shows only file list */
2022-12-20 05:11:17 -05:00
if ( ! * args [ 2 ] ) {
ctx - > names_only = 1 ;
return 0 ;
}
2021-06-10 07:51:13 -04:00
2022-12-20 05:11:17 -05:00
if ( args [ 2 ] [ 0 ] = = ' * ' ) {
/* list every TLS ticket keys */
ctx - > names_only = 1 ;
2021-06-10 07:51:13 -04:00
} else {
2022-12-20 05:11:17 -05:00
ctx - > next_ref = tlskeys_ref_lookup_ref ( args [ 2 ] ) ;
if ( ! ctx - > next_ref )
return cli_err ( appctx , " 'show tls-keys' unable to locate referenced filename \n " ) ;
2021-06-10 07:51:13 -04:00
}
2022-12-20 05:11:17 -05:00
ctx - > dump_entries = 1 ;
return 0 ;
}
2021-06-10 07:51:13 -04:00
2022-12-20 05:11:17 -05:00
static int cli_parse_set_tlskeys ( char * * args , char * payload , struct appctx * appctx , void * private )
{
struct tls_keys_ref * ref ;
int ret ;
2021-06-10 07:51:13 -04:00
2022-12-20 05:11:17 -05:00
/* Expect two parameters: the filename and the new new TLS key in encoding */
if ( ! * args [ 3 ] | | ! * args [ 4 ] )
return cli_err ( appctx , " 'set ssl tls-key' expects a filename and the new TLS key in base64 encoding. \n " ) ;
2021-06-10 07:51:13 -04:00
2022-12-20 05:11:17 -05:00
ref = tlskeys_ref_lookup_ref ( args [ 3 ] ) ;
if ( ! ref )
return cli_err ( appctx , " 'set ssl tls-key' unable to locate referenced filename \n " ) ;
2021-06-10 07:51:13 -04:00
2022-12-20 05:11:17 -05:00
ret = base64dec ( args [ 4 ] , strlen ( args [ 4 ] ) , trash . area , trash . size ) ;
if ( ret < 0 )
return cli_err ( appctx , " 'set ssl tls-key' received invalid base64 encoded TLS key. \n " ) ;
2021-06-10 07:51:13 -04:00
2022-12-20 05:11:17 -05:00
trash . data = ret ;
if ( ssl_sock_update_tlskey_ref ( ref , & trash ) < 0 )
return cli_err ( appctx , " 'set ssl tls-key' received a key of wrong size. \n " ) ;
2021-06-10 07:51:13 -04:00
2022-12-20 05:11:17 -05:00
return cli_msg ( appctx , LOG_INFO , " TLS ticket key updated! \n " ) ;
2021-06-10 07:51:13 -04:00
}
2022-12-20 05:11:17 -05:00
# endif
2021-06-10 07:51:13 -04:00
2022-04-21 06:06:41 -04:00
# ifdef HAVE_SSL_PROVIDERS
/* One node of the locally-built list of loaded OpenSSL provider names.
 * <name> is owned by the provider itself (OSSL_PROVIDER_get0_name); only
 * the node is allocated/freed by us.
 */
struct provider_name {
	const char *name;
	struct list list;
};
/* Callback for OSSL_PROVIDER_do_all(): appends the name of <provider> to
 * the list passed through <cbdata>. Returns 1 to keep iterating, or 0 on
 * error (missing output list or allocation failure), which stops the walk.
 */
static int ssl_provider_get_name_cb(OSSL_PROVIDER *provider, void *cbdata)
{
	struct list *out = cbdata;
	struct provider_name *entry;

	if (!out)
		return 0;

	entry = calloc(1, sizeof(*entry));
	if (!entry)
		return 0;

	/* the returned string is owned by the provider, no copy is made */
	entry->name = OSSL_PROVIDER_get0_name(provider);
	LIST_APPEND(out, &entry->list);
	return 1;
}
/* Fills <provider_names> with one node per loaded OpenSSL provider.
 * Does nothing when <provider_names> is NULL.
 */
static void ssl_provider_get_name_list(struct list *provider_names)
{
	if (!provider_names)
		return;

	OSSL_PROVIDER_do_all(NULL, ssl_provider_get_name_cb, provider_names);
}
/* Releases every node of a list built by ssl_provider_get_name_list().
 * The name strings belong to the providers and are not freed here.
 */
static void ssl_provider_clear_name_list(struct list *provider_names)
{
	struct provider_name *entry, *back;

	if (!provider_names)
		return;

	list_for_each_entry_safe(entry, back, provider_names, list) {
		LIST_DELETE(&entry->list);
		free(entry);
	}
}
static int cli_io_handler_show_providers ( struct appctx * appctx )
{
struct buffer * trash = get_trash_chunk ( ) ;
struct list provider_names ;
struct provider_name * name ;
LIST_INIT ( & provider_names ) ;
chunk_appendf ( trash , " Loaded providers : \n " ) ;
ssl_provider_get_name_list ( & provider_names ) ;
list_for_each_entry ( name , & provider_names , list ) {
chunk_appendf ( trash , " \t - %s \n " , name - > name ) ;
}
ssl_provider_clear_name_list ( & provider_names ) ;
2022-05-18 09:07:19 -04:00
if ( applet_putchk ( appctx , trash ) = = - 1 )
2022-04-21 06:06:41 -04:00
goto yield ;
return 1 ;
yield :
return 0 ;
}
# endif
2021-06-10 07:51:13 -04:00
2016-10-29 12:09:35 -04:00
/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
	/* runtime TLS ticket key management, only when ticket keys are compiled in */
	{ { "show", "tls-keys", NULL }, "show tls-keys [id|*] : show tls keys references or dump tls ticket keys when id specified", cli_parse_show_tlskeys, cli_io_handler_tlskeys_files },
	{ { "set", "ssl", "tls-key", NULL }, "set ssl tls-key [id|file] <key> : set the next TLS key for the <id> or <file> listener to <key>", cli_parse_set_tlskeys, NULL },
#endif
#ifdef HAVE_SSL_PROVIDERS
	{ { "show", "ssl", "providers", NULL }, "show ssl providers : show loaded SSL providers", NULL, cli_io_handler_show_providers },
#endif
	{ { NULL }, NULL, NULL, NULL }
}};
2018-11-25 13:14:37 -05:00
INITCALL1 ( STG_REGISTER , cli_register_kw , & cli_kws ) ;
2016-10-29 12:09:35 -04:00
REORG: connection: rename the data layer the "transport layer"
While working on the changes required to make the health checks use the
new connections, it started to become obvious that some naming was not
logical at all in the connections. Specifically, it is not logical to
call the "data layer" the layer which is in charge for all the handshake
and which does not yet provide a data layer once established until a
session has allocated all the required buffers.
In fact, it's more a transport layer, which makes much more sense. The
transport layer offers a medium on which data can transit, and it offers
the functions to move these data when the upper layer requests this. And
it is the upper layer which iterates over the transport layer's functions
to move data which should be called the data layer.
The use case where it's obvious is with embryonic sessions : an incoming
SSL connection is accepted. Only the connection is allocated, not the
buffers nor stream interface, etc... The connection handles the SSL
handshake by itself. Once this handshake is complete, we can't use the
data functions because the buffers and stream interface are not there
yet. Hence we have to first call a specific function to complete the
session initialization, after which we'll be able to use the data
functions. This clearly proves that SSL here is only a transport layer
and that the stream interface constitutes the data layer.
A similar change will be performed to rename app_cb => data, but the
two could not be in the same commit for obvious reasons.
2012-10-02 18:19:48 -04:00
/* transport-layer operations for SSL sockets */
struct xprt_ops ssl_sock = {
	/* data movement */
	.snd_buf = ssl_sock_from_buf,
	.rcv_buf = ssl_sock_to_buf,
	.subscribe = ssl_subscribe,
	.unsubscribe = ssl_unsubscribe,
	/* xprt layer stacking (e.g. SSL over another xprt) */
	.remove_xprt = ssl_remove_xprt,
	.add_xprt = ssl_add_xprt,
	.rcv_pipe = NULL,      /* no kernel splicing through SSL */
	.snd_pipe = NULL,
	.shutr = NULL,
	.shutw = ssl_sock_shutw,
	.close = ssl_sock_close,
	.init = ssl_sock_init,
	.start = ssl_sock_start,
	/* bind/server configuration hooks */
	.prepare_bind_conf = ssl_sock_prepare_bind_conf,
	.destroy_bind_conf = ssl_sock_destroy_bind_conf,
	.prepare_srv = ssl_sock_prepare_srv_ctx,
	.destroy_srv = ssl_sock_free_srv_ctx,
	.get_alpn = ssl_sock_get_alpn,
	/* idle connection migration between threads */
	.takeover = ssl_takeover,
	.set_idle = ssl_set_idle,
	.set_used = ssl_set_used,
	.get_ssl_sock_ctx = ssl_sock_get_ctx,
	.name = "SSL",
	.show_fd = ssl_sock_show_fd,
};
2017-10-02 05:51:03 -04:00
enum act_return ssl_action_wait_for_hs ( struct act_rule * rule , struct proxy * px ,
struct session * sess , struct stream * s , int flags )
{
struct connection * conn ;
conn = objt_conn ( sess - > origin ) ;
2022-05-27 04:44:39 -04:00
if ( conn ) {
2017-10-02 05:51:03 -04:00
if ( conn - > flags & ( CO_FL_EARLY_SSL_HS | CO_FL_SSL_WAIT_HS ) ) {
2022-05-27 04:44:39 -04:00
sc_ep_set ( s - > scf , SE_FL_WAIT_FOR_HS ) ;
2022-12-12 02:08:15 -05:00
s - > req . flags | = CF_READ_EVENT ;
2017-10-02 05:51:03 -04:00
return ACT_RET_YIELD ;
}
}
return ( ACT_RET_CONT ) ;
}
/* Parses the "wait-for-handshake" http-request action. It takes no argument
 * and cannot fail: it only installs the action callback.
 */
static enum act_parse_ret ssl_parse_wait_for_hs(const char **args, int *orig_arg, struct proxy *px, struct act_rule *rule, char **err)
{
	rule->action_ptr = ssl_action_wait_for_hs;
	return ACT_RET_PRS_OK;
}
/* "http-request wait-for-handshake" action keyword registration */
static struct action_kw_list http_req_actions = {ILH, {
	{ "wait-for-handshake", ssl_parse_wait_for_hs },
	{ /* END */ }
}};
2018-11-25 13:14:37 -05:00
INITCALL1 ( STG_REGISTER , http_req_keywords_register , & http_req_actions ) ;
2021-02-06 08:59:22 -05:00
# ifdef HAVE_SSL_CTX_ADD_SERVER_CUSTOM_EXT
2015-03-07 17:03:59 -05:00
/* ex_data free callback for the SCT list attached to an SSL_CTX: releases
 * the chunk's storage, then the chunk structure itself.
 */
static void ssl_sock_sctl_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
{
	if (!ptr)
		return;

	chunk_destroy(ptr);
	free(ptr);
}
# endif
2020-08-04 11:41:39 -04:00
2017-03-07 12:34:58 -05:00
/* ex_data free callback: returns a captured clienthello buffer to its pool */
static void ssl_sock_capture_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
{
	pool_free(pool_head_ssl_capture, ptr);
}
2015-03-07 17:03:59 -05:00
2021-06-09 10:46:12 -04:00
# ifdef HAVE_SSL_KEYLOG
WIP/MINOR: ssl: add sample fetches for keylog in frontend
OpenSSL 1.1.1 provides a callback registering function
SSL_CTX_set_keylog_callback, which allows one to receive a string
containing the keys to deciphers TLSv1.3.
Unfortunately it is not possible to store this data in binary form and
we can only get this information using the callback. Which means that we
need to store it until the connection is closed.
This patch adds 2 pools: the first one, pool_head_ssl_keylog, is used to
store a struct ssl_keylog which will be inserted as a ex_data in a SSL *.
The second one is pool_head_ssl_keylog_str which will be used to store
the hexadecimal strings.
To enable the capture of the keys, you need to set "tune.ssl.keylog on"
in your configuration.
The following fetches were implemented:
ssl_fc_client_early_traffic_secret,
ssl_fc_client_handshake_traffic_secret,
ssl_fc_server_handshake_traffic_secret,
ssl_fc_client_traffic_secret_0,
ssl_fc_server_traffic_secret_0,
ssl_fc_exporter_secret,
ssl_fc_early_exporter_secret
2020-07-06 05:41:30 -04:00
/* ex_data free callback for the keylog capture: releases each captured
 * hexadecimal secret string, then the ssl_keylog structure itself.
 */
static void ssl_sock_keylog_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
{
	struct ssl_keylog *kl = ptr;

	if (!kl)
		return;

	pool_free(pool_head_ssl_keylog_str, kl->client_random);
	pool_free(pool_head_ssl_keylog_str, kl->client_early_traffic_secret);
	pool_free(pool_head_ssl_keylog_str, kl->client_handshake_traffic_secret);
	pool_free(pool_head_ssl_keylog_str, kl->server_handshake_traffic_secret);
	pool_free(pool_head_ssl_keylog_str, kl->client_traffic_secret_0);
	pool_free(pool_head_ssl_keylog_str, kl->server_traffic_secret_0);
	pool_free(pool_head_ssl_keylog_str, kl->exporter_secret);
	pool_free(pool_head_ssl_keylog_str, kl->early_exporter_secret);
	pool_free(pool_head_ssl_keylog, kl);
}
# endif
2021-08-19 12:06:30 -04:00
/* ex_data free callback: drops the reference taken on the client certificate */
static void ssl_sock_clt_crt_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
{
	if (ptr)
		X509_free(ptr);
}
2022-01-07 11:12:01 -05:00
/* ex_data free callback: returns a captured client SNI string to its pool */
static void ssl_sock_clt_sni_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
{
	pool_free(ssl_sock_client_sni_pool, ptr);
}
2012-10-10 17:04:25 -04:00
/* Global SSL subsystem initialization, run once at startup via
 * INITCALL0(STG_REGISTER, ...): registers the SSL transport layer,
 * initializes the OpenSSL library, reserves all ex_data indexes used by
 * haproxy, and registers the post-check / post-deinit hooks. The order of
 * operations matters (library init before index reservation).
 */
static void __ssl_sock_init(void)
{
#if (!defined(OPENSSL_NO_COMP) && !defined(SSL_OP_NO_COMPRESSION))
	STACK_OF(SSL_COMP) *cm;
	int n;
#endif

	/* duplicate the default cipher strings so they can later be freed or
	 * replaced uniformly, whatever their original storage
	 */
	if (global_ssl.listen_default_ciphers)
		global_ssl.listen_default_ciphers = strdup(global_ssl.listen_default_ciphers);
	if (global_ssl.connect_default_ciphers)
		global_ssl.connect_default_ciphers = strdup(global_ssl.connect_default_ciphers);
#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
	if (global_ssl.listen_default_ciphersuites)
		global_ssl.listen_default_ciphersuites = strdup(global_ssl.listen_default_ciphersuites);
	if (global_ssl.connect_default_ciphersuites)
		global_ssl.connect_default_ciphersuites = strdup(global_ssl.connect_default_ciphersuites);
#endif

	xprt_register(XPRT_SSL, &ssl_sock);

#if HA_OPENSSL_VERSION_NUMBER < 0x10100000L
	SSL_library_init();
#elif HA_OPENSSL_VERSION_NUMBER >= 0x10100000L
	OPENSSL_init_ssl(0, NULL);
#endif

#if (!defined(OPENSSL_NO_COMP) && !defined(SSL_OP_NO_COMPRESSION))
	/* disable TLS compression by emptying the compression methods stack */
	cm = SSL_COMP_get_compression_methods();
	n = sk_SSL_COMP_num(cm);
	while (n--) {
		(void) sk_SSL_COMP_pop(cm);
	}
#endif

#if defined(USE_THREAD) && (HA_OPENSSL_VERSION_NUMBER < 0x10100000L)
	/* pre-1.1.0 OpenSSL needs external locking callbacks for thread safety */
	ssl_locking_init();
#endif

	/* reserve the ex_data slots used to attach haproxy data to SSL objects */
#ifdef HAVE_SSL_CTX_ADD_SERVER_CUSTOM_EXT
	sctl_ex_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_sctl_free_func);
#endif

#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
	ocsp_ex_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_ocsp_free_func);
#endif

	ssl_app_data_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL);
	ssl_capture_ptr_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_capture_free_func);
#ifdef USE_QUIC
	ssl_qc_app_data_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL);
#endif /* USE_QUIC */

#ifdef HAVE_SSL_KEYLOG
	ssl_keylog_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_keylog_free_func);
#endif

	ssl_client_crt_ref_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_clt_crt_free_func);
	ssl_client_sni_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_clt_sni_free_func);

#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
	ENGINE_load_builtin_engines();
	hap_register_post_check(ssl_check_async_engine_count);
#endif
#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
	hap_register_post_check(tlskeys_finalize_config);
#endif

	global.ssl_session_max_cost   = SSL_SESSION_MAX_COST;
	global.ssl_handshake_max_cost = SSL_HANDSHAKE_MAX_COST;

	hap_register_post_deinit(ssl_free_global_issuers);

#ifndef OPENSSL_NO_DH
	ssl_dh_ptr_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, NULL);
	hap_register_post_deinit(ssl_free_dh);
#endif
#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
	hap_register_post_deinit(ssl_free_engines);
#endif
#ifdef HAVE_SSL_PROVIDERS
	hap_register_post_deinit(ssl_unload_providers);
#endif
#if HA_OPENSSL_VERSION_NUMBER < 0x3000000fL
	/* Load SSL string for the verbose & debug mode. */
	ERR_load_SSL_strings();
#endif

	/* custom BIO methods wrapping haproxy's own socket I/O; 0x666 is an
	 * arbitrary private BIO type identifier
	 */
	ha_meth = BIO_meth_new(0x666, "ha methods");
	if (ha_meth != NULL) {
		BIO_meth_set_write(ha_meth, ha_ssl_write);
		BIO_meth_set_read(ha_meth, ha_ssl_read);
		BIO_meth_set_ctrl(ha_meth, ha_ssl_ctrl);
		BIO_meth_set_create(ha_meth, ha_ssl_new);
		BIO_meth_set_destroy(ha_meth, ha_ssl_free);
		BIO_meth_set_puts(ha_meth, ha_ssl_puts);
		BIO_meth_set_gets(ha_meth, ha_ssl_gets);
	}

	HA_SPIN_INIT(&ckch_lock);

	HA_SPIN_INIT(&ocsp_tree_lock);

	/* Try to register dedicated SSL/TLS protocol message callbacks for
	 * heartbleed attack (CVE-2014-0160) and clienthello.
	 */
	hap_register_post_check(ssl_sock_register_msg_callbacks);

	/* Try to free all callbacks that were registered by using
	 * ssl_sock_register_msg_callback().
	 */
	hap_register_post_deinit(ssl_sock_unregister_msg_callbacks);
}
2022-04-25 13:18:24 -04:00
INITCALL0 ( STG_REGISTER , __ssl_sock_init ) ;
2018-11-26 04:19:54 -05:00
/* Compute and register the version string */
static void ssl_register_build_options()
{
	char *ptr = NULL;
	int i;

	/* the string is built incrementally: each memprintf() re-allocates
	 * <ptr> with the previous content plus the new line
	 */
	memprintf(&ptr, "Built with OpenSSL version : "
#ifdef OPENSSL_IS_BORINGSSL
		"BoringSSL");
#else /* OPENSSL_IS_BORINGSSL */
		OPENSSL_VERSION_TEXT
		"\nRunning on OpenSSL version : %s%s",
		OpenSSL_version(OPENSSL_VERSION),
		/* warn when the runtime library differs from the build-time one
		 * (patch-level byte masked out by the >> 8)
		 */
		((OPENSSL_VERSION_NUMBER ^ OpenSSL_version_num()) >> 8) ? " (VERSIONS DIFFER!)" : "");
#endif
	memprintf(&ptr, "%s\nOpenSSL library supports TLS extensions : "
#if HA_OPENSSL_VERSION_NUMBER < 0x00907000L
		"no (library version too old)"
#elif defined(OPENSSL_NO_TLSEXT)
		"no (disabled via OPENSSL_NO_TLSEXT)"
#else
		"yes"
#endif
		"", ptr);

	memprintf(&ptr, "%s\nOpenSSL library supports SNI : "
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
		"yes"
#else
#ifdef OPENSSL_NO_TLSEXT
		"no (because of OPENSSL_NO_TLSEXT)"
#else
		"no (version might be too old, 0.9.8f min needed)"
#endif
#endif
		"", ptr);

	/* list the SSL/TLS protocol versions enabled in this build */
	memprintf(&ptr, "%s\nOpenSSL library supports :", ptr);
	for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++)
		if (methodVersions[i].option)
			memprintf(&ptr, "%s %s", ptr, methodVersions[i].name);

#ifdef HAVE_SSL_PROVIDERS
	{
		struct list provider_names;
		struct provider_name *name;
		LIST_INIT(&provider_names);
		ssl_provider_get_name_list(&provider_names);

		memprintf(&ptr, "%s\nOpenSSL providers loaded :", ptr);

		list_for_each_entry(name, &provider_names, list) {
			memprintf(&ptr, "%s %s", ptr, name->name);
		}

		ssl_provider_clear_name_list(&provider_names);
	}
#endif

	/* ownership of <ptr> is transferred to the build-options registry */
	hap_register_build_opts(ptr, 1);
}
2016-12-21 13:23:20 -05:00
2018-11-26 04:19:54 -05:00
INITCALL0 ( STG_REGISTER , ssl_register_build_options ) ;
2015-05-28 10:23:00 -04:00
2022-04-11 12:41:24 -04:00
# if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
2017-01-20 20:10:18 -05:00
/* Post-deinit handler: finishes and releases every engine registered in
 * the openssl_engines list, then frees the list nodes themselves.
 */
void ssl_free_engines(void) {
	struct ssl_engine_list *node, *back;

	/* free up engine list */
	list_for_each_entry_safe(node, back, &openssl_engines, list) {
		ENGINE_finish(node->e);
		ENGINE_free(node->e);
		LIST_DELETE(&node->list);
		free(node);
	}
}
2017-05-29 08:36:20 -04:00
# endif
2015-06-09 11:29:50 -04:00
2022-05-16 10:24:33 -04:00
# ifdef HAVE_SSL_PROVIDERS
/* Post-deinit handler: unloads every OpenSSL provider that was loaded via
 * the configuration and frees the tracking list nodes.
 */
void ssl_unload_providers(void) {
	struct ssl_provider_list *node, *back;

	list_for_each_entry_safe(node, back, &openssl_providers, list) {
		OSSL_PROVIDER_unload(node->provider);
		LIST_DELETE(&node->list);
		free(node);
	}
}
# endif
2015-05-28 10:39:47 -04:00
# ifndef OPENSSL_NO_DH
2017-01-20 20:10:18 -05:00
void ssl_free_dh ( void ) {
if ( local_dh_1024 ) {
2022-02-11 06:04:55 -05:00
HASSL_DH_free ( local_dh_1024 ) ;
2017-01-20 20:10:18 -05:00
local_dh_1024 = NULL ;
}
if ( local_dh_2048 ) {
2022-02-11 06:04:55 -05:00
HASSL_DH_free ( local_dh_2048 ) ;
2017-01-20 20:10:18 -05:00
local_dh_2048 = NULL ;
}
if ( local_dh_4096 ) {
2022-02-11 06:04:55 -05:00
HASSL_DH_free ( local_dh_4096 ) ;
2017-01-20 20:10:18 -05:00
local_dh_4096 = NULL ;
}
2015-05-29 09:53:22 -04:00
if ( global_dh ) {
2022-02-11 06:04:55 -05:00
HASSL_DH_free ( global_dh ) ;
2015-05-29 09:53:22 -04:00
global_dh = NULL ;
}
2017-01-20 20:10:18 -05:00
}
# endif
/* Global SSL cleanup, run as a post-deinit handler: releases the library
 * state that was set up by __ssl_sock_init().
 */
static void __ssl_sock_deinit(void)
{
#if (HA_OPENSSL_VERSION_NUMBER < 0x10100000L)
	/* OpenSSL < 1.1.0 requires explicit cleanup of its global state;
	 * 1.1.0+ does this automatically at exit
	 */
	ERR_remove_state(0);
	ERR_free_strings();

	EVP_cleanup();
#endif

#if (HA_OPENSSL_VERSION_NUMBER >= 0x00907000L) && (HA_OPENSSL_VERSION_NUMBER < 0x10100000L)
	CRYPTO_cleanup_all_ex_data();
#endif

	/* release the custom BIO methods created in __ssl_sock_init() */
	BIO_meth_free(ha_meth);

#if !defined OPENSSL_NO_OCSP
	ssl_destroy_ocsp_update_task();
#endif
}
2022-04-25 13:18:24 -04:00
REGISTER_POST_DEINIT ( __ssl_sock_deinit ) ;
2015-05-28 10:39:47 -04:00
2020-11-14 13:25:33 -05:00
2012-05-18 09:47:34 -04:00
/*
* Local variables :
* c - indent - level : 8
* c - basic - offset : 8
* End :
*/