diff --git a/include/proto/shctx.h b/include/proto/shctx.h index dfa0c5736..2e72451d3 100644 --- a/include/proto/shctx.h +++ b/include/proto/shctx.h @@ -14,9 +14,9 @@ #ifndef SHCTX_H #define SHCTX_H +#include #include -#include #include #ifndef USE_PRIVATE_CACHE @@ -31,30 +31,14 @@ #endif #endif - -/* Allocate shared memory context. - * is the number of allocated blocks into cache (default 128 bytes) - * A block is large enough to contain a classic session (without client cert) - * If is set less or equal to 0, ssl cache is disabled. - * Set to 1 to use a mapped shared memory instead - * of private. (ignored if compiled with USE_PRIVATE_CACHE=1). - * Returns: -1 on alloc failure, if it performs context alloc, - * and 0 if cache is already allocated. - */ - -int shared_context_init(struct shared_context **orig_shctx, int size, int shared); - -/* Set shared cache callbacks on an ssl context. - * Set session cache mode to server and disable openssl internal cache. - * Shared context MUST be firstly initialized */ -void shared_context_set_cache(SSL_CTX *ctx); - - -int shsess_free(struct shared_context *shctx, struct shared_session *shsess); - -struct shared_session *shsess_get_next(struct shared_context *shctx, int data_len); - -int shsess_store(struct shared_context *shctx, unsigned char *s_id, unsigned char *data, int data_len); +int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize, int extra, int shared); +struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx, int data_len); +void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first); +void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first); +int shctx_row_data_append(struct shared_context *shctx, + struct shared_block *first, unsigned char *data, int len); +int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first, + unsigned char *dst, int offset, int len); /* Lock functions */ @@ -196,27 +180,20 @@ 
static inline void _shared_context_unlock(struct shared_context *shctx) /* List Macros */ -#define shblock_unset(s) (s)->n->p = (s)->p; \ - (s)->p->n = (s)->n; - -static inline void shblock_set_free(struct shared_context *shctx, +static inline void shctx_block_set_hot(struct shared_context *shctx, struct shared_block *s) { - shblock_unset(s); - (s)->n = &shctx->free; - (s)->p = shctx->free.p; - shctx->free.p->n = s; - shctx->free.p = s; + shctx->nbav--; + LIST_DEL(&s->list); + LIST_ADDQ(&shctx->hot, &s->list); } -static inline void shblock_set_active(struct shared_context *shctx, +static inline void shctx_block_set_avail(struct shared_context *shctx, struct shared_block *s) { - shblock_unset(s) - (s)->n = &shctx->active; - (s)->p = shctx->active.p; - shctx->active.p->n = s; - shctx->active.p = s; + shctx->nbav++; + LIST_DEL(&s->list); + LIST_ADDQ(&shctx->avail, &s->list); } #endif /* SHCTX_H */ diff --git a/include/proto/ssl_sock.h b/include/proto/ssl_sock.h index 9f974dd69..8f8d277aa 100644 --- a/include/proto/ssl_sock.h +++ b/include/proto/ssl_sock.h @@ -79,16 +79,13 @@ unsigned int ssl_sock_generated_cert_key(const void *data, size_t len); /* ssl shctx macro */ -#define shsess_tree_delete(s) ebmb_delete(&(s)->key); - -#define shsess_tree_insert(shctx, s) (struct shared_session *)ebmb_insert(&shctx->active.data.session.key.node.branches, \ - &(s)->key, SSL_MAX_SSL_SESSION_ID_LENGTH); - -#define shsess_tree_lookup(shctx, k) (struct shared_session *)ebmb_lookup(&shctx->active.data.session.key.node.branches, \ - (k), SSL_MAX_SSL_SESSION_ID_LENGTH); - +#define sh_ssl_sess_tree_delete(s) ebmb_delete(&(s)->key); +#define sh_ssl_sess_tree_insert(s) (struct sh_ssl_sess_hdr *)ebmb_insert(sh_ssl_sess_tree, \ + &(s)->key, SSL_MAX_SSL_SESSION_ID_LENGTH); +#define sh_ssl_sess_tree_lookup(k) (struct sh_ssl_sess_hdr *)ebmb_lookup(sh_ssl_sess_tree, \ + (k), SSL_MAX_SSL_SESSION_ID_LENGTH); #endif /* _PROTO_SSL_SOCK_H */ /* diff --git a/include/types/shctx.h 
b/include/types/shctx.h index 6ab6460a6..afef1a107 100644 --- a/include/types/shctx.h +++ b/include/types/shctx.h @@ -1,8 +1,6 @@ #ifndef __TYPES_SHCTX #define __TYPES_SHCTX -#include /* shared session depend of openssl */ - #ifndef SHSESS_BLOCK_MIN_SIZE #define SHSESS_BLOCK_MIN_SIZE 128 #endif @@ -18,20 +16,15 @@ #define SHCTX_E_ALLOC_CACHE -1 #define SHCTX_E_INIT_LOCK -2 -struct shared_session { - struct ebmb_node key; - unsigned char key_data[SSL_MAX_SSL_SESSION_ID_LENGTH]; - unsigned char data[SHSESS_BLOCK_MIN_SIZE]; -}; +#define SHCTX_F_REMOVING 0x1 /* Removing flag, does not accept new */ +/* generic shctx struct */ struct shared_block { - union { - struct shared_session session; - unsigned char data[sizeof(struct shared_session)]; - } data; - short int data_len; - struct shared_block *p; - struct shared_block *n; + struct list list; + short int len; /* data length for the row */ + short int block_count; /* number of blocks */ + unsigned int refcount; + unsigned char data[0]; }; struct shared_context { @@ -42,10 +35,12 @@ struct shared_context { unsigned int waiters; #endif #endif - struct shared_block active; - struct shared_block free; + struct list avail; /* list for active and free blocks */ + struct list hot; /* list for locked blocks */ + unsigned int nbav; /* number of available blocks */ + void (*free_block)(struct shared_block *first, struct shared_block *block); + short int block_size; + unsigned char data[0]; }; -extern struct shared_context *ssl_shctx; - #endif diff --git a/include/types/ssl_sock.h b/include/types/ssl_sock.h index ecdad46c4..5bd76ba9c 100644 --- a/include/types/ssl_sock.h +++ b/include/types/ssl_sock.h @@ -56,4 +56,10 @@ struct tls_keys_ref { int tls_ticket_enc_index; }; +/* shared ssl session */ +struct sh_ssl_sess_hdr { + struct ebmb_node key; + unsigned char key_data[SSL_MAX_SSL_SESSION_ID_LENGTH]; +}; + #endif /* _TYPES_SSL_SOCK_H */ diff --git a/src/shctx.c b/src/shctx.c index 853b522ee..69da33570 100644 --- a/src/shctx.c +++ 
b/src/shctx.c @@ -14,170 +14,236 @@ #include #include #include - -#include -#include -#include +#include +#include +#include "proto/shctx.h" #include -#include -#include - - #if !defined (USE_PRIVATE_CACHE) + int use_shared_mem = 0; + #endif -/* Tree Macros */ - -/* shared session functions */ - -/* Free session blocks, returns number of freed blocks */ -int shsess_free(struct shared_context *shctx, struct shared_session *shsess) +/* + * Reserve a row, put it in the hotlist, set the refcount to 1 + * + * Reserve blocks in the avail list and put them in the hot list + * Return the first block put in the hot list or NULL if not enough blocks available + */ +struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx, int data_len) { - struct shared_block *block; - int ret = 1; + struct shared_block *block, *sblock, *ret = NULL, *first; + int enough = 0; + int freed = 0; - if (((struct shared_block *)shsess)->data_len <= sizeof(shsess->data)) { - shblock_set_free(shctx, (struct shared_block *)shsess); - return ret; - } - block = ((struct shared_block *)shsess)->n; - shblock_set_free(shctx, (struct shared_block *)shsess); - while (1) { - struct shared_block *next; + /* not enough usable blocks */ + if (data_len > shctx->nbav * shctx->block_size) + goto out; - if (block->data_len <= sizeof(block->data)) { - /* last block */ - shblock_set_free(shctx, block); - ret++; - break; + while (!enough && !LIST_ISEMPTY(&shctx->avail)) { + int count = 0; + int first_count = 0, first_len = 0; + + first = block = LIST_NEXT(&shctx->avail, struct shared_block *, list); + if (ret == NULL) + ret = first; + + first_count = first->block_count; + first_len = first->len; + /* + Should never be set to 0. 
+ if (first->block_count == 0) + first->block_count = 1; + */ + + list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) { + + /* release callback */ + if (first_len && shctx->free_block) + shctx->free_block(first, block); + + block->block_count = 1; + block->len = 0; + + freed++; + data_len -= shctx->block_size; + + if (data_len > 0) + shctx_block_set_hot(shctx, block); + + if (data_len <= 0 && !enough) { + shctx_block_set_hot(shctx, block); + ret->block_count = freed; + ret->refcount = 1; + enough = 1; + } + + count++; + if (count >= first_count) + break; } - next = block->n; - shblock_set_free(shctx, block); - ret++; - block = next; } + +out: return ret; } -/* This function frees enough blocks to store a new session of data_len. - * Returns a ptr on a free block if it succeeds, or NULL if there are not - * enough blocks to store that session. +/* + * if the refcount is 0 move the row to the hot list. Increment the refcount */ -struct shared_session *shsess_get_next(struct shared_context *shctx, int data_len) +void shctx_row_inc_hot(struct shared_context *shctx, struct shared_block *first) { - int head = 0; - struct shared_block *b; + struct shared_block *block, *sblock; + int count = 0; - b = shctx->free.n; - while (b != &shctx->free) { - if (!head) { - data_len -= sizeof(b->data.session.data); - head = 1; - } - else - data_len -= sizeof(b->data.data); - if (data_len <= 0) - return &shctx->free.n->data.session; - b = b->n; - } - b = shctx->active.n; - while (b != &shctx->active) { - int freed; + if (first->refcount <= 0) { - shsess_tree_delete(&b->data.session); - freed = shsess_free(shctx, &b->data.session); - if (!head) - data_len -= sizeof(b->data.session.data) + (freed-1)*sizeof(b->data.data); - else - data_len -= freed*sizeof(b->data.data); - if (data_len <= 0) - return &shctx->free.n->data.session; - b = shctx->active.n; - } - return NULL; -} + block = first; -/* store a session into the cache - * s_id : session id padded with zero to 
SSL_MAX_SSL_SESSION_ID_LENGTH - * data: asn1 encoded session - * data_len: asn1 encoded session length - * Returns 1 id session was stored (else 0) - */ -int shsess_store(struct shared_context *shctx, unsigned char *s_id, unsigned char *data, int data_len) -{ - struct shared_session *shsess, *oldshsess; + list_for_each_entry_safe_from(block, sblock, &shctx->avail, list) { - shsess = shsess_get_next(shctx, data_len); - if (!shsess) { - /* Could not retrieve enough free blocks to store that session */ - return 0; - } + shctx_block_set_hot(shctx, block); - /* prepare key */ - memcpy(shsess->key_data, s_id, SSL_MAX_SSL_SESSION_ID_LENGTH); - - /* it returns the already existing node - or current node if none, never returns null */ - oldshsess = shsess_tree_insert(shctx, shsess); - if (oldshsess != shsess) { - /* free all blocks used by old node */ - shsess_free(shctx, oldshsess); - shsess = oldshsess; - } - - ((struct shared_block *)shsess)->data_len = data_len; - if (data_len <= sizeof(shsess->data)) { - /* Store on a single block */ - memcpy(shsess->data, data, data_len); - shblock_set_active(shctx, (struct shared_block *)shsess); - } - else { - unsigned char *p; - /* Store on multiple blocks */ - int cur_len; - - memcpy(shsess->data, data, sizeof(shsess->data)); - p = data + sizeof(shsess->data); - cur_len = data_len - sizeof(shsess->data); - shblock_set_active(shctx, (struct shared_block *)shsess); - while (1) { - /* Store next data on free block. - * shsess_get_next guarantees that there are enough - * free blocks in queue. - */ - struct shared_block *block; - - block = shctx->free.n; - if (cur_len <= sizeof(block->data)) { - /* This is the last block */ - block->data_len = cur_len; - memcpy(block->data.data, p, cur_len); - shblock_set_active(shctx, block); + count++; + if (count >= first->block_count) + break; + } + } + + first->refcount++; +} + +/* + * decrement the refcount and move the row at the end of the avail list if it reaches 0. 
+ */ +void shctx_row_dec_hot(struct shared_context *shctx, struct shared_block *first) +{ + struct shared_block *block, *sblock; + int count = 0; + + first->refcount--; + + if (first->refcount <= 0) { + + block = first; + + list_for_each_entry_safe_from(block, sblock, &shctx->hot, list) { + + shctx_block_set_avail(shctx, block); + + count++; + if (count >= first->block_count) break; - } - /* Intermediate block */ - block->data_len = cur_len; - memcpy(block->data.data, p, sizeof(block->data)); - p += sizeof(block->data.data); - cur_len -= sizeof(block->data.data); - shblock_set_active(shctx, block); } } - return 1; } +/* + * Append data in the row if there is enough space. + * The row should be in the hot list + * + * Return the amount of appended data if ret >= 0 + * or how much more space it needs to contains the data if < 0. + */ +int shctx_row_data_append(struct shared_context *shctx, struct shared_block *first, unsigned char *data, int len) +{ + int remain, start; + int count = 0; + struct shared_block *block; + + + /* return - needed to work */ + if (len > first->block_count * shctx->block_size - first->len) + return (first->block_count * shctx->block_size - first->len) - len; + + /* skipping full buffers, stop at the first buffer with remaining space */ + block = first; + list_for_each_entry_from(block, &shctx->hot, list) { + count++; + + + /* break if there is not enough blocks */ + if (count > first->block_count) + break; + + /* end of copy */ + if (len <= 0) + break; + + /* skip full buffers */ + if (count * shctx->block_size <= first->len) + continue; + + /* remaining space in the current block which is not full */ + remain = (shctx->block_size * count - first->len) % shctx->block_size; + /* if remain == 0, previous buffer are full, or first->len == 0 */ + remain = remain ? 
remain : shctx->block_size; + + /* start must be calculated before remain is modified */ + start = shctx->block_size - remain; + + /* must not try to copy more than len */ + remain = MIN(remain, len); + + memcpy(block->data + start, data, remain); + data += remain; + len -= remain; + first->len += remain; /* update len in the head of the row */ + } + + return len; +} + +/* + * Copy data from a row of blocks, return the remaining data to copy + * If 0 is returned, the full data has successfully been copied + * + * The row should be in the hot list + */ +int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first, + unsigned char *dst, int offset, int len) +{ + int count = 0, size = 0, start = -1; + struct shared_block *block; + + block = first; + count = 0; + /* Pass through the blocks to copy them */ + list_for_each_entry_from(block, &shctx->hot, list) { + if (count >= first->block_count || len <= 0) + break; + + count++; + /* continue until we are in right block + corresponding to the offset */ + if (count < offset / shctx->block_size + 1) + continue; + + /* on the first block, data won't possibly begin at offset 0 */ + if (start == -1) + start = offset - (count - 1) * shctx->block_size; + + /* size can be lower than a block when copying the last block */ + size = MIN(shctx->block_size - start, len); + + memcpy(dst, block->data + start, size); + dst += size; + len -= size; + start = 0; + } + return len; +} /* Allocate shared memory context. - * is maximum cached sessions. - * If is set to less or equal to 0, ssl cache is disabled. - * Returns: -1 on alloc failure, if it performs context alloc, + * is maximum blocks. + * If is set to less or equal to 0, ssl cache is disabled. + * Returns: -1 on alloc failure, if it performs context alloc, + * and 0 if cache is already allocated. 
*/ -int shared_context_init(struct shared_context **orig_shctx, int size, int shared) +int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize, int extra, int shared) { int i; struct shared_context *shctx; @@ -187,23 +253,18 @@ int shared_context_init(struct shared_context **orig_shctx, int size, int shared pthread_mutexattr_t attr; #endif #endif - struct shared_block *prev,*cur; + void *cur; int maptype = MAP_PRIVATE; - if (orig_shctx && *orig_shctx) + if (maxblocks <= 0) return 0; - if (size<=0) - return 0; - - /* Increate size by one to reserve one node for lookup */ - size++; #ifndef USE_PRIVATE_CACHE if (shared) maptype = MAP_SHARED; #endif - shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context)+(size*sizeof(struct shared_block)), + shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)), PROT_READ | PROT_WRITE, maptype | MAP_ANON, -1, 0); if (!shctx || shctx == MAP_FAILED) { shctx = NULL; @@ -211,11 +272,13 @@ int shared_context_init(struct shared_context **orig_shctx, int size, int shared goto err; } + shctx->nbav = 0; + #ifndef USE_PRIVATE_CACHE if (maptype == MAP_SHARED) { #ifdef USE_PTHREAD_PSHARED if (pthread_mutexattr_init(&attr)) { - munmap(shctx, sizeof(struct shared_context)+(size*sizeof(struct shared_block))); + munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize))); shctx = NULL; ret = SHCTX_E_INIT_LOCK; goto err; @@ -223,7 +286,7 @@ int shared_context_init(struct shared_context **orig_shctx, int size, int shared if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) { pthread_mutexattr_destroy(&attr); - munmap(shctx, sizeof(struct shared_context)+(size*sizeof(struct shared_block))); + munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize))); shctx = NULL; ret = SHCTX_E_INIT_LOCK; goto err; @@ -231,7 
+294,7 @@ int shared_context_init(struct shared_context **orig_shctx, int size, int shared if (pthread_mutex_init(&shctx->mutex, &attr)) { pthread_mutexattr_destroy(&attr); - munmap(shctx, sizeof(struct shared_context)+(size*sizeof(struct shared_block))); + munmap(shctx, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize))); shctx = NULL; ret = SHCTX_E_INIT_LOCK; goto err; @@ -243,26 +306,23 @@ int shared_context_init(struct shared_context **orig_shctx, int size, int shared } #endif - memset(&shctx->active.data.session.key, 0, sizeof(struct ebmb_node)); - memset(&shctx->free.data.session.key, 0, sizeof(struct ebmb_node)); + LIST_INIT(&shctx->avail); + LIST_INIT(&shctx->hot); - /* No duplicate authorized in tree: */ - shctx->active.data.session.key.node.branches = EB_ROOT_UNIQUE; + shctx->block_size = blocksize; - cur = &shctx->active; - cur->n = cur->p = cur; - - cur = &shctx->free; - for (i = 0 ; i < size ; i++) { - prev = cur; - cur++; - prev->n = cur; - cur->p = prev; + /* init the free blocks after the shared context struct */ + cur = (void *)shctx + sizeof(struct shared_context) + extra; + for (i = 0; i < maxblocks; i++) { + struct shared_block *cur_block = (struct shared_block *)cur; + cur_block->len = 0; + cur_block->refcount = 0; + cur_block->block_count = 1; + LIST_ADDQ(&shctx->avail, &cur_block->list); + shctx->nbav++; + cur += sizeof(struct shared_block) + blocksize; } - cur->n = &shctx->free; - shctx->free.p = cur; - - ret = size; + ret = maxblocks; err: *orig_shctx = shctx; diff --git a/src/ssl_sock.c b/src/ssl_sock.c index 71bcbe3dc..09e27b30c 100644 --- a/src/ssl_sock.c +++ b/src/ssl_sock.c @@ -275,7 +275,16 @@ const char *SSL_SOCK_KEYTYPE_NAMES[] = { #define SSL_SOCK_NUM_KEYTYPES 1 #endif -struct shared_context *ssl_shctx = NULL; +static struct shared_context *ssl_shctx; /* ssl shared session cache */ +static struct eb_root *sh_ssl_sess_tree; /* ssl shared session tree */ + +#define 
sh_ssl_sess_tree_delete(s) ebmb_delete(&(s)->key); + +#define sh_ssl_sess_tree_insert(s) (struct sh_ssl_sess_hdr *)ebmb_insert(sh_ssl_sess_tree, \ + &(s)->key, SSL_MAX_SSL_SESSION_ID_LENGTH); + +#define sh_ssl_sess_tree_lookup(k) (struct sh_ssl_sess_hdr *)ebmb_lookup(sh_ssl_sess_tree, \ + (k), SSL_MAX_SSL_SESSION_ID_LENGTH); /* * This function gives the detail of the SSL error. It is used only @@ -3718,10 +3727,68 @@ ssl_sock_initial_ctx(struct bind_conf *bind_conf) return cfgerr; } -/* SSL context callbacks */ + +static inline void sh_ssl_sess_free_blocks(struct shared_block *first, struct shared_block *block) +{ + if (first == block) { + struct sh_ssl_sess_hdr *sh_ssl_sess = (struct sh_ssl_sess_hdr *)first->data; + if (first->len > 0) + sh_ssl_sess_tree_delete(sh_ssl_sess); + } +} + +/* return first block from sh_ssl_sess */ +static inline struct shared_block *sh_ssl_sess_first_block(struct sh_ssl_sess_hdr *sh_ssl_sess) +{ + return (struct shared_block *)((unsigned char *)sh_ssl_sess - ((struct shared_block *)NULL)->data); + +} + +/* store a session into the cache + * s_id : session id padded with zero to SSL_MAX_SSL_SESSION_ID_LENGTH + * data: asn1 encoded session + * data_len: asn1 encoded session length + * Returns 1 if session was stored (else 0) + */ +static int sh_ssl_sess_store(unsigned char *s_id, unsigned char *data, int data_len) +{ + struct shared_block *first; + struct sh_ssl_sess_hdr *sh_ssl_sess, *oldsh_ssl_sess; + + first = shctx_row_reserve_hot(ssl_shctx, data_len + sizeof(struct sh_ssl_sess_hdr)); + if (!first) { + /* Could not retrieve enough free blocks to store that session */ + return 0; + } + + /* STORE the key in the first elem */ + sh_ssl_sess = (struct sh_ssl_sess_hdr *)first->data; + memcpy(sh_ssl_sess->key_data, s_id, SSL_MAX_SSL_SESSION_ID_LENGTH); + first->len = sizeof(struct sh_ssl_sess_hdr); + + /* it returns the already existing node + or current node if none, never returns null */ + oldsh_ssl_sess = 
sh_ssl_sess_tree_insert(sh_ssl_sess); + if (oldsh_ssl_sess != sh_ssl_sess) { + /* NOTE: Row couldn't be in use because we lock read & write function */ + /* release the reserved row */ + shctx_row_dec_hot(ssl_shctx, first); + /* replace the previous session already in the tree */ + sh_ssl_sess = oldsh_ssl_sess; + /* ignore the previous session data, only use the header */ + first = sh_ssl_sess_first_block(sh_ssl_sess); + shctx_row_inc_hot(ssl_shctx, first); + first->len = sizeof(struct sh_ssl_sess_hdr); + } + + if (shctx_row_data_append(ssl_shctx, first, data, data_len) < 0) + return 0; + + return 1; +} /* SSL callback used on new session creation */ -int shctx_new_cb(SSL *ssl, SSL_SESSION *sess) +int sh_ssl_sess_new_cb(SSL *ssl, SSL_SESSION *sess) { unsigned char encsess[SHSESS_MAX_DATA_LEN]; /* encoded session */ unsigned char encid[SSL_MAX_SSL_SESSION_ID_LENGTH]; /* encoded id */ @@ -3755,12 +3822,9 @@ int shctx_new_cb(SSL *ssl, SSL_SESSION *sess) memset(encid + sid_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH-sid_length); shared_context_lock(ssl_shctx); - /* store to cache */ - shsess_store(ssl_shctx, encid, encsess, data_len); - + sh_ssl_sess_store(encid, encsess, data_len); shared_context_unlock(ssl_shctx); - err: /* reset original length values */ SSL_SESSION_set1_id(sess, sid_data, sid_length); @@ -3770,13 +3834,13 @@ err: } /* SSL callback used on lookup an existing session cause none found in internal cache */ -SSL_SESSION *shctx_get_cb(SSL *ssl, __OPENSSL_110_CONST__ unsigned char *key, int key_len, int *do_copy) +SSL_SESSION *sh_ssl_sess_get_cb(SSL *ssl, __OPENSSL_110_CONST__ unsigned char *key, int key_len, int *do_copy) { - struct shared_session *shsess; + struct sh_ssl_sess_hdr *sh_ssl_sess; unsigned char data[SHSESS_MAX_DATA_LEN], *p; unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH]; - int data_len; SSL_SESSION *sess; + struct shared_block *first; global.shctx_lookups++; @@ -3794,53 +3858,24 @@ SSL_SESSION *shctx_get_cb(SSL *ssl, 
__OPENSSL_110_CONST__ unsigned char *key, in shared_context_lock(ssl_shctx); /* lookup for session */ - shsess = shsess_tree_lookup(ssl_shctx, key); - if (!shsess) { + sh_ssl_sess = sh_ssl_sess_tree_lookup(key); + if (!sh_ssl_sess) { /* no session found: unlock cache and exit */ shared_context_unlock(ssl_shctx); global.shctx_misses++; return NULL; } - data_len = ((struct shared_block *)shsess)->data_len; - if (data_len <= sizeof(shsess->data)) { - /* Session stored on single block */ - memcpy(data, shsess->data, data_len); - shblock_set_active(ssl_shctx, (struct shared_block *)shsess); - } - else { - /* Session stored on multiple blocks */ - struct shared_block *block; + /* sh_ssl_sess (shared_block->data) is at the end of shared_block */ + first = sh_ssl_sess_first_block(sh_ssl_sess); - memcpy(data, shsess->data, sizeof(shsess->data)); - p = data + sizeof(shsess->data); - block = ((struct shared_block *)shsess)->n; - shblock_set_active(ssl_shctx, (struct shared_block *)shsess); - while (1) { - /* Retrieve data from next block */ - struct shared_block *next; - - if (block->data_len <= sizeof(block->data.data)) { - /* This is the last block */ - memcpy(p, block->data.data, block->data_len); - p += block->data_len; - shblock_set_active(ssl_shctx, block); - break; - } - /* Intermediate block */ - memcpy(p, block->data.data, sizeof(block->data.data)); - p += sizeof(block->data.data); - next = block->n; - shblock_set_active(ssl_shctx, block); - block = next; - } - } + shctx_row_data_get(ssl_shctx, first, data, sizeof(struct sh_ssl_sess_hdr), first->len-sizeof(struct sh_ssl_sess_hdr)); shared_context_unlock(ssl_shctx); /* decode ASN1 session */ p = data; - sess = d2i_SSL_SESSION(NULL, (const unsigned char **)&p, data_len); + sess = d2i_SSL_SESSION(NULL, (const unsigned char **)&p, first->len-sizeof(struct sh_ssl_sess_hdr)); /* Reset session id and session id contenxt */ if (sess) { SSL_SESSION_set1_id(sess, key, key_len); @@ -3850,10 +3885,11 @@ SSL_SESSION 
*shctx_get_cb(SSL *ssl, __OPENSSL_110_CONST__ unsigned char *key, in return sess; } + /* SSL callback used to signal session is no more used in internal cache */ -void shctx_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess) +void sh_ssl_sess_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess) { - struct shared_session *shsess; + struct sh_ssl_sess_hdr *sh_ssl_sess; unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH]; unsigned int sid_length; const unsigned char *sid_data; @@ -3870,11 +3906,10 @@ void shctx_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess) shared_context_lock(ssl_shctx); /* lookup for session */ - shsess = shsess_tree_lookup(ssl_shctx, sid_data); - if (shsess) { + sh_ssl_sess = sh_ssl_sess_tree_lookup(sid_data); + if (sh_ssl_sess) { /* free session */ - shsess_tree_delete(shsess); - shsess_free(ssl_shctx, shsess); + sh_ssl_sess_tree_delete(sh_ssl_sess); } /* unlock cache */ @@ -3884,7 +3919,7 @@ void shctx_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess) /* Set session cache mode to server and disable openssl internal cache. * Set shared cache callbacks on an ssl context. 
* Shared context MUST be firstly initialized */ -void shared_context_set_cache(SSL_CTX *ctx) +void ssl_set_shctx(SSL_CTX *ctx) { SSL_CTX_set_session_id_context(ctx, (const unsigned char *)SHCTX_APPNAME, strlen(SHCTX_APPNAME)); @@ -3898,9 +3933,9 @@ void shared_context_set_cache(SSL_CTX *ctx) SSL_SESS_CACHE_NO_AUTO_CLEAR); /* Set callbacks */ - SSL_CTX_sess_set_new_cb(ctx, shctx_new_cb); - SSL_CTX_sess_set_get_cb(ctx, shctx_get_cb); - SSL_CTX_sess_set_remove_cb(ctx, shctx_remove_cb); + SSL_CTX_sess_set_new_cb(ctx, sh_ssl_sess_new_cb); + SSL_CTX_sess_set_get_cb(ctx, sh_ssl_sess_get_cb); + SSL_CTX_sess_set_remove_cb(ctx, sh_ssl_sess_remove_cb); } int ssl_sock_prepare_ctx(struct bind_conf *bind_conf, struct ssl_bind_conf *ssl_conf, SSL_CTX *ctx) @@ -4000,7 +4035,7 @@ int ssl_sock_prepare_ctx(struct bind_conf *bind_conf, struct ssl_bind_conf *ssl_ } #endif - shared_context_set_cache(ctx); + ssl_set_shctx(ctx); conf_ciphers = (ssl_conf && ssl_conf->ciphers) ? ssl_conf->ciphers : bind_conf->ssl_conf.ciphers; if (conf_ciphers && !SSL_CTX_set_cipher_list(ctx, conf_ciphers)) { @@ -4564,7 +4599,7 @@ int ssl_sock_prepare_bind_conf(struct bind_conf *bind_conf) } } - alloc_ctx = shared_context_init(&ssl_shctx, global.tune.sslcachesize, (!global_ssl.private_cache && (global.nbproc > 1)) ? 1 : 0); + alloc_ctx = shctx_init(&ssl_shctx, global.tune.sslcachesize, sizeof(struct sh_ssl_sess_hdr) + SHSESS_BLOCK_MIN_SIZE, sizeof(*sh_ssl_sess_tree), (!global_ssl.private_cache && (global.nbproc > 1)) ? 1 : 0); if (alloc_ctx < 0) { if (alloc_ctx == SHCTX_E_INIT_LOCK) Alert("Unable to initialize the lock for the shared SSL session cache. 
You can retry using the global statement 'tune.ssl.force-private-cache' but it could increase CPU usage due to renegotiations if nbproc > 1.\n"); @@ -4572,6 +4607,11 @@ int ssl_sock_prepare_bind_conf(struct bind_conf *bind_conf) Alert("Unable to allocate SSL session cache.\n"); return -1; } + /* free block callback */ + ssl_shctx->free_block = sh_ssl_sess_free_blocks; + /* init the root tree within the extra space */ + sh_ssl_sess_tree = (void *)ssl_shctx + sizeof(struct shared_context); + *sh_ssl_sess_tree = EB_ROOT_UNIQUE; err = 0; /* initialize all certificate contexts */