MINOR: server/ssl: maintain an index of the last known valid SSL session

When a thread creates a new session for a server, if none was known yet,
we assign the thread id (hence the reused_sess index) to a shared variable
so that other threads will later be able to find it when they don't have
one yet. For now we only set and clear the pointer upon session creation;
we do not yet pick it.

Note that we could have done it per thread-group, so as to avoid any
cross-thread exchanges, but it's anticipated that this is essentially
used during startup, at a moment when the cost of inter-thread contention
is very low compared to the benefit of restarting at full speed, which
is why we store a single shared entry instead.
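
To make the shared-index convention easier to follow, here is a minimal,
self-contained sketch of the publish/clear logic described above. It uses
plain C11 atomics instead of HAProxy's HA_ATOMIC_* macros, and
publish_last_sess() and the standalone variable are illustrative names
only, not part of the patch:

#include <stdatomic.h>
#include <stdio.h>

/* Shared index: 0 means "no thread has a valid session yet",
 * any other value means "thread (value - 1) has one".
 */
static atomic_uint last_ssl_sess_tid;

/* Called by thread <tid> after it stored (has_sess=1) or dropped
 * (has_sess=0) its per-thread session entry.
 */
static void publish_last_sess(unsigned int tid, int has_sess)
{
	unsigned int old = atomic_load(&last_ssl_sess_tid);

	if (!has_sess && old == tid + 1)
		/* we owned the advertised entry and lost it: clear the index */
		atomic_compare_exchange_strong(&last_ssl_sess_tid, &old, 0);
	else if (has_sess && !old)
		/* nothing is advertised yet: advertise our own entry */
		atomic_compare_exchange_strong(&last_ssl_sess_tid, &old, tid + 1);
}

int main(void)
{
	publish_last_sess(3, 1); /* thread 3 stores a session -> index becomes 4 */
	publish_last_sess(5, 1); /* thread 5 also has one -> index stays 4 */
	publish_last_sess(3, 0); /* thread 3 drops its session -> index back to 0 */
	printf("last_ssl_sess_tid = %u\n", atomic_load(&last_ssl_sess_tid));
	return 0;
}
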
Willy Tarreau 2023-08-21 11:55:42 +02:00
parent 607041dec3
commit 52b260bae4
2 changed files with 16 additions and 0 deletions


@@ -390,6 +390,7 @@ struct server {
 			char *sni;                /* SNI used for the session */
 			__decl_thread(HA_RWLOCK_T sess_lock);
 		} * reused_sess;
+		uint last_ssl_sess_tid;     /* last tid+1 having updated reused_sess (0=none, >0=tid+1) */
 		struct ckch_inst *inst;     /* Instance of the ckch_store in which the certificate was loaded (might be null if server has no certificate) */
 		__decl_thread(HA_RWLOCK_T lock); /* lock the cache and SSL_CTX during commit operations */
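
The single uint deliberately encodes both states in one field. As a purely
illustrative helper (not part of the patch), decoding it could look like this:

/* Illustrative only: returns the thread id that last published a valid
 * session, or -1 if none is known.
 */
static inline int last_sess_owner(unsigned int last_ssl_sess_tid)
{
	return (int)last_ssl_sess_tid - 1; /* 0 -> -1 (none), tid+1 -> tid */
}
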


@@ -4261,6 +4261,7 @@ static int ssl_sess_new_srv_cb(SSL *ssl, SSL_SESSION *sess)
 {
 	struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
 	struct server *s;
+	uint old_tid;
 
 	s = __objt_server(conn->target);
@@ -4306,6 +4307,16 @@ static int ssl_sess_new_srv_cb(SSL *ssl, SSL_SESSION *sess)
 	/* done updating the session */
 
+	/* Now we'll try to add or remove this entry as a valid one:
+	 *   - if no entry is set and we have one, let's share it
+	 *   - if our entry was set and we have no more, let's clear it
+	 */
+	old_tid = HA_ATOMIC_LOAD(&s->ssl_ctx.last_ssl_sess_tid); // 0=none, >0 = tid + 1
+	if (!s->ssl_ctx.reused_sess[tid].ptr && old_tid == tid + 1)
+		HA_ATOMIC_CAS(&s->ssl_ctx.last_ssl_sess_tid, &old_tid, 0); // no more valid
+	else if (s->ssl_ctx.reused_sess[tid].ptr && !old_tid)
+		HA_ATOMIC_CAS(&s->ssl_ctx.last_ssl_sess_tid, &old_tid, tid + 1);
+
 	if (s->ssl_ctx.reused_sess[tid].sni) {
 		/* if the new sni is empty or isn't the same as the old one */
 		if ((!sni) || strcmp(s->ssl_ctx.reused_sess[tid].sni, sni) != 0) {
@@ -4328,6 +4339,10 @@ static int ssl_sess_new_srv_cb(SSL *ssl, SSL_SESSION *sess)
 		HA_RWLOCK_WRUNLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.reused_sess[tid].sess_lock);
 	}
 
+	old_tid = HA_ATOMIC_LOAD(&s->ssl_ctx.last_ssl_sess_tid); // 0=none, >0 = tid + 1
+	if (old_tid == tid + 1)
+		HA_ATOMIC_CAS(&s->ssl_ctx.last_ssl_sess_tid, &old_tid, 0); // no more valid
+
 	HA_RWLOCK_RDUNLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.lock);
 }
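
As the commit message notes, the index is only maintained here and not yet
consumed. A possible future consumer, sketched against a simplified model
of the per-thread cache (the structure, the function name and the omitted
locking are assumptions, not part of this patch), could look like:

#include <stddef.h>

/* Simplified stand-in for one per-thread entry of ssl_ctx.reused_sess[];
 * the real structure also carries the size, the SNI and a lock.
 */
struct sess_entry {
	unsigned char *ptr; /* serialized session, NULL if none */
};

/* Hypothetical consumer: a thread with no session of its own picks the
 * entry of the last thread known to have a valid one. Locking around the
 * chosen entry is deliberately omitted and would be required in real code.
 */
static unsigned char *pick_last_known_sess(struct sess_entry *reused_sess,
                                           unsigned int last_ssl_sess_tid)
{
	if (!last_ssl_sess_tid)
		return NULL;                               /* 0: nothing published yet */
	return reused_sess[last_ssl_sess_tid - 1].ptr;     /* >0: tid + 1 */
}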