Mirror of https://git.haproxy.org/git/haproxy.git/ (synced 2025-08-10 09:07:02 +02:00)
MINOR: connection: align toremove_{lock,connections} and cleanup into idle_conns
We used to have 3 thread-based arrays for toremove_lock, idle_cleanup, and toremove_connections. The problem is that these items are small, and that this creates false sharing between threads since it's possible to pack up to 8-16 of these values into a single cache line. This can cause real damage where there is contention on the lock.

This patch creates a new array of struct "idle_conns" that is aligned on a cache line and which contains all three members above. This way each thread has access to its variables without hindering the other ones. Just doing this increased the HTTP/1 request rate by 5% on a 16-thread machine.

The definition was moved to connection.{c,h} since it appeared a more natural evolution of the ongoing changes given that there was already one of them declared in connection.h previously.
commit 4d82bf5c2e
parent d79422a0ff
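Condensed sketch of the layout change, assembled from the declarations touched by the patch below. The types and macros (mt_list, task, HA_SPINLOCK_T, __decl_thread(), THREAD_ALIGNED(), MAX_THREADS) come from the HAProxy headers and are not redefined here, so treat this as an illustration rather than standalone buildable code:

/* Before: three per-thread arrays of small items; entries belonging to
 * different threads can end up in the same 64-byte cache line, so
 * contention on one thread's spinlock also bounces its neighbours'
 * data (false sharing).
 */
__decl_thread(HA_SPINLOCK_T toremove_lock[MAX_THREADS]);
struct mt_list toremove_connections[MAX_THREADS];
struct task *idle_conn_cleanup[MAX_THREADS];

/* After: one structure per thread, aligned on a cache line and grouping
 * the same three members, so idle_conns[tid] stays in that thread's line.
 */
struct idle_conns {
        struct mt_list toremove_conns;
        __decl_thread(HA_SPINLOCK_T toremove_lock);
        struct task *cleanup_task;
} THREAD_ALIGNED(64);

struct idle_conns idle_conns[MAX_THREADS];

/* Call sites change from, e.g.:
 *   HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
 * to:
 *   HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 */

With one 64-byte-aligned slot per thread, a thread spinning on idle_conns[i].toremove_lock no longer invalidates the cache line holding another thread's lock or list head, which is where the 5% HTTP/1 gain mentioned above comes from.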
@@ -600,6 +600,16 @@ struct tlv_ssl {
 }__attribute__((packed));
 
+/* This structure is used to manage idle connections, their locking, and the
+ * list of such idle connections to be removed. It is per-thread and must be
+ * accessible from foreign threads.
+ */
+struct idle_conns {
+        struct mt_list toremove_conns;
+        __decl_thread(HA_SPINLOCK_T toremove_lock);
+        struct task *cleanup_task;
+} THREAD_ALIGNED(64);
+
 #endif /* _HAPROXY_CONNECTION_T_H */
 
 /*
@@ -77,7 +77,7 @@ int conn_recv_socks4_proxy_response(struct connection *conn);
 /* If we delayed the mux creation because we were waiting for the handshake, do it now */
 int conn_create_mux(struct connection *conn);
 
-__decl_thread(extern HA_SPINLOCK_T toremove_lock[MAX_THREADS]);
+extern struct idle_conns idle_conns[MAX_THREADS];
 
 /* returns true is the transport layer is ready */
 static inline int conn_xprt_ready(const struct connection *conn)
@@ -494,9 +494,9 @@ static inline void conn_free(struct connection *conn)
         }
 
         conn_force_unsubscribe(conn);
-        HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
         MT_LIST_DEL((struct mt_list *)&conn->list);
-        HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
         pool_free(pool_head_connection, conn);
 }
 
@@ -38,8 +38,6 @@
 __decl_thread(extern HA_SPINLOCK_T idle_conn_srv_lock);
 extern struct eb_root idle_conn_srv;
 extern struct task *idle_conn_task;
-extern struct task *idle_conn_cleanup[MAX_THREADS];
-extern struct mt_list toremove_connections[MAX_THREADS];
 extern struct dict server_name_dict;
 
 int srv_downtime(const struct server *s);
@@ -1083,9 +1083,9 @@ static struct connection *conn_backend_get(struct server *srv, int is_safe)
         * thread may be trying to migrate that connection, and we don't want
         * to end up with two threads using the same connection.
         */
-        HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
         conn = MT_LIST_POP(&mt_list[tid], struct connection *, list);
-        HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 
         /* If we found a connection in our own list, and we don't have to
         * steal one from another thread, then we're done.
@@ -1099,7 +1099,7 @@ static struct connection *conn_backend_get(struct server *srv, int is_safe)
         for (i = tid; !found && (i = ((i + 1 == global.nbthread) ? 0 : i + 1)) != tid;) {
                 struct mt_list *elt1, elt2;
 
-                HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[i]);
+                HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
                 mt_list_for_each_entry_safe(conn, &mt_list[i], list, elt1, elt2) {
                         if (conn->mux->takeover && conn->mux->takeover(conn) == 0) {
                                 MT_LIST_DEL_SAFE(elt1);
@@ -1107,7 +1107,7 @@ static struct connection *conn_backend_get(struct server *srv, int is_safe)
                                 break;
                         }
                 }
-                HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[i]);
+                HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
         }
 
         if (!found)
@@ -1279,7 +1279,7 @@ int connect_server(struct stream *s)
                 // see it possibly larger.
                 ALREADY_CHECKED(i);
 
-                HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+                HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
                 tokill_conn = MT_LIST_POP(&srv->idle_conns[i],
                     struct connection *, list);
                 if (!tokill_conn)
@@ -1288,13 +1288,13 @@ int connect_server(struct stream *s)
                 if (tokill_conn) {
                         /* We got one, put it into the concerned thread's to kill list, and wake it's kill task */
 
-                        MT_LIST_ADDQ(&toremove_connections[i],
+                        MT_LIST_ADDQ(&idle_conns[i].toremove_conns,
                             (struct mt_list *)&tokill_conn->list);
-                        task_wakeup(idle_conn_cleanup[i], TASK_WOKEN_OTHER);
-                        HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+                        task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
+                        HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
                         break;
                 }
-                HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+                HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
         }
 }
 
@@ -3566,12 +3566,12 @@ int check_config_validity()
                 idle_conn_task->context = NULL;
 
                 for (i = 0; i < global.nbthread; i++) {
-                        idle_conn_cleanup[i] = task_new(1UL << i);
-                        if (!idle_conn_cleanup[i])
+                        idle_conns[i].cleanup_task = task_new(1UL << i);
+                        if (!idle_conns[i].cleanup_task)
                                 goto err;
-                        idle_conn_cleanup[i]->process = srv_cleanup_toremove_connections;
-                        idle_conn_cleanup[i]->context = NULL;
-                        MT_LIST_INIT(&toremove_connections[i]);
+                        idle_conns[i].cleanup_task->process = srv_cleanup_toremove_connections;
+                        idle_conns[i].cleanup_task->context = NULL;
+                        MT_LIST_INIT(&idle_conns[i].toremove_conns);
                 }
         }
 
@@ -32,6 +32,7 @@ DECLARE_POOL(pool_head_connstream, "conn_stream", sizeof(struct conn_stream));
 DECLARE_POOL(pool_head_sockaddr, "sockaddr", sizeof(struct sockaddr_storage));
 DECLARE_POOL(pool_head_authority, "authority", PP2_AUTHORITY_MAX);
 
+struct idle_conns idle_conns[MAX_THREADS] = { };
 struct xprt_ops *registered_xprt[XPRT_ENTRIES] = { NULL, };
 
 /* List head of all known muxes for PROTO */
@@ -2918,13 +2918,13 @@ static struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
         int ret = 0;
 
 
-        HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
         if (tl->context == NULL) {
                 /* The connection has been taken over by another thread,
                  * we're no longer responsible for it, so just free the
                  * tasklet, and do nothing.
                  */
-                HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+                HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
                 tasklet_free(tl);
                 return NULL;
 
@@ -2938,7 +2938,7 @@ static struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
         if (conn_in_list)
                 MT_LIST_DEL(&conn->list);
 
-        HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 
         if (!(fconn->wait_event.events & SUB_RETRY_SEND))
                 ret = fcgi_send(fconn);
@@ -3092,7 +3092,7 @@ static struct task *fcgi_timeout_task(struct task *t, void *context, unsigned sh
         /* We're about to destroy the connection, so make sure nobody attempts
          * to steal it from us.
          */
-        HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 
         if (fconn && fconn->conn->flags & CO_FL_LIST_MASK)
                 MT_LIST_DEL(&fconn->conn->list);
@@ -3103,7 +3103,7 @@ static struct task *fcgi_timeout_task(struct task *t, void *context, unsigned sh
         if (!t->context)
                 fconn = NULL;
 
-        HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 
         task_destroy(t);
 
src/mux_h1.c (10 changed lines)
@@ -2219,13 +2219,13 @@ static struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
         int ret = 0;
 
 
-        HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
         if (tl->context == NULL) {
                 /* The connection has been taken over by another thread,
                  * we're no longer responsible for it, so just free the
                  * tasklet, and do nothing.
                  */
-                HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+                HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
                 tasklet_free(tl);
                 return NULL;
         }
@@ -2241,7 +2241,7 @@ static struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
         if (conn_in_list)
                 MT_LIST_DEL(&conn->list);
 
-        HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 
         if (!(h1c->wait_event.events & SUB_RETRY_SEND))
                 ret = h1_send(h1c);
@@ -2308,7 +2308,7 @@ static struct task *h1_timeout_task(struct task *t, void *context, unsigned shor
         /* We're about to destroy the connection, so make sure nobody attempts
          * to steal it from us.
          */
-        HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 
         if (h1c && h1c->conn->flags & CO_FL_LIST_MASK)
                 MT_LIST_DEL(&h1c->conn->list);
@@ -2319,7 +2319,7 @@ static struct task *h1_timeout_task(struct task *t, void *context, unsigned shor
         if (!t->context)
                 h1c = NULL;
 
-        HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 
         task_destroy(t);
 
src/mux_h2.c (22 changed lines)
@@ -3524,13 +3524,13 @@ static struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
         int ret = 0;
 
 
-        HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
         if (t->context == NULL) {
                 /* The connection has been taken over by another thread,
                  * we're no longer responsible for it, so just free the
                  * tasklet, and do nothing.
                  */
-                HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+                HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
                 tasklet_free(tl);
                 goto leave;
         }
@@ -3547,7 +3547,7 @@ static struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
         if (conn_in_list)
                 MT_LIST_DEL(&conn->list);
 
-        HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 
         if (!(h2c->wait_event.events & SUB_RETRY_SEND))
                 ret = h2_send(h2c);
@@ -3643,15 +3643,15 @@ static int h2_process(struct h2c *h2c)
                 }
 
                 /* connections in error must be removed from the idle lists */
-                HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+                HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
                 MT_LIST_DEL((struct mt_list *)&conn->list);
-                HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+                HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
         }
         else if (h2c->st0 == H2_CS_ERROR) {
                 /* connections in error must be removed from the idle lists */
-                HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+                HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
                 MT_LIST_DEL((struct mt_list *)&conn->list);
-                HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+                HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
         }
 
         if (!b_data(&h2c->dbuf))
@@ -3721,7 +3721,7 @@ static struct task *h2_timeout_task(struct task *t, void *context, unsigned shor
         /* We're about to destroy the connection, so make sure nobody attempts
          * to steal it from us.
          */
-        HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 
         if (h2c && h2c->conn->flags & CO_FL_LIST_MASK)
                 MT_LIST_DEL(&h2c->conn->list);
@@ -3732,7 +3732,7 @@ static struct task *h2_timeout_task(struct task *t, void *context, unsigned shor
         if (!t->context)
                 h2c = NULL;
 
-        HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 
         task_destroy(t);
 
@@ -3778,9 +3778,9 @@ static struct task *h2_timeout_task(struct task *t, void *context, unsigned shor
         }
 
         /* in any case this connection must not be considered idle anymore */
-        HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
         MT_LIST_DEL((struct mt_list *)&h2c->conn->list);
-        HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
+        HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].toremove_lock);
 
         /* either we can release everything now or it will be done later once
          * the last stream closes.
src/server.c (21 changed lines)
@@ -62,9 +62,6 @@ static struct srv_kw_list srv_keywords = {
 __decl_thread(HA_SPINLOCK_T idle_conn_srv_lock);
 struct eb_root idle_conn_srv = EB_ROOT;
 struct task *idle_conn_task = NULL;
-struct task *idle_conn_cleanup[MAX_THREADS] = { NULL };
-struct mt_list toremove_connections[MAX_THREADS];
-__decl_thread(HA_SPINLOCK_T toremove_lock[MAX_THREADS]);
 
 /* The server names dictionary */
 struct dict server_name_dict = {
@@ -5172,7 +5169,7 @@ struct task *srv_cleanup_toremove_connections(struct task *task, void *context,
 {
         struct connection *conn;
 
-        while ((conn = MT_LIST_POP(&toremove_connections[tid],
+        while ((conn = MT_LIST_POP(&idle_conns[tid].toremove_conns,
             struct connection *, list)) != NULL) {
                 conn->mux->destroy(conn->ctx);
         }
@@ -5193,7 +5190,7 @@ static void srv_cleanup_connections(struct server *srv)
         HA_SPIN_LOCK(OTHER_LOCK, &idle_conn_srv_lock);
         for (i = 0; i < global.nbthread; i++) {
                 did_remove = 0;
-                HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[i]);
+                HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
                 for (j = 0; j < srv->curr_idle_conns; j++) {
                         conn = MT_LIST_POP(&srv->idle_conns[i], struct connection *, list);
                         if (!conn)
@@ -5202,11 +5199,11 @@ static void srv_cleanup_connections(struct server *srv)
                         if (!conn)
                                 break;
                         did_remove = 1;
-                        MT_LIST_ADDQ(&toremove_connections[i], (struct mt_list *)&conn->list);
+                        MT_LIST_ADDQ(&idle_conns[i].toremove_conns, (struct mt_list *)&conn->list);
                 }
-                HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[i]);
+                HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
                 if (did_remove)
-                        task_wakeup(idle_conn_cleanup[i], TASK_WOKEN_OTHER);
+                        task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
         }
         HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conn_srv_lock);
 }
@@ -5265,7 +5262,7 @@ struct task *srv_cleanup_idle_connections(struct task *task, void *context, unsi
 
                 max_conn = (exceed_conns * srv->curr_idle_thr[i]) /
                            curr_idle + 1;
-                HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[i]);
+                HA_SPIN_LOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
                 for (j = 0; j < max_conn; j++) {
                         struct connection *conn = MT_LIST_POP(&srv->idle_conns[i], struct connection *, list);
                         if (!conn)
@@ -5274,13 +5271,13 @@ struct task *srv_cleanup_idle_connections(struct task *task, void *context, unsi
                         if (!conn)
                                 break;
                         did_remove = 1;
-                        MT_LIST_ADDQ(&toremove_connections[i], (struct mt_list *)&conn->list);
+                        MT_LIST_ADDQ(&idle_conns[i].toremove_conns, (struct mt_list *)&conn->list);
                 }
-                HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[i]);
+                HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[i].toremove_lock);
                 if (did_remove && max_conn < srv->curr_idle_thr[i])
                         srv_is_empty = 0;
                 if (did_remove)
-                        task_wakeup(idle_conn_cleanup[i], TASK_WOKEN_OTHER);
+                        task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
         }
  remove:
         eb32_delete(&srv->idle_node);