Mirror of https://git.haproxy.org/git/haproxy.git/ (synced 2025-08-06 23:27:04 +02:00)
MEDIUM: connection: replace idle conn lists by eb trees
The server idle/safe/available connection lists are replaced with ebmb-trees. These are used to store backend connections, with the new connection hash field as the key. The hash is an 8-byte field that reflects specific connection parameters. This is preliminary work to be able to reuse connections with SNI, an explicit src/dst address, or the PROXY protocol.
commit f232cb3e9b (parent 5c7086f6b0)
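At this stage the hash is always zero (conn->hash is initialized to 0 and connect_server() passes a constant 0); computing a real value from the reuse parameters is left to follow-up work. Purely as a hypothetical illustration of what folding "specific connection parameters" into a 64-bit key could look like (the function name and the FNV-1a folding below are not part of HAProxy):

/* Hypothetical sketch only: fold reuse-relevant parameters into a 64-bit key. */
#include <stdint.h>
#include <stddef.h>

static uint64_t conn_params_hash(const char *sni, const void *dst_addr, size_t dst_len)
{
        /* FNV-1a, used here only for illustration */
        uint64_t h = 0xcbf29ce484222325ULL;
        const unsigned char *p;
        size_t i;

        for (p = (const unsigned char *)sni; sni && *p; p++)
                h = (h ^ *p) * 0x100000001b3ULL;
        for (p = dst_addr, i = 0; i < dst_len; i++)
                h = (h ^ p[i]) * 0x100000001b3ULL;
        return h;
}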
@@ -28,6 +28,7 @@
 #include <netinet/ip.h>
 #include <netinet/ip6.h>

+#include <import/ebmbtree.h>
 #include <import/ist.h>

 #include <haproxy/api-t.h>
@@ -488,7 +489,7 @@ struct connection {

 /* second cache line */
 struct wait_event *subs; /* Task to wake when awaited events are ready */
 struct mt_list list; /* attach point to various connection lists (idle, ...) */
 struct mt_list toremove_list; /* list for connection to clean up */
 struct list session_list; /* List of attached connections to a session */
 union conn_handle handle; /* connection handle at the socket layer */
 const struct netns_entry *proxy_netns;
@@ -501,6 +502,9 @@ struct connection {
 uint8_t proxy_authority_len; /* Length of authority TLV received via PROXYv2 */
 struct ist proxy_unique_id; /* Value of the unique ID TLV received via PROXYv2 */
 struct quic_conn *qc; /* Only present if this connection is a QUIC one */

+struct ebmb_node hash_node;
+int64_t hash;
 };

 struct mux_proto_list {
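The two new fields are meant to be read as a pair: struct ebmb_node keeps its key in a trailing flexible array (key[]), so by declaring the 8-byte hash immediately after hash_node, the hash bytes are what ebmb_insert() and ebmb_lookup() end up comparing when they are passed sizeof(conn->hash) as the key length. A minimal sketch of that layout and of the insert/lookup calls, assuming the ebtree API from import/ebmbtree.h (the struct and function names are illustrative, not HAProxy code):

#include <stdint.h>
#include <import/ebmbtree.h>

struct keyed_conn {
        struct ebmb_node hash_node; /* ebmb_node ends with "unsigned char key[0]"... */
        int64_t hash;               /* ...so this field provides the 8 key bytes */
};

static struct eb_root idle_tree = EB_ROOT;

static void keyed_conn_store(struct keyed_conn *kc, int64_t hash)
{
        kc->hash = hash; /* the key must be written before inserting */
        ebmb_insert(&idle_tree, &kc->hash_node, sizeof(kc->hash));
}

static struct keyed_conn *keyed_conn_find(int64_t hash)
{
        /* keys are compared as a memory block of sizeof(hash) bytes */
        struct ebmb_node *node = ebmb_lookup(&idle_tree, &hash, sizeof(hash));
        return node ? ebmb_entry(node, struct keyed_conn, hash_node) : NULL;
}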
@@ -349,13 +349,15 @@ static inline void conn_init(struct connection *conn, void *target)
 conn->target = target;
 conn->destroy_cb = NULL;
 conn->proxy_netns = NULL;
 MT_LIST_INIT(&conn->list);
 MT_LIST_INIT(&conn->toremove_list);
 LIST_INIT(&conn->session_list);
 conn->subs = NULL;
 conn->src = NULL;
 conn->dst = NULL;
 conn->proxy_authority = NULL;
 conn->proxy_unique_id = IST_NULL;
+memset(&conn->hash_node, 0, sizeof(conn->hash_node));
+conn->hash = 0;
 }

 /* sets <owner> as the connection's owner */
@@ -373,7 +375,7 @@ static inline void conn_set_private(struct connection *conn)
 conn->flags |= CO_FL_PRIVATE;

 if (obj_type(conn->target) == OBJ_TYPE_SERVER)
-srv_del_conn_from_list(__objt_server(conn->target), conn);
+srv_release_conn(__objt_server(conn->target), conn);
 }
 }

@@ -499,7 +501,7 @@ static inline void conn_free(struct connection *conn)
 }
 else if (!(conn->flags & CO_FL_PRIVATE)) {
 if (obj_type(conn->target) == OBJ_TYPE_SERVER)
-srv_del_conn_from_list(__objt_server(conn->target), conn);
+srv_release_conn(__objt_server(conn->target), conn);
 }

 sockaddr_free(&conn->src);
@@ -225,9 +225,9 @@ struct server {

 struct eb_root pendconns; /* pending connections */
 struct list actconns; /* active connections */
-struct mt_list *idle_conns; /* shareable idle connections*/
-struct mt_list *safe_conns; /* safe idle connections */
-struct list *available_conns; /* Connection in used, but with still new streams available */
+struct eb_root *idle_conns_tree; /* shareable idle connections*/
+struct eb_root *safe_conns_tree; /* safe idle connections */
+struct eb_root *available_conns_tree; /* Connection in used, but with still new streams available */
 unsigned int pool_purge_delay; /* Delay before starting to purge the idle conns pool */
 unsigned int low_idle_conns; /* min idle connection count to start picking from other threads */
 unsigned int max_idle_conns; /* Max number of connection allowed in the orphan connections list */
@@ -254,7 +254,14 @@ static inline void srv_use_conn(struct server *srv, struct connection *conn)
 srv->est_need_conns = srv->curr_used_conns;
 }

-static inline void srv_del_conn_from_list(struct server *srv, struct connection *conn)
+static inline void conn_delete_from_tree(struct ebmb_node *node)
+{
+ebmb_delete(node);
+memset(node, 0, sizeof(*node));
+}
+
+/* removes an idle conn after updating the server idle conns counters */
+static inline void srv_release_conn(struct server *srv, struct connection *conn)
 {
 if (conn->flags & CO_FL_LIST_MASK) {
 /* The connection is currently in the server's idle list, so tell it
@@ -271,9 +278,9 @@ static inline void srv_del_conn_from_list(struct server *srv, struct connection
 _HA_ATOMIC_SUB(&srv->curr_used_conns, 1);
 }

-/* Remove the connection from any list (safe, idle or available) */
+/* Remove the connection from any tree (safe, idle or available) */
 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-MT_LIST_DEL((struct mt_list *)&conn->list);
+conn_delete_from_tree(&conn->hash_node);
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 }
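conn_delete_from_tree() zeroes the node right after ebmb_delete(); a visible consequence, relied upon by the mux detach paths further down (they test hash_node.node.leaf_p), is that a NULL leaf_p can be read as "this connection is not indexed in any tree". A hedged sketch of that check (conn_in_idle_tree() is a hypothetical helper, not part of this patch):

/* Hypothetical helper: a detached (or never inserted) ebmb node has a NULL
 * leaf_p once conn_delete_from_tree() has memset() it, so this test tells
 * whether the connection currently sits in one of the server trees.
 */
static inline int conn_in_idle_tree(const struct connection *conn)
{
        return conn->hash_node.node.leaf_p != NULL;
}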
@@ -293,8 +300,8 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
 ((srv->proxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
 ha_used_fds < global.tune.pool_high_count &&
 (srv->max_idle_conns == -1 || srv->max_idle_conns > srv->curr_idle_conns) &&
-((MT_LIST_ISEMPTY(&srv->safe_conns[tid]) &&
-(is_safe || MT_LIST_ISEMPTY(&srv->idle_conns[tid]))) ||
+((eb_is_empty(&srv->safe_conns_tree[tid]) &&
+(is_safe || eb_is_empty(&srv->idle_conns_tree[tid]))) ||
 (ha_used_fds < global.tune.pool_low_count &&
 (srv->curr_used_conns + srv->curr_idle_conns <=
 MAX(srv->curr_used_conns, srv->est_need_conns) + srv->low_idle_conns))) &&
@@ -309,15 +316,15 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
 _HA_ATOMIC_SUB(&srv->curr_used_conns, 1);

 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-MT_LIST_DEL(&conn->list);
+conn_delete_from_tree(&conn->hash_node);

 if (is_safe) {
 conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
-MT_LIST_ADDQ(&srv->safe_conns[tid], (struct mt_list *)&conn->list);
+ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 _HA_ATOMIC_ADD(&srv->curr_safe_nb, 1);
 } else {
 conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST;
-MT_LIST_ADDQ(&srv->idle_conns[tid], (struct mt_list *)&conn->list);
+ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 _HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
 }
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
@@ -344,6 +351,36 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
 return 0;
 }

+/* retrieve a connection from its <hash> in <tree>
+ * returns NULL if no connection found
+ */
+static inline struct connection *srv_lookup_conn(struct eb_root *tree, uint64_t hash)
+{
+struct ebmb_node *node = NULL;
+struct connection *conn = NULL;
+
+node = ebmb_lookup(tree, &hash, sizeof(conn->hash));
+if (node)
+conn = ebmb_entry(node, struct connection, hash_node);
+
+return conn;
+}
+
+/* retrieve the next connection sharing the same hash as <conn>
+ * returns NULL if no connection found
+ */
+static inline struct connection *srv_lookup_conn_next(struct connection *conn)
+{
+struct ebmb_node *next_node = NULL;
+struct connection *next_conn = NULL;
+
+next_node = ebmb_next_dup(&conn->hash_node);
+if (next_node)
+next_conn = ebmb_entry(next_node, struct connection, hash_node);
+
+return next_conn;
+}
+
 #endif /* _HAPROXY_SERVER_H */

 /*
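Several connections may share the same hash, so the trees store duplicates and the two helpers are meant to be chained: srv_lookup_conn() returns the first match and srv_lookup_conn_next() walks the remaining duplicates via ebmb_next_dup(). A sketch of that pattern, close to what conn_backend_get() does below (conn_is_reusable() is a hypothetical predicate; the real code calls conn->mux->takeover() and runs under the per-thread idle_conns lock):

/* Sketch only: pick a reusable idle connection with key <hash> on thread <thr>. */
static struct connection *pick_idle_conn(struct server *srv, int thr, int64_t hash)
{
        struct connection *conn = srv_lookup_conn(&srv->idle_conns_tree[thr], hash);

        while (conn) {
                if (conn_is_reusable(conn)) {                /* hypothetical check */
                        conn_delete_from_tree(&conn->hash_node); /* claim it */
                        return conn;
                }
                conn = srv_lookup_conn_next(conn);           /* next dup, same hash */
        }
        return NULL;
}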
@@ -1100,10 +1100,10 @@ static void assign_tproxy_address(struct stream *s)
 * (safe or idle connections). The <is_safe> argument means what type of
 * connection the caller wants.
 */
-static struct connection *conn_backend_get(struct stream *s, struct server *srv, int is_safe)
+static struct connection *conn_backend_get(struct stream *s, struct server *srv, int is_safe, int64_t hash)
 {
-struct mt_list *mt_list = is_safe ? srv->safe_conns : srv->idle_conns;
-struct connection *conn;
+struct eb_root *tree = is_safe ? srv->safe_conns_tree : srv->idle_conns_tree;
+struct connection *conn = NULL;
 int i; // thread number
 int found = 0;
 int stop;
@@ -1114,16 +1114,19 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
 */
 i = tid;
 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-conn = MT_LIST_POP(&mt_list[tid], struct connection *, list);
+conn = srv_lookup_conn(&tree[tid], hash);
+if (conn)
+conn_delete_from_tree(&conn->hash_node);

 /* If we failed to pick a connection from the idle list, let's try again with
 * the safe list.
 */
 if (!conn && !is_safe && srv->curr_safe_nb > 0) {
-conn = MT_LIST_POP(&srv->safe_conns[tid], struct connection *, list);
+conn = srv_lookup_conn(&srv->safe_conns_tree[tid], hash);
 if (conn) {
+conn_delete_from_tree(&conn->hash_node);
 is_safe = 1;
-mt_list = srv->safe_conns;
+tree = srv->safe_conns_tree;
 }
 }
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
@@ -1156,33 +1159,35 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,

 i = stop;
 do {
-struct mt_list *elt1, elt2;

 if (!srv->curr_idle_thr[i] || i == tid)
 continue;

 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
-mt_list_for_each_entry_safe(conn, &mt_list[i], list, elt1, elt2) {
+conn = srv_lookup_conn(&tree[i], hash);
+while (conn) {
 if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
-MT_LIST_DEL_SAFE(elt1);
+conn_delete_from_tree(&conn->hash_node);
 _HA_ATOMIC_ADD(&activity[tid].fd_takeover, 1);
 found = 1;

 break;
 }
+
+conn = srv_lookup_conn_next(conn);
 }

 if (!found && !is_safe && srv->curr_safe_nb > 0) {
-mt_list_for_each_entry_safe(conn, &srv->safe_conns[i], list, elt1, elt2) {
+conn = srv_lookup_conn(&srv->safe_conns_tree[i], hash);
+while (conn) {
 if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
-MT_LIST_DEL_SAFE(elt1);
+conn_delete_from_tree(&conn->hash_node);
 _HA_ATOMIC_ADD(&activity[tid].fd_takeover, 1);
 found = 1;
 is_safe = 1;
-mt_list = srv->safe_conns;
+tree = srv->safe_conns_tree;
 break;
 }
+
+conn = srv_lookup_conn_next(conn);
 }
 }
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
@@ -1210,7 +1215,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
 session_add_conn(s->sess, conn, conn->target);
 }
 else {
-LIST_ADDQ(&srv->available_conns[tid], mt_list_to_list(&conn->list));
+ebmb_insert(&srv->available_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 }
 }
 return conn;
@@ -1241,7 +1246,7 @@ int connect_server(struct stream *s)
 int reuse = 0;
 int init_mux = 0;
 int err;

+int64_t hash = 0;

 /* This will catch some corner cases such as lying connections resulting from
 * retries or connect timeouts but will rarely trigger.
@@ -1251,7 +1256,7 @@
 srv = objt_server(s->target);

 /* do not reuse if mode is http or if avail list is not allocated */
-if ((s->be->mode != PR_MODE_HTTP) || (srv && !srv->available_conns))
+if ((s->be->mode != PR_MODE_HTTP) || (srv && !srv->available_conns_tree))
 goto skip_reuse;

 /* first, search for a matching connection in the session's idle conns */
@@ -1278,9 +1283,10 @@
 * Idle conns are necessarily looked up on the same thread so
 * that there is no concurrency issues.
 */
-if (!LIST_ISEMPTY(&srv->available_conns[tid])) {
-srv_conn = LIST_ELEM(srv->available_conns[tid].n, struct connection *, list);
-reuse = 1;
+if (!eb_is_empty(&srv->available_conns_tree[tid])) {
+srv_conn = srv_lookup_conn(&srv->available_conns_tree[tid], hash);
+if (srv_conn)
+reuse = 1;
 }
 /* if no available connections found, search for an idle/safe */
 else if (srv->max_idle_conns && srv->curr_idle_conns > 0) {
@@ -1292,15 +1298,15 @@
 * search for an idle then safe conn */
 if (not_first_req) {
 if (idle || safe)
-srv_conn = conn_backend_get(s, srv, 0);
+srv_conn = conn_backend_get(s, srv, 0, hash);
 }
 /* first column of the tables above */
 else if (reuse_mode >= PR_O_REUSE_AGGR) {
 /* search for a safe conn */
 if (safe)
-srv_conn = conn_backend_get(s, srv, 1);
+srv_conn = conn_backend_get(s, srv, 1, hash);
 else if (reuse_mode == PR_O_REUSE_ALWS && idle)
-srv_conn = conn_backend_get(s, srv, 0);
+srv_conn = conn_backend_get(s, srv, 0, hash);
 }

 if (srv_conn)
@@ -1328,18 +1334,21 @@
 }
 }

-if (ha_used_fds > global.tune.pool_high_count && srv && srv->idle_conns) {
-struct connection *tokill_conn;
+if (ha_used_fds > global.tune.pool_high_count && srv && srv->idle_conns_tree) {
+struct connection *tokill_conn = NULL;
+struct ebmb_node *node = NULL;

 /* We can't reuse a connection, and e have more FDs than deemd
 * acceptable, attempt to kill an idling connection
 */
 /* First, try from our own idle list */
 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-tokill_conn = MT_LIST_POP(&srv->idle_conns[tid],
-struct connection *, list);
-if (tokill_conn)
+node = ebmb_first(&srv->idle_conns_tree[tid]);
+if (node) {
+tokill_conn = ebmb_entry(node, struct connection, hash_node);
+ebmb_delete(node);
 tokill_conn->mux->destroy(tokill_conn->ctx);
+}
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

 /* If not, iterate over other thread's idling pool, and try to grab one */
@@ -1354,18 +1363,26 @@
 ALREADY_CHECKED(i);

 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
-tokill_conn = MT_LIST_POP(&srv->idle_conns[i],
-struct connection *, list);
-if (!tokill_conn)
-tokill_conn = MT_LIST_POP(&srv->safe_conns[i],
-struct connection *, list);
+node = ebmb_first(&srv->idle_conns_tree[i]);
+if (node) {
+tokill_conn = ebmb_entry(node, struct connection, hash_node);
+ebmb_delete(node);
+}
+
+if (!tokill_conn) {
+node = ebmb_first(&srv->safe_conns_tree[i]);
+if (node) {
+tokill_conn = ebmb_entry(node, struct connection, hash_node);
+ebmb_delete(node);
+}
+}
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);

 if (tokill_conn) {
 /* We got one, put it into the concerned thread's to kill list, and wake it's kill task */

 MT_LIST_ADDQ(&idle_conns[i].toremove_conns,
-(struct mt_list *)&tokill_conn->list);
+(struct mt_list *)&tokill_conn->toremove_list);
 task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
 break;
 }
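When ha_used_fds exceeds global.tune.pool_high_count no particular victim is needed, so the code simply takes the first node of the tree: ebmb_first() returns the leftmost entry, which is unlinked and then either destroyed locally or queued on the owning thread's toremove_conns list. A condensed sketch of that victim selection (pick_victim() is hypothetical; locking is omitted, the real code holds idle_conns[...].idle_conns_lock):

/* Sketch: pick any idle connection of server <srv> on thread <thr> as a victim. */
static struct connection *pick_victim(struct server *srv, int thr)
{
        struct ebmb_node *node = ebmb_first(&srv->idle_conns_tree[thr]);

        if (!node)
                node = ebmb_first(&srv->safe_conns_tree[thr]);
        if (!node)
                return NULL;

        ebmb_delete(node); /* unlink before handing it to the cleanup path */
        return ebmb_entry(node, struct connection, hash_node);
}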
@@ -1380,7 +1397,7 @@

 if (avail <= 1) {
 /* No more streams available, remove it from the list */
-MT_LIST_DEL(&srv_conn->list);
+conn_delete_from_tree(&srv_conn->hash_node);
 }

 if (avail >= 1) {
@@ -1561,7 +1578,7 @@
 if (srv && reuse_mode == PR_O_REUSE_ALWS &&
 !(srv_conn->flags & CO_FL_PRIVATE) &&
 srv_conn->mux->avail_streams(srv_conn) > 0) {
-LIST_ADDQ(&srv->available_conns[tid], mt_list_to_list(&srv_conn->list));
+ebmb_insert(&srv->available_conns_tree[tid], &srv_conn->hash_node, sizeof(srv_conn->hash));
 }
 else if (srv_conn->flags & CO_FL_PRIVATE ||
 (reuse_mode == PR_O_REUSE_SAFE &&
@@ -3548,17 +3548,17 @@ int check_config_validity()
 for (newsrv = curproxy->srv; newsrv; newsrv = newsrv->next) {
 int i;

-newsrv->available_conns = calloc(global.nbthread, sizeof(*newsrv->available_conns));
+newsrv->available_conns_tree = calloc(global.nbthread, sizeof(*newsrv->available_conns_tree));

-if (!newsrv->available_conns) {
+if (!newsrv->available_conns_tree) {
 ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
 newsrv->conf.file, newsrv->conf.line, newsrv->id);
 cfgerr++;
 continue;
 }

 for (i = 0; i < global.nbthread; i++)
-LIST_INIT(&newsrv->available_conns[i]);
+newsrv->available_conns_tree[i] = EB_ROOT;

 if (newsrv->max_idle_conns != 0) {
 if (idle_conn_task == NULL) {
@@ -3580,27 +3580,27 @@
 }
 }

-newsrv->idle_conns = calloc((unsigned short)global.nbthread, sizeof(*newsrv->idle_conns));
-if (!newsrv->idle_conns) {
+newsrv->idle_conns_tree = calloc((unsigned short)global.nbthread, sizeof(*newsrv->idle_conns_tree));
+if (!newsrv->idle_conns_tree) {
 ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
 newsrv->conf.file, newsrv->conf.line, newsrv->id);
 cfgerr++;
 continue;
 }

 for (i = 0; i < global.nbthread; i++)
-MT_LIST_INIT(&newsrv->idle_conns[i]);
+newsrv->idle_conns_tree[i] = EB_ROOT;

-newsrv->safe_conns = calloc(global.nbthread, sizeof(*newsrv->safe_conns));
-if (!newsrv->safe_conns) {
+newsrv->safe_conns_tree = calloc(global.nbthread, sizeof(*newsrv->safe_conns_tree));
+if (!newsrv->safe_conns_tree) {
 ha_alert("parsing [%s:%d] : failed to allocate idle connections for server '%s'.\n",
 newsrv->conf.file, newsrv->conf.line, newsrv->id);
 cfgerr++;
 continue;
 }

 for (i = 0; i < global.nbthread; i++)
-MT_LIST_INIT(&newsrv->safe_conns[i]);
+newsrv->safe_conns_tree[i] = EB_ROOT;

 newsrv->curr_idle_thr = calloc(global.nbthread, sizeof(*newsrv->curr_idle_thr));
 if (!newsrv->curr_idle_thr)
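As with the previous mt_list arrays, each tree pointer is allocated as one eb_root per thread and indexed by thread id, every slot being reset to EB_ROOT (an empty tree) before use. A small sketch of that allocation pattern (error handling reduced to a NULL check):

/* Sketch: allocate and reset one idle-connection tree per thread. */
struct eb_root *idle_trees = calloc(global.nbthread, sizeof(*idle_trees));
int thr;

if (idle_trees) {
        for (thr = 0; thr < global.nbthread; thr++)
                idle_trees[thr] = EB_ROOT; /* empty tree, ready for ebmb_insert() */
}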
@@ -69,7 +69,7 @@ int conn_create_mux(struct connection *conn)
 */
 if (srv && ((srv->proxy->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) &&
 !(conn->flags & CO_FL_PRIVATE) && conn->mux->avail_streams(conn) > 0)
-LIST_ADDQ(&srv->available_conns[tid], mt_list_to_list(&conn->list));
+ebmb_insert(&srv->available_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 else if (conn->flags & CO_FL_PRIVATE) {
 /* If it fail now, the same will be done in mux->detach() callback */
 session_add_conn(sess, conn, conn->target);
@@ -2658,9 +2658,9 @@ void deinit(void)
 free(s->hostname);
 free(s->hostname_dn);
 free((char*)s->conf.file);
-free(s->idle_conns);
-free(s->safe_conns);
-free(s->available_conns);
+free(s->idle_conns_tree);
+free(s->safe_conns_tree);
+free(s->available_conns_tree);
 free(s->curr_idle_thr);
 free(s->resolvers_id);
 free(s->addr_node.key);
@@ -9177,8 +9177,8 @@ void hlua_init(void) {
 socket_tcp.obj_type = OBJ_TYPE_SERVER;
 LIST_INIT(&socket_tcp.actconns);
 socket_tcp.pendconns = EB_ROOT;
-socket_tcp.idle_conns = NULL;
-socket_tcp.safe_conns = NULL;
+socket_tcp.idle_conns_tree = NULL;
+socket_tcp.safe_conns_tree = NULL;
 socket_tcp.next_state = SRV_ST_RUNNING; /* early server setup */
 socket_tcp.last_change = 0;
 socket_tcp.id = "LUA-TCP-CONN";
@@ -9222,8 +9222,8 @@ void hlua_init(void) {
 socket_ssl.obj_type = OBJ_TYPE_SERVER;
 LIST_INIT(&socket_ssl.actconns);
 socket_ssl.pendconns = EB_ROOT;
-socket_ssl.idle_conns = NULL;
-socket_ssl.safe_conns = NULL;
+socket_ssl.idle_conns_tree = NULL;
+socket_ssl.safe_conns_tree = NULL;
 socket_ssl.next_state = SRV_ST_RUNNING; /* early server setup */
 socket_ssl.last_change = 0;
 socket_ssl.id = "LUA-SSL-CONN";
@@ -3002,7 +3002,7 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)

 conn_in_list = conn->flags & CO_FL_LIST_MASK;
 if (conn_in_list)
-MT_LIST_DEL(&conn->list);
+conn_delete_from_tree(&conn->hash_node);

 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

@@ -3023,9 +3023,9 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)

 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 if (conn_in_list == CO_FL_SAFE_LIST)
-MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 else
-MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 }
 return NULL;
@@ -3176,7 +3176,7 @@ struct task *fcgi_timeout_task(struct task *t, void *context, unsigned short sta
 * to steal it from us.
 */
 if (fconn->conn->flags & CO_FL_LIST_MASK)
-MT_LIST_DEL(&fconn->conn->list);
+conn_delete_from_tree(&fconn->conn->hash_node);

 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 }
@@ -3619,10 +3619,12 @@ static void fcgi_detach(struct conn_stream *cs)
 TRACE_DEVEL("reusable idle connection", FCGI_EV_STRM_END, fconn->conn);
 return;
 }
-else if (MT_LIST_ISEMPTY(&fconn->conn->list) &&
+else if (!fconn->conn->hash_node.node.leaf_p &&
 fcgi_avail_streams(fconn->conn) > 0 && objt_server(fconn->conn->target) &&
 !LIST_ADDED(&fconn->conn->session_list)) {
-LIST_ADD(&__objt_server(fconn->conn->target)->available_conns[tid], mt_list_to_list(&fconn->conn->list));
+ebmb_insert(&__objt_server(fconn->conn->target)->available_conns_tree[tid],
+&fconn->conn->hash_node,
+sizeof(fconn->conn->hash));
 }
 }
 }
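The mux I/O callbacks all apply the same two-step pattern: before handling events the connection is pulled out of its idle/safe tree under the idle_conns lock so that no other thread can take it over mid-I/O, and once the events have been processed it is re-inserted into whichever tree its CO_FL_* flag designates. Stitching the two hunks above together, the shape is roughly:

HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
conn_in_list = conn->flags & CO_FL_LIST_MASK;
if (conn_in_list)
        conn_delete_from_tree(&conn->hash_node); /* hide it from other threads */
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

/* ... run the mux I/O handler on the connection ... */

if (conn_in_list) {
        struct server *srv = objt_server(conn->target);

        HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
        if (conn_in_list == CO_FL_SAFE_LIST)
                ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
        else
                ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
        HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}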
@@ -2828,7 +2828,7 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
 */
 conn_in_list = conn->flags & CO_FL_LIST_MASK;
 if (conn_in_list)
-MT_LIST_DEL(&conn->list);
+conn_delete_from_tree(&conn->hash_node);

 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

@@ -2848,9 +2848,9 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)

 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 if (conn_in_list == CO_FL_SAFE_LIST)
-MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 else
-MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 }
 return NULL;
@@ -2946,7 +2946,7 @@ struct task *h1_timeout_task(struct task *t, void *context, unsigned short state
 * to steal it from us.
 */
 if (h1c->conn->flags & CO_FL_LIST_MASK)
-MT_LIST_DEL(&h1c->conn->list);
+conn_delete_from_tree(&h1c->conn->hash_node);

 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 }
src/mux_h2.c
@@ -3803,7 +3803,7 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
 * to use it while we handle the I/O events
 */
 if (conn_in_list)
-MT_LIST_DEL(&conn->list);
+conn_delete_from_tree(&conn->hash_node);

 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

@@ -3824,9 +3824,9 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)

 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 if (conn_in_list == CO_FL_SAFE_LIST)
-MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 else
-MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 }

@@ -3905,13 +3905,13 @@ static int h2_process(struct h2c *h2c)

 /* connections in error must be removed from the idle lists */
 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-MT_LIST_DEL((struct mt_list *)&conn->list);
+conn_delete_from_tree(&conn->hash_node);
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 }
 else if (h2c->st0 == H2_CS_ERROR) {
 /* connections in error must be removed from the idle lists */
 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-MT_LIST_DEL((struct mt_list *)&conn->list);
+conn_delete_from_tree(&conn->hash_node);
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 }

@@ -3999,7 +3999,7 @@ struct task *h2_timeout_task(struct task *t, void *context, unsigned short state
 * to steal it from us.
 */
 if (h2c->conn->flags & CO_FL_LIST_MASK)
-MT_LIST_DEL(&h2c->conn->list);
+conn_delete_from_tree(&h2c->conn->hash_node);

 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 }
@@ -4050,7 +4050,7 @@ struct task *h2_timeout_task(struct task *t, void *context, unsigned short state

 /* in any case this connection must not be considered idle anymore */
 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-MT_LIST_DEL((struct mt_list *)&h2c->conn->list);
+conn_delete_from_tree(&h2c->conn->hash_node);
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

 /* either we can release everything now or it will be done later once
@@ -4247,10 +4247,12 @@ static void h2_detach(struct conn_stream *cs)
 return;

 }
-else if (MT_LIST_ISEMPTY(&h2c->conn->list) &&
+else if (!h2c->conn->hash_node.node.leaf_p &&
 h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target) &&
 !LIST_ADDED(&h2c->conn->session_list)) {
-LIST_ADD(&__objt_server(h2c->conn->target)->available_conns[tid], mt_list_to_list(&h2c->conn->list));
+ebmb_insert(&__objt_server(h2c->conn->target)->available_conns_tree[tid],
+&h2c->conn->hash_node,
+sizeof(h2c->conn->hash));
 }
 }
 }
src/server.c
@@ -5267,31 +5267,37 @@ struct task *srv_cleanup_toremove_connections(struct task *task, void *context,
 struct connection *conn;

 while ((conn = MT_LIST_POP(&idle_conns[tid].toremove_conns,
-struct connection *, list)) != NULL) {
+struct connection *, toremove_list)) != NULL) {
 conn->mux->destroy(conn->ctx);
 }

 return task;
 }

-/* Move toremove_nb connections from idle_list to toremove_list, -1 means
+/* Move toremove_nb connections from idle_tree to toremove_list, -1 means
 * moving them all.
 * Returns the number of connections moved.
 *
 * Must be called with idle_conns_lock held.
 */
-static int srv_migrate_conns_to_remove(struct mt_list *idle_list, struct mt_list *toremove_list, int toremove_nb)
+static int srv_migrate_conns_to_remove(struct eb_root *idle_tree, struct mt_list *toremove_list, int toremove_nb)
 {
-struct mt_list *elt1, elt2;
+struct eb_node *node, *next;
 struct connection *conn;
 int i = 0;

-mt_list_for_each_entry_safe(conn, idle_list, list, elt1, elt2) {
+node = eb_first(idle_tree);
+while (node) {
+next = eb_next(node);
 if (toremove_nb != -1 && i >= toremove_nb)
 break;
-MT_LIST_DEL_SAFE_NOINIT(elt1);
-MT_LIST_ADDQ(toremove_list, &conn->list);
+
+conn = ebmb_entry(node, struct connection, hash_node);
+eb_delete(node);
+MT_LIST_ADDQ(toremove_list, &conn->toremove_list);
 i++;
+
+node = next;
 }
 return i;
 }
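The rewritten loop fetches the next node before deleting the current one, which is what keeps the walk valid while entries are unlinked, and the connection is then queued through its dedicated toremove_list element instead of the old idle-list attachment. Reduced to its skeleton, the delete-while-walking pattern over an ebtree looks like this (a sketch, not the full function):

/* Sketch: unlink every node of <tree> while walking it. eb_next() must be
 * taken before eb_delete(), since deletion invalidates the current node's
 * links into the tree.
 */
struct eb_node *node = eb_first(tree);

while (node) {
        struct eb_node *next = eb_next(node);

        eb_delete(node);
        /* ... hand the enclosing object over to its consumer here ... */
        node = next;
}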
@@ -5311,9 +5317,9 @@ static void srv_cleanup_connections(struct server *srv)
 for (i = tid;;) {
 did_remove = 0;
 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
-if (srv_migrate_conns_to_remove(&srv->idle_conns[i], &idle_conns[i].toremove_conns, -1) > 0)
+if (srv_migrate_conns_to_remove(&srv->idle_conns_tree[i], &idle_conns[i].toremove_conns, -1) > 0)
 did_remove = 1;
-if (srv_migrate_conns_to_remove(&srv->safe_conns[i], &idle_conns[i].toremove_conns, -1) > 0)
+if (srv_migrate_conns_to_remove(&srv->safe_conns_tree[i], &idle_conns[i].toremove_conns, -1) > 0)
 did_remove = 1;
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
 if (did_remove)
@@ -5386,11 +5392,11 @@ struct task *srv_cleanup_idle_connections(struct task *task, void *context, unsi
 curr_idle + 1;

 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
-j = srv_migrate_conns_to_remove(&srv->idle_conns[i], &idle_conns[i].toremove_conns, max_conn);
+j = srv_migrate_conns_to_remove(&srv->idle_conns_tree[i], &idle_conns[i].toremove_conns, max_conn);
 if (j > 0)
 did_remove = 1;
 if (max_conn - j > 0 &&
-srv_migrate_conns_to_remove(&srv->safe_conns[i], &idle_conns[i].toremove_conns, max_conn - j) > 0)
+srv_migrate_conns_to_remove(&srv->safe_conns_tree[i], &idle_conns[i].toremove_conns, max_conn - j) > 0)
 did_remove = 1;
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
@@ -5817,7 +5817,7 @@ struct task *ssl_sock_io_cb(struct task *t, void *context, unsigned short state)
 conn = ctx->conn;
 conn_in_list = conn->flags & CO_FL_LIST_MASK;
 if (conn_in_list)
-MT_LIST_DEL(&conn->list);
+conn_delete_from_tree(&conn->hash_node);
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 /* First if we're doing an handshake, try that */
 if (ctx->conn->flags & CO_FL_SSL_WAIT_HS)
@@ -5868,9 +5868,9 @@ struct task *ssl_sock_io_cb(struct task *t, void *context, unsigned short state)

 HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 if (conn_in_list == CO_FL_SAFE_LIST)
-MT_LIST_ADDQ(&srv->safe_conns[tid], &conn->list);
+ebmb_insert(&srv->safe_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 else
-MT_LIST_ADDQ(&srv->idle_conns[tid], &conn->list);
+ebmb_insert(&srv->idle_conns_tree[tid], &conn->hash_node, sizeof(conn->hash));
 HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 }
 return NULL;