Mirror of https://git.haproxy.org/git/haproxy.git/ (synced 2025-08-10 09:07:02 +02:00)
Revert "MEDIUM: queue: use a dedicated lock for the queues"
This reverts commit fcb8bf8650. The recent changes since 5304669e1 ("MEDIUM: queue: make pendconn_process_next_strm() only return the pendconn") opened a tiny race condition between stream_free() and process_srv_queue(), as the pendconn is accessed outside of the lock, possibly while it's being freed. A different approach is required.
parent ccd85a3e08
commit 3f70fb9ea2
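To illustrate the class of bug the message describes, here is a small standalone sketch (illustrative only, not HAProxy code; the types, helpers and "one-slot queue" below are invented for the demo): one thread dequeues an element under a lock and keeps using it after the unlock, while the element's owner, seeing it already dequeued, frees it.

/* Standalone demo of the bug class (use of a dequeued element after the lock
 * is released, while its owner frees it). Illustrative only; NOT HAProxy code.
 * Build: gcc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct pendconn_like { int stream_id; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pendconn_like *queued;    /* one-slot "queue" for the demo */

/* loosely mimics process_srv_queue(): picks the element under the lock, then
 * keeps using it after the unlock -- the window the commit message refers to
 */
static void *queue_side(void *arg)
{
	struct pendconn_like *pc;

	(void)arg;
	pthread_mutex_lock(&queue_lock);
	pc = queued;
	queued = NULL;                  /* dequeued: no longer reachable from the queue */
	pthread_mutex_unlock(&queue_lock);

	if (pc) {
		usleep(1000);           /* widen the race window for the demo */
		printf("waking stream %d\n", pc->stream_id);  /* may read freed memory */
	}
	return NULL;
}

/* loosely mimics stream_free(): the owner sees its element is no longer
 * queued, concludes nobody else references it, and frees it
 */
static void *owner_side(void *arg)
{
	struct pendconn_like *pc = arg;

	pthread_mutex_lock(&queue_lock);
	if (queued == pc)
		queued = NULL;          /* still queued: unlink it ourselves */
	pthread_mutex_unlock(&queue_lock);

	free(pc);                       /* but the other thread may still hold pc */
	return NULL;
}

int main(void)
{
	struct pendconn_like *pc = malloc(sizeof(*pc));
	pthread_t a, b;

	pc->stream_id = 1;
	queued = pc;
	pthread_create(&a, NULL, queue_side, NULL);
	pthread_create(&b, NULL, owner_side, pc);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Run under AddressSanitizer, queue_side will occasionally be flagged for reading pc after owner_side freed it. The revert below simply goes back to the wider server/proxy locks until a safer scheme is found.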
@@ -41,7 +41,6 @@ struct pendconn {
 
 struct queue {
 	struct eb_root head; /* queued pendconnds */
-	__decl_thread(HA_SPINLOCK_T lock); /* for manipulations in the tree */
 	unsigned int idx; /* current queuing index */
 	unsigned int length; /* number of entries */
 };
@@ -399,7 +399,6 @@ enum lock_label {
 	LOGSRV_LOCK,
 	DICT_LOCK,
 	PROTO_LOCK,
-	QUEUE_LOCK,
 	CKCH_LOCK,
 	SNI_LOCK,
 	SSL_SERVER_LOCK,
@@ -452,7 +451,6 @@ static inline const char *lock_label(enum lock_label label)
 	case LOGSRV_LOCK: return "LOGSRV";
 	case DICT_LOCK: return "DICT";
 	case PROTO_LOCK: return "PROTO";
-	case QUEUE_LOCK: return "QUEUE";
 	case CKCH_LOCK: return "CKCH";
 	case SNI_LOCK: return "SNI";
 	case SSL_SERVER_LOCK: return "SSL_SERVER";
@@ -1293,7 +1293,6 @@ void init_new_proxy(struct proxy *p)
 	memset(p, 0, sizeof(struct proxy));
 	p->obj_type = OBJ_TYPE_PROXY;
 	p->queue.head = EB_ROOT;
-	HA_SPIN_INIT(&p->queue.lock);
 	LIST_INIT(&p->acl);
 	LIST_INIT(&p->http_req_rules);
 	LIST_INIT(&p->http_res_rules);
src/queue.c (44 changed lines)
@@ -157,9 +157,9 @@ static void __pendconn_unlink_prx(struct pendconn *p)
 static inline void pendconn_queue_lock(struct pendconn *p)
 {
 	if (p->srv)
-		HA_SPIN_LOCK(QUEUE_LOCK, &p->srv->queue.lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &p->srv->lock);
 	else
-		HA_SPIN_LOCK(QUEUE_LOCK, &p->px->queue.lock);
+		HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->px->lock);
 }
 
 /* Unlocks the queue the pendconn element belongs to. This relies on both p->px
@@ -169,9 +169,9 @@ static inline void pendconn_queue_lock(struct pendconn *p)
 static inline void pendconn_queue_unlock(struct pendconn *p)
 {
 	if (p->srv)
-		HA_SPIN_UNLOCK(QUEUE_LOCK, &p->srv->queue.lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock);
 	else
-		HA_SPIN_UNLOCK(QUEUE_LOCK, &p->px->queue.lock);
+		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->px->lock);
 }
 
 /* Removes the pendconn from the server/proxy queue. At this stage, the
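As restored by this hunk and the previous one, the helper pair takes whichever lock protects the queue the pendconn sits in: the server lock when p->srv is set, the proxy lock otherwise. A hypothetical caller (illustrative only, not a function from this patch) would bracket any manipulation of the queued node with the pair:

/* Hypothetical caller -- illustrative only, not part of this patch. */
static void pendconn_touch(struct pendconn *p)
{
	pendconn_queue_lock(p);          /* server lock or proxy lock, as appropriate */

	if (p->node.node.leaf_p) {
		/* still queued: safe to work on p->node while the lock is held */
	}

	pendconn_queue_unlock(p);        /* releases the same lock that was taken */
}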
@@ -187,12 +187,12 @@ void pendconn_unlink(struct pendconn *p)
 
 	if (p->srv) {
 		/* queued in the server */
-		HA_SPIN_LOCK(QUEUE_LOCK, &p->srv->queue.lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &p->srv->lock);
 		if (p->node.node.leaf_p) {
 			__pendconn_unlink_srv(p);
 			done = 1;
 		}
-		HA_SPIN_UNLOCK(QUEUE_LOCK, &p->srv->queue.lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock);
 		if (done) {
 			_HA_ATOMIC_DEC(&p->srv->queue.length);
 			_HA_ATOMIC_DEC(&p->px->totpend);
@@ -200,12 +200,12 @@ void pendconn_unlink(struct pendconn *p)
 	}
 	else {
 		/* queued in the proxy */
-		HA_SPIN_LOCK(QUEUE_LOCK, &p->px->queue.lock);
+		HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->px->lock);
 		if (p->node.node.leaf_p) {
 			__pendconn_unlink_prx(p);
 			done = 1;
 		}
-		HA_SPIN_UNLOCK(QUEUE_LOCK, &p->px->queue.lock);
+		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->px->lock);
 		if (done) {
 			_HA_ATOMIC_DEC(&p->px->queue.length);
 			_HA_ATOMIC_DEC(&p->px->totpend);
@@ -344,13 +344,15 @@ void process_srv_queue(struct server *s, int server_locked)
 	while (s->served < maxconn) {
 		struct pendconn *pc;
 
-		HA_SPIN_LOCK(QUEUE_LOCK, &s->queue.lock);
-		HA_SPIN_LOCK(QUEUE_LOCK, &p->queue.lock);
+		if (!server_locked)
+			HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+		HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);
 
 		pc = pendconn_process_next_strm(s, p);
 
-		HA_SPIN_UNLOCK(QUEUE_LOCK, &p->queue.lock);
-		HA_SPIN_UNLOCK(QUEUE_LOCK, &s->queue.lock);
+		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
+		if (!server_locked)
+			HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
 
 		if (!pc)
 			break;
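The restored code only takes the server lock when the caller does not already hold it, which is what the server_locked argument signals. Hypothetical call sites (illustrative only, not taken from this patch) would look like:

/* Hypothetical call sites -- illustrative only. */

/* caller does not hold the server lock: let the function take and release it */
process_srv_queue(s, 0);

/* caller already holds the server lock: pass server_locked=1 so it is not
 * taken a second time (the proxy lock is still taken inside the loop)
 */
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
process_srv_queue(s, 1);
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);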
@@ -422,10 +424,10 @@ struct pendconn *pendconn_add(struct stream *strm)
 		}
 		__ha_barrier_atomic_store();
 
-		HA_SPIN_LOCK(QUEUE_LOCK, &p->srv->queue.lock);
+		HA_SPIN_LOCK(SERVER_LOCK, &p->srv->lock);
 		p->queue_idx = srv->queue.idx - 1; // for increment
 		eb32_insert(&srv->queue.head, &p->node);
-		HA_SPIN_UNLOCK(QUEUE_LOCK, &p->srv->queue.lock);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock);
 	}
 	else {
 		unsigned int old_max, new_max;
||||||
@ -438,10 +440,10 @@ struct pendconn *pendconn_add(struct stream *strm)
|
|||||||
}
|
}
|
||||||
__ha_barrier_atomic_store();
|
__ha_barrier_atomic_store();
|
||||||
|
|
||||||
HA_SPIN_LOCK(QUEUE_LOCK, &p->px->queue.lock);
|
HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->px->lock);
|
||||||
p->queue_idx = px->queue.idx - 1; // for increment
|
p->queue_idx = px->queue.idx - 1; // for increment
|
||||||
eb32_insert(&px->queue.head, &p->node);
|
eb32_insert(&px->queue.head, &p->node);
|
||||||
HA_SPIN_UNLOCK(QUEUE_LOCK, &p->px->queue.lock);
|
HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->px->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
_HA_ATOMIC_INC(&px->totpend);
|
_HA_ATOMIC_INC(&px->totpend);
|
||||||
@@ -450,7 +452,7 @@ struct pendconn *pendconn_add(struct stream *strm)
 
 /* Redistribute pending connections when a server goes down. The number of
  * connections redistributed is returned. It must be called with the server
- * queue lock held.
+ * lock held.
  */
 int pendconn_redistribute(struct server *s)
 {
@@ -487,8 +489,8 @@ int pendconn_redistribute(struct server *s)
 /* Check for pending connections at the backend, and assign some of them to
  * the server coming up. The server's weight is checked before being assigned
  * connections it may not be able to handle. The total number of transferred
- * connections is returned. It must be called with the server queue lock held,
- * and will take the proxy's queue lock.
+ * connections is returned. It must be called with the server lock held, and
+ * will take the proxy's lock.
 */
 int pendconn_grab_from_px(struct server *s)
 {
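The updated comment spells out the locking contract: the caller must already hold the server lock, and the function takes and releases the proxy's lock itself. A hypothetical caller (illustrative only, not part of this patch) respecting that ordering:

/* Hypothetical caller -- illustrative only, not part of this patch. */
int xferred;

HA_SPIN_LOCK(SERVER_LOCK, &s->lock);    /* contract: server lock held by the caller */
xferred = pendconn_grab_from_px(s);     /* takes and releases the proxy's lock internally */
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);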
@@ -507,7 +509,7 @@ int pendconn_grab_from_px(struct server *s)
 	    ((s != s->proxy->lbprm.fbck) && !(s->proxy->options & PR_O_USE_ALL_BK))))
 		return 0;
 
-	HA_SPIN_LOCK(QUEUE_LOCK, &s->proxy->queue.lock);
+	HA_RWLOCK_WRLOCK(PROXY_LOCK, &s->proxy->lock);
 	maxconn = srv_dynamic_maxconn(s);
 	while ((p = pendconn_first(&s->proxy->queue.head))) {
 		if (s->maxconn && s->served + xferred >= maxconn)
@@ -519,7 +521,7 @@ int pendconn_grab_from_px(struct server *s)
 		task_wakeup(p->strm->task, TASK_WOKEN_RES);
 		xferred++;
 	}
-	HA_SPIN_UNLOCK(QUEUE_LOCK, &s->proxy->queue.lock);
+	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &s->proxy->lock);
 	if (xferred) {
 		_HA_ATOMIC_SUB(&s->proxy->queue.length, xferred);
 		_HA_ATOMIC_SUB(&s->proxy->totpend, xferred);
@@ -2164,7 +2164,6 @@ struct server *new_server(struct proxy *proxy)
 	srv->obj_type = OBJ_TYPE_SERVER;
 	srv->proxy = proxy;
 	srv->queue.head = EB_ROOT;
-	HA_SPIN_INIT(&srv->queue.lock);
 	LIST_APPEND(&servers_list, &srv->global_list);
 	LIST_INIT(&srv->srv_rec_item);
 	LIST_INIT(&srv->ip_rec_item);