mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2026-03-14 11:31:59 +01:00
MINOR: queues: Let process_srv_queue() callers know if srv is full
Add an extra argument to process_srv_queue(). If a non-NULL pointer is provided, the function will set it to 1 if the server is still full after dequeuing requests, and to 0 otherwise.
This commit is contained in:
parent
8ed07610b1
commit
d6a2d03d39
@ -34,7 +34,7 @@ extern struct pool_head *pool_head_pendconn;
|
||||
|
||||
struct pendconn *pendconn_add(struct stream *strm);
|
||||
int pendconn_dequeue(struct stream *strm);
|
||||
int process_srv_queue(struct server *s);
|
||||
int process_srv_queue(struct server *s, int *fullp);
|
||||
unsigned int srv_dynamic_maxconn(const struct server *s);
|
||||
int pendconn_redistribute(struct server *s);
|
||||
void pendconn_unlink(struct pendconn *p);
|
||||
|
||||
@ -399,7 +399,7 @@ static inline int srv_manage_queues(struct server *srv, struct proxy *px)
|
||||
int full = -1;
|
||||
|
||||
if (may_dequeue_tasks(srv, px))
|
||||
full = process_srv_queue(srv);
|
||||
full = process_srv_queue(srv, &full);
|
||||
|
||||
return full;
|
||||
}
|
||||
|
||||
@ -690,7 +690,7 @@ int assign_server(struct stream *s)
|
||||
/* if there's some queue on the backend, with certain algos we
|
||||
* know it's because all servers are full.
|
||||
*/
|
||||
if (s->be->queueslength && s->be->served && s->be->queueslength != s->be->beconn &&
|
||||
if (s->be->queues_not_empty && s->be->served && s->be->queueslength != s->be->beconn &&
|
||||
(((s->be->lbprm.algo & (BE_LB_KIND|BE_LB_NEED|BE_LB_PARM)) == BE_LB_ALGO_FAS)|| // first
|
||||
((s->be->lbprm.algo & (BE_LB_KIND|BE_LB_NEED|BE_LB_PARM)) == BE_LB_ALGO_RR) || // roundrobin
|
||||
((s->be->lbprm.algo & (BE_LB_KIND|BE_LB_NEED|BE_LB_PARM)) == BE_LB_ALGO_SRR))) { // static-rr
|
||||
|
||||
23
src/queue.c
23
src/queue.c
@ -263,7 +263,7 @@ static struct pendconn *pendconn_first(struct eb_root *pendconns)
|
||||
* This function must only be called if the server queue is locked _AND_ the
|
||||
* proxy queue is not. Today it is only called by process_srv_queue.
|
||||
* When a pending connection is dequeued, this function returns 1 if a pendconn
|
||||
* is dequeued, otherwise 0.
|
||||
* is dequeued, otherwise 0 if the queues are empty, or -1 if the server is full.
|
||||
*/
|
||||
static int pendconn_process_next_strm(struct server *srv, struct proxy *px, int px_ok, int tgrp)
|
||||
{
|
||||
@ -302,7 +302,7 @@ static int pendconn_process_next_strm(struct server *srv, struct proxy *px, int
|
||||
if (!got_it) {
|
||||
if (pp)
|
||||
HA_SPIN_UNLOCK(QUEUE_LOCK, &px->per_tgrp[tgrp - 1].queue.lock);
|
||||
return 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -398,17 +398,19 @@ static int pendconn_process_next_strm(struct server *srv, struct proxy *px, int
|
||||
/* Manages a server's connection queue. This function will try to dequeue as
|
||||
* many pending streams as possible, and wake them up.
|
||||
*/
|
||||
int process_srv_queue(struct server *s)
|
||||
int process_srv_queue(struct server *s, int *fullp)
|
||||
{
|
||||
struct server *ref = s->track ? s->track : s;
|
||||
struct proxy *p = s->proxy;
|
||||
long non_empty_tgids[(global.nbtgroups / LONGBITS) + 1];
|
||||
int maxconn;
|
||||
int done = 0;
|
||||
int full = 0;
|
||||
int px_ok;
|
||||
int cur_tgrp;
|
||||
int i = global.nbtgroups;
|
||||
int curgrpnb = i;
|
||||
int ret;
|
||||
|
||||
|
||||
while (i >= LONGBITS) {
|
||||
@ -507,9 +509,12 @@ int process_srv_queue(struct server *s)
|
||||
* pendconn_process_next_strm() will increment
|
||||
* the served field, only if it is < maxconn.
|
||||
*/
|
||||
if (!pendconn_process_next_strm(s, p, px_ok, cur_tgrp)) {
|
||||
ret = pendconn_process_next_strm(s, p, px_ok, cur_tgrp);
|
||||
if (ret <= 0) {
|
||||
ha_bit_clr(cur_tgrp - 1, non_empty_tgids);
|
||||
curgrpnb--;
|
||||
if (ret == -1)
|
||||
full = 1;
|
||||
break;
|
||||
}
|
||||
to_dequeue--;
|
||||
@ -549,16 +554,24 @@ int process_srv_queue(struct server *s)
|
||||
*/
|
||||
for (i = 0; i < global.nbtgroups; i++) {
|
||||
HA_SPIN_LOCK(QUEUE_LOCK, &s->per_tgrp[i].queue.lock);
|
||||
if (pendconn_process_next_strm(s, p, px_ok, i + 1)) {
|
||||
ret = pendconn_process_next_strm(s, p, px_ok, i + 1);
|
||||
if (ret == 1) {
|
||||
HA_SPIN_UNLOCK(QUEUE_LOCK, &s->per_tgrp[i].queue.lock);
|
||||
_HA_ATOMIC_SUB(&p->totpend, 1);
|
||||
_HA_ATOMIC_ADD(&p->served, 1);
|
||||
done++;
|
||||
break;
|
||||
} else if (ret == -1) {
|
||||
/* Server full */
|
||||
HA_SPIN_UNLOCK(QUEUE_LOCK, &s->per_tgrp[i].queue.lock);
|
||||
full = 1;
|
||||
break;
|
||||
}
|
||||
HA_SPIN_UNLOCK(QUEUE_LOCK, &s->per_tgrp[i].queue.lock);
|
||||
}
|
||||
}
|
||||
if (fullp)
|
||||
*fullp = full;
|
||||
return done;
|
||||
}
|
||||
|
||||
|
||||
@ -5930,6 +5930,7 @@ static int srv_alloc_lb(struct server *sv, struct proxy *be)
|
||||
static struct task *server_warmup(struct task *t, void *context, unsigned int state)
|
||||
{
|
||||
struct server *s = context;
|
||||
int full;
|
||||
|
||||
/* by default, plan on stopping the task */
|
||||
t->expire = TICK_ETERNITY;
|
||||
@ -5945,7 +5946,7 @@ static struct task *server_warmup(struct task *t, void *context, unsigned int st
|
||||
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
|
||||
|
||||
/* probably that we can refill this server with a bit more connections */
|
||||
process_srv_queue(s);
|
||||
process_srv_queue(s, &full);
|
||||
|
||||
|
||||
/* get back there in 1 second or 1/20th of the slowstart interval,
|
||||
@ -6702,6 +6703,7 @@ static int _srv_update_status_op(struct server *s, enum srv_op_st_chg_cause caus
|
||||
int log_level;
|
||||
int srv_was_stopping = (s->cur_state == SRV_ST_STOPPING) || (s->cur_admin & SRV_ADMF_DRAIN);
|
||||
int xferred = 0;
|
||||
int full;
|
||||
|
||||
if ((s->cur_state != SRV_ST_STOPPED) && (s->next_state == SRV_ST_STOPPED)) {
|
||||
srv_lb_propagate(s);
|
||||
@ -6782,7 +6784,7 @@ static int _srv_update_status_op(struct server *s, enum srv_op_st_chg_cause caus
|
||||
/* check if we can handle some connections queued.
|
||||
* We will take as many as we can handle.
|
||||
*/
|
||||
xferred = process_srv_queue(s);
|
||||
xferred = process_srv_queue(s, &full);
|
||||
|
||||
tmptrash = alloc_trash_chunk();
|
||||
if (tmptrash) {
|
||||
@ -6819,6 +6821,7 @@ static int _srv_update_status_adm(struct server *s, enum srv_adm_st_chg_cause ca
|
||||
struct buffer *tmptrash = NULL;
|
||||
int srv_was_stopping = (s->cur_state == SRV_ST_STOPPING) || (s->cur_admin & SRV_ADMF_DRAIN);
|
||||
int xferred = 0;
|
||||
int full;
|
||||
|
||||
/* Maintenance must also disable health checks */
|
||||
if (!(s->cur_admin & SRV_ADMF_MAINT) && (s->next_admin & SRV_ADMF_MAINT)) {
|
||||
@ -6981,7 +6984,7 @@ static int _srv_update_status_adm(struct server *s, enum srv_adm_st_chg_cause ca
|
||||
/* check if we can handle some connections queued.
|
||||
* We will take as many as we can handle.
|
||||
*/
|
||||
xferred = process_srv_queue(s);
|
||||
xferred = process_srv_queue(s, &full);
|
||||
}
|
||||
else if (s->next_admin & SRV_ADMF_MAINT) {
|
||||
/* remaining in maintenance mode, let's inform precisely about the
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user