[MAJOR] proxy: finally get rid of maintain_proxies()

This function is finally not needed anymore, as it has been replaced with
a per-proxy task that is scheduled when some limits are encountered on
incoming connections or when the process is stopping. The savings should
be noticeable on configs with a large number of proxies. The most important
point is that the rate limiting is now enforced in a clean and solid way.
Willy Tarreau 2011-07-25 16:33:49 +02:00
parent 26e4881a2d
commit 918ff608f8
7 changed files with 85 additions and 79 deletions
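In short, each proxy now owns a task that sleeps on TICK_ETERNITY and only gets scheduled when one of its listeners hits a limit or when the process is stopping, instead of the polling loop walking every proxy through maintain_proxies() on each iteration. The stand-alone C sketch below only models that pattern; it is not HAProxy code, and the names (manage_proxy_toy, the simplified task_schedule(), the 10-tick delay) are purely illustrative.

/* Toy model of the per-proxy task pattern (illustrative, not HAProxy code). */
#include <stdio.h>
#include <limits.h>

#define TICK_ETERNITY INT_MAX                    /* "never expires" */

struct task {
	int expire;                              /* next wake-up date, or TICK_ETERNITY */
	void *context;                           /* owning proxy */
	struct task *(*process)(struct task *t);
};

struct proxy {
	const char *id;
	int feconn, maxconn;                     /* current / maximum frontend connections */
	struct task *task;                       /* per-proxy management task */
};

/* bring a task's wake-up date forward; a real scheduler would requeue it */
static void task_schedule(struct task *t, int when)
{
	if (when < t->expire)
		t->expire = when;
}

/* per-proxy management callback: computes its own next wake-up date */
static struct task *manage_proxy_toy(struct task *t)
{
	struct proxy *p = t->context;
	int next = TICK_ETERNITY;                /* nothing pending: sleep forever */

	if (p->feconn >= p->maxconn)
		next = 10;                       /* saturated: re-check in 10 ticks */

	printf("proxy %s: next wake-up = %d\n", p->id, next);
	t->expire = next;
	return t;
}

int main(void)
{
	struct task t = { TICK_ETERNITY, NULL, manage_proxy_toy };
	struct proxy p = { "front", 100, 100, &t };

	t.context = &p;

	/* the accept path hits a limit: schedule only this proxy's task
	 * instead of scanning all proxies on every polling-loop iteration */
	task_schedule(p.task, 10);
	p.task->process(p.task);
	return 0;
}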

diff --git a/include/proto/proxy.h b/include/proto/proxy.h

@@ -2,7 +2,7 @@
  * include/proto/proxy.h
  * This file defines function prototypes for proxy management.
  *
- * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ * Copyright (C) 2000-2011 Willy Tarreau - w@1wt.eu
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -29,7 +29,7 @@
 #include <proto/freq_ctr.h>
 
 int start_proxies(int verbose);
-void maintain_proxies(int *next);
+struct task *manage_proxy(struct task *t);
 void soft_stop(void);
 void pause_proxy(struct proxy *p);
 void stop_proxy(struct proxy *p);

diff --git a/include/proto/task.h b/include/proto/task.h

@@ -242,10 +242,9 @@ static inline void task_schedule(struct task *task, int when)
 }
 
 /*
- * This does 4 things :
+ * This does 3 things :
  *  - wake up all expired tasks
  *  - call all runnable tasks
- *  - call maintain_proxies() to enable/disable the listeners
  *  - return the date of next event in <next> or eternity.
  */

diff --git a/include/types/proxy.h b/include/types/proxy.h

@@ -306,6 +306,7 @@ struct proxy {
 	struct list listener_queue;	/* list of the temporarily limited listeners because of lack of a proxy resource */
 	struct stktable table;		/* table for storing sticking sessions */
+	struct task *task;		/* the associated task, mandatory to manage rate limiting, stopping and resource shortage */
 	int grace;			/* grace time after stop request */
 	char *check_req;		/* HTTP or SSL request to use for PR_O_HTTP_CHK|PR_O_SSL3_CHK */
 	int check_len;			/* Length of the HTTP or SSL3 request */

diff --git a/src/cfgparse.c b/src/cfgparse.c

@@ -6412,7 +6412,22 @@ int check_config_validity()
 				}
 			}
 		}
 
+		/* create the task associated with the proxy */
+		curproxy->task = task_new();
+		if (curproxy->task) {
+			curproxy->task->context = curproxy;
+			curproxy->task->process = manage_proxy;
+			/* no need to queue, it will be done automatically if some
+			 * listener gets limited.
+			 */
+			curproxy->task->expire = TICK_ETERNITY;
+		} else {
+			Alert("Proxy '%s': no more memory when trying to allocate the management task\n",
+			      curproxy->id);
+			cfgerr++;
+		}
+
 		curproxy = curproxy->next;
 	}

diff --git a/src/haproxy.c b/src/haproxy.c

@@ -918,6 +918,7 @@ void deinit(void)
 		free(p->fwdfor_hdr_name);
 
 		free_http_req_rules(&p->http_req_rules);
+		free(p->task);
 
 		pool_destroy2(p->req_cap_pool);
 		pool_destroy2(p->rsp_cap_pool);
@@ -992,13 +993,7 @@ static int tell_old_pids(int sig)
 	return ret;
 }
 
-/*
- * Runs the polling loop
- *
- * FIXME:
- * - we still use 'listeners' to check whether we want to stop or not.
- *
- */
+/* Runs the polling loop */
 void run_poll_loop()
 {
 	int next;
@@ -1014,11 +1009,6 @@ void run_poll_loop()
 		/* Process a few tasks */
 		process_runnable_tasks(&next);
 
-		/* maintain all proxies in a consistent state. This should quickly
-		 * become a task because it becomes expensive when there are huge
-		 * numbers of proxies. */
-		maintain_proxies(&next);
-
 		/* stop when there's nothing left to do */
 		if (jobs == 0)
 			break;

diff --git a/src/proxy.c b/src/proxy.c

@@ -40,7 +40,7 @@
 #include <proto/task.h>
 
-int listeners;	/* # of proxy listeners, set by cfgparse, unset by maintain_proxies */
+int listeners;	/* # of proxy listeners, set by cfgparse */
 struct proxy *proxy = NULL;	/* list of all existing proxies */
 struct eb_root used_proxy_id = EB_ROOT;	/* list of proxy IDs in use */
 unsigned int error_snapshot_id = 0;	/* global ID assigned to each error then incremented */
@@ -467,74 +467,72 @@ int start_proxies(int verbose)
 
 /*
- * this function enables proxies when there are enough free sessions,
- * or stops them when the table is full. It is designed to be called from the
- * select_loop(). It adjusts the date of next expiration event during stop
- * time if appropriate.
+ * This is the proxy management task. It enables proxies when there are enough
+ * free sessions, or stops them when the table is full. It is designed to be
+ * called as a task which is woken up upon stopping or when rate limiting must
+ * be enforced.
  */
-void maintain_proxies(int *next)
+struct task *manage_proxy(struct task *t)
 {
-	struct proxy *p;
+	struct proxy *p = t->context;
+	int next = TICK_ETERNITY;
 	unsigned int wait;
 
-	p = proxy;
+	/* We should periodically try to enable listeners waiting for a
+	 * global resource here.
+	 */
 
-	/* if there are enough free sessions, we'll activate proxies */
-	if (actconn < global.maxconn) {
-		/* We should periodically try to enable listeners waiting for a
-		 * global resource here.
-		 */
-
-		for (; p; p = p->next) {
-			/* first, let's check if we need to stop the proxy */
-			if (unlikely(stopping && p->state != PR_STSTOPPED)) {
-				int t;
-
-				t = tick_remain(now_ms, p->stop_time);
-				if (t == 0) {
-					Warning("Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n",
-						p->id, p->fe_counters.cum_conn, p->be_counters.cum_conn);
-					send_log(p, LOG_WARNING, "Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n",
-						 p->id, p->fe_counters.cum_conn, p->be_counters.cum_conn);
-					stop_proxy(p);
-					/* try to free more memory */
-					pool_gc2();
-				}
-				else {
-					*next = tick_first(*next, p->stop_time);
-				}
-			}
-
-			/* the rest below is just for frontends */
-			if (!(p->cap & PR_CAP_FE))
-				continue;
-
-			/* check the various reasons we may find to block the frontend */
-			if (unlikely(p->feconn >= p->maxconn)) {
-				if (p->state == PR_STREADY)
-					p->state = PR_STFULL;
-				continue;
-			}
-
-			/* OK we have no reason to block, so let's unblock if we were blocking */
-			if (p->state == PR_STFULL)
-				p->state = PR_STREADY;
-
-			if (p->fe_sps_lim &&
-			    (wait = next_event_delay(&p->fe_sess_per_sec, p->fe_sps_lim, 0))) {
-				/* we're blocking because a limit was reached on the number of
-				 * requests/s on the frontend. We want to re-check ASAP, which
-				 * means in 1 ms before estimated expiration date, because the
-				 * timer will have settled down.
-				 */
-				*next = tick_first(*next, tick_add(now_ms, wait));
-				continue;
-			}
-
-			/* The proxy is not limited so we can re-enable any waiting listener */
-			if (!LIST_ISEMPTY(&p->listener_queue))
-				dequeue_all_listeners(&p->listener_queue);
-		}
-	}
+	/* first, let's check if we need to stop the proxy */
+	if (unlikely(stopping && p->state != PR_STSTOPPED)) {
+		int t;
+
+		t = tick_remain(now_ms, p->stop_time);
+		if (t == 0) {
+			Warning("Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n",
+				p->id, p->fe_counters.cum_conn, p->be_counters.cum_conn);
+			send_log(p, LOG_WARNING, "Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n",
+				 p->id, p->fe_counters.cum_conn, p->be_counters.cum_conn);
+			stop_proxy(p);
+			/* try to free more memory */
+			pool_gc2();
+		}
+		else {
+			next = tick_first(next, p->stop_time);
+		}
+	}
+
+	/* the rest below is just for frontends */
+	if (!(p->cap & PR_CAP_FE))
+		goto out;
+
+	/* check the various reasons we may find to block the frontend */
+	if (unlikely(p->feconn >= p->maxconn)) {
+		if (p->state == PR_STREADY)
+			p->state = PR_STFULL;
+		goto out;
+	}
+
+	/* OK we have no reason to block, so let's unblock if we were blocking */
+	if (p->state == PR_STFULL)
+		p->state = PR_STREADY;
+
+	if (p->fe_sps_lim &&
+	    (wait = next_event_delay(&p->fe_sess_per_sec, p->fe_sps_lim, 0))) {
+		/* we're blocking because a limit was reached on the number of
+		 * requests/s on the frontend. We want to re-check ASAP, which
+		 * means in 1 ms before estimated expiration date, because the
+		 * timer will have settled down.
+		 */
+		next = tick_first(next, tick_add(now_ms, wait));
+		goto out;
+	}
+
+	/* The proxy is not limited so we can re-enable any waiting listener */
+	if (!LIST_ISEMPTY(&p->listener_queue))
+		dequeue_all_listeners(&p->listener_queue);
+
+ out:
+	t->expire = next;
+	task_queue(t);
+	return t;
 }
@@ -560,6 +558,8 @@ void soft_stop(void)
 		if (p->table.size && p->table.sync_task)
 			task_wakeup(p->table.sync_task, TASK_WOKEN_MSG);
 
+		/* wake every proxy task up so that they can handle the stopping */
+		task_wakeup(p->task, TASK_WOKEN_MSG);
 
 		p = p->next;
 	}

diff --git a/src/stream_sock.c b/src/stream_sock.c

@@ -1204,6 +1204,7 @@ int stream_sock_accept(int fd)
 		if (unlikely(!max)) {
 			/* frontend accept rate limit was reached */
 			limit_listener(l, &p->listener_queue);
+			task_schedule(p->task, tick_add(now_ms, next_event_delay(&p->fe_sess_per_sec, p->fe_sps_lim, 0)));
 			return 0;
 		}