mirror of https://git.haproxy.org/git/haproxy.git/
[MAJOR] proxy: finally get rid of maintain_proxies()
This function is finally not needed anymore, as it has been replaced with a per-proxy task that is scheduled when some limits are encountered on incoming connections or when the process is stopping. The savings should be noticeable on configs with a large number of proxies. The most important point is that the rate limiting is now enforced in a clean and solid way.
parent 26e4881a2d
commit 918ff608f8
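Before reading the diff, here is a condensed sketch of how the pieces fit together. It is illustrative only, not part of the patch: the helper names attach_proxy_task() and rate_limit_hit() are invented for readability, while task_new(), task_schedule(), tick_add(), now_ms, TICK_ETERNITY and manage_proxy() are the HAProxy internals the diff below actually uses. The sketch assumes it is compiled inside the HAProxy source tree of this era.

/* Condensed sketch of the per-proxy management task (see the full diff below).
 * attach_proxy_task() and rate_limit_hit() are illustrative names only; the
 * task API calls are the ones used by the real patch.
 */
#include <common/ticks.h>     /* TICK_ETERNITY, tick_add() */
#include <common/time.h>      /* now_ms */
#include <proto/task.h>       /* task_new(), task_schedule() */
#include <proto/proxy.h>      /* manage_proxy(), after this patch */
#include <types/proxy.h>      /* struct proxy */

/* 1. At config-check time, every proxy gets its own task. It is deliberately
 *    not queued: its expiration stays at TICK_ETERNITY until someone needs it.
 */
static int attach_proxy_task(struct proxy *px)
{
	px->task = task_new();
	if (!px->task)
		return 0;                  /* caller reports the allocation error */
	px->task->context = px;            /* manage_proxy() finds its proxy here */
	px->task->process = manage_proxy;
	px->task->expire  = TICK_ETERNITY; /* sleep until woken or scheduled */
	return 1;
}

/* 2. When the accept loop hits the frontend rate limit, it queues the limited
 *    listener and programs the proxy task to fire when a new session becomes
 *    allowed again (the delay would come from next_event_delay()).
 */
static void rate_limit_hit(struct proxy *px, unsigned int delay_ms)
{
	task_schedule(px->task, tick_add(now_ms, delay_ms));
}

/* 3. manage_proxy() (src/proxy.c below) then re-enables the queued listeners,
 *    recomputes t->expire and requeues itself, so the old per-iteration
 *    maintain_proxies() sweep over all proxies is no longer needed.
 */

The design point is that an idle proxy costs nothing: its task sleeps at TICK_ETERNITY and is only queued when a listener actually gets limited or the process is stopping.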
include/proto/proxy.h
@@ -2,7 +2,7 @@
  * include/proto/proxy.h
  * This file defines function prototypes for proxy management.
  *
- * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ * Copyright (C) 2000-2011 Willy Tarreau - w@1wt.eu
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -29,7 +29,7 @@
 #include <proto/freq_ctr.h>
 
 int start_proxies(int verbose);
-void maintain_proxies(int *next);
+struct task *manage_proxy(struct task *t);
 void soft_stop(void);
 void pause_proxy(struct proxy *p);
 void stop_proxy(struct proxy *p);
include/proto/task.h
@@ -242,10 +242,9 @@ static inline void task_schedule(struct task *task, int when)
 }
 
 /*
- * This does 4 things :
+ * This does 3 things :
  * - wake up all expired tasks
  * - call all runnable tasks
- * - call maintain_proxies() to enable/disable the listeners
  * - return the date of next event in <next> or eternity.
  */
 
include/types/proxy.h
@@ -306,6 +306,7 @@ struct proxy {
 	struct list listener_queue;	/* list of the temporarily limited listeners because of lack of a proxy resource */
 	struct stktable table;		/* table for storing sticking sessions */
 
+	struct task *task;		/* the associated task, mandatory to manage rate limiting, stopping and resource shortage */
 	int grace;			/* grace time after stop request */
 	char *check_req;		/* HTTP or SSL request to use for PR_O_HTTP_CHK|PR_O_SSL3_CHK */
 	int check_len;			/* Length of the HTTP or SSL3 request */
src/cfgparse.c
@@ -6413,6 +6413,21 @@ int check_config_validity()
 			}
 		}
 
+		/* create the task associated with the proxy */
+		curproxy->task = task_new();
+		if (curproxy->task) {
+			curproxy->task->context = curproxy;
+			curproxy->task->process = manage_proxy;
+			/* no need to queue, it will be done automatically if some
+			 * listener gets limited.
+			 */
+			curproxy->task->expire = TICK_ETERNITY;
+		} else {
+			Alert("Proxy '%s': no more memory when trying to allocate the management task\n",
+			      curproxy->id);
+			cfgerr++;
+		}
+
 		curproxy = curproxy->next;
 	}
 
src/haproxy.c
@@ -918,6 +918,7 @@ void deinit(void)
 		free(p->fwdfor_hdr_name);
 
 		free_http_req_rules(&p->http_req_rules);
+		free(p->task);
 
 		pool_destroy2(p->req_cap_pool);
 		pool_destroy2(p->rsp_cap_pool);
@@ -992,13 +993,7 @@ static int tell_old_pids(int sig)
 	return ret;
 }
 
-/*
- * Runs the polling loop
- *
- * FIXME:
- * - we still use 'listeners' to check whether we want to stop or not.
- *
- */
+/* Runs the polling loop */
 void run_poll_loop()
 {
 	int next;
@@ -1014,11 +1009,6 @@ void run_poll_loop()
 		/* Process a few tasks */
 		process_runnable_tasks(&next);
 
-		/* maintain all proxies in a consistent state. This should quickly
-		 * become a task because it becomes expensive when there are huge
-		 * numbers of proxies. */
-		maintain_proxies(&next);
-
 		/* stop when there's nothing left to do */
 		if (jobs == 0)
 			break;
src/proxy.c
@@ -40,7 +40,7 @@
 #include <proto/task.h>
 
 
-int listeners;	/* # of proxy listeners, set by cfgparse, unset by maintain_proxies */
+int listeners;	/* # of proxy listeners, set by cfgparse */
 struct proxy *proxy = NULL;	/* list of all existing proxies */
 struct eb_root used_proxy_id = EB_ROOT;	/* list of proxy IDs in use */
 unsigned int error_snapshot_id = 0;	/* global ID assigned to each error then incremented */
@@ -467,25 +467,21 @@ int start_proxies(int verbose)
 
 
 /*
- * this function enables proxies when there are enough free sessions,
- * or stops them when the table is full. It is designed to be called from the
- * select_loop(). It adjusts the date of next expiration event during stop
- * time if appropriate.
+ * This is the proxy management task. It enables proxies when there are enough
+ * free sessions, or stops them when the table is full. It is designed to be
+ * called as a task which is woken up upon stopping or when rate limiting must
+ * be enforced.
  */
-void maintain_proxies(int *next)
+struct task *manage_proxy(struct task *t)
 {
-	struct proxy *p;
+	struct proxy *p = t->context;
+	int next = TICK_ETERNITY;
 	unsigned int wait;
 
-	p = proxy;
-
-	/* if there are enough free sessions, we'll activate proxies */
-	if (actconn < global.maxconn) {
 	/* We should periodically try to enable listeners waiting for a
 	 * global resource here.
 	 */
 
-		for (; p; p = p->next) {
 	/* first, let's check if we need to stop the proxy */
 	if (unlikely(stopping && p->state != PR_STSTOPPED)) {
 		int t;
@@ -500,19 +496,19 @@ void maintain_proxies(int *next)
 			pool_gc2();
 		}
 		else {
-			*next = tick_first(*next, p->stop_time);
+			next = tick_first(next, p->stop_time);
 		}
 	}
 
 	/* the rest below is just for frontends */
 	if (!(p->cap & PR_CAP_FE))
-		continue;
+		goto out;
 
 	/* check the various reasons we may find to block the frontend */
 	if (unlikely(p->feconn >= p->maxconn)) {
 		if (p->state == PR_STREADY)
 			p->state = PR_STFULL;
-		continue;
+		goto out;
 	}
 
 	/* OK we have no reason to block, so let's unblock if we were blocking */
@@ -526,15 +522,17 @@ void maintain_proxies(int *next)
 		 * means in 1 ms before estimated expiration date, because the
 		 * timer will have settled down.
 		 */
-		*next = tick_first(*next, tick_add(now_ms, wait));
-		continue;
+		next = tick_first(next, tick_add(now_ms, wait));
+		goto out;
 	}
 
 	/* The proxy is not limited so we can re-enable any waiting listener */
 	if (!LIST_ISEMPTY(&p->listener_queue))
 		dequeue_all_listeners(&p->listener_queue);
-	}
-}
+ out:
+	t->expire = next;
+	task_queue(t);
+	return t;
 }
 
 
@@ -560,6 +558,8 @@ void soft_stop(void)
 		if (p->table.size && p->table.sync_task)
 			task_wakeup(p->table.sync_task, TASK_WOKEN_MSG);
 
+		/* wake every proxy task up so that they can handle the stopping */
+		task_wakeup(p->task, TASK_WOKEN_MSG);
 		p = p->next;
 	}
 
src/stream_sock.c
@@ -1204,6 +1204,7 @@ int stream_sock_accept(int fd)
 		if (unlikely(!max)) {
 			/* frontend accept rate limit was reached */
 			limit_listener(l, &p->listener_queue);
+			task_schedule(p->task, tick_add(now_ms, next_event_delay(&p->fe_sess_per_sec, p->fe_sps_lim, 0)));
 			return 0;
 		}
 