mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2025-09-22 14:21:25 +02:00
MEDIUM: servers: Replace idle-timeout with pool-purge-delay.
Instead of the old "idle-timeout" mechanism, add a new option, "pool-purge-delay", that sets the delay before purging idle connections. Each time the delay expires, half of the idle connections are destroyed.
This commit is contained in:
parent
006e3101f9
commit
b7b3faa79c
@ -11679,12 +11679,6 @@ id <value>
|
||||
the proxy. An unused ID will automatically be assigned if unset. The first
|
||||
assigned value will be 1. This ID is currently only returned in statistics.
|
||||
|
||||
idle-timeout <delay>
|
||||
Set the time to keep a connection alive before destroying it. By default
|
||||
connections are destroyed as soon as they are unused, if idle-timeout is
|
||||
non-zero, then connections are kept alive for up to <delay> before being
|
||||
destroyed, and can be reused if no other connection is available.
|
||||
|
||||
init-addr {last | libc | none | <ip>},[...]*
|
||||
Indicate in what order the server's address should be resolved upon startup
|
||||
if it uses an FQDN. Attempts are made to resolve the address by applying in
|
||||
@ -11980,6 +11974,11 @@ pool-max-conn <max>
|
||||
usable by future clients. This only applies to connections that can be shared
|
||||
according to the same principles as those applying to "http-reuse".
|
||||
|
||||
pool-purge-delay <delay>
|
||||
Sets the delay to start purging idle connections. Each <delay> interval, half
|
||||
of the idle connections are closed. 0 means they are never purged. The default is
|
||||
1s.
|
||||
|
||||
port <port>
|
||||
Using the "port" parameter, it becomes possible to use a different port to
|
||||
send health-checks. On some servers, it may be desirable to dedicate a port
|
||||
|
@ -222,7 +222,7 @@ struct server {
|
||||
struct list *idle_conns; /* sharable idle connections attached or not to a stream interface */
|
||||
struct list *safe_conns; /* safe idle connections attached to stream interfaces, shared */
|
||||
struct list *idle_orphan_conns; /* Orphan connections idling */
|
||||
unsigned int idle_timeout; /* Time to keep an idling orphan connection alive */
|
||||
unsigned int pool_purge_delay; /* Delay before starting to purge the idle conns pool */
|
||||
unsigned int max_idle_conns; /* Max number of connection allowed in the orphan connections list */
|
||||
unsigned int curr_idle_conns; /* Current number of orphan idling connections */
|
||||
struct task **idle_task; /* task responsible for cleaning idle orphan connections */
|
||||
|
@ -472,6 +472,7 @@ void init_default_instance()
|
||||
defproxy.defsrv.minconn = 0;
|
||||
defproxy.defsrv.maxconn = 0;
|
||||
defproxy.defsrv.max_idle_conns = -1;
|
||||
defproxy.defsrv.pool_purge_delay = 1000;
|
||||
defproxy.defsrv.slowstart = 0;
|
||||
defproxy.defsrv.onerror = DEF_HANA_ONERR;
|
||||
defproxy.defsrv.consecutive_errors_limit = DEF_HANA_ERRLIMIT;
|
||||
|
26
src/server.c
26
src/server.c
@ -358,7 +358,7 @@ static int srv_parse_enabled(char **args, int *cur_arg,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int srv_parse_idle_timeout(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
|
||||
static int srv_parse_pool_purge_delay(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
|
||||
{
|
||||
const char *res;
|
||||
char *arg;
|
||||
@ -375,7 +375,7 @@ static int srv_parse_idle_timeout(char **args, int *cur_arg, struct proxy *curpr
|
||||
*res, args[*cur_arg]);
|
||||
return ERR_ALERT | ERR_FATAL;
|
||||
}
|
||||
newsrv->idle_timeout = time;
|
||||
newsrv->pool_purge_delay = time;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1234,7 +1234,6 @@ static struct srv_kw_list srv_kws = { "ALL", { }, {
|
||||
{ "disabled", srv_parse_disabled, 0, 1 }, /* Start the server in 'disabled' state */
|
||||
{ "enabled", srv_parse_enabled, 0, 1 }, /* Start the server in 'enabled' state */
|
||||
{ "id", srv_parse_id, 1, 0 }, /* set id# of server */
|
||||
{ "idle-timeout", srv_parse_idle_timeout, 1, 1 }, /* Set the time before we destroy orphan idle connections, defaults to 0 */
|
||||
{ "namespace", srv_parse_namespace, 1, 1 }, /* Namespace the server socket belongs to (if supported) */
|
||||
{ "no-agent-check", srv_parse_no_agent_check, 0, 1 }, /* Do not enable any auxiliary agent check */
|
||||
{ "no-backup", srv_parse_no_backup, 0, 1 }, /* Flag as non-backup server */
|
||||
@ -1245,6 +1244,7 @@ static struct srv_kw_list srv_kws = { "ALL", { }, {
|
||||
{ "non-stick", srv_parse_non_stick, 0, 1 }, /* Disable stick-table persistence */
|
||||
{ "observe", srv_parse_observe, 1, 1 }, /* Enables health adjusting based on observing communication with the server */
|
||||
{ "pool-max-conn", srv_parse_pool_max_conn, 1, 1 }, /* Set the max number of orphan idle connections, 0 means unlimited */
|
||||
{ "pool-purge-delay", srv_parse_pool_purge_delay, 1, 1 }, /* Set the time before we destroy orphan idle connections, defaults to 1s */
|
||||
{ "proto", srv_parse_proto, 1, 1 }, /* Set the proto to use for all outgoing connections */
|
||||
{ "proxy-v2-options", srv_parse_proxy_v2_options, 1, 1 }, /* options for send-proxy-v2 */
|
||||
{ "redir", srv_parse_redir, 1, 1 }, /* Enable redirection mode */
|
||||
@ -1679,7 +1679,7 @@ static void srv_settings_cpy(struct server *srv, struct server *src, int srv_tmp
|
||||
srv->tcp_ut = src->tcp_ut;
|
||||
#endif
|
||||
srv->mux_proto = src->mux_proto;
|
||||
srv->idle_timeout = src->idle_timeout;
|
||||
srv->pool_purge_delay = src->pool_purge_delay;
|
||||
srv->max_idle_conns = src->max_idle_conns;
|
||||
|
||||
if (srv_tmpl)
|
||||
@ -1724,7 +1724,7 @@ struct server *new_server(struct proxy *proxy)
|
||||
srv->agent.server = srv;
|
||||
srv->xprt = srv->check.xprt = srv->agent.xprt = xprt_get(XPRT_RAW);
|
||||
|
||||
srv->idle_timeout = 1000;
|
||||
srv->pool_purge_delay = 1000;
|
||||
srv->max_idle_conns = -1;
|
||||
|
||||
return srv;
|
||||
@ -5317,17 +5317,21 @@ static struct task *cleanup_idle_connections(struct task *task, void *context, u
|
||||
{
|
||||
struct server *srv = context;
|
||||
struct connection *conn, *conn_back;
|
||||
unsigned int next_wakeup = 0;
|
||||
unsigned int to_destroy = srv->curr_idle_conns / 2 + (srv->curr_idle_conns & 1);
|
||||
unsigned int i = 0;
|
||||
|
||||
|
||||
|
||||
list_for_each_entry_safe(conn, conn_back, &srv->idle_orphan_conns[tid], list) {
|
||||
if (conn->idle_time + srv->idle_timeout > now_ms) {
|
||||
next_wakeup = conn->idle_time + srv->idle_timeout;
|
||||
if (i == to_destroy)
|
||||
break;
|
||||
}
|
||||
conn->mux->destroy(conn);
|
||||
i++;
|
||||
}
|
||||
if (next_wakeup > 0)
|
||||
task_schedule(task, next_wakeup);
|
||||
if (!LIST_ISEMPTY(&srv->idle_orphan_conns[tid]))
|
||||
task_schedule(task, tick_add(now_ms, srv->pool_purge_delay));
|
||||
else
|
||||
task->expire = TICK_ETERNITY;
|
||||
return task;
|
||||
}
|
||||
/*
|
||||
|
@ -92,7 +92,7 @@ void session_free(struct session *sess)
|
||||
LIST_INIT(&conn->session_list);
|
||||
srv = objt_server(conn->target);
|
||||
conn->owner = NULL;
|
||||
if (srv && srv->idle_timeout > 0 &&
|
||||
if (srv && srv->pool_purge_delay > 0 &&
|
||||
(srv->max_idle_conns == -1 ||
|
||||
srv->max_idle_conns > srv->curr_idle_conns) &&
|
||||
!(conn->flags & CO_FL_PRIVATE) &&
|
||||
@ -108,7 +108,7 @@ void session_free(struct session *sess)
|
||||
if (!(task_in_wq(srv->idle_task[tid])) &&
|
||||
!(task_in_rq(srv->idle_task[tid])))
|
||||
task_schedule(srv->idle_task[tid],
|
||||
tick_add(now_ms, srv->idle_timeout));
|
||||
tick_add(now_ms, srv->pool_purge_delay));
|
||||
} else
|
||||
conn->mux->destroy(conn);
|
||||
} else {
|
||||
|
Loading…
x
Reference in New Issue
Block a user