MEDIUM: servers: Replace idle-timeout with pool-purge-delay.

Instead of the old "idle-timeout" mechanism, add a new option,
"pool-purge-delay", that sets the delay before purging idle connections.
Each time the delay elapses, we destroy half of the idle connections.
Authored by Olivier Houchard on 2018-12-14 18:15:36 +01:00; committed by Willy Tarreau
parent 006e3101f9
commit b7b3faa79c
5 changed files with 24 additions and 20 deletions
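
For illustration only (not part of the patch), a backend using the new keyword
could look like the sketch below; the backend name, server name and address are
hypothetical:

    backend app
        http-reuse safe
        # keep at most 20 idle connections to this server and, every 5 seconds,
        # close half of the idle connections that remain unused
        server srv1 192.168.0.10:8080 pool-max-conn 20 pool-purge-delay 5s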


@@ -11679,12 +11679,6 @@ id <value>
   the proxy. An unused ID will automatically be assigned if unset. The first
   assigned value will be 1. This ID is currently only returned in statistics.
 
-idle-timeout <delay>
-  Set the time to keep a connection alive before destroying it. By default
-  connections are destroyed as soon as they are unused, if idle-timeout is
-  non-zero, then connection are kept alive for up to <delay> before being
-  destroyed, and can be reused if no other connection is available.
-
 init-addr {last | libc | none | <ip>},[...]*
   Indicate in what order the server's address should be resolved upon startup
   if it uses an FQDN. Attempts are made to resolve the address by applying in
@@ -11980,6 +11974,11 @@ pool-max-conn <max>
   usable by future clients. This only applies to connections that can be shared
   according to the same principles as those applying to "http-reuse".
 
+pool-purge-delay <delay>
+  Sets the delay to start purging idle connections. Each <delay> interval, half
+  of the idle connections are closed. 0 means it's never purged. The default is
+  1s.
+
 port <port>
   Using the "port" parameter, it becomes possible to use a different port to
   send health-checks. On some servers, it may be desirable to dedicate a port


@@ -222,7 +222,7 @@ struct server {
     struct list *idle_conns;         /* sharable idle connections attached or not to a stream interface */
     struct list *safe_conns;         /* safe idle connections attached to stream interfaces, shared */
     struct list *idle_orphan_conns;  /* Orphan connections idling */
-    unsigned int idle_timeout;       /* Time to keep an idling orphan connection alive */
+    unsigned int pool_purge_delay;   /* Delay before starting to purge the idle conns pool */
     unsigned int max_idle_conns;     /* Max number of connection allowed in the orphan connections list */
     unsigned int curr_idle_conns;    /* Current number of orphan idling connections */
     struct task **idle_task;         /* task responsible for cleaning idle orphan connections */


@@ -472,6 +472,7 @@ void init_default_instance()
     defproxy.defsrv.minconn = 0;
     defproxy.defsrv.maxconn = 0;
     defproxy.defsrv.max_idle_conns = -1;
+    defproxy.defsrv.pool_purge_delay = 1000;
     defproxy.defsrv.slowstart = 0;
     defproxy.defsrv.onerror = DEF_HANA_ONERR;
     defproxy.defsrv.consecutive_errors_limit = DEF_HANA_ERRLIMIT;


@@ -358,7 +358,7 @@ static int srv_parse_enabled(char **args, int *cur_arg,
     return 0;
 }
 
-static int srv_parse_idle_timeout(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
+static int srv_parse_pool_purge_delay(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
 {
     const char *res;
     char *arg;
@@ -375,7 +375,7 @@ static int srv_parse_idle_timeout(char **args, int *cur_arg, struct proxy *curpr
                   *res, args[*cur_arg]);
         return ERR_ALERT | ERR_FATAL;
     }
-    newsrv->idle_timeout = time;
+    newsrv->pool_purge_delay = time;
     return 0;
 }
@@ -1234,7 +1234,6 @@ static struct srv_kw_list srv_kws = { "ALL", { }, {
     { "disabled", srv_parse_disabled, 0, 1 }, /* Start the server in 'disabled' state */
     { "enabled", srv_parse_enabled, 0, 1 }, /* Start the server in 'enabled' state */
     { "id", srv_parse_id, 1, 0 }, /* set id# of server */
-    { "idle-timeout", srv_parse_idle_timeout, 1, 1 }, /* Set the time before we destroy orphan idle connections, defaults to 0 */
     { "namespace", srv_parse_namespace, 1, 1 }, /* Namespace the server socket belongs to (if supported) */
     { "no-agent-check", srv_parse_no_agent_check, 0, 1 }, /* Do not enable any auxiliary agent check */
     { "no-backup", srv_parse_no_backup, 0, 1 }, /* Flag as non-backup server */
@@ -1245,6 +1244,7 @@ static struct srv_kw_list srv_kws = { "ALL", { }, {
     { "non-stick", srv_parse_non_stick, 0, 1 }, /* Disable stick-table persistence */
     { "observe", srv_parse_observe, 1, 1 }, /* Enables health adjusting based on observing communication with the server */
     { "pool-max-conn", srv_parse_pool_max_conn, 1, 1 }, /* Set the max number of orphan idle connections, 0 means unlimited */
+    { "pool-purge-delay", srv_parse_pool_purge_delay, 1, 1 }, /* Set the time before we destroy orphan idle connections, defaults to 1s */
     { "proto", srv_parse_proto, 1, 1 }, /* Set the proto to use for all outgoing connections */
     { "proxy-v2-options", srv_parse_proxy_v2_options, 1, 1 }, /* options for send-proxy-v2 */
     { "redir", srv_parse_redir, 1, 1 }, /* Enable redirection mode */
@@ -1679,7 +1679,7 @@ static void srv_settings_cpy(struct server *srv, struct server *src, int srv_tmp
     srv->tcp_ut = src->tcp_ut;
 #endif
     srv->mux_proto = src->mux_proto;
-    srv->idle_timeout = src->idle_timeout;
+    srv->pool_purge_delay = src->pool_purge_delay;
     srv->max_idle_conns = src->max_idle_conns;
 
     if (srv_tmpl)
@@ -1724,7 +1724,7 @@ struct server *new_server(struct proxy *proxy)
     srv->agent.server = srv;
     srv->xprt = srv->check.xprt = srv->agent.xprt = xprt_get(XPRT_RAW);
-    srv->idle_timeout = 1000;
+    srv->pool_purge_delay = 1000;
     srv->max_idle_conns = -1;
 
     return srv;
@@ -5317,17 +5317,21 @@ static struct task *cleanup_idle_connections(struct task *task, void *context, u
 {
     struct server *srv = context;
     struct connection *conn, *conn_back;
-    unsigned int next_wakeup = 0;
+    unsigned int to_destroy = srv->curr_idle_conns / 2 + (srv->curr_idle_conns & 1);
+    unsigned int i = 0;
 
     list_for_each_entry_safe(conn, conn_back, &srv->idle_orphan_conns[tid], list) {
-        if (conn->idle_time + srv->idle_timeout > now_ms) {
-            next_wakeup = conn->idle_time + srv->idle_timeout;
+        if (i == to_destroy)
             break;
-        }
         conn->mux->destroy(conn);
+        i++;
     }
-    if (next_wakeup > 0)
-        task_schedule(task, next_wakeup);
+    if (!LIST_ISEMPTY(&srv->idle_orphan_conns[tid]))
+        task_schedule(task, tick_add(now_ms, srv->pool_purge_delay));
+    else
+        task->expire = TICK_ETERNITY;
     return task;
 }
 
 /*
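
As a standalone sketch (not part of the patch) of the arithmetic used in
cleanup_idle_connections() above: each wakeup destroys half of the current idle
connections, rounded up, so the pool drains over successive purge delays rather
than all at once. The starting count of 8 below is purely hypothetical:

    #include <stdio.h>

    int main(void)
    {
        unsigned int curr = 8;   /* hypothetical number of idle connections */
        unsigned int pass = 0;

        while (curr) {
            /* same rounding-up halving as the patch: curr/2 + (curr & 1) */
            unsigned int to_destroy = curr / 2 + (curr & 1);

            curr -= to_destroy;
            printf("purge pass %u: closed %u, %u left\n", ++pass, to_destroy, curr);
        }
        /* the pool drains 8 -> 4 -> 2 -> 1 -> 0 over four purge intervals */
        return 0;
    }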


@@ -92,7 +92,7 @@ void session_free(struct session *sess)
             LIST_INIT(&conn->session_list);
             srv = objt_server(conn->target);
             conn->owner = NULL;
-            if (srv && srv->idle_timeout > 0 &&
+            if (srv && srv->pool_purge_delay > 0 &&
                 (srv->max_idle_conns == -1 ||
                  srv->max_idle_conns > srv->curr_idle_conns) &&
                 !(conn->flags & CO_FL_PRIVATE) &&
@@ -108,7 +108,7 @@ void session_free(struct session *sess)
                 if (!(task_in_wq(srv->idle_task[tid])) &&
                     !(task_in_rq(srv->idle_task[tid])))
                     task_schedule(srv->idle_task[tid],
-                                  tick_add(now_ms, srv->idle_timeout));
+                                  tick_add(now_ms, srv->pool_purge_delay));
             } else
                 conn->mux->destroy(conn);
         } else {