From ccbcc37a0120770b2049feac6026c682653207f8 Mon Sep 17 00:00:00 2001
From: Willy Tarreau
Date: Thu, 27 Dec 2012 12:37:57 +0100
Subject: [PATCH] MEDIUM: http: add support for "http-request tarpit" rule

The "reqtarpit" rule is not very handy to use. Now that we have more
flexibility with "http-request", let's finally make the tarpit rules
usable there.

There are still semantic differences between apply_filters_to_request()
and http_req_get_intercept_rule() because the former updates the counters
while the latter does not. So we currently have nearly identical code
paths for similar conditions, but this should be cleaned up later.
---
 doc/configuration.txt      | 14 +++++++++++++-
 include/types/proto_http.h |  1 +
 src/proto_http.c           | 35 ++++++++++++++++++++++++++++++++++-
 3 files changed, 48 insertions(+), 2 deletions(-)

diff --git a/doc/configuration.txt b/doc/configuration.txt
index a4973ecb7..5c15d6a88 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -2606,7 +2606,7 @@ http-check send-state
 
 See also : "option httpchk", "http-check disable-on-404"
 
-http-request { allow | deny | auth [realm <realm>] | redirect <rule> |
+http-request { allow | deny | tarpit | auth [realm <realm>] | redirect <rule> |
               add-header <name> <fmt> | set-header <name> <fmt> }
              [ { if | unless } <condition> ]
   Access control for Layer 7 requests
@@ -2628,6 +2628,18 @@ http-request { allow | deny | auth [realm <realm>] | redirect <rule> |
       the request and emits an HTTP 403 error. No further "http-request" rules
       are evaluated.
 
+    - "tarpit" : this stops the evaluation of the rules and immediately blocks
+      the request without responding for a delay specified by "timeout tarpit"
+      or "timeout connect" if the former is not set. After that delay, if the
+      client is still connected, an HTTP error 500 is returned so that the
+      client does not suspect it has been tarpitted. Logs will report the flags
+      "PT". The goal of the tarpit rule is to slow down robots during an attack
+      when they're limited on the number of concurrent requests. It can be very
+      efficient against very dumb robots, and will significantly reduce the
+      load on firewalls compared to a "deny" rule. But when facing "correctly"
+      developed robots, it can make things worse by forcing haproxy and the
+      front firewall to support an insane number of concurrent connections.
+
     - "auth" : this stops the evaluation of the rules and immediately responds
       with an HTTP 401 or 407 error code to invite the user to present a valid
      user name and password. No further "http-request" rules are evaluated. An
diff --git a/include/types/proto_http.h b/include/types/proto_http.h
index ef81a12a6..12e446f73 100644
--- a/include/types/proto_http.h
+++ b/include/types/proto_http.h
@@ -240,6 +240,7 @@ enum {
 	HTTP_REQ_ACT_UNKNOWN = 0,
 	HTTP_REQ_ACT_ALLOW,
 	HTTP_REQ_ACT_DENY,
+	HTTP_REQ_ACT_TARPIT,
 	HTTP_REQ_ACT_AUTH,
 	HTTP_REQ_ACT_ADD_HDR,
 	HTTP_REQ_ACT_SET_HDR,
diff --git a/src/proto_http.c b/src/proto_http.c
index 7fc2dce2b..aaa94766e 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -3101,6 +3101,10 @@ http_req_get_intercept_rule(struct proxy *px, struct list *rules, struct session
 			txn->flags |= TX_CLDENY;
 			return rule;
 
+		case HTTP_REQ_ACT_TARPIT:
+			txn->flags |= TX_CLTARPIT;
+			return rule;
+
 		case HTTP_REQ_ACT_AUTH:
 			return rule;
 
@@ -3419,7 +3423,8 @@ int http_process_req_common(struct session *s, struct channel *req, int an_bit,
 		do_stats = 0;
 
 	/* return a 403 if either rule has blocked */
-	if (txn->flags & TX_CLDENY) {
+	if (txn->flags & (TX_CLDENY|TX_CLTARPIT)) {
+		if (txn->flags & TX_CLDENY) {
 			txn->status = 403;
 			s->logs.tv_request = now;
 			stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_403));
@@ -3430,6 +3435,31 @@ int http_process_req_common(struct session *s, struct channel *req, int an_bit,
 			if (s->listener->counters)
 				s->listener->counters->denied_req++;
 			goto return_prx_cond;
+		}
+		/* When a connection is tarpitted, we use the tarpit timeout,
+		 * which may be the same as the connect timeout if unspecified.
+		 * If unset, then set it to zero because we really want it to
+		 * eventually expire. We build the tarpit as an analyser.
+		 */
+		if (txn->flags & TX_CLTARPIT) {
+			channel_erase(s->req);
+			/* wipe the request out so that we can drop the connection early
+			 * if the client closes first.
+			 */
+			channel_dont_connect(req);
+			req->analysers = 0; /* remove switching rules etc... */
+			req->analysers |= AN_REQ_HTTP_TARPIT;
+			req->analyse_exp = tick_add_ifset(now_ms, s->be->timeout.tarpit);
+			if (!req->analyse_exp)
+				req->analyse_exp = tick_add(now_ms, 0);
+			session_inc_http_err_ctr(s);
+			s->fe->fe_counters.denied_req++;
+			if (s->fe != s->be)
+				s->be->be_counters.denied_req++;
+			if (s->listener->counters)
+				s->listener->counters->denied_req++;
+			return 1;
+		}
 	}
 
 	/* try headers filters */
@@ -8059,6 +8089,9 @@ struct http_req_rule *parse_http_req_cond(const char **args, const char *file, i
 	} else if (!strcmp(args[0], "deny")) {
 		rule->action = HTTP_REQ_ACT_DENY;
 		cur_arg = 1;
+	} else if (!strcmp(args[0], "tarpit")) {
+		rule->action = HTTP_REQ_ACT_TARPIT;
+		cur_arg = 1;
 	} else if (!strcmp(args[0], "auth")) {
 		rule->action = HTTP_REQ_ACT_AUTH;
 		cur_arg = 1;
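
Usage note: the sketch below shows how the new rule could be combined with
"timeout tarpit" once this patch is applied. It is only an illustration; the
frontend/backend names, the ACL and the addresses are invented for the
example, and any other condition (request rate, path, etc.) could be used
instead of the User-Agent match.

    frontend www
        bind :80
        mode http
        # delay applied to tarpitted requests before the 500 is returned
        timeout tarpit 20s
        # hypothetical ACL matching an abusive robot by its User-Agent
        acl bad_robot hdr_sub(User-Agent) -i evilbot
        # hold matching requests in the tarpit instead of denying them outright
        http-request tarpit if bad_robot
        default_backend app

    backend app
        server srv1 192.0.2.10:8080

Compared with "http-request deny", each tarpitted connection is kept open for
the tarpit delay, which slows concurrency-limited robots down at the cost of
more concurrent connections held by haproxy and the front firewall.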