MEDIUM: connection: make the subscribe() call able to wakeup if ready
There's currently an internal API limitation at the connection layer
regarding conn_subscribe(): we must not subscribe if we haven't yet met
EAGAIN or a similar condition, so we sometimes force ourselves to read
just to meet this condition and be allowed to call subscribe(). But
reading cannot always be done (e.g. at the end of a loop where we
cannot afford to retrieve new data and start again), so we instead
perform a tasklet_wakeup() of the requester's io_cb. This is what is
done in mux_h1 for example. The problem is that this forces a new
receive when we're not necessarily certain we need one. And if the FD
is not ready and was already being polled, it's a useless wakeup.
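For illustration, a minimal sketch of that old caller-side pattern
(this is not the actual mux_h1 code; the my_ctx structure, its fields
and the done_reading() helper are hypothetical):

	/* Old pattern (sketch): conn_subscribe() may only be called once
	 * EAGAIN was met, so a caller that cannot afford another read has
	 * to wake its own io_cb instead, possibly triggering a useless
	 * recv() on an FD that is not ready.
	 */
	static void done_reading(struct my_ctx *ctx)
	{
		if (ctx->saw_eagain) {
			/* FD known not ready: subscribing is allowed */
			conn_subscribe(ctx->conn, ctx->xprt_ctx,
			               SUB_RETRY_RECV, &ctx->wait_event);
		}
		else {
			/* not allowed to subscribe yet: reschedule our own
			 * io_cb so it attempts another receive to meet EAGAIN
			 */
			tasklet_wakeup(ctx->wait_event.tasklet);
		}
	}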
This patch improves the connection-level subscribe() so that it only
manipulates the polling when the FD is marked not-ready, and instead
schedules the tasklet of the passed wait_event for a wakeup when the
FD is ready. This guarantees that the tasklet will be woken up in any
case once the FD is ready, either immediately or after polling.
By doing so, a test in pure close mode shows that we cut the number of
epoll_ctl() calls in half and almost eliminate failed recvfrom() calls:

  $ ./h1load -n 100000 -r 1 -t 4 -c 1000 -T 20 -F 127.0.0.1:8001/?s=1k/t=20

  before:
    399464 epoll_ctl 1
    200007 recvfrom 1
    200000 sendto 1
    100000 recvfrom -1
      7508 epoll_wait 1

  after:
    205739 epoll_ctl 1
    200000 sendto 1
    200000 recvfrom 1
      6084 epoll_wait 1
      2651 recvfrom -1

On keep-alive, however, there is no change.
commit 7e59c0a5e1
parent 8dd348c90c
@@ -334,25 +334,45 @@ int conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
 	return 0;
 }
 
-/* Called from the upper layer, to unsubscribe <es> from events <event_type>
- * (undo fcgi_subscribe). The <es> struct is not allowed to differ from the one
- * passed to the subscribe() call. It always returns zero.
+/* Called from the upper layer, to subscribe <es> to events <event_type>.
+ * The <es> struct is not allowed to differ from the one passed during a
+ * previous call to subscribe(). If the FD is ready, the wait_event is
+ * immediately woken up and the subscription is cancelled. It always
+ * returns zero.
  */
 int conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
 {
 	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
 	BUG_ON(conn->subs && conn->subs->events & event_type);
 	BUG_ON(conn->subs && conn->subs != es);
 
 	if (conn->subs && (conn->subs->events & event_type) == event_type)
 		return 0;
 
 	conn->subs = es;
 	es->events |= event_type;
 
 	if (conn_ctrl_ready(conn)) {
-		if (event_type & SUB_RETRY_RECV)
-			fd_want_recv(conn->handle.fd);
+		if (event_type & SUB_RETRY_RECV) {
+			if (fd_recv_ready(conn->handle.fd)) {
+				tasklet_wakeup(es->tasklet);
+				es->events &= ~SUB_RETRY_RECV;
+				if (!es->events)
+					conn->subs = NULL;
+			}
+			else
+				fd_want_recv(conn->handle.fd);
+		}
 
-		if (event_type & SUB_RETRY_SEND)
-			fd_want_send(conn->handle.fd);
+		if (event_type & SUB_RETRY_SEND) {
+			if (fd_send_ready(conn->handle.fd)) {
+				tasklet_wakeup(es->tasklet);
+				es->events &= ~SUB_RETRY_SEND;
+				if (!es->events)
+					conn->subs = NULL;
+			}
+			else
+				fd_want_send(conn->handle.fd);
+		}
 	}
 	return 0;
 }
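With the change above, callers can subscribe unconditionally. A minimal
usage sketch under that assumption (the surrounding ctx names are
hypothetical):

	/* New pattern (sketch): always safe to subscribe. If the FD is
	 * ready, conn_subscribe() wakes ctx->wait_event.tasklet itself and
	 * cancels the subscription; otherwise it arms polling through
	 * fd_want_recv()/fd_want_send().
	 */
	conn_subscribe(ctx->conn, ctx->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
	/* no forced read nor manual tasklet_wakeup() of the io_cb needed */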