[OPTIM] ev_sepoll: detect newly created FDs and check them once

When an accept() creates a new FD, it is already marked as set for
read events (speculative). But the task is then woken up without
first checking whether the socket can actually be read.

Speculative I/O gives us a chance to either read from the FD right
away if data are pending on it, or to switch it immediately to poll
mode if nothing is pending; a standalone sketch of the idea follows.
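
As an illustration only (not part of this patch), here is a
self-contained sketch of that idea; handle_new_fd() and
register_for_poll() are hypothetical names, not functions from
ev_sepoll.c:

#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

/* hypothetical stand-in for handing the FD back to the poller */
void register_for_poll(int fd)
{
	/* a real poller would use epoll_ctl(..., EPOLL_CTL_ADD, fd, ...) here */
	(void)fd;
}

/* attempt one speculative read on a freshly accepted FD */
void handle_new_fd(int fd)
{
	char buf[4096];
	ssize_t ret;

	/* the accepted socket must be non-blocking for this to be safe */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);

	ret = recv(fd, buf, sizeof(buf), 0);
	if (ret > 0) {
		/* data were already pending: the request can be processed
		 * right now, without an extra trip through epoll_wait()
		 */
	} else if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
		/* nothing pending yet: fall back to poll mode */
		register_for_poll(fd);
	} else {
		/* 0 means the peer already closed; other errors: give up */
		close(fd);
	}
}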

Simply doing this reduces the number of calls to process_session from
6 to 5 per session and to process_request from 2 to 1, with roughly
10% fewer calls to epoll_ctl, fd_clr, fd_set and
stream_sock_data_update, about 20% fewer eb32_insert/eb_delete calls,
etc. The overall performance gain appears to be around 3%.
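
The resulting control flow in _do_poll() can be summarized by the
sketch below. This is a simplified, hypothetical rendering for
readability, not the actual code: fd_created, looping and the
re_poll_once label exist in the patch, while run_speculative_pass()
and wait_for_events() are placeholders for the surrounding logic.

/* simplified view of _do_poll() after this patch (illustration only) */
int fd_created;                 /* set by __fd_set() when a new FD shows up */

void run_speculative_pass(void)
{
	/* placeholder: walk the speculative list and attempt direct I/O */
}

void wait_for_events(void)
{
	/* placeholder: epoll_wait() plus the read/write callbacks,
	 * which may accept() new connections and set fd_created
	 */
}

void do_poll_sketch(void)
{
	int looping = 0;

 re_poll_once:
	run_speculative_pass();
	if (looping)
		return;         /* the extra pass runs at most once */

	fd_created = 0;         /* catch FDs created by the callbacks below */
	wait_for_events();

	if (fd_created) {
		/* accept() added new speculative FDs: give them one
		 * immediate chance to be read before sleeping again
		 */
		fd_created = 0;
		looping = 1;
		goto re_poll_once;
	}
}
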
Author: Willy Tarreau
Date:   2008-08-29 13:57:30 +02:00
Commit: cb651251f9
Parent: 21e1be8152
2 changed files with 29 additions and 0 deletions

@@ -157,6 +157,7 @@ struct fd_status {
 static int nbspec = 0; // current size of the spec list
 static int absmaxevents = 0; // absolute maximum amounts of polled events
+static int fd_created = 0; // fd creation detector, reset upon poll() entry.
 static struct fd_status *fd_list = NULL; // list of FDs
 static unsigned int *spec_list = NULL; // speculative I/O list
@@ -242,6 +243,7 @@ REGPRM2 static int __fd_set(const int fd, int dir)
 	if (i == FD_EV_IDLE) {
 		// switch to SPEC state and allocate a SPEC entry.
+		fd_created = 1;
 		alloc_spec_entry(fd);
 	switch_state:
 		fd_list[fd].e ^= (unsigned int)(FD_EV_IN_SL << dir);
@@ -315,8 +317,10 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
 	int count;
 	int spec_idx;
 	int wait_time;
+	int looping = 0;
+ re_poll_once:
 	/* Here we have two options :
 	 * - either walk the list forwards and hope to match more events
 	 * - or walk it backwards to minimize the number of changes and
@@ -438,6 +442,12 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
 	 */
 	spec_processed += status;
+	if (looping) {
+		last_skipped++;
+		return;
+	}
 	if (status >= MIN_RETURN_EVENTS && spec_processed < absmaxevents) {
 		/* We have processed at least MIN_RETURN_EVENTS, it's worth
 		 * returning now without checking epoll_wait().
@@ -477,6 +487,8 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
 	fd = MIN(absmaxevents, spec_processed);
 	fd = MAX(global.tune.maxpollevents, fd);
 	fd = MIN(maxfd, fd);
+	/* we want to detect if an accept() will create new speculative FDs here */
+	fd_created = 0;
 	spec_processed = 0;
 	status = epoll_wait(epoll_fd, epoll_events, fd, wait_time);
 	tv_update_date(wait_time, status);
@@ -514,6 +526,19 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
 			fdtab[fd].cb[DIR_WR].f(fd);
 		}
 	}
+	if (fd_created) {
+		/* we have created some fds, certainly in return of an accept(),
+		 * and they're marked as speculative. If we can manage to perform
+		 * a read(), we're almost sure to collect all the request at once
+		 * and avoid several expensive wakeups. So let's try now. Anyway,
+		 * if we fail, the tasks are still woken up, and the FD gets marked
+		 * for poll mode.
+		 */
+		fd_created = 0;
+		looping = 1;
+		goto re_poll_once;
+	}
 }
 /*

@@ -827,6 +827,10 @@ void process_session(struct task *t, int *next)
 	if (s->req->analysers)
 		t->expire = tick_first(t->expire, s->req->analyse_exp);
+#ifdef DEBUG_FULL
+	fprintf(stderr, "[%u] queuing with exp=%u req->rex=%u req->wex=%u req->ana_exp=%u rep->rex=%u rep->wex=%u\n",
+		now_ms, t->expire, s->req->rex, s->req->wex, s->req->analyse_exp, s->rep->rex, s->rep->wex);
+#endif
 	/* restore t to its place in the task list */
 	task_queue(t);