REORG: polling: rename "spec_e" to "state" and "spec_p" to "cache"

We're completely changing the way FDs will be polled. There will be no
more speculative I/O since we'll know the exact FD state, so these will
only be cached events.

First, let's fix a few field names which have become confusing. "spec_e" was
used to store a speculative I/O event state. Now we'll store the whole
R/W states for the FD there. "spec_p" was used to store a speculative
I/O cache position. Now let's clearly call it "cache".
This commit is contained in:
Willy Tarreau 2014-01-20 11:09:39 +01:00
parent 8e84c637d1
commit 15a4dec87e
7 changed files with 13 additions and 13 deletions

View File

@ -97,11 +97,11 @@ static inline void updt_fd(const int fd)
/* allocate an entry for a speculative event. This can be done at any time. */
static inline void alloc_spec_entry(const int fd)
{
if (fdtab[fd].spec_p)
if (fdtab[fd].cache)
/* FD already in speculative I/O list */
return;
fd_nbspec++;
fdtab[fd].spec_p = fd_nbspec;
fdtab[fd].cache = fd_nbspec;
fd_spec[fd_nbspec-1] = fd;
}
@ -113,16 +113,16 @@ static inline void release_spec_entry(int fd)
{
unsigned int pos;
pos = fdtab[fd].spec_p;
pos = fdtab[fd].cache;
if (!pos)
return;
fdtab[fd].spec_p = 0;
fdtab[fd].cache = 0;
fd_nbspec--;
if (likely(pos <= fd_nbspec)) {
/* was not the last entry */
fd = fd_spec[fd_nbspec];
fd_spec[pos - 1] = fd;
fdtab[fd].spec_p = pos;
fdtab[fd].cache = pos;
}
}

View File

@ -69,8 +69,8 @@ enum {
struct fdtab {
int (*iocb)(int fd); /* I/O handler, returns FD_WAIT_* */
void *owner; /* the connection or listener associated with this fd, NULL if closed */
unsigned int spec_p; /* speculative polling: position in spec list+1. 0=not in list. */
unsigned char state; /* FD state for read and write directions */
unsigned int cache; /* position+1 in the FD cache. 0=not in cache. */
unsigned char state; /* FD state for read and write directions (4+4 bits) */
unsigned char ev; /* event seen in return of poll() : FD_POLL_* */
unsigned char new:1; /* 1 if this fd has just been created */
unsigned char updated:1; /* 1 if this fd is already in the update list */

View File

@ -4529,7 +4529,7 @@ static int stats_dump_full_sess_to_buffer(struct stream_interface *si, struct se
conn->flags,
conn->t.sock.fd,
conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].state : 0,
conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].spec_p : 0,
conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].cache : 0,
conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].updated : 0);
}
else if ((tmpctx = objt_appctx(sess->si[0].end)) != NULL) {
@ -4557,7 +4557,7 @@ static int stats_dump_full_sess_to_buffer(struct stream_interface *si, struct se
conn->flags,
conn->t.sock.fd,
conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].state : 0,
conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].spec_p : 0,
conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].cache : 0,
conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].updated : 0);
}
else if ((tmpctx = objt_appctx(sess->si[1].end)) != NULL) {

View File

@ -187,7 +187,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
if (fdtab[fd].ev & FD_POLL_OUT)
fd_ev_set(fd, DIR_WR);
if (fdtab[fd].spec_p) {
if (fdtab[fd].cache) {
/* This fd was already scheduled for being called as a speculative I/O */
continue;
}

View File

@ -158,7 +158,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
if (fdtab[fd].ev & FD_POLL_OUT)
fd_ev_set(fd, DIR_WR);
if (fdtab[fd].spec_p) {
if (fdtab[fd].cache) {
/* This fd was already scheduled for being
* called as a speculative I/O.
*/

View File

@ -183,7 +183,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
if (fdtab[fd].ev & FD_POLL_OUT)
fd_ev_set(fd, DIR_WR);
if (fdtab[fd].spec_p) {
if (fdtab[fd].cache) {
/* This fd was already scheduled for being
* called as a speculative I/O
*/

View File

@ -167,7 +167,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
if (fdtab[fd].ev & FD_POLL_OUT)
fd_ev_set(fd, DIR_WR);
if (fdtab[fd].spec_p) {
if (fdtab[fd].cache) {
/* This fd was already scheduled for being
* called as a speculative I/O.
*/