Mirror of https://git.haproxy.org/git/haproxy.git/ (synced 2025-08-08 08:07:10 +02:00)
REORG: polling: rename "spec_e" to "state" and "spec_p" to "cache"
We're completely changing the way FDs will be polled. There will be no more speculative I/O since we'll know the exact FD state, so these will only be cached events. First, let's fix a few field names which become confusing. "spec_e" was used to store a speculative I/O event state. Now we'll store the whole R/W states for the FD there. "spec_p" was used to store a speculative I/O cache position. Now let's clearly call it "cache".
commit 15a4dec87e
parent 8e84c637d1
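The renamed "state" field is described in the struct change below as holding both the read and write states in 4+4 bits. As a minimal standalone sketch of that kind of packing, assuming the read state occupies the low nibble and the write state the high nibble (the exact bit split and every name except "state" are assumptions, not taken from this commit):

#include <stdio.h>

/* Assumed split of the 4+4 bit state byte: read state in the low
 * nibble, write state in the high nibble. Purely illustrative.
 */
#define FD_STATE_R(state)   ((state) & 0x0f)
#define FD_STATE_W(state)   (((state) >> 4) & 0x0f)
#define FD_STATE_SET(r, w)  ((unsigned char)(((w) << 4) | ((r) & 0x0f)))

int main(void)
{
        unsigned char state = FD_STATE_SET(0x2, 0x5); /* hypothetical R and W sub-states */

        printf("read=%x write=%x\n", FD_STATE_R(state), FD_STATE_W(state)); /* read=2 write=5 */
        return 0;
}

The commit itself only renames the fields; the packing above is just one plausible reading of the "(4+4 bits)" comment.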
@@ -97,11 +97,11 @@ static inline void updt_fd(const int fd)
 /* allocate an entry for a speculative event. This can be done at any time. */
 static inline void alloc_spec_entry(const int fd)
 {
-        if (fdtab[fd].spec_p)
+        if (fdtab[fd].cache)
                 /* FD already in speculative I/O list */
                 return;
         fd_nbspec++;
-        fdtab[fd].spec_p = fd_nbspec;
+        fdtab[fd].cache = fd_nbspec;
         fd_spec[fd_nbspec-1] = fd;
 }
 
@@ -113,16 +113,16 @@ static inline void release_spec_entry(int fd)
 {
         unsigned int pos;
 
-        pos = fdtab[fd].spec_p;
+        pos = fdtab[fd].cache;
         if (!pos)
                 return;
-        fdtab[fd].spec_p = 0;
+        fdtab[fd].cache = 0;
         fd_nbspec--;
         if (likely(pos <= fd_nbspec)) {
                 /* was not the last entry */
                 fd = fd_spec[fd_nbspec];
                 fd_spec[pos - 1] = fd;
-                fdtab[fd].spec_p = pos;
+                fdtab[fd].cache = pos;
         }
 }
 
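The two helpers above keep a dense array (fd_spec) of cached fds plus a per-fd position stored as position+1, so that 0 can mean "not cached"; removal swaps the last entry into the freed slot. Below is a self-contained sketch of the same bookkeeping with simplified, illustrative names (MAXFD, my_fdtab, fd_cache, cache_add and cache_del are not HAProxy identifiers):

/* Standalone sketch of the position+1 cache bookkeeping shown above.
 * All names here are illustrative, not HAProxy's.
 */
#include <stdio.h>

#define MAXFD 16

static struct { unsigned int cache; } my_fdtab[MAXFD]; /* cache = position+1, 0 = not cached */
static int fd_cache[MAXFD];                            /* dense list of cached fds */
static int fd_cache_num;                               /* number of entries in fd_cache */

static void cache_add(int fd)
{
        if (my_fdtab[fd].cache)
                return;                            /* already in the cache */
        fd_cache[fd_cache_num++] = fd;
        my_fdtab[fd].cache = fd_cache_num;         /* store position+1 */
}

static void cache_del(int fd)
{
        unsigned int pos = my_fdtab[fd].cache;

        if (!pos)
                return;                            /* not in the cache */
        my_fdtab[fd].cache = 0;
        fd_cache_num--;
        if (pos <= (unsigned int)fd_cache_num) {
                /* was not the last entry: move the last one into the hole */
                int last = fd_cache[fd_cache_num];

                fd_cache[pos - 1] = last;
                my_fdtab[last].cache = pos;
        }
}

int main(void)
{
        cache_add(3);
        cache_add(7);
        cache_add(9);
        cache_del(3);                              /* 9 takes slot 0, keeping the list dense */
        printf("cached: %d %d\n", fd_cache[0], fd_cache[1]); /* prints "cached: 9 7" */
        return 0;
}

Storing position+1 lets a zero-initialized table mean "not in the cache", and the swap-with-last removal keeps both insertion and deletion O(1) without leaving holes in the array.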
@@ -69,8 +69,8 @@ enum {
 struct fdtab {
         int (*iocb)(int fd);                 /* I/O handler, returns FD_WAIT_* */
         void *owner;                         /* the connection or listener associated with this fd, NULL if closed */
-        unsigned int spec_p;                 /* speculative polling: position in spec list+1. 0=not in list. */
-        unsigned char state;                 /* FD state for read and write directions */
+        unsigned int cache;                  /* position+1 in the FD cache. 0=not in cache. */
+        unsigned char state;                 /* FD state for read and write directions (4+4 bits) */
         unsigned char ev;                    /* event seen in return of poll() : FD_POLL_* */
         unsigned char new:1;                 /* 1 if this fd has just been created */
         unsigned char updated:1;             /* 1 if this fd is already in the update list */
@@ -4529,7 +4529,7 @@ static int stats_dump_full_sess_to_buffer(struct stream_interface *si, struct se
                      conn->flags,
                      conn->t.sock.fd,
                      conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].state : 0,
-                     conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].spec_p : 0,
+                     conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].cache : 0,
                      conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].updated : 0);
         }
         else if ((tmpctx = objt_appctx(sess->si[0].end)) != NULL) {
@@ -4557,7 +4557,7 @@ static int stats_dump_full_sess_to_buffer(struct stream_interface *si, struct se
                      conn->flags,
                      conn->t.sock.fd,
                      conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].state : 0,
-                     conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].spec_p : 0,
+                     conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].cache : 0,
                      conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].updated : 0);
         }
         else if ((tmpctx = objt_appctx(sess->si[1].end)) != NULL) {
@@ -187,7 +187,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
                 if (fdtab[fd].ev & FD_POLL_OUT)
                         fd_ev_set(fd, DIR_WR);
 
-                if (fdtab[fd].spec_p) {
+                if (fdtab[fd].cache) {
                         /* This fd was already scheduled for being called as a speculative I/O */
                         continue;
                 }
@@ -158,7 +158,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
                 if (fdtab[fd].ev & FD_POLL_OUT)
                         fd_ev_set(fd, DIR_WR);
 
-                if (fdtab[fd].spec_p) {
+                if (fdtab[fd].cache) {
                         /* This fd was already scheduled for being
                          * called as a speculative I/O.
                          */
@@ -183,7 +183,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
                 if (fdtab[fd].ev & FD_POLL_OUT)
                         fd_ev_set(fd, DIR_WR);
 
-                if (fdtab[fd].spec_p) {
+                if (fdtab[fd].cache) {
                         /* This fd was already scheduled for being
                          * called as a speculative I/O
                          */
@@ -167,7 +167,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
                 if (fdtab[fd].ev & FD_POLL_OUT)
                         fd_ev_set(fd, DIR_WR);
 
-                if (fdtab[fd].spec_p) {
+                if (fdtab[fd].cache) {
                         /* This fd was already scheduled for being
                          * called as a speculative I/O.
                          */