MEDIUM: raw-sock: remove obsolete calls to fd_{cant,cond,done}_{send,recv}

Given that raw_sock's functions solely act on connections and that all its
callers properly use subscribe() when they want to receive/send more, there
there is no reason anymore for calling fd_{cant,cond,done}_{send,recv}, as
this call is immediately overridden by the subscribe call. It's also worth
noting that fd_cond_recv(), whose purpose was to speculatively enable
reading in the FD cache if the FD was active but not yet polled so as to
save on expensive epoll_ctl() calls, is implicitly covered more cleanly by
recent commit 5d7dcc2a8e ("OPTIM: epoll: always poll for recv if neither
active nor ready").

No change on the number of calls to epoll_ctl() was noticed consecutive to
this change.
This commit is contained in:
Willy Tarreau 2020-01-28 18:51:57 +01:00
parent 882093249a
commit 1113116b4a

View File

@@ -118,8 +118,6 @@ int raw_sock_to_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pipe,
conn->flags |= CO_FL_WAIT_ROOM;
break;
}
fd_cant_recv(conn->handle.fd);
break;
}
else if (errno == ENOSYS || errno == EINVAL || errno == EBADF) {
@@ -148,7 +146,6 @@ int raw_sock_to_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pipe,
* being asked to poll.
*/
conn->flags |= CO_FL_WAIT_ROOM;
fd_done_recv(conn->handle.fd);
break;
}
} /* while */
@@ -199,7 +196,6 @@ int raw_sock_from_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pip
if (ret <= 0) {
if (ret == 0 || errno == EAGAIN) {
fd_cant_send(conn->handle.fd);
break;
}
else if (errno == EINTR)
@@ -215,7 +211,6 @@ int raw_sock_from_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pip
}
if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done) {
conn->flags &= ~CO_FL_WAIT_L4_CONN;
fd_cond_recv(conn->handle.fd);
}
return done;
@@ -293,7 +288,6 @@ static size_t raw_sock_to_buf(struct connection *conn, void *xprt_ctx, struct bu
if ((!fdtab[conn->handle.fd].linger_risk) ||
(cur_poller.flags & HAP_POLL_F_RDHUP)) {
fd_done_recv(conn->handle.fd);
break;
}
}
@@ -303,7 +297,6 @@ static size_t raw_sock_to_buf(struct connection *conn, void *xprt_ctx, struct bu
goto read0;
}
else if (errno == EAGAIN || errno == ENOTCONN) {
fd_cant_recv(conn->handle.fd);
break;
}
else if (errno != EINTR) {
@@ -393,7 +386,6 @@ static size_t raw_sock_from_buf(struct connection *conn, void *xprt_ctx, const s
}
else if (ret == 0 || errno == EAGAIN || errno == ENOTCONN || errno == EINPROGRESS) {
/* nothing written, we need to poll for write first */
fd_cant_send(conn->handle.fd);
break;
}
else if (errno != EINTR) {
@@ -403,7 +395,6 @@ static size_t raw_sock_from_buf(struct connection *conn, void *xprt_ctx, const s
}
if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done) {
conn->flags &= ~CO_FL_WAIT_L4_CONN;
fd_cond_recv(conn->handle.fd);
}
if (done > 0) {