diff --git a/include/proto/channel.h b/include/proto/channel.h
index c6ab71816..2f2a91353 100644
--- a/include/proto/channel.h
+++ b/include/proto/channel.h
@@ -34,21 +34,22 @@
 #include
 
-extern struct pool_head *pool2_buffer;
+extern struct pool_head *pool2_channel;
 
 /* perform minimal initializations, report 0 in case of error, 1 if OK. */
-int init_buffer();
+int init_channel();
 
-/* SI-to-buffer functions : buffer_{get,put}_{char,block,string,chunk} */
-int bo_inject(struct channel *buf, const char *msg, int len);
+unsigned long long channel_forward(struct channel *buf, unsigned long long bytes);
+
+/* SI-to-channel functions working with buffers */
 int bi_putblk(struct channel *buf, const char *str, int len);
 int bi_putchr(struct channel *buf, char c);
+int bo_inject(struct channel *buf, const char *msg, int len);
 int bo_getline(struct channel *buf, char *str, int len);
 int bo_getblk(struct channel *buf, char *blk, int len, int offset);
-unsigned long long buffer_forward(struct channel *buf, unsigned long long bytes);
 
-/* Initialize all fields in the buffer. */
-static inline void buffer_init(struct channel *buf)
+/* Initialize all fields in the channel. */
+static inline void channel_init(struct channel *buf)
 {
 	buf->buf.o = 0;
 	buf->buf.i = 0;
@@ -61,9 +62,9 @@ static inline void buffer_init(struct channel *buf)
 	buf->flags = 0;
 }
 
-/*****************************************************************/
-/* These functions are used to compute various buffer area sizes */
-/*****************************************************************/
+/*********************************************************************/
+/* These functions are used to compute various channel content sizes */
+/*********************************************************************/
 
 /* Reports non-zero if the channel is empty, which means both its
  * buffer and pipe are empty. The construct looks strange but is
@@ -75,31 +76,6 @@ static inline unsigned int channel_is_empty(struct channel *c)
 	return !(c->buf.o | (long)c->pipe);
 }
 
-/* Return the number of reserved bytes in the buffer, which ensures that once
- * all pending data are forwarded, the buffer still has global.tune.maxrewrite
- * bytes free. The result is between 0 and global.maxrewrite, which is itself
- * smaller than any buf->size.
- */
-static inline int buffer_reserved(const struct channel *buf)
-{
-	int ret = global.tune.maxrewrite - buf->to_forward - buf->buf.o;
-
-	if (buf->to_forward == CHN_INFINITE_FORWARD)
-		return 0;
-	if (ret <= 0)
-		return 0;
-	return ret;
-}
-
-/* Return the max number of bytes the buffer can contain so that once all the
- * pending bytes are forwarded, the buffer still has global.tune.maxrewrite
- * bytes free. The result sits between buf->size - maxrewrite and buf->size.
- */
-static inline int buffer_max_len(const struct channel *buf)
-{
-	return buf->buf.size - buffer_reserved(buf);
-}
-
 /* Returns non-zero if the buffer input is considered full. The reserved space
  * is taken into account if ->to_forward indicates that an end of transfer is
  * close to happen. The test is optimized to avoid as many operations as
@@ -125,6 +101,167 @@ static inline int channel_full(const struct channel *b)
 	return rem <= 0;
 }
 
+/* Returns true if the channel's input is already closed */
+static inline int channel_input_closed(struct channel *buf)
+{
+	return ((buf->flags & CF_SHUTR) != 0);
+}
+
+/* Returns true if the channel's output is already closed */
+static inline int channel_output_closed(struct channel *buf)
+{
+	return ((buf->flags & CF_SHUTW) != 0);
+}
+
+/* Check channel timeouts, and set the corresponding flags. The likely/unlikely
+ * have been optimized for fastest normal path. The read/write timeouts are not
+ * set if there was activity on the channel. That way, we don't have to update
+ * the timeout on every I/O. Note that the analyser timeout is always checked.
+ */
+static inline void channel_check_timeouts(struct channel *b)
+{
+	if (likely(!(b->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_ACTIVITY|CF_READ_NOEXP))) &&
+	    unlikely(tick_is_expired(b->rex, now_ms)))
+		b->flags |= CF_READ_TIMEOUT;
+
+	if (likely(!(b->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_ACTIVITY))) &&
+	    unlikely(tick_is_expired(b->wex, now_ms)))
+		b->flags |= CF_WRITE_TIMEOUT;
+
+	if (likely(!(b->flags & CF_ANA_TIMEOUT)) &&
+	    unlikely(tick_is_expired(b->analyse_exp, now_ms)))
+		b->flags |= CF_ANA_TIMEOUT;
+}
+
+/* Erase any content from channel <buf> and adjust flags accordingly. Note
+ * that any spliced data is not affected since we may not have any access to
+ * it.
+ */
+static inline void channel_erase(struct channel *buf)
+{
+	buf->buf.o = 0;
+	buf->buf.i = 0;
+	buf->to_forward = 0;
+	buf->buf.p = buf->buf.data;
+}
+
+/* marks the channel as "shutdown" ASAP for reads */
+static inline void channel_shutr_now(struct channel *buf)
+{
+	buf->flags |= CF_SHUTR_NOW;
+}
+
+/* marks the channel as "shutdown" ASAP for writes */
+static inline void channel_shutw_now(struct channel *buf)
+{
+	buf->flags |= CF_SHUTW_NOW;
+}
+
+/* marks the channel as "shutdown" ASAP in both directions */
+static inline void channel_abort(struct channel *buf)
+{
+	buf->flags |= CF_SHUTR_NOW | CF_SHUTW_NOW;
+	buf->flags &= ~CF_AUTO_CONNECT;
+}
+
+/* Installs <func> as a hijacker on the channel <b> for session <s>. The hijack
+ * flag is set, and the function called once. The function is responsible for
+ * clearing the hijack bit. It is possible that the function clears the flag
+ * during this first call.
+ */
+static inline void channel_install_hijacker(struct session *s,
+					    struct channel *b,
+					    void (*func)(struct session *, struct channel *))
+{
+	b->hijacker = func;
+	b->flags |= CF_HIJACK;
+	func(s, b);
+}
+
+/* Releases the channel from hijacking mode. Often used by the hijack function */
+static inline void channel_stop_hijacker(struct channel *buf)
+{
+	buf->flags &= ~CF_HIJACK;
+}
+
+/* allow the consumer to try to establish a new connection. */
+static inline void channel_auto_connect(struct channel *buf)
+{
+	buf->flags |= CF_AUTO_CONNECT;
+}
+
+/* prevent the consumer from trying to establish a new connection, and also
+ * disable auto shutdown forwarding.
+ */
+static inline void channel_dont_connect(struct channel *buf)
+{
+	buf->flags &= ~(CF_AUTO_CONNECT|CF_AUTO_CLOSE);
+}
+
+/* allow the producer to forward shutdown requests */
+static inline void channel_auto_close(struct channel *buf)
+{
+	buf->flags |= CF_AUTO_CLOSE;
+}
+
+/* prevent the producer from forwarding shutdown requests */
+static inline void channel_dont_close(struct channel *buf)
+{
+	buf->flags &= ~CF_AUTO_CLOSE;
+}
+
+/* allow the producer to read / poll the input */
+static inline void channel_auto_read(struct channel *buf)
+{
+	buf->flags &= ~CF_DONT_READ;
+}
+
+/* prevent the producer from reading / polling the input */
+static inline void channel_dont_read(struct channel *buf)
+{
+	buf->flags |= CF_DONT_READ;
+}
+
+
+/*************************************************/
+/* Buffer operations in the context of a channel */
+/*************************************************/
+
+
+/* Return the number of reserved bytes in the channel's visible
+ * buffer, which ensures that once all pending data are forwarded, the
+ * buffer still has global.tune.maxrewrite bytes free. The result is
+ * between 0 and global.tune.maxrewrite, which is itself smaller than
+ * any buf->size.
+ */
+static inline int buffer_reserved(const struct channel *buf)
+{
+	int ret = global.tune.maxrewrite - buf->to_forward - buf->buf.o;
+
+	if (buf->to_forward == CHN_INFINITE_FORWARD)
+		return 0;
+	if (ret <= 0)
+		return 0;
+	return ret;
+}
+
+/* Return the max number of bytes the buffer can contain so that once all the
+ * pending bytes are forwarded, the buffer still has global.tune.maxrewrite
+ * bytes free. The result sits between buf->size - maxrewrite and buf->size.
+ */
+static inline int buffer_max_len(const struct channel *buf)
+{
+	return buf->buf.size - buffer_reserved(buf);
+}
+
+/* Return the amount of bytes that can be written into the buffer at once,
+ * excluding reserved space, which is preserved.
+ */
+static inline int buffer_contig_space_res(const struct channel *chn)
+{
+	return buffer_contig_space_with_res(&chn->buf, buffer_reserved(chn));
+}
+
 /* Returns the amount of space available at the input of the buffer, taking the
  * reserved space into account if ->to_forward indicates that an end of transfer
  * is close to happen. The test is optimized to avoid as many operations as
@@ -156,68 +293,15 @@ static inline int bi_avail(const struct channel *b)
 	return 0;
 }
 
-/* Return the amount of bytes that can be written into the buffer at once,
- * excluding reserved space, which is preserved.
- */
-static inline int buffer_contig_space_res(const struct channel *chn)
-{
-	return buffer_contig_space_with_res(&chn->buf, buffer_reserved(chn));
-}
-
-/* Returns true if the buffer's input is already closed */
-static inline int buffer_input_closed(struct channel *buf)
-{
-	return ((buf->flags & CF_SHUTR) != 0);
-}
-
-/* Returns true if the buffer's output is already closed */
-static inline int buffer_output_closed(struct channel *buf)
-{
-	return ((buf->flags & CF_SHUTW) != 0);
-}
-
-/* Check buffer timeouts, and set the corresponding flags. The
- * likely/unlikely have been optimized for fastest normal path.
- * The read/write timeouts are not set if there was activity on the buffer.
- * That way, we don't have to update the timeout on every I/O. Note that the
- * analyser timeout is always checked.
- */
-static inline void buffer_check_timeouts(struct channel *b)
-{
-	if (likely(!(b->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_ACTIVITY|CF_READ_NOEXP))) &&
-	    unlikely(tick_is_expired(b->rex, now_ms)))
-		b->flags |= CF_READ_TIMEOUT;
-
-	if (likely(!(b->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_ACTIVITY))) &&
-	    unlikely(tick_is_expired(b->wex, now_ms)))
-		b->flags |= CF_WRITE_TIMEOUT;
-
-	if (likely(!(b->flags & CF_ANA_TIMEOUT)) &&
-	    unlikely(tick_is_expired(b->analyse_exp, now_ms)))
-		b->flags |= CF_ANA_TIMEOUT;
-}
-
-/* Erase any content from buffer <buf> and adjusts flags accordingly. Note
- * that any spliced data is not affected since we may not have any access to
- * it.
- */
-static inline void buffer_erase(struct channel *buf)
-{
-	buf->buf.o = 0;
-	buf->buf.i = 0;
-	buf->to_forward = 0;
-	buf->buf.p = buf->buf.data;
-}
-
-/* Cut the "tail" of the buffer, which means strip it to the length of unsent
- * data only, and kill any remaining unsent data. Any scheduled forwarding is
- * stopped. This is mainly to be used to send error messages after existing
- * data.
+/* Cut the "tail" of the channel's buffer, which means strip it to the length
+ * of unsent data only, and kill any remaining unsent data. Any scheduled
+ * forwarding is stopped. This is mainly to be used to send error messages
+ * after existing data.
  */
 static inline void bi_erase(struct channel *buf)
 {
 	if (!buf->buf.o)
-		return buffer_erase(buf);
+		return channel_erase(buf);
 
 	buf->to_forward = 0;
 	if (!buf->buf.i)
@@ -226,88 +310,12 @@ static inline void bi_erase(struct channel *buf)
 	buf->buf.i = 0;
 }
 
-/* marks the buffer as "shutdown" ASAP for reads */
-static inline void buffer_shutr_now(struct channel *buf)
-{
-	buf->flags |= CF_SHUTR_NOW;
-}
-
-/* marks the buffer as "shutdown" ASAP for writes */
-static inline void buffer_shutw_now(struct channel *buf)
-{
-	buf->flags |= CF_SHUTW_NOW;
-}
-
-/* marks the buffer as "shutdown" ASAP in both directions */
-static inline void buffer_abort(struct channel *buf)
-{
-	buf->flags |= CF_SHUTR_NOW | CF_SHUTW_NOW;
-	buf->flags &= ~CF_AUTO_CONNECT;
-}
-
-/* Installs <func> as a hijacker on the buffer <b> for session <s>. The hijack
- * flag is set, and the function called once. The function is responsible for
- * clearing the hijack bit. It is possible that the function clears the flag
- * during this first call.
- */
-static inline void buffer_install_hijacker(struct session *s,
-					    struct channel *b,
-					    void (*func)(struct session *, struct channel *))
-{
-	b->hijacker = func;
-	b->flags |= CF_HIJACK;
-	func(s, b);
-}
-
-/* Releases the buffer from hijacking mode. Often used by the hijack function */
-static inline void buffer_stop_hijack(struct channel *buf)
-{
-	buf->flags &= ~CF_HIJACK;
-}
-
-/* allow the consumer to try to establish a new connection. */
-static inline void buffer_auto_connect(struct channel *buf)
-{
-	buf->flags |= CF_AUTO_CONNECT;
-}
-
-/* prevent the consumer from trying to establish a new connection, and also
- * disable auto shutdown forwarding.
- */
-static inline void buffer_dont_connect(struct channel *buf)
-{
-	buf->flags &= ~(CF_AUTO_CONNECT|CF_AUTO_CLOSE);
-}
-
-/* allow the producer to forward shutdown requests */
-static inline void buffer_auto_close(struct channel *buf)
-{
-	buf->flags |= CF_AUTO_CLOSE;
-}
-
-/* prevent the producer from forwarding shutdown requests */
-static inline void buffer_dont_close(struct channel *buf)
-{
-	buf->flags &= ~CF_AUTO_CLOSE;
-}
-
-/* allow the producer to read / poll the input */
-static inline void buffer_auto_read(struct channel *buf)
-{
-	buf->flags &= ~CF_DONT_READ;
-}
-
-/* prevent the producer from read / poll the input */
-static inline void buffer_dont_read(struct channel *buf)
-{
-	buf->flags |= CF_DONT_READ;
-}
-
 /*
- * Advance the buffer's read pointer by <len> bytes. This is useful when data
- * have been read directly from the buffer. It is illegal to call this function
- * with <len> causing a wrapping at the end of the buffer. It's the caller's
- * responsibility to ensure that <len> is never larger than buf->o.
+ * Advance the channel buffer's read pointer by <len> bytes. This is useful
+ * when data have been read directly from the buffer. It is illegal to call
+ * this function with <len> causing a wrapping at the end of the buffer. It's
+ * the caller's responsibility to ensure that <len> is never larger than
+ * buf->o. Channel flag WRITE_PARTIAL is set.
  */
 static inline void bo_skip(struct channel *buf, int len)
 {
@@ -320,12 +328,12 @@ static inline void bo_skip(struct channel *buf, int len)
 	buf->flags |= CF_WRITE_PARTIAL;
 }
 
-/* Tries to copy chunk <chunk> into buffer <buf> after length controls.
- * The ->o and to_forward pointers are updated. If the buffer's input is
+/* Tries to copy chunk <chunk> into the channel's buffer after length controls.
+ * The buf->o and to_forward pointers are updated. If the channel's input is
  * closed, -2 is returned. If the block is too large for this buffer, -3 is
  * returned. If there is not enough room left in the buffer, -1 is returned.
  * Otherwise the number of bytes copied is returned (0 being a valid number).
- * Buffer flag READ_PARTIAL is updated if some data can be transferred. The
+ * Channel flag READ_PARTIAL is updated if some data can be transferred. The
  * chunk's length is updated with the number of bytes sent.
  */
 static inline int bi_putchk(struct channel *buf, struct chunk *chunk)
@@ -338,12 +346,13 @@ static inline int bi_putchk(struct channel *buf, struct chunk *chunk)
 	return ret;
 }
 
-/* Tries to copy string <str> at once into buffer <buf> after length controls.
- * The ->o and to_forward pointers are updated. If the buffer's input is
- * closed, -2 is returned. If the block is too large for this buffer, -3 is
- * returned. If there is not enough room left in the buffer, -1 is returned.
- * Otherwise the number of bytes copied is returned (0 being a valid number).
- * Buffer flag READ_PARTIAL is updated if some data can be transferred.
+/* Tries to copy string <str> at once into the channel's buffer after length
+ * controls. The buf->o and to_forward pointers are updated. If the channel's
+ * input is closed, -2 is returned. If the block is too large for this buffer,
+ * -3 is returned. If there is not enough room left in the buffer, -1 is
+ * returned. Otherwise the number of bytes copied is returned (0 being a valid
+ * number). Channel flag READ_PARTIAL is updated if some data can be
+ * transferred.
  */
 static inline int bi_putstr(struct channel *buf, const char *str)
 {
@@ -351,10 +360,11 @@ static inline int bi_putstr(struct channel *buf, const char *str)
 }
 
 /*
- * Return one char from the buffer. If the buffer is empty and closed, return -2.
- * If the buffer is just empty, return -1. The buffer's pointer is not advanced,
- * it's up to the caller to call bo_skip(buf, 1) when it has consumed the char.
- * Also note that this function respects the ->o limit.
+ * Return one char from the channel's buffer. If the buffer is empty and the
+ * channel is closed, return -2. If the buffer is just empty, return -1. The
+ * buffer's pointer is not advanced, it's up to the caller to call bo_skip(buf,
+ * 1) when it has consumed the char. Also note that this function respects the
+ * buf->o limit.
  */
 static inline int bo_getchr(struct channel *buf)
 {
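The inline functions added above form the whole analyser-facing control surface of a channel. As an orientation aid, here is a minimal sketch of how an analyser is expected to drive them; the function, the line-oriented protocol and the wrapping-free scan are hypothetical, only the channel_* calls and the analyser calling convention come from this patch:

/* Hypothetical analyser sketch (not part of this patch): hold the outgoing
 * connection until a full text line has arrived on the request channel.
 * Assumes <proto/channel.h> as modified above plus <string.h> for memchr();
 * ignores buffer wrapping for brevity.
 */
static int example_wait_for_line(struct session *s, struct channel *req, int an_bit)
{
	if (!memchr(req->buf.p, '\n', req->buf.i)) {
		if (channel_full(req) || channel_input_closed(req)) {
			/* a full line can never arrive: kill both directions */
			channel_abort(req);
			channel_abort(s->rep);
			req->analysers = 0;
			return 0;
		}
		/* not enough data yet: block the connect, wait for more input */
		channel_dont_connect(req);
		return 0;
	}

	/* line complete: let the consumer connect, this analyser is done */
	channel_auto_connect(req);
	req->analysers &= ~an_bit;
	return 1;
}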
diff --git a/include/types/channel.h b/include/types/channel.h
index 2ef3b3e0f..c68f4f768 100644
--- a/include/types/channel.h
+++ b/include/types/channel.h
@@ -137,7 +137,7 @@
  * Those bits indicate that there are some processing to do on the buffer
  * contents. It will probably evolve into a linked list later. Those
  * analysers could be compared to higher level processors.
- * The field is blanked by buffer_init() and only by analysers themselves
+ * The field is blanked by channel_init() and only by analysers themselves
  * afterwards.
  */
 #define AN_REQ_DECODE_PROXY     0x00000001  /* take the proxied address from a 'PROXY' line */
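The comment fixed above captures the ownership rule for analyser bits: anything may set a bit to request processing, but only channel_init() or the owning analyser may clear it. A two-fragment sketch of that convention, using the real AN_REQ_DECODE_PROXY bit (the surrounding statements are illustrative):

/* at session setup: request the PROXY-line decoder on this channel */
channel_init(s->req);
s->req->analysers |= AN_REQ_DECODE_PROXY;

/* inside the analyser, once the PROXY line has been consumed: */
s->req->analysers &= ~AN_REQ_DECODE_PROXY;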
diff --git a/src/channel.c b/src/channel.c
index 1597b0fc5..65fe66597 100644
--- a/src/channel.c
+++ b/src/channel.c
@@ -22,28 +22,25 @@
 #include
 
-/* Note: this code has not yet been completely cleaned up and still refers to
- * the word "buffer" when "channel" is meant instead.
- */
-struct pool_head *pool2_buffer;
+struct pool_head *pool2_channel;
 
 /* perform minimal initializations, report 0 in case of error, 1 if OK. */
-int init_buffer()
+int init_channel()
 {
-	pool2_buffer = create_pool("buffer", sizeof(struct channel) + global.tune.bufsize, MEM_F_SHARED);
-	return pool2_buffer != NULL;
+	pool2_channel = create_pool("channel", sizeof(struct channel) + global.tune.bufsize, MEM_F_SHARED);
+	return pool2_channel != NULL;
 }
 
-/* Schedule up to <bytes> more bytes to be forwarded by the buffer without notifying
- * the task. Any pending data in the buffer is scheduled to be sent as well,
- * in the limit of the number of bytes to forward. This must be the only method
- * to use to schedule bytes to be sent. If the requested number is too large, it
- * is automatically adjusted. The number of bytes taken into account is returned.
- * Directly touching ->to_forward will cause lockups when ->o goes down to
- * zero if nobody is ready to push the remaining data.
+/* Schedule up to <bytes> more bytes to be forwarded via the channel without
+ * notifying the owner task. Any data pending in the buffer are scheduled to be
+ * sent as well, in the limit of the number of bytes to forward. This must be
+ * the only method to use to schedule bytes to be forwarded. If the requested
+ * number is too large, it is automatically adjusted. The number of bytes taken
+ * into account is returned. Directly touching ->to_forward will cause lockups
+ * when buf->o goes down to zero if nobody is ready to push the remaining data.
  */
-unsigned long long buffer_forward(struct channel *buf, unsigned long long bytes)
+unsigned long long channel_forward(struct channel *buf, unsigned long long bytes)
 {
 	unsigned int new_forward;
 	unsigned int forwarded;
@@ -94,12 +91,12 @@ unsigned long long buffer_forward(struct channel *buf, unsigned long long bytes)
 	return bytes;
 }
 
-/* writes <len> bytes from message <msg> to buffer <buf>. Returns -1 in case of
- * success, -2 if the message is larger than the buffer size, or the number of
- * bytes available otherwise. The send limit is automatically adjusted with the
- * amount of data written. FIXME-20060521: handle unaligned data.
- * Note: this function appends data to the buffer's output and possibly overwrites
- * any pending input data which are assumed not to exist.
+/* writes <len> bytes from message <msg> to the channel's buffer. Returns -1 in
+ * case of success, -2 if the message is larger than the buffer size, or the
+ * number of bytes available otherwise. The send limit is automatically
+ * adjusted to the amount of data written. FIXME-20060521: handle unaligned
+ * data. Note: this function appends data to the buffer's output and possibly
+ * overwrites any pending input data which are assumed not to exist.
  */
 int bo_inject(struct channel *buf, const char *msg, int len)
 {
@@ -129,15 +126,15 @@ int bo_inject(struct channel *buf, const char *msg, int len)
 	return -1;
 }
 
-/* Tries to copy character <c> into buffer <buf> after length controls. The
- * ->o and to_forward pointers are updated. If the buffer's input is
- * closed, -2 is returned. If there is not enough room left in the buffer, -1
- * is returned. Otherwise the number of bytes copied is returned (1). Buffer
- * flag READ_PARTIAL is updated if some data can be transferred.
+/* Tries to copy character <c> into the channel's buffer after some length
+ * controls. The buf->o and to_forward pointers are updated. If the channel
+ * input is closed, -2 is returned. If there is not enough room left in the
+ * buffer, -1 is returned. Otherwise the number of bytes copied is returned
+ * (1). Channel flag READ_PARTIAL is updated if some data can be transferred.
  */
 int bi_putchr(struct channel *buf, char c)
 {
-	if (unlikely(buffer_input_closed(buf)))
+	if (unlikely(channel_input_closed(buf)))
 		return -2;
 
 	if (channel_full(buf))
@@ -158,18 +155,19 @@ int bi_putchr(struct channel *buf, char c)
 	return 1;
 }
 
-/* Tries to copy block <blk> at once into buffer <buf> after length controls.
- * The ->o and to_forward pointers are updated. If the buffer's input is
- * closed, -2 is returned. If the block is too large for this buffer, -3 is
- * returned. If there is not enough room left in the buffer, -1 is returned.
- * Otherwise the number of bytes copied is returned (0 being a valid number).
- * Buffer flag READ_PARTIAL is updated if some data can be transferred.
+/* Tries to copy block <blk> at once into the channel's buffer after length
+ * controls. The buf->o and to_forward pointers are updated. If the channel
+ * input is closed, -2 is returned. If the block is too large for this buffer,
+ * -3 is returned. If there is not enough room left in the buffer, -1 is
+ * returned. Otherwise the number of bytes copied is returned (0 being a valid
+ * number). Channel flag READ_PARTIAL is updated if some data can be
+ * transferred.
  */
 int bi_putblk(struct channel *buf, const char *blk, int len)
 {
 	int max;
 
-	if (unlikely(buffer_input_closed(buf)))
+	if (unlikely(channel_input_closed(buf)))
 		return -2;
 
 	max = buffer_max_len(buf);
@@ -210,12 +208,12 @@ int bi_putblk(struct channel *buf, const char *blk, int len)
 	return len;
 }
 
-/* Gets one text line out of a buffer from a stream interface.
+/* Gets one text line out of a channel's buffer from a stream interface.
  * Return values :
  *   >0 : number of bytes read. Includes the \n if present before len or end.
  *   =0 : no '\n' before end found. <str> is left undefined.
  *   <0 : no more bytes readable because output is shut.
- * The buffer status is not changed. The caller must call bo_skip() to
+ * The channel status is not changed. The caller must call bo_skip() to
  * update it. The '\n' is waited for as long as neither the buffer nor the
  * output are full. If either of them is full, the string may be returned
  * as is, without the '\n'.
@@ -260,12 +258,12 @@ int bo_getline(struct channel *buf, char *str, int len)
 	return ret;
 }
 
-/* Gets one full block of data at once from a buffer, optionally from a
- * specific offset. Return values :
+/* Gets one full block of data at once from a channel's buffer, optionally from
+ * a specific offset. Return values :
  *   >0 : number of bytes read, equal to requested size.
  *   =0 : not enough data available. <blk> is left undefined.
  *   <0 : no more bytes readable because output is shut.
- * The buffer status is not changed. The caller must call bo_skip() to
+ * The channel status is not changed. The caller must call bo_skip() to
  * update it.
  */
 int bo_getblk(struct channel *buf, char *blk, int len, int offset)
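The negative return codes documented on bi_putchr()/bi_putblk() above fall into distinct classes, and only -1 is transient. A hedged sketch of a caller mapping them onto actions (the wrapper and its retry policy are illustrative, not part of the patch):

/* Illustrative: push a block into a channel and classify the documented
 * error codes. Returns the number of bytes queued, or -1 on a permanent
 * failure.
 */
static int example_push(struct channel *chn, const char *blk, int len)
{
	int ret = bi_putblk(chn, blk, len);

	if (ret >= 0)
		return ret;    /* bytes copied; 0 is a valid result */

	switch (ret) {
	case -1:               /* no room yet: retry once space is freed */
		return 0;
	case -2:               /* input closed: no point retrying */
	case -3:               /* block larger than the buffer: never fits */
	default:
		return -1;
	}
}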
diff --git a/src/frontend.c b/src/frontend.c
index 07c44629d..8b168360a 100644
--- a/src/frontend.c
+++ b/src/frontend.c
@@ -190,8 +190,8 @@ int frontend_accept(struct session *s)
 
 	/* note: this should not happen anymore since there's always at least the switching rules */
 	if (!s->req->analysers) {
-		buffer_auto_connect(s->req);  /* don't wait to establish connection */
-		buffer_auto_close(s->req);    /* let the producer forward close requests */
+		channel_auto_connect(s->req);  /* don't wait to establish connection */
+		channel_auto_close(s->req);    /* let the producer forward close requests */
 	}
 
 	s->req->rto = s->fe->timeout.client;
@@ -400,12 +400,12 @@ int frontend_decode_proxy_request(struct session *s, struct channel *req, int an
 	if ((req->flags & CF_SHUTR) || buffer_full(&req->buf, global.tune.maxrewrite))
 		goto fail;
 
-	buffer_dont_connect(s->req);
+	channel_dont_connect(s->req);
 	return 0;
 
 fail:
-	buffer_abort(req);
-	buffer_abort(s->rep);
+	channel_abort(req);
+	channel_abort(s->rep);
 	req->analysers = 0;
 	s->fe->fe_counters.failed_req++;
diff --git a/src/haproxy.c b/src/haproxy.c
index 3427dccf5..5d537df45 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -584,8 +584,8 @@ void init(int argc, char **argv)
 	global_listener_queue_task->process = manage_global_listener_queue;
 	global_listener_queue_task->expire = TICK_ETERNITY;
 
-	/* now we know the buffer size, we can initialize the buffers */
-	init_buffer();
+	/* now we know the buffer size, we can initialize the channels and buffers */
+	init_channel();
 
 	if (have_appsession)
 		appsession_init();
@@ -1053,7 +1053,7 @@ void deinit(void)
 	}
 
 	pool_destroy2(pool2_session);
-	pool_destroy2(pool2_buffer);
+	pool_destroy2(pool2_channel);
 	pool_destroy2(pool2_requri);
 	pool_destroy2(pool2_task);
 	pool_destroy2(pool2_capture);
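init_channel() sizes its pool objects as sizeof(struct channel) + global.tune.bufsize, so a single allocation yields the descriptor and its data area back to back — which is why init() may only call it once tune.bufsize is final. The resulting allocation idiom, as used by peers.c and session.c below, reduced to a sketch:

/* one pool object covers the struct and the buffer storage behind it */
struct channel *chn = pool_alloc2(pool2_channel);

if (chn) {
	chn->buf.size = global.tune.bufsize; /* data area follows the struct */
	channel_init(chn);                   /* reset counters, point buf.p at buf.data */
}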
diff --git a/src/peers.c b/src/peers.c
index 386cca94d..312c248fe 100644
--- a/src/peers.c
+++ b/src/peers.c
@@ -1222,11 +1222,11 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
 	txn->hdr_idx.v = NULL;
 	txn->hdr_idx.size = txn->hdr_idx.used = 0;
 
-	if ((s->req = pool_alloc2(pool2_buffer)) == NULL)
+	if ((s->req = pool_alloc2(pool2_channel)) == NULL)
 		goto out_fail_req; /* no memory */
 
 	s->req->buf.size = global.tune.bufsize;
-	buffer_init(s->req);
+	channel_init(s->req);
 	s->req->prod = &s->si[0];
 	s->req->cons = &s->si[1];
 	s->si[0].ib = s->si[1].ob = s->req;
@@ -1238,18 +1238,18 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
 
 	/* note: this should not happen anymore since there's always at least the switching rules */
 	if (!s->req->analysers) {
-		buffer_auto_connect(s->req);/* don't wait to establish connection */
-		buffer_auto_close(s->req);/* let the producer forward close requests */
+		channel_auto_connect(s->req);/* don't wait to establish connection */
+		channel_auto_close(s->req);/* let the producer forward close requests */
 	}
 
 	s->req->rto = s->fe->timeout.client;
 	s->req->wto = s->be->timeout.server;
 
-	if ((s->rep = pool_alloc2(pool2_buffer)) == NULL)
+	if ((s->rep = pool_alloc2(pool2_channel)) == NULL)
 		goto out_fail_rep; /* no memory */
 
 	s->rep->buf.size = global.tune.bufsize;
-	buffer_init(s->rep);
+	channel_init(s->rep);
 	s->rep->prod = &s->si[1];
 	s->rep->cons = &s->si[0];
 	s->si[0].ob = s->si[1].ib = s->rep;
@@ -1283,7 +1283,7 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
 
 	/* Error unrolling */
  out_fail_rep:
-	pool_free2(pool2_buffer, s->req);
+	pool_free2(pool2_channel, s->req);
  out_fail_req:
 	task_free(t);
  out_free_session:
diff --git a/src/proto_http.c b/src/proto_http.c
index ce462e101..e671225ff 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -649,12 +649,12 @@ int http_remove_header2(struct http_msg *msg, struct hdr_idx *idx, struct hdr_ct
 static void http_server_error(struct session *t, struct stream_interface *si,
                               int err, int finst, int status, const struct chunk *msg)
 {
-	buffer_auto_read(si->ob);
-	buffer_abort(si->ob);
-	buffer_auto_close(si->ob);
-	buffer_erase(si->ob);
-	buffer_auto_close(si->ib);
-	buffer_auto_read(si->ib);
+	channel_auto_read(si->ob);
+	channel_abort(si->ob);
+	channel_auto_close(si->ob);
+	channel_erase(si->ob);
+	channel_auto_close(si->ib);
+	channel_auto_read(si->ib);
 	if (status > 0 && msg) {
 		t->txn.status = status;
 		bo_inject(si->ib, msg->str, msg->len);
@@ -2026,7 +2026,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
 			if (req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
 				goto failed_keep_alive;
 			/* some data has still not left the buffer, wake us once that's done */
-			buffer_dont_connect(req);
+			channel_dont_connect(req);
 			req->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
 			return 0;
 		}
@@ -2050,7 +2050,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
 			if (s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
 				goto failed_keep_alive;
 			/* don't let a connection request be initiated */
-			buffer_dont_connect(req);
+			channel_dont_connect(req);
 			s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
 			s->rep->analysers |= an_bit; /* wake us up once it changes */
 			return 0;
 		}
@@ -2211,7 +2211,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
 		return 0;
 	}
 
-	buffer_dont_connect(req);
+	channel_dont_connect(req);
 	req->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
 	s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
 #ifdef TCP_QUICKACK
@@ -2784,7 +2784,7 @@ int http_process_req_common(struct session *s, struct channel *req, int an_bit,
 
 	if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
 		/* we need more data */
-		buffer_dont_connect(req);
+		channel_dont_connect(req);
 		return 0;
 	}
@@ -2866,11 +2866,11 @@ int http_process_req_common(struct session *s, struct channel *req, int an_bit,
 		 * eventually expire. We build the tarpit as an analyser.
 		 */
 		if (txn->flags & TX_CLTARPIT) {
-			buffer_erase(s->req);
+			channel_erase(s->req);
 			/* wipe the request out so that we can drop the connection early
 			 * if the client closes first.
 			 */
-			buffer_dont_connect(req);
+			channel_dont_connect(req);
 			req->analysers = 0; /* remove switching rules etc... */
 			req->analysers |= AN_REQ_HTTP_TARPIT;
 			req->analyse_exp = tick_add_ifset(now_ms, s->be->timeout.tarpit);
@@ -3017,7 +3017,7 @@ int http_process_req_common(struct session *s, struct channel *req, int an_bit,
 				if (!http_process_req_stat_post(s->rep->prod, txn, req)) {
 					/* we need more data */
 					req->analysers |= an_bit;
-					buffer_dont_connect(req);
+					channel_dont_connect(req);
 					return 0;
 				}
 			} else {
@@ -3244,7 +3244,7 @@ int http_process_request(struct session *s, struct channel *req, int an_bit)
 
 	if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
 		/* we need more data */
-		buffer_dont_connect(req);
+		channel_dont_connect(req);
 		return 0;
 	}
@@ -3454,7 +3454,7 @@ int http_process_request(struct session *s, struct channel *req, int an_bit)
 	    s->txn.meth == HTTP_METH_POST && s->be->url_param_name != NULL &&
 	    s->be->url_param_post_limit != 0 &&
 	    (msg->flags & (HTTP_MSGF_CNT_LEN|HTTP_MSGF_TE_CHNK))) {
-		buffer_dont_connect(req);
+		channel_dont_connect(req);
 		req->analysers |= AN_REQ_HTTP_BODY;
 	}
@@ -3526,7 +3526,7 @@ int http_process_tarpit(struct session *s, struct channel *req, int an_bit)
 	 * timeout. We just have to check that the client is still
 	 * there and that the timeout has not expired.
 	 */
-	buffer_dont_connect(req);
+	channel_dont_connect(req);
 	if ((req->flags & (CF_SHUTR|CF_READ_ERROR)) == 0 &&
 	    !tick_is_expired(req->analyse_exp, now_ms))
 		return 0;
@@ -3660,7 +3660,7 @@ int http_process_request_body(struct session *s, struct channel *req, int an_bit
 		 * request timeout once at the beginning of the
 		 * request.
 		 */
-		buffer_dont_connect(req);
+		channel_dont_connect(req);
 		if (!tick_isset(req->analyse_exp))
 			req->analyse_exp = tick_add_ifset(now_ms, s->be->timeout.httpreq);
 		return 0;
@@ -3859,10 +3859,10 @@ void http_end_txn_clean_session(struct session *s)
 	}
 
 	/* we're removing the analysers, we MUST re-enable events detection */
-	buffer_auto_read(s->req);
-	buffer_auto_close(s->req);
-	buffer_auto_read(s->rep);
-	buffer_auto_close(s->rep);
+	channel_auto_read(s->req);
+	channel_auto_close(s->req);
+	channel_auto_read(s->rep);
+	channel_auto_close(s->rep);
 
 	s->req->analysers = s->listener->analysers;
 	s->req->analysers &= ~AN_REQ_DECODE_PROXY;
@@ -3900,7 +3900,7 @@ int http_sync_req_state(struct session *s)
 		 * (eg: Linux).
 		 */
 		if (!(s->be->options & PR_O_ABRT_CLOSE) && txn->meth != HTTP_METH_POST)
-			buffer_dont_read(buf);
+			channel_dont_read(buf);
 
 		if (txn->rsp.msg_state == HTTP_MSG_ERROR)
 			goto wait_other_side;
@@ -3914,7 +3914,7 @@ int http_sync_req_state(struct session *s)
 
 		if (txn->rsp.msg_state == HTTP_MSG_TUNNEL) {
 			/* if any side switches to tunnel mode, the other one does too */
-			buffer_auto_read(buf);
+			channel_auto_read(buf);
 			txn->req.msg_state = HTTP_MSG_TUNNEL;
 			goto wait_other_side;
 		}
@@ -3928,7 +3928,7 @@ int http_sync_req_state(struct session *s)
 		if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL) {
 			/* Server-close mode : queue a connection close to the server */
 			if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW)))
-				buffer_shutw_now(buf);
+				channel_shutw_now(buf);
 		}
 		else if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_CLO) {
 			/* Option forceclose is set, or either side wants to close,
@@ -3937,8 +3937,8 @@ int http_sync_req_state(struct session *s)
 			 * once both states are CLOSED.
 			 */
 			if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
-				buffer_shutr_now(buf);
-				buffer_shutw_now(buf);
+				channel_shutr_now(buf);
+				channel_shutw_now(buf);
 			}
 		}
 		else {
@@ -3947,7 +3947,7 @@ int http_sync_req_state(struct session *s)
 			 * in tunnel mode, so we're left with keep-alive only.
 			 * This mode is currently not implemented, we switch to tunnel mode.
 			 */
-			buffer_auto_read(buf);
+			channel_auto_read(buf);
 			txn->req.msg_state = HTTP_MSG_TUNNEL;
 		}
@@ -4017,7 +4017,7 @@ int http_sync_res_state(struct session *s)
 		 * while the request is being uploaded, so we don't disable
 		 * reading.
 		 */
-		/* buffer_dont_read(buf); */
+		/* channel_dont_read(buf); */
 
 		if (txn->req.msg_state == HTTP_MSG_ERROR)
 			goto wait_other_side;
@@ -4033,7 +4033,7 @@ int http_sync_res_state(struct session *s)
 
 		if (txn->req.msg_state == HTTP_MSG_TUNNEL) {
 			/* if any side switches to tunnel mode, the other one does too */
-			buffer_auto_read(buf);
+			channel_auto_read(buf);
 			txn->rsp.msg_state = HTTP_MSG_TUNNEL;
 			goto wait_other_side;
 		}
@@ -4051,7 +4051,7 @@ int http_sync_res_state(struct session *s)
 			 * catch that for the final cleanup.
 			 */
 			if (!(buf->flags & (CF_SHUTR|CF_SHUTR_NOW)))
-				buffer_shutr_now(buf);
+				channel_shutr_now(buf);
 		}
 		else if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_CLO) {
 			/* Option forceclose is set, or either side wants to close,
@@ -4060,8 +4060,8 @@ int http_sync_res_state(struct session *s)
 			 * once both states are CLOSED.
 			 */
 			if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
-				buffer_shutr_now(buf);
-				buffer_shutw_now(buf);
+				channel_shutr_now(buf);
+				channel_shutw_now(buf);
 			}
 		}
 		else {
@@ -4070,7 +4070,7 @@ int http_sync_res_state(struct session *s)
 			 * in tunnel mode, so we're left with keep-alive only.
 			 * This mode is currently not implemented, we switch to tunnel mode.
 			 */
-			buffer_auto_read(buf);
+			channel_auto_read(buf);
 			txn->rsp.msg_state = HTTP_MSG_TUNNEL;
 		}
@@ -4110,8 +4110,8 @@ int http_sync_res_state(struct session *s)
 	http_msg_closed:
 		/* drop any pending data */
 		bi_erase(buf);
-		buffer_auto_close(buf);
-		buffer_auto_read(buf);
+		channel_auto_close(buf);
+		channel_auto_read(buf);
 		goto wait_other_side;
 	}
@@ -4158,23 +4158,23 @@ int http_resync_states(struct session *s)
 	    (txn->req.msg_state == HTTP_MSG_CLOSED &&
 	     txn->rsp.msg_state == HTTP_MSG_CLOSED)) {
 		s->req->analysers = 0;
-		buffer_auto_close(s->req);
-		buffer_auto_read(s->req);
+		channel_auto_close(s->req);
+		channel_auto_read(s->req);
 		s->rep->analysers = 0;
-		buffer_auto_close(s->rep);
-		buffer_auto_read(s->rep);
+		channel_auto_close(s->rep);
+		channel_auto_read(s->rep);
 	}
 	else if (txn->rsp.msg_state == HTTP_MSG_CLOSED ||
 		 txn->rsp.msg_state == HTTP_MSG_ERROR ||
 		 txn->req.msg_state == HTTP_MSG_ERROR ||
 		 (s->rep->flags & CF_SHUTW)) {
 		s->rep->analysers = 0;
-		buffer_auto_close(s->rep);
-		buffer_auto_read(s->rep);
+		channel_auto_close(s->rep);
+		channel_auto_read(s->rep);
 		s->req->analysers = 0;
-		buffer_abort(s->req);
-		buffer_auto_close(s->req);
-		buffer_auto_read(s->req);
+		channel_abort(s->req);
+		channel_auto_close(s->req);
+		channel_auto_read(s->req);
 		bi_erase(s->req);
 	}
 	else if (txn->req.msg_state == HTTP_MSG_CLOSED &&
@@ -4221,7 +4221,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
 	}
 
 	/* in most states, we should abort in case of early close */
-	buffer_auto_close(req);
+	channel_auto_close(req);
 
 	/* Note that we don't have to send 100-continue back because we don't
 	 * need the data to complete our job, and it's up to the server to
@@ -4254,7 +4254,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
 		msg->sol = msg->sov;
 		msg->next -= bytes; /* will be forwarded */
 		msg->chunk_len += bytes;
-		msg->chunk_len -= buffer_forward(req, msg->chunk_len);
+		msg->chunk_len -= channel_forward(req, msg->chunk_len);
 	}
 
 	if (msg->msg_state == HTTP_MSG_DATA) {
@@ -4321,7 +4321,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
 		/* for keep-alive we don't want to forward closes on DONE */
 		if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL ||
 		    (txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL)
-			buffer_dont_close(req);
+			channel_dont_close(req);
 		if (http_resync_states(s)) {
 			/* some state changes occurred, maybe the analyser
 			 * was disabled too.
@@ -4347,8 +4347,8 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
 		 * request.
 		 */
 		if (s->be->options & PR_O_ABRT_CLOSE) {
-			buffer_auto_read(req);
-			buffer_auto_close(req);
+			channel_auto_read(req);
+			channel_auto_close(req);
 		}
 		else if (s->txn.meth == HTTP_METH_POST) {
 			/* POST requests may require to read extra CRLF
@@ -4356,7 +4356,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
 			 * an RST to be sent upon close on some systems
 			 * (eg: Linux).
 			 */
-			buffer_auto_read(req);
+			channel_auto_read(req);
 		}
 
 		return 0;
@@ -4391,7 +4391,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
 	 * chunks even if the client has closed, so we don't want to set CF_DONTCLOSE.
 	 */
 	if (msg->flags & HTTP_MSGF_TE_CHNK)
-		buffer_dont_close(req);
+		channel_dont_close(req);
 
 	/* We know that more data are expected, but we couldn't send more that
 	 * what we did. So we always set the CF_EXPECT_MORE flag so that the
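The http_sync_req_state()/http_sync_res_state() hunks above encode the per-mode close policy: server-close queues a single write shutdown toward the server, while forceclose queues shutdowns in both directions. Stripped of the surrounding state machine, the policy reads as follows (an illustrative reduction, not a function from this patch):

/* Illustrative reduction of the close-mode decisions above */
static void example_queue_close(struct channel *buf, unsigned int txn_flags)
{
	if ((txn_flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL) {
		/* server-close: stop writing once the request is complete */
		channel_shutw_now(buf);
	}
	else if ((txn_flags & TX_CON_WANT_MSK) == TX_CON_WANT_CLO) {
		/* forceclose: shut both directions as soon as states allow */
		channel_shutr_now(buf);
		channel_shutw_now(buf);
	}
}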
@@ -4515,7 +4515,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
 			/* some data has still not left the buffer, wake us once that's done */
 			if (rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
 				goto abort_response;
-			buffer_dont_close(rep);
+			channel_dont_close(rep);
 			rep->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
 			return 0;
 		}
@@ -4577,7 +4577,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
 				health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_HDRRSP);
 			}
 		abort_response:
-			buffer_auto_close(rep);
+			channel_auto_close(rep);
 			rep->analysers = 0;
 			txn->status = 502;
 			rep->prod->flags |= SI_FL_NOLINGER;
@@ -4610,7 +4610,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
 				health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_READ_ERROR);
 			}
 
-			buffer_auto_close(rep);
+			channel_auto_close(rep);
 			rep->analysers = 0;
 			txn->status = 502;
 			rep->prod->flags |= SI_FL_NOLINGER;
@@ -4635,7 +4635,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
 				health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_READ_TIMEOUT);
 			}
 
-			buffer_auto_close(rep);
+			channel_auto_close(rep);
 			rep->analysers = 0;
 			txn->status = 504;
 			rep->prod->flags |= SI_FL_NOLINGER;
@@ -4660,7 +4660,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
 				health_adjust(target_srv(&s->target), HANA_STATUS_HTTP_BROKEN_PIPE);
 			}
 
-			buffer_auto_close(rep);
+			channel_auto_close(rep);
 			rep->analysers = 0;
 			txn->status = 502;
 			rep->prod->flags |= SI_FL_NOLINGER;
@@ -4681,7 +4681,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
 			s->be->be_counters.failed_resp++;
 
 			rep->analysers = 0;
-			buffer_auto_close(rep);
+			channel_auto_close(rep);
 
 			if (!(s->flags & SN_ERR_MASK))
 				s->flags |= SN_ERR_CLICL;
@@ -4692,7 +4692,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
 			return 0;
 		}
 
-		buffer_dont_close(rep);
+		channel_dont_close(rep);
 		return 0;
 	}
@@ -4888,7 +4888,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
 	/* end of job, return OK */
 	rep->analysers &= ~an_bit;
 	rep->analyse_exp = TICK_ETERNITY;
-	buffer_auto_close(rep);
+	channel_auto_close(rep);
 	return 1;
 }
@@ -5066,7 +5066,7 @@ int http_process_res_common(struct session *t, struct channel *rep, int an_bit,
 	 */
 	if (unlikely(txn->status == 100)) {
 		hdr_idx_init(&txn->hdr_idx);
-		msg->next -= buffer_forward(rep, msg->next);
+		msg->next -= channel_forward(rep, msg->next);
 		msg->msg_state = HTTP_MSG_RPBEFORE;
 		txn->status = 0;
 		rep->analysers |= AN_RES_WAIT_HTTP | an_bit;
@@ -5290,7 +5290,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
 	}
 
 	/* in most states, we should abort in case of early close */
-	buffer_auto_close(res);
+	channel_auto_close(res);
 
 	if (msg->msg_state < HTTP_MSG_CHUNK_SIZE) {
 		/* we have msg->sov which points to the first byte of message body.
@@ -5315,7 +5315,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
 		msg->sol = msg->sov;
 		msg->next -= bytes; /* will be forwarded */
 		msg->chunk_len += bytes;
-		msg->chunk_len -= buffer_forward(res, msg->chunk_len);
+		msg->chunk_len -= channel_forward(res, msg->chunk_len);
 	}
 
 	if (msg->msg_state == HTTP_MSG_DATA) {
@@ -5379,7 +5379,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
 		/* for keep-alive we don't want to forward closes on DONE */
 		if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL ||
 		    (txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL)
-			buffer_dont_close(res);
+			channel_dont_close(res);
 		if (http_resync_states(s)) {
 			http_silent_debug(__LINE__, s);
 			/* some state changes occurred, maybe the analyser
@@ -5426,7 +5426,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
 		msg->sol = msg->sov;
 		msg->next -= bytes; /* will be forwarded */
 		msg->chunk_len += bytes;
-		msg->chunk_len -= buffer_forward(res, msg->chunk_len);
+		msg->chunk_len -= channel_forward(res, msg->chunk_len);
 	}
 
 	/* When TE: chunked is used, we need to get there again to parse remaining
@@ -5437,7 +5437,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
 	if ((msg->flags & HTTP_MSGF_TE_CHNK) ||
 	    (txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL ||
 	    (txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL)
-		buffer_dont_close(res);
+		channel_dont_close(res);
 
 	/* We know that more data are expected, but we couldn't send more that
	 * what we did. So we always set the CF_EXPECT_MORE flag so that the
diff --git a/src/proto_tcp.c b/src/proto_tcp.c
index 22664dd17..f6cc6a325 100644
--- a/src/proto_tcp.c
+++ b/src/proto_tcp.c
@@ -821,7 +821,7 @@ int tcp_inspect_request(struct session *s, struct channel *req, int an_bit)
 		if (rule->cond) {
 			ret = acl_exec_cond(rule->cond, s->be, s, &s->txn, SMP_OPT_DIR_REQ | partial);
 			if (ret == ACL_PAT_MISS) {
-				buffer_dont_connect(req);
+				channel_dont_connect(req);
 				/* just set the request timeout once at the beginning of the request */
 				if (!tick_isset(req->analyse_exp) && s->be->tcp_req.inspect_delay)
 					req->analyse_exp = tick_add_ifset(now_ms, s->be->tcp_req.inspect_delay);
@@ -836,8 +836,8 @@ int tcp_inspect_request(struct session *s, struct channel *req, int an_bit)
 		if (ret) {
 			/* we have a matching rule. */
 			if (rule->action == TCP_ACT_REJECT) {
-				buffer_abort(req);
-				buffer_abort(s->rep);
+				channel_abort(req);
+				channel_abort(s->rep);
 				req->analysers = 0;
 
 				s->be->be_counters.denied_req++;
@@ -953,8 +953,8 @@ int tcp_inspect_response(struct session *s, struct channel *rep, int an_bit)
 		if (ret) {
 			/* we have a matching rule. */
 			if (rule->action == TCP_ACT_REJECT) {
-				buffer_abort(rep);
-				buffer_abort(s->req);
+				channel_abort(rep);
+				channel_abort(s->req);
 				rep->analysers = 0;
 
 				s->be->be_counters.denied_resp++;
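A TCP-level reject has to kill both directions at once: channel_abort() raises CF_SHUTR_NOW|CF_SHUTW_NOW and drops CF_AUTO_CONNECT, so no connection attempt can slip through afterwards. The pattern shared by the two reject hunks above, condensed (the wrapper function is illustrative):

/* Illustrative: what TCP_ACT_REJECT does to a session's two channels */
static void example_reject(struct session *s)
{
	channel_abort(s->req); /* CF_SHUTR_NOW|CF_SHUTW_NOW, clears CF_AUTO_CONNECT */
	channel_abort(s->rep);
	s->req->analysers = 0; /* nothing left to analyse on either side */
	s->rep->analysers = 0;
}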
diff --git a/src/session.c b/src/session.c
index a62b42a66..e5a8b7ec9 100644
--- a/src/session.c
+++ b/src/session.c
@@ -217,15 +217,15 @@ int session_accept(struct listener *l, int cfd, struct sockaddr_storage *addr)
 	if (unlikely(fcntl(cfd, F_SETFL, O_NONBLOCK) == -1))
 		goto out_free_task;
 
-	if (unlikely((s->req = pool_alloc2(pool2_buffer)) == NULL))
+	if (unlikely((s->req = pool_alloc2(pool2_channel)) == NULL))
 		goto out_free_task; /* no memory */
 
-	if (unlikely((s->rep = pool_alloc2(pool2_buffer)) == NULL))
+	if (unlikely((s->rep = pool_alloc2(pool2_channel)) == NULL))
 		goto out_free_req; /* no memory */
 
 	/* initialize the request buffer */
 	s->req->buf.size = global.tune.bufsize;
-	buffer_init(s->req);
+	channel_init(s->req);
 	s->req->prod = &s->si[0];
 	s->req->cons = &s->si[1];
 	s->si[0].ib = s->si[1].ob = s->req;
@@ -242,7 +242,7 @@ int session_accept(struct listener *l, int cfd, struct sockaddr_storage *addr)
 
 	/* initialize response buffer */
 	s->rep->buf.size = global.tune.bufsize;
-	buffer_init(s->rep);
+	channel_init(s->rep);
 	s->rep->prod = &s->si[1];
 	s->rep->cons = &s->si[0];
 	s->si[0].ob = s->si[1].ib = s->rep;
@@ -302,9 +302,9 @@ int session_accept(struct listener *l, int cfd, struct sockaddr_storage *addr)
 
 	/* Error unrolling */
  out_free_rep:
-	pool_free2(pool2_buffer, s->rep);
+	pool_free2(pool2_channel, s->rep);
  out_free_req:
-	pool_free2(pool2_buffer, s->req);
+	pool_free2(pool2_channel, s->req);
  out_free_task:
 	p->feconn--;
 	if (s->stkctr1_entry || s->stkctr2_entry)
@@ -363,8 +363,8 @@ static void session_free(struct session *s)
 	if (s->rep->pipe)
 		put_pipe(s->rep->pipe);
 
-	pool_free2(pool2_buffer, s->req);
-	pool_free2(pool2_buffer, s->rep);
+	pool_free2(pool2_channel, s->req);
+	pool_free2(pool2_channel, s->rep);
 
 	http_end_txn(s);
 
@@ -399,7 +399,7 @@ static void session_free(struct session *s)
 
 	/* We may want to free the maximum amount of pools if the proxy is stopping */
 	if (fe && unlikely(fe->state == PR_STSTOPPED)) {
-		pool_flush2(pool2_buffer);
+		pool_flush2(pool2_channel);
 		pool_flush2(pool2_hdr_idx);
 		pool_flush2(pool2_requri);
 		pool_flush2(pool2_capture);
@@ -1032,8 +1032,8 @@ static int process_switching_rules(struct session *s, struct channel *req, int a
 
  sw_failed:
 	/* immediately abort this request in case of allocation failure */
-	buffer_abort(s->req);
-	buffer_abort(s->rep);
+	channel_abort(s->req);
+	channel_abort(s->rep);
 
 	if (!(s->flags & SN_ERR_MASK))
 		s->flags |= SN_ERR_RESOURCE;
@@ -1335,13 +1335,13 @@ struct task *process_session(struct task *t)
 	stream_int_check_timeouts(&s->si[0]);
 	stream_int_check_timeouts(&s->si[1]);
 
-	/* check buffer timeouts, and close the corresponding stream interfaces
+	/* check channel timeouts, and close the corresponding stream interfaces
 	 * for future reads or writes. Note: this will also concern upper layers
 	 * but we do not touch any other flag. We must be careful and correctly
 	 * detect state changes when calling them.
 	 */
-	buffer_check_timeouts(s->req);
+	channel_check_timeouts(s->req);
 
 	if (unlikely((s->req->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
 		s->req->cons->flags |= SI_FL_NOLINGER;
@@ -1354,7 +1354,7 @@ struct task *process_session(struct task *t)
 		si_shutr(s->req->prod);
 	}
 
-	buffer_check_timeouts(s->rep);
+	channel_check_timeouts(s->rep);
 
 	if (unlikely((s->rep->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
 		s->rep->cons->flags |= SI_FL_NOLINGER;
@@ -1496,9 +1496,9 @@ struct task *process_session(struct task *t)
 			 * enabling them again when it disables itself, so
 			 * that other analysers are called in similar conditions.
 			 */
-			buffer_auto_read(s->req);
-			buffer_auto_connect(s->req);
-			buffer_auto_close(s->req);
+			channel_auto_read(s->req);
+			channel_auto_connect(s->req);
+			channel_auto_close(s->req);
 
 			/* We will call all analysers for which a bit is set in
 			 * s->req->analysers, following the bit order from LSB
@@ -1688,8 +1688,8 @@ struct task *process_session(struct task *t)
 		 * it disables itself, so that other analysers are called
 		 * in similar conditions.
 		 */
-		buffer_auto_read(s->rep);
-		buffer_auto_close(s->rep);
+		channel_auto_read(s->rep);
+		channel_auto_close(s->rep);
 
 		/* We will call all analysers for which a bit is set in
 		 * s->rep->analysers, following the bit order from LSB
@@ -1843,7 +1843,7 @@ struct task *process_session(struct task *t)
 	/* If noone is interested in analysing data, it's time to forward
 	 * everything. We configure the buffer to forward indefinitely.
 	 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
-	 * recent call to buffer_abort().
+	 * recent call to channel_abort().
 	 */
 	if (!s->req->analysers &&
 	    !(s->req->flags & (CF_HIJACK|CF_SHUTW|CF_SHUTR_NOW)) &&
@@ -1853,16 +1853,16 @@ struct task *process_session(struct task *t)
 		 * attached to it. If any data are left in, we'll permit them to
 		 * move.
 		 */
-		buffer_auto_read(s->req);
-		buffer_auto_connect(s->req);
-		buffer_auto_close(s->req);
+		channel_auto_read(s->req);
+		channel_auto_connect(s->req);
+		channel_auto_close(s->req);
 		buffer_flush(&s->req->buf);
 
 		/* We'll let data flow between the producer (if still connected)
 		 * to the consumer (which might possibly not be connected yet).
 		 */
 		if (!(s->req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
-			buffer_forward(s->req, CHN_INFINITE_FORWARD);
+			channel_forward(s->req, CHN_INFINITE_FORWARD);
 	}
 
 	/* check if it is wise to enable kernel splicing to forward request data */
@@ -1890,7 +1890,7 @@ struct task *process_session(struct task *t)
 	 */
 	if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK|CF_AUTO_CLOSE|CF_SHUTR)) ==
 		     (CF_AUTO_CLOSE|CF_SHUTR)))
-		buffer_shutw_now(s->req);
+		channel_shutw_now(s->req);
 
 	/* shutdown(write) pending */
 	if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
@@ -1900,7 +1900,7 @@ struct task *process_session(struct task *t)
 	/* shutdown(write) done on server side, we must stop the client too */
 	if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
 		     !s->req->analysers))
-		buffer_shutr_now(s->req);
+		channel_shutr_now(s->req);
 
 	/* shutdown(read) pending */
 	if (unlikely((s->req->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
@@ -1933,8 +1933,8 @@ struct task *process_session(struct task *t)
 		}
 		else {
 			s->req->cons->state = SI_ST_CLO; /* shutw+ini = abort */
-			buffer_shutw_now(s->req);        /* fix buffer flags upon abort */
-			buffer_shutr_now(s->rep);
+			channel_shutw_now(s->req);       /* fix buffer flags upon abort */
+			channel_shutr_now(s->rep);
 		}
 	}
@@ -1979,7 +1979,7 @@ struct task *process_session(struct task *t)
 	/* If noone is interested in analysing data, it's time to forward
 	 * everything. We configure the buffer to forward indefinitely.
 	 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
-	 * recent call to buffer_abort().
+	 * recent call to channel_abort().
	 */
 	if (!s->rep->analysers &&
 	    !(s->rep->flags & (CF_HIJACK|CF_SHUTW|CF_SHUTR_NOW)) &&
@@ -1989,15 +1989,15 @@ struct task *process_session(struct task *t)
 		 * attached to it. If any data are left in, we'll permit them to
 		 * move.
 		 */
-		buffer_auto_read(s->rep);
-		buffer_auto_close(s->rep);
+		channel_auto_read(s->rep);
+		channel_auto_close(s->rep);
 		buffer_flush(&s->rep->buf);
 
 		/* We'll let data flow between the producer (if still connected)
 		 * to the consumer.
 		 */
 		if (!(s->rep->flags & (CF_SHUTR|CF_SHUTW_NOW)))
-			buffer_forward(s->rep, CHN_INFINITE_FORWARD);
+			channel_forward(s->rep, CHN_INFINITE_FORWARD);
 
 		/* if we have no analyser anymore in any direction and have a
 		 * tunnel timeout set, use it now.
@@ -2036,7 +2036,7 @@ struct task *process_session(struct task *t)
 	/* first, let's check if the response buffer needs to shutdown(write) */
 	if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK|CF_AUTO_CLOSE|CF_SHUTR)) ==
 		     (CF_AUTO_CLOSE|CF_SHUTR)))
-		buffer_shutw_now(s->rep);
+		channel_shutw_now(s->rep);
 
 	/* shutdown(write) pending */
 	if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
@@ -2046,7 +2046,7 @@ struct task *process_session(struct task *t)
 	/* shutdown(write) done on the client side, we must stop the server too */
 	if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
 	    !s->rep->analysers)
-		buffer_shutr_now(s->rep);
+		channel_shutr_now(s->rep);
 
 	/* shutdown(read) pending */
 	if (unlikely((s->rep->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
@@ -2313,8 +2313,8 @@ void session_shutdown(struct session *session, int why)
 	if (session->req->flags & (CF_SHUTW|CF_SHUTW_NOW))
 		return;
 
-	buffer_shutw_now(session->req);
-	buffer_shutr_now(session->rep);
+	channel_shutw_now(session->req);
+	channel_shutr_now(session->rep);
 	session->task->nice = 1024;
 	if (!(session->flags & SN_ERR_MASK))
 		session->flags |= why;
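Once no analyser bit is left, process_session() degrades a channel to a plain tunnel; the sequence is short enough to restate in isolation. A sketch of the idiom for one direction — chn stands for either s->req or s->rep, and the request side additionally calls channel_auto_connect():

/* Tunnel idiom: hand the remaining data stream to channel_forward() */
if (!chn->analysers &&
    !(chn->flags & (CF_HIJACK|CF_SHUTW|CF_SHUTR_NOW))) {
	channel_auto_read(chn);
	channel_auto_close(chn);
	buffer_flush(&chn->buf);
	if (!(chn->flags & (CF_SHUTR|CF_SHUTW_NOW)))
		channel_forward(chn, CHN_INFINITE_FORWARD); /* forward forever */
}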
diff --git a/src/stream_interface.c b/src/stream_interface.c
index ae633eae6..765d04fea 100644
--- a/src/stream_interface.c
+++ b/src/stream_interface.c
@@ -108,19 +108,19 @@ void stream_int_report_error(struct stream_interface *si)
  */
 void stream_int_retnclose(struct stream_interface *si, const struct chunk *msg)
 {
-	buffer_auto_read(si->ib);
-	buffer_abort(si->ib);
-	buffer_auto_close(si->ib);
-	buffer_erase(si->ib);
+	channel_auto_read(si->ib);
+	channel_abort(si->ib);
+	channel_auto_close(si->ib);
+	channel_erase(si->ib);
 	bi_erase(si->ob);
 	if (likely(msg && msg->len))
 		bo_inject(si->ob, msg->str, msg->len);
 
 	si->ob->wex = tick_add_ifset(now_ms, si->ob->wto);
-	buffer_auto_read(si->ob);
-	buffer_auto_close(si->ob);
-	buffer_shutr_now(si->ob);
+	channel_auto_read(si->ob);
+	channel_auto_close(si->ob);
+	channel_shutr_now(si->ob);
 }
 
 /* default update function for scheduled tasks, not used for embedded tasks */
@@ -1179,7 +1179,7 @@ void si_conn_recv_cb(struct connection *conn)
 		/* we received a shutdown */
 		b->flags |= CF_READ_NULL;
 		if (b->flags & CF_AUTO_CLOSE)
-			buffer_shutw_now(b);
+			channel_shutw_now(b);
 		stream_sock_read0(si);
 		conn_data_read0(conn);
 		return;
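stream_int_retnclose() above is the canonical "send a final message, then close" sequence in the renamed API; laid out flat for reference (the message text is illustrative, every call is from this patch):

/* Illustrative replay of stream_int_retnclose(si, &msg) */
struct chunk msg = { .str = (char *)"503 busy\r\n", .len = 10 };

channel_auto_read(si->ib);  /* input side: drain and stop cleanly */
channel_abort(si->ib);
channel_auto_close(si->ib);
channel_erase(si->ib);

bi_erase(si->ob);           /* output side: keep only unsent data */
bo_inject(si->ob, msg.str, msg.len);
si->ob->wex = tick_add_ifset(now_ms, si->ob->wto);
channel_auto_read(si->ob);
channel_auto_close(si->ob);
channel_shutr_now(si->ob);  /* nothing more will be read back */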