CLEANUP: channel: use CF_/CHN_ prefixes instead of BF_/BUF_

Get rid of these confusing BF_* flags. Now channel naming should clearly
be used everywhere appropriate.

No code was changed, only a renaming was performed. The comments about
channel operations were updated.
Willy Tarreau 2012-08-27 23:14:58 +02:00 committed by Willy Tarreau
parent af81935b82
commit 03cdb7c678
11 changed files with 390 additions and 379 deletions
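
The change is mechanical at every call site: only the flag prefix changes. As a quick illustration (not an actual hunk from this commit), a typical flag update goes from:

	buf->flags |= BF_SHUTR_NOW;   /* before */

to:

	buf->flags |= CF_SHUTR_NOW;   /* after */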

View File

@ -84,7 +84,7 @@ static inline int buffer_reserved(const struct channel *buf)
{
int ret = global.tune.maxrewrite - buf->to_forward - buf->buf.o;
if (buf->to_forward == BUF_INFINITE_FORWARD)
if (buf->to_forward == CHN_INFINITE_FORWARD)
return 0;
if (ret <= 0)
return 0;
@ -115,8 +115,8 @@ static inline int channel_full(const struct channel *b)
return 1; /* buffer already full */
if (b->to_forward >= b->buf.size ||
(BUF_INFINITE_FORWARD < MAX_RANGE(typeof(b->buf.size)) && // just there to ensure gcc
b->to_forward == BUF_INFINITE_FORWARD)) // avoids the useless second
(CHN_INFINITE_FORWARD < MAX_RANGE(typeof(b->buf.size)) && // just there to ensure gcc
b->to_forward == CHN_INFINITE_FORWARD)) // avoids the useless second
return 0; // test whenever possible
rem -= global.tune.maxrewrite;
@ -141,8 +141,8 @@ static inline int bi_avail(const struct channel *b)
return rem; /* buffer already full */
if (b->to_forward >= b->buf.size ||
(BUF_INFINITE_FORWARD < MAX_RANGE(typeof(b->buf.size)) && // just there to ensure gcc
b->to_forward == BUF_INFINITE_FORWARD)) // avoids the useless second
(CHN_INFINITE_FORWARD < MAX_RANGE(typeof(b->buf.size)) && // just there to ensure gcc
b->to_forward == CHN_INFINITE_FORWARD)) // avoids the useless second
return rem; // test whenever possible
rem2 = rem - global.tune.maxrewrite;
@ -167,13 +167,13 @@ static inline int buffer_contig_space_res(const struct channel *chn)
/* Returns true if the buffer's input is already closed */
static inline int buffer_input_closed(struct channel *buf)
{
return ((buf->flags & BF_SHUTR) != 0);
return ((buf->flags & CF_SHUTR) != 0);
}
/* Returns true if the buffer's output is already closed */
static inline int buffer_output_closed(struct channel *buf)
{
return ((buf->flags & BF_SHUTW) != 0);
return ((buf->flags & CF_SHUTW) != 0);
}
/* Check buffer timeouts, and set the corresponding flags. The
@ -184,17 +184,17 @@ static inline int buffer_output_closed(struct channel *buf)
*/
static inline void buffer_check_timeouts(struct channel *b)
{
if (likely(!(b->flags & (BF_SHUTR|BF_READ_TIMEOUT|BF_READ_ACTIVITY|BF_READ_NOEXP))) &&
if (likely(!(b->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_ACTIVITY|CF_READ_NOEXP))) &&
unlikely(tick_is_expired(b->rex, now_ms)))
b->flags |= BF_READ_TIMEOUT;
b->flags |= CF_READ_TIMEOUT;
if (likely(!(b->flags & (BF_SHUTW|BF_WRITE_TIMEOUT|BF_WRITE_ACTIVITY))) &&
if (likely(!(b->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_ACTIVITY))) &&
unlikely(tick_is_expired(b->wex, now_ms)))
b->flags |= BF_WRITE_TIMEOUT;
b->flags |= CF_WRITE_TIMEOUT;
if (likely(!(b->flags & BF_ANA_TIMEOUT)) &&
if (likely(!(b->flags & CF_ANA_TIMEOUT)) &&
unlikely(tick_is_expired(b->analyse_exp, now_ms)))
b->flags |= BF_ANA_TIMEOUT;
b->flags |= CF_ANA_TIMEOUT;
}
/* Erase any content from buffer <buf> and adjusts flags accordingly. Note
@ -229,20 +229,20 @@ static inline void bi_erase(struct channel *buf)
/* marks the buffer as "shutdown" ASAP for reads */
static inline void buffer_shutr_now(struct channel *buf)
{
buf->flags |= BF_SHUTR_NOW;
buf->flags |= CF_SHUTR_NOW;
}
/* marks the buffer as "shutdown" ASAP for writes */
static inline void buffer_shutw_now(struct channel *buf)
{
buf->flags |= BF_SHUTW_NOW;
buf->flags |= CF_SHUTW_NOW;
}
/* marks the buffer as "shutdown" ASAP in both directions */
static inline void buffer_abort(struct channel *buf)
{
buf->flags |= BF_SHUTR_NOW | BF_SHUTW_NOW;
buf->flags &= ~BF_AUTO_CONNECT;
buf->flags |= CF_SHUTR_NOW | CF_SHUTW_NOW;
buf->flags &= ~CF_AUTO_CONNECT;
}
/* Installs <func> as a hijacker on the buffer <b> for session <s>. The hijack
@ -255,20 +255,20 @@ static inline void buffer_install_hijacker(struct session *s,
void (*func)(struct session *, struct channel *))
{
b->hijacker = func;
b->flags |= BF_HIJACK;
b->flags |= CF_HIJACK;
func(s, b);
}
/* Releases the buffer from hijacking mode. Often used by the hijack function */
static inline void buffer_stop_hijack(struct channel *buf)
{
buf->flags &= ~BF_HIJACK;
buf->flags &= ~CF_HIJACK;
}
/* allow the consumer to try to establish a new connection. */
static inline void buffer_auto_connect(struct channel *buf)
{
buf->flags |= BF_AUTO_CONNECT;
buf->flags |= CF_AUTO_CONNECT;
}
/* prevent the consumer from trying to establish a new connection, and also
@ -276,31 +276,31 @@ static inline void buffer_auto_connect(struct channel *buf)
*/
static inline void buffer_dont_connect(struct channel *buf)
{
buf->flags &= ~(BF_AUTO_CONNECT|BF_AUTO_CLOSE);
buf->flags &= ~(CF_AUTO_CONNECT|CF_AUTO_CLOSE);
}
/* allow the producer to forward shutdown requests */
static inline void buffer_auto_close(struct channel *buf)
{
buf->flags |= BF_AUTO_CLOSE;
buf->flags |= CF_AUTO_CLOSE;
}
/* prevent the producer from forwarding shutdown requests */
static inline void buffer_dont_close(struct channel *buf)
{
buf->flags &= ~BF_AUTO_CLOSE;
buf->flags &= ~CF_AUTO_CLOSE;
}
/* allow the producer to read / poll the input */
static inline void buffer_auto_read(struct channel *buf)
{
buf->flags &= ~BF_DONT_READ;
buf->flags &= ~CF_DONT_READ;
}
/* prevent the producer from read / poll the input */
static inline void buffer_dont_read(struct channel *buf)
{
buf->flags |= BF_DONT_READ;
buf->flags |= CF_DONT_READ;
}
/*
@ -317,7 +317,7 @@ static inline void bo_skip(struct channel *buf, int len)
buf->buf.p = buf->buf.data;
/* notify that some data was written to the SI from the buffer */
buf->flags |= BF_WRITE_PARTIAL;
buf->flags |= CF_WRITE_PARTIAL;
}
/* Tries to copy chunk <chunk> into buffer <buf> after length controls.
@ -359,8 +359,8 @@ static inline int bi_putstr(struct channel *buf, const char *str)
static inline int bo_getchr(struct channel *buf)
{
/* closed or empty + imminent close = -2; empty = -1 */
if (unlikely((buf->flags & BF_SHUTW) || channel_is_empty(buf))) {
if (buf->flags & (BF_SHUTW|BF_SHUTW_NOW))
if (unlikely((buf->flags & CF_SHUTW) || channel_is_empty(buf))) {
if (buf->flags & (CF_SHUTW|CF_SHUTW_NOW))
return -2;
return -1;
}
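
As an aside, here is a minimal usage sketch for the return convention above. It is not part of the commit, and it assumes (as in this tree) that bo_getchr() only peeks at the oldest scheduled byte, so the caller must advance with bo_skip():

	#include <stdio.h>

	/* Sketch: drain a channel's scheduled output one byte at a time.
	 * -1 means "empty, retry later", -2 means "the peer is gone". */
	static int drain_channel(struct channel *chn)
	{
		int c;

		while ((c = bo_getchr(chn)) >= 0) {
			fputc(c, stdout); /* hypothetical consumer of the byte */
			bo_skip(chn, 1);  /* bo_getchr() does not consume */
		}
		return c;
	}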

View File

@ -27,25 +27,25 @@
#include <common/buffer.h>
#include <types/stream_interface.h>
/* The BF_* macros designate Channel Flags (originally "Buffer Flags"), which
* may be ORed in the bit field member 'flags' in struct channel. Here we have
* several types of flags :
/* The CF_* macros designate Channel Flags, which may be ORed in the bit field
* member 'flags' in struct channel. Here we have several types of flags :
*
* - pure status flags, reported by the lower layer, which must be cleared
* - pure status flags, reported by the data layer, which must be cleared
* before doing further I/O :
* BF_*_NULL, BF_*_PARTIAL
* CF_*_NULL, CF_*_PARTIAL
*
* - pure status flags, reported by mid-layer, which must also be cleared
* before doing further I/O :
* BF_*_TIMEOUT, BF_*_ERROR
* - pure status flags, reported by stream-interface layer, which must also
* be cleared before doing further I/O :
* CF_*_TIMEOUT, CF_*_ERROR
*
* - read-only indicators reported by lower levels :
* BF_STREAMER, BF_STREAMER_FAST
* - read-only indicators reported by lower data levels :
* CF_STREAMER, CF_STREAMER_FAST
*
* - write-once status flags reported by the mid-level : BF_SHUTR, BF_SHUTW
* - write-once status flags reported by the stream-interface layer :
* CF_SHUTR, CF_SHUTW
*
* - persistent control flags managed only by higher level :
* BF_SHUT*_NOW, BF_*_ENA, BF_HIJACK
* - persistent control flags managed only by application level :
* CF_SHUT*_NOW, CF_*_ENA, CF_HIJACK
*
* The flags have been arranged for readability, so that the read and write
* bits have the same position in a byte (read being the lower byte and write
@ -53,30 +53,30 @@
* 'write' indicates the direction from the channel to the stream interface.
*/
#define BF_READ_NULL 0x000001 /* last read detected on producer side */
#define BF_READ_PARTIAL 0x000002 /* some data were read from producer */
#define BF_READ_TIMEOUT 0x000004 /* timeout while waiting for producer */
#define BF_READ_ERROR 0x000008 /* unrecoverable error on producer side */
#define BF_READ_ACTIVITY (BF_READ_NULL|BF_READ_PARTIAL|BF_READ_ERROR)
#define CF_READ_NULL 0x00000001 /* last read detected on producer side */
#define CF_READ_PARTIAL 0x00000002 /* some data were read from producer */
#define CF_READ_TIMEOUT 0x00000004 /* timeout while waiting for producer */
#define CF_READ_ERROR 0x00000008 /* unrecoverable error on producer side */
#define CF_READ_ACTIVITY (CF_READ_NULL|CF_READ_PARTIAL|CF_READ_ERROR)
/* unused: 0x000010 */
#define BF_SHUTR 0x000020 /* producer has already shut down */
#define BF_SHUTR_NOW 0x000040 /* the producer must shut down for reads ASAP */
#define BF_READ_NOEXP 0x000080 /* producer should not expire */
/* unused: 0x00000010 */
#define CF_SHUTR 0x00000020 /* producer has already shut down */
#define CF_SHUTR_NOW 0x00000040 /* the producer must shut down for reads ASAP */
#define CF_READ_NOEXP 0x00000080 /* producer should not expire */
#define BF_WRITE_NULL 0x000100 /* write(0) or connect() succeeded on consumer side */
#define BF_WRITE_PARTIAL 0x000200 /* some data were written to the consumer */
#define BF_WRITE_TIMEOUT 0x000400 /* timeout while waiting for consumer */
#define BF_WRITE_ERROR 0x000800 /* unrecoverable error on consumer side */
#define BF_WRITE_ACTIVITY (BF_WRITE_NULL|BF_WRITE_PARTIAL|BF_WRITE_ERROR)
#define CF_WRITE_NULL 0x00000100 /* write(0) or connect() succeeded on consumer side */
#define CF_WRITE_PARTIAL 0x00000200 /* some data were written to the consumer */
#define CF_WRITE_TIMEOUT 0x00000400 /* timeout while waiting for consumer */
#define CF_WRITE_ERROR 0x00000800 /* unrecoverable error on consumer side */
#define CF_WRITE_ACTIVITY (CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_WRITE_ERROR)
/* unused: 0x001000 */
#define BF_SHUTW 0x002000 /* consumer has already shut down */
#define BF_SHUTW_NOW 0x004000 /* the consumer must shut down for writes ASAP */
#define BF_AUTO_CLOSE 0x008000 /* producer can forward shutdown to other side */
/* unused: 0x00001000 */
#define CF_SHUTW 0x00002000 /* consumer has already shut down */
#define CF_SHUTW_NOW 0x00004000 /* the consumer must shut down for writes ASAP */
#define CF_AUTO_CLOSE 0x00008000 /* producer can forward shutdown to other side */
/* When either BF_SHUTR_NOW or BF_HIJACK is set, it is strictly forbidden for
* the producer to alter the buffer contents. When BF_SHUTW_NOW is set, the
/* When either CF_SHUTR_NOW or CF_HIJACK is set, it is strictly forbidden for
* the producer to alter the buffer contents. When CF_SHUTW_NOW is set, the
* consumer is free to perform a shutw() when it has consumed the last contents,
* otherwise the session processor will do it anyway.
*
@ -103,33 +103,34 @@
* detected on the consumer side.
*/
#define BF_STREAMER 0x010000 /* the producer is identified as streaming data */
#define BF_STREAMER_FAST 0x020000 /* the consumer seems to eat the stream very fast */
#define CF_STREAMER 0x00010000 /* the producer is identified as streaming data */
#define CF_STREAMER_FAST 0x00020000 /* the consumer seems to eat the stream very fast */
#define BF_HIJACK 0x040000 /* the producer is temporarily replaced by ->hijacker */
#define BF_ANA_TIMEOUT 0x080000 /* the analyser timeout has expired */
#define BF_READ_ATTACHED 0x100000 /* the read side is attached for the first time */
#define BF_KERN_SPLICING 0x200000 /* kernel splicing desired for this channel */
#define BF_READ_DONTWAIT 0x400000 /* wake the task up after every read (eg: HTTP request) */
#define BF_AUTO_CONNECT 0x800000 /* consumer may attempt to establish a new connection */
#define CF_HIJACK 0x00040000 /* the producer is temporarily replaced by ->hijacker */
#define CF_ANA_TIMEOUT 0x00080000 /* the analyser timeout has expired */
#define CF_READ_ATTACHED 0x00100000 /* the read side is attached for the first time */
#define CF_KERN_SPLICING 0x00200000 /* kernel splicing desired for this channel */
#define CF_READ_DONTWAIT 0x00400000 /* wake the task up after every read (eg: HTTP request) */
#define CF_AUTO_CONNECT 0x00800000 /* consumer may attempt to establish a new connection */
#define BF_DONT_READ 0x1000000 /* disable reading for now */
#define BF_EXPECT_MORE 0x2000000 /* more data expected to be sent very soon (one-shoot) */
#define BF_SEND_DONTWAIT 0x4000000 /* don't wait for sending data (one-shoot) */
#define BF_NEVER_WAIT 0x8000000 /* never wait for sending data (permanent) */
#define CF_DONT_READ 0x01000000 /* disable reading for now */
#define CF_EXPECT_MORE 0x02000000 /* more data expected to be sent very soon (one-shoot) */
#define CF_SEND_DONTWAIT 0x04000000 /* don't wait for sending data (one-shoot) */
#define CF_NEVER_WAIT 0x08000000 /* never wait for sending data (permanent) */
#define BF_WAKE_ONCE 0x10000000 /* pretend there is activity on this channel (one-shoot) */
#define CF_WAKE_ONCE 0x10000000 /* pretend there is activity on this channel (one-shoot) */
/* unused: 0x20000000, 0x40000000, 0x80000000 */
/* Use these masks to clear the flags before going back to lower layers */
#define BF_CLEAR_READ (~(BF_READ_NULL|BF_READ_PARTIAL|BF_READ_ERROR|BF_READ_ATTACHED))
#define BF_CLEAR_WRITE (~(BF_WRITE_NULL|BF_WRITE_PARTIAL|BF_WRITE_ERROR))
#define BF_CLEAR_TIMEOUT (~(BF_READ_TIMEOUT|BF_WRITE_TIMEOUT|BF_ANA_TIMEOUT))
#define CF_CLEAR_READ (~(CF_READ_NULL|CF_READ_PARTIAL|CF_READ_ERROR|CF_READ_ATTACHED))
#define CF_CLEAR_WRITE (~(CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_WRITE_ERROR))
#define CF_CLEAR_TIMEOUT (~(CF_READ_TIMEOUT|CF_WRITE_TIMEOUT|CF_ANA_TIMEOUT))
/* Masks which define input events for stream analysers */
#define BF_MASK_ANALYSER (BF_READ_ATTACHED|BF_READ_ACTIVITY|BF_READ_TIMEOUT|BF_ANA_TIMEOUT|BF_WRITE_ACTIVITY|BF_WAKE_ONCE)
#define CF_MASK_ANALYSER (CF_READ_ATTACHED|CF_READ_ACTIVITY|CF_READ_TIMEOUT|CF_ANA_TIMEOUT|CF_WRITE_ACTIVITY|CF_WAKE_ONCE)
/* Mask for static flags which cause analysers to be woken up when they change */
#define BF_MASK_STATIC (BF_SHUTR|BF_SHUTW|BF_SHUTR_NOW|BF_SHUTW_NOW)
#define CF_MASK_STATIC (CF_SHUTR|CF_SHUTW|CF_SHUTR_NOW|CF_SHUTW_NOW)
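
A hedged sketch of how the clear masks above are meant to be used; the wrapper function is hypothetical:

	/* Sketch: drop transient read/write events before going back to the
	 * lower layers, leaving persistent control flags untouched. */
	static inline void example_clear_events(struct channel *chn)
	{
		chn->flags &= CF_CLEAR_READ;  /* CF_READ_{NULL,PARTIAL,ERROR,ATTACHED} */
		chn->flags &= CF_CLEAR_WRITE; /* CF_WRITE_{NULL,PARTIAL,ERROR} */
	}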
/* Analysers (channel->analysers).
@ -164,13 +165,13 @@
/* Magic value to forward infinite size (TCP, ...), used with ->to_forward */
#define BUF_INFINITE_FORWARD MAX_RANGE(int)
#define CHN_INFINITE_FORWARD MAX_RANGE(int)
/* needed for a declaration below */
struct session;
struct channel {
unsigned int flags; /* BF_* */
unsigned int flags; /* CF_* */
int rex; /* expiration date for a read, in ticks */
int wex; /* expiration date for a write or connect, in ticks */
int rto; /* read timeout, in ticks */
@ -189,15 +190,27 @@ struct channel {
};
/* Note about the buffer structure
/* Note about the channel structure
The buffer contains two length indicators, one to_forward counter and one
->o limit. First, it must be understood that the buffer is in fact
split in two parts :
- the visible data (->data, for ->l bytes)
- the invisible data, typically in kernel buffers forwarded directly from
the source stream sock to the destination stream sock (->pipe->data
bytes). Those are used only during forward.
A channel stores information needed to reliably transport data in a single
direction. It stores status flags, timeouts, counters, subscribed analysers,
pointers to a data producer and to a data consumer, and information about
the amount of data which is allowed to flow directly from the producer to
the consumer without waking up the analysers.
A channel may buffer data into two locations :
- a visible buffer (->buf)
- an invisible buffer which right now consists in a pipe making use of
kernel buffers that cannot be tampered with.
Data stored into the first location may be analysed and altered by analysers
while data stored in pipes is only aimed at being transported from one
network socket to another one without being subject to memory copies. This
buffer may only be used when both the socket layer and the data layer of the
producer and the consumer support it, which typically is the case with Linux
splicing over sockets, and when there are enough data to be transported
without being analyzed (transport of TCP/HTTP payload or tunnelled data,
which is indicated by ->to_forward).
In order not to mix data streams, the producer may only feed the invisible
data with data to forward, and only when the visible buffer is empty. The
@ -214,19 +227,19 @@ struct channel {
reflected by ->pipe->data. This is very important as this and only this can
ensure strict ordering of data between buffers.
The producer is responsible for decreasing ->to_forward and increasing
->o. The ->to_forward parameter indicates how many bytes may be fed
into either data buffer without waking the parent up. The special value
BUF_INFINITE_FORWARD is never decreased nor increased. The ->o
parameter says how many bytes may be consumed from the visible buffer. Thus
it may never exceed ->l. This parameter is updated by any buffer_write() as
well as any data forwarded through the visible buffer. Since the ->to_forward
attribute applies to data after ->w+o, an analyser will not see a
buffer which has a non-null to_forward with o < l. A producer is
responsible for raising ->o by min(to_forward, l-o) when it
injects data into the buffer.
The producer is responsible for decreasing ->to_forward. The ->to_forward
parameter indicates how many bytes may be fed into either data buffer
without waking the parent up. The special value CHN_INFINITE_FORWARD is
never decreased nor increased.
The consumer is responsible for decreasing ->o when it sends data
The buf->o parameter says how many bytes may be consumed from the visible
buffer. This parameter is updated by any buffer_write() as well as any data
forwarded through the visible buffer. Since the ->to_forward attribute
applies to data after buf->p, an analyser will not see a buffer which has a
non-null ->to_forward with buf->i > 0. A producer is responsible for raising
buf->o by min(to_forward, buf->i) when it injects data into the buffer.
The consumer is responsible for decreasing ->buf->o when it sends data
from the visible buffer, and ->pipe->data when it sends data from the
invisible buffer.
@ -234,11 +247,11 @@ struct channel {
buffer to be forwarded. We know the header length (300) and the amount of
data to forward (content-length=9000). The buffer already contains 1000
bytes of data after the 300 bytes of headers. Thus the caller will set
->o to 300 indicating that it explicitly wants to send those data,
and set ->to_forward to 9000 (content-length). This value must be normalised
buf->o to 300 indicating that it explicitly wants to send those data, and
set ->to_forward to 9000 (content-length). This value must be normalised
immediately after updating ->to_forward : since there are already 1300 bytes
in the buffer, 300 of which are already counted in ->o, and that size
is smaller than ->to_forward, we must update ->o to 1300 to flush the
in the buffer, 300 of which are already counted in buf->o, and that size
is smaller than ->to_forward, we must update buf->o to 1300 to flush the
whole buffer, and reduce ->to_forward to 8000. After that, the producer may
try to feed the additional data through the invisible buffer using a
platform-specific method such as splice().
@ -255,21 +268,19 @@ struct channel {
eventually leave the buffer. So as long as ->to_forward is larger than
global.maxrewrite, we can fill the buffer. If ->to_forward is smaller than
global.maxrewrite, then we don't want to fill the buffer with more than
->size - global.maxrewrite + ->to_forward.
buf->size - global.maxrewrite + ->to_forward.
A buffer may contain up to 5 areas :
- the data waiting to be sent. These data are located between ->w and
->w+o ;
- the data waiting to be sent. These data are located between buf->p-o and
buf->p ;
- the data to process and possibly transform. These data start at
->w+o and may be up to r-w bytes long. Generally ->lr remains in
this area ;
- the data to preserve. They start at the end of the previous one and stop
at ->r. The limit between the two solely depends on the protocol being
analysed ; ->lr may be used as a marker.
buf->p and may be up to ->i bytes long.
- the data to preserve. They start at ->p and stop at ->p+i. The limit
between the two solely depends on the protocol being analysed.
- the spare area : it is the remainder of the buffer, which can be used to
store new incoming data. It starts at ->r and is up to ->size-l long. It
may be limited by global.maxrewrite.
- the reserved are : this is the area which must not be filled and is
store new incoming data. It starts at ->p+i and is up to ->size-i-o long.
It may be limited by global.maxrewrite.
- the reserved area : this is the area which must not be filled and is
reserved for possible rewrites ; it is up to global.maxrewrite bytes
long.
*/
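
The HTTP example from the note above can be restated as a small self-contained sketch; the structure is a stand-in for the real channel and all names are hypothetical:

	/* Sketch of the normalisation described above: 300 header bytes already
	 * scheduled (o = 300), 1000 payload bytes buffered (i = 1000), and a
	 * content-length of 9000 still to forward. */
	struct chn_sketch {
		unsigned long long to_forward;
		int i, o; /* buffered input bytes / scheduled output bytes */
	};

	static void normalise(struct chn_sketch *c)
	{
		unsigned long long fwd = c->i;

		if (fwd > c->to_forward)
			fwd = c->to_forward;
		c->o += fwd;          /* 300 + 1000 = 1300 : flush the whole buffer */
		c->i -= fwd;
		c->to_forward -= fwd; /* 9000 - 1000 = 8000 left for the producer */
	}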

View File

@ -71,10 +71,10 @@ unsigned long long buffer_forward(struct channel *buf, unsigned long long bytes)
/* Note: the case below is the only case where we may return
* a byte count that does not fit into a 32-bit number.
*/
if (likely(buf->to_forward == BUF_INFINITE_FORWARD))
if (likely(buf->to_forward == CHN_INFINITE_FORWARD))
return bytes;
if (likely(bytes == BUF_INFINITE_FORWARD)) {
if (likely(bytes == CHN_INFINITE_FORWARD)) {
buf->to_forward = bytes;
return bytes;
}
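
For reference, the special value is normally installed through buffer_forward() itself; a hedged one-liner with a hypothetical caller:

	/* Sketch: switch a channel to tunnel mode so any amount of incoming
	 * data is forwarded without waking the analysers up. */
	static void example_tunnel(struct channel *chn)
	{
		buffer_forward(chn, CHN_INFINITE_FORWARD);
	}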
@ -146,10 +146,10 @@ int bi_putchr(struct channel *buf, char c)
*bi_end(&buf->buf) = c;
buf->buf.i++;
buf->flags |= BF_READ_PARTIAL;
buf->flags |= CF_READ_PARTIAL;
if (buf->to_forward >= 1) {
if (buf->to_forward != BUF_INFINITE_FORWARD)
if (buf->to_forward != CHN_INFINITE_FORWARD)
buf->to_forward--;
b_adv(&buf->buf, 1);
}
@ -197,7 +197,7 @@ int bi_putblk(struct channel *buf, const char *blk, int len)
buf->total += len;
if (buf->to_forward) {
unsigned long fwd = len;
if (buf->to_forward != BUF_INFINITE_FORWARD) {
if (buf->to_forward != CHN_INFINITE_FORWARD) {
if (fwd > buf->to_forward)
fwd = buf->to_forward;
buf->to_forward -= fwd;
@ -206,7 +206,7 @@ int bi_putblk(struct channel *buf, const char *blk, int len)
}
/* notify that some data was read from the SI into the buffer */
buf->flags |= BF_READ_PARTIAL;
buf->flags |= CF_READ_PARTIAL;
return len;
}
@ -229,8 +229,8 @@ int bo_getline(struct channel *buf, char *str, int len)
max = len;
/* closed or empty + imminent close = -1; empty = 0 */
if (unlikely((buf->flags & BF_SHUTW) || channel_is_empty(buf))) {
if (buf->flags & (BF_SHUTW|BF_SHUTW_NOW))
if (unlikely((buf->flags & CF_SHUTW) || channel_is_empty(buf))) {
if (buf->flags & (CF_SHUTW|CF_SHUTW_NOW))
ret = -1;
goto out;
}
@ -252,7 +252,7 @@ int bo_getline(struct channel *buf, char *str, int len)
}
if (ret > 0 && ret < len && ret < buf->buf.o &&
*(str-1) != '\n' &&
!(buf->flags & (BF_SHUTW|BF_SHUTW_NOW)))
!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW)))
ret = 0;
out:
if (max)
@ -272,11 +272,11 @@ int bo_getblk(struct channel *buf, char *blk, int len, int offset)
{
int firstblock;
if (buf->flags & BF_SHUTW)
if (buf->flags & CF_SHUTW)
return -1;
if (len + offset > buf->buf.o) {
if (buf->flags & (BF_SHUTW|BF_SHUTW_NOW))
if (buf->flags & (CF_SHUTW|CF_SHUTW_NOW))
return -1;
return 0;
}
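
A usage sketch for the return convention visible above (-1 = closed, 0 = not enough data yet, > 0 = bytes copied); the fixed 8-byte header and the helper are assumptions:

	/* Sketch: peek at an 8-byte header among the scheduled output data,
	 * then consume it on success. Assumes, as elsewhere in this tree,
	 * that bo_getblk() copies without consuming. */
	static int example_read_header(struct channel *chn, char *hdr)
	{
		int ret = bo_getblk(chn, hdr, 8, 0);

		if (ret <= 0)
			return ret; /* -1: closed; 0: retry when more data arrives */
		bo_skip(chn, 8);
		return ret;
	}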

View File

@ -139,7 +139,7 @@ static int stats_accept(struct session *s)
s->logs.prx_queue_size = 0; /* we get the number of pending conns before us */
s->logs.srv_queue_size = 0; /* we will get this number soon */
s->req->flags |= BF_READ_DONTWAIT; /* we plan to read small requests */
s->req->flags |= CF_READ_DONTWAIT; /* we plan to read small requests */
if (s->listener->timeout) {
s->req->rto = *s->listener->timeout;
@ -1545,10 +1545,10 @@ static void cli_io_handler(struct stream_interface *si)
/* re-adjust req buffer */
bo_skip(si->ob, reql);
req->flags |= BF_READ_DONTWAIT; /* we plan to read small requests */
req->flags |= CF_READ_DONTWAIT; /* we plan to read small requests */
}
else { /* output functions: first check if the output buffer is closed then abort */
if (res->flags & (BF_SHUTR_NOW|BF_SHUTR)) {
if (res->flags & (CF_SHUTR_NOW|CF_SHUTR)) {
si->applet.st0 = STAT_CLI_END;
continue;
}
@ -1595,7 +1595,7 @@ static void cli_io_handler(struct stream_interface *si)
* buffer is empty. This still allows pipelined requests
* to be sent in non-interactive mode.
*/
if ((res->flags & (BF_SHUTW|BF_SHUTW_NOW)) || (!si->applet.st1 && !req->buf.o)) {
if ((res->flags & (CF_SHUTW|CF_SHUTW_NOW)) || (!si->applet.st1 && !req->buf.o)) {
si->applet.st0 = STAT_CLI_END;
continue;
}
@ -1605,7 +1605,7 @@ static void cli_io_handler(struct stream_interface *si)
}
}
if ((res->flags & BF_SHUTR) && (si->state == SI_ST_EST) && (si->applet.st0 != STAT_CLI_GETREQ)) {
if ((res->flags & CF_SHUTR) && (si->state == SI_ST_EST) && (si->applet.st0 != STAT_CLI_GETREQ)) {
DPRINTF(stderr, "%s@%d: si to buf closed. req=%08x, res=%08x, st=%d\n",
__FUNCTION__, __LINE__, req->flags, res->flags, si->state);
/* Other side has closed, let's abort if we have no more processing to do
@ -1616,7 +1616,7 @@ static void cli_io_handler(struct stream_interface *si)
si_shutw(si);
}
if ((req->flags & BF_SHUTW) && (si->state == SI_ST_EST) && (si->applet.st0 < STAT_CLI_OUTPUT)) {
if ((req->flags & CF_SHUTW) && (si->state == SI_ST_EST) && (si->applet.st0 < STAT_CLI_OUTPUT)) {
DPRINTF(stderr, "%s@%d: buf to si closed. req=%08x, res=%08x, st=%d\n",
__FUNCTION__, __LINE__, req->flags, res->flags, si->state);
/* We have no more processing to do, and nothing more to send, and
@ -1624,7 +1624,7 @@ static void cli_io_handler(struct stream_interface *si)
* on the response buffer.
*/
si_shutr(si);
res->flags |= BF_READ_NULL;
res->flags |= CF_READ_NULL;
}
/* update all other flags and resync with the other side */
@ -1823,7 +1823,7 @@ static void http_stats_io_handler(struct stream_interface *si)
goto out;
/* check that the output is not closed */
if (res->flags & (BF_SHUTW|BF_SHUTW_NOW))
if (res->flags & (CF_SHUTW|CF_SHUTW_NOW))
si->applet.st0 = 1;
if (!si->applet.st0) {
@ -1840,12 +1840,12 @@ static void http_stats_io_handler(struct stream_interface *si)
}
}
if ((res->flags & BF_SHUTR) && (si->state == SI_ST_EST))
if ((res->flags & CF_SHUTR) && (si->state == SI_ST_EST))
si_shutw(si);
if ((req->flags & BF_SHUTW) && (si->state == SI_ST_EST) && si->applet.st0) {
if ((req->flags & CF_SHUTW) && (si->state == SI_ST_EST) && si->applet.st0) {
si_shutr(si);
res->flags |= BF_READ_NULL;
res->flags |= CF_READ_NULL;
}
/* update all other flags and resync with the other side */
@ -3545,7 +3545,7 @@ static int stats_dump_sess_to_buffer(struct stream_interface *si)
{
struct chunk msg;
if (unlikely(si->ib->flags & (BF_WRITE_ERROR|BF_SHUTW))) {
if (unlikely(si->ib->flags & (CF_WRITE_ERROR|CF_SHUTW))) {
/* If we're forced to shut down, we might have to remove our
* reference to the last session being dumped.
*/
@ -3770,7 +3770,7 @@ static int stats_table_request(struct stream_interface *si, bool show)
* data though.
*/
if (unlikely(si->ib->flags & (BF_WRITE_ERROR|BF_SHUTW))) {
if (unlikely(si->ib->flags & (CF_WRITE_ERROR|CF_SHUTW))) {
/* in case of abort, remove any refcount we might have set on an entry */
if (si->conn.data_st == STAT_ST_LIST) {
si->applet.ctx.table.entry->ref_cnt--;
@ -3971,7 +3971,7 @@ static int stats_dump_errors_to_buffer(struct stream_interface *si)
extern const char *monthname[12];
struct chunk msg;
if (unlikely(si->ib->flags & (BF_WRITE_ERROR|BF_SHUTW)))
if (unlikely(si->ib->flags & (CF_WRITE_ERROR|CF_SHUTW)))
return 1;
chunk_init(&msg, trash, trashlen);

View File

@ -186,7 +186,7 @@ int frontend_accept(struct session *s)
}
if (s->fe->mode == PR_MODE_HTTP)
s->req->flags |= BF_READ_DONTWAIT; /* one read is usually enough */
s->req->flags |= CF_READ_DONTWAIT; /* one read is usually enough */
/* note: this should not happen anymore since there's always at least the switching rules */
if (!s->req->analysers) {
@ -266,7 +266,7 @@ int frontend_decode_proxy_request(struct session *s, struct channel *req, int an
req->i,
req->analysers);
if (req->flags & (BF_READ_ERROR|BF_READ_TIMEOUT))
if (req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT))
goto fail;
len = MIN(req->buf.i, 6);
@ -397,7 +397,7 @@ int frontend_decode_proxy_request(struct session *s, struct channel *req, int an
missing:
/* missing data and buffer is either full or shutdown => fail */
if ((req->flags & BF_SHUTR) || buffer_full(&req->buf, global.tune.maxrewrite))
if ((req->flags & CF_SHUTR) || buffer_full(&req->buf, global.tune.maxrewrite))
goto fail;
buffer_dont_connect(s->req);

View File

@ -525,7 +525,7 @@ static void peer_io_handler(struct stream_interface *si)
case PEER_SESSION_GETSTATUS: {
struct peer_session *ps = (struct peer_session *)si->conn.data_ctx;
if (si->ib->flags & BF_WRITE_PARTIAL)
if (si->ib->flags & CF_WRITE_PARTIAL)
ps->statuscode = PEER_SESSION_CONNECTEDCODE;
reql = bo_getline(si->ob, trash, trashlen);
@ -1024,14 +1024,14 @@ static void peer_io_handler(struct stream_interface *si)
case PEER_SESSION_END: {
si_shutw(si);
si_shutr(si);
si->ib->flags |= BF_READ_NULL;
si->ib->flags |= CF_READ_NULL;
goto quit;
}
}
}
out:
si_update(si);
si->ob->flags |= BF_READ_DONTWAIT;
si->ob->flags |= CF_READ_DONTWAIT;
/* we don't want to expire timeouts while we're processing requests */
si->ib->rex = TICK_ETERNITY;
si->ob->wex = TICK_ETERNITY;
@ -1090,7 +1090,7 @@ int peer_accept(struct session *s)
s->logs.prx_queue_size = 0;/* we get the number of pending conns before us */
s->logs.srv_queue_size = 0; /* we will get this number soon */
s->req->flags |= BF_READ_DONTWAIT; /* we plan to read small requests */
s->req->flags |= CF_READ_DONTWAIT; /* we plan to read small requests */
if (s->listener->timeout) {
s->req->rto = *s->listener->timeout;
@ -1231,7 +1231,7 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
s->req->cons = &s->si[1];
s->si[0].ib = s->si[1].ob = s->req;
s->req->flags |= BF_READ_ATTACHED; /* the producer is already connected */
s->req->flags |= CF_READ_ATTACHED; /* the producer is already connected */
/* activate default analysers enabled for this listener */
s->req->analysers = l->analysers;
@ -1265,7 +1265,7 @@ static struct session *peer_session_create(struct peer *peer, struct peer_sessio
s->rep->analyse_exp = TICK_ETERNITY;
t->expire = TICK_ETERNITY;
s->rep->flags |= BF_READ_DONTWAIT;
s->rep->flags |= CF_READ_DONTWAIT;
/* it is important not to call the wakeup function directly but to
* pass through task_wakeup(), because this one knows how to apply
* priorities to tasks.

View File

@ -2023,11 +2023,11 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
bi_end(&req->buf) < b_ptr(&req->buf, msg->next) ||
bi_end(&req->buf) > req->buf.data + req->buf.size - global.tune.maxrewrite)) {
if (req->buf.o) {
if (req->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_WRITE_ERROR|BF_WRITE_TIMEOUT))
if (req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
goto failed_keep_alive;
/* some data has still not left the buffer, wake us once that's done */
buffer_dont_connect(req);
req->flags |= BF_READ_DONTWAIT; /* try to get back here ASAP */
req->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
return 0;
}
if (bi_end(&req->buf) < b_ptr(&req->buf, msg->next) ||
@ -2047,11 +2047,11 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
bi_end(&s->rep->buf) < b_ptr(&s->rep->buf, txn->rsp.next) ||
bi_end(&s->rep->buf) > s->rep->buf.data + s->rep->buf.size - global.tune.maxrewrite)) {
if (s->rep->buf.o) {
if (s->rep->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_WRITE_ERROR|BF_WRITE_TIMEOUT))
if (s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
goto failed_keep_alive;
/* don't let a connection request be initiated */
buffer_dont_connect(req);
s->rep->flags &= ~BF_EXPECT_MORE; /* speed up sending a previous response */
s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
s->rep->analysers |= an_bit; /* wake us up once it changes */
return 0;
}
@ -2128,7 +2128,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
}
/* 2: have we encountered a read error ? */
else if (req->flags & BF_READ_ERROR) {
else if (req->flags & CF_READ_ERROR) {
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_CLICL;
@ -2156,7 +2156,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
}
/* 3: has the read timeout expired ? */
else if (req->flags & BF_READ_TIMEOUT || tick_is_expired(req->analyse_exp, now_ms)) {
else if (req->flags & CF_READ_TIMEOUT || tick_is_expired(req->analyse_exp, now_ms)) {
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_CLITO;
@ -2185,7 +2185,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
}
/* 4: have we encountered a close ? */
else if (req->flags & BF_SHUTR) {
else if (req->flags & CF_SHUTR) {
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_CLICL;
@ -2212,8 +2212,8 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
}
buffer_dont_connect(req);
req->flags |= BF_READ_DONTWAIT; /* try to get back here ASAP */
s->rep->flags &= ~BF_EXPECT_MORE; /* speed up sending a previous response */
req->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
#ifdef TCP_QUICKACK
if (s->listener->options & LI_O_NOQUICKACK && req->buf.i) {
/* We need more data, we have to re-enable quick-ack in case we
@ -2255,7 +2255,7 @@ int http_wait_for_request(struct session *s, struct channel *req, int an_bit)
msg->msg_state = HTTP_MSG_RQBEFORE;
req->analysers = 0;
s->logs.logwait = 0;
s->rep->flags &= ~BF_EXPECT_MORE; /* speed up sending a previous response */
s->rep->flags &= ~CF_EXPECT_MORE; /* speed up sending a previous response */
stream_int_retnclose(req->prod, NULL);
return 0;
}
@ -3196,10 +3196,10 @@ int http_process_req_common(struct session *s, struct channel *req, int an_bit,
* If this happens, then the data will not come immediately, so we must
* send all what we have without waiting. Note that due to the small gain
* in waiting for the body of the request, it's easier to simply put the
* BF_SEND_DONTWAIT flag any time. It's a one-shot flag so it will remove
* CF_SEND_DONTWAIT flag any time. It's a one-shot flag so it will remove
* itself once used.
*/
req->flags |= BF_SEND_DONTWAIT;
req->flags |= CF_SEND_DONTWAIT;
/* that's OK for us now, let's move on to next analysers */
return 1;
@ -3527,7 +3527,7 @@ int http_process_tarpit(struct session *s, struct channel *req, int an_bit)
* there and that the timeout has not expired.
*/
buffer_dont_connect(req);
if ((req->flags & (BF_SHUTR|BF_READ_ERROR)) == 0 &&
if ((req->flags & (CF_SHUTR|CF_READ_ERROR)) == 0 &&
!tick_is_expired(req->analyse_exp, now_ms))
return 0;
@ -3540,7 +3540,7 @@ int http_process_tarpit(struct session *s, struct channel *req, int an_bit)
s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
txn->status = 500;
if (!(req->flags & BF_READ_ERROR))
if (!(req->flags & CF_READ_ERROR))
stream_int_retnclose(req->prod, error_message(s, HTTP_ERR_500));
req->analysers = 0;
@ -3641,7 +3641,7 @@ int http_process_request_body(struct session *s, struct channel *req, int an_bit
goto return_bad_req;
}
if ((req->flags & BF_READ_TIMEOUT) || tick_is_expired(req->analyse_exp, now_ms)) {
if ((req->flags & CF_READ_TIMEOUT) || tick_is_expired(req->analyse_exp, now_ms)) {
txn->status = 408;
stream_int_retnclose(req->prod, error_message(s, HTTP_ERR_408));
@ -3653,7 +3653,7 @@ int http_process_request_body(struct session *s, struct channel *req, int an_bit
}
/* we get here if we need to wait for more data */
if (!(req->flags & (BF_SHUTR | BF_READ_ERROR)) && !buffer_full(&req->buf, global.tune.maxrewrite)) {
if (!(req->flags & (CF_SHUTR | CF_READ_ERROR)) && !buffer_full(&req->buf, global.tune.maxrewrite)) {
/* Not enough data. We'll re-use the http-request
* timeout here. Ideally, we should set the timeout
* relative to the accept() date. We just set the
@ -3829,8 +3829,8 @@ void http_end_txn_clean_session(struct session *s)
s->req->cons->err_loc = NULL;
s->req->cons->exp = TICK_ETERNITY;
s->req->cons->flags = SI_FL_NONE;
s->req->flags &= ~(BF_SHUTW|BF_SHUTW_NOW|BF_AUTO_CONNECT|BF_WRITE_ERROR|BF_STREAMER|BF_STREAMER_FAST|BF_NEVER_WAIT);
s->rep->flags &= ~(BF_SHUTR|BF_SHUTR_NOW|BF_READ_ATTACHED|BF_READ_ERROR|BF_READ_NOEXP|BF_STREAMER|BF_STREAMER_FAST|BF_WRITE_PARTIAL|BF_NEVER_WAIT);
s->req->flags &= ~(CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CONNECT|CF_WRITE_ERROR|CF_STREAMER|CF_STREAMER_FAST|CF_NEVER_WAIT);
s->rep->flags &= ~(CF_SHUTR|CF_SHUTR_NOW|CF_READ_ATTACHED|CF_READ_ERROR|CF_READ_NOEXP|CF_STREAMER|CF_STREAMER_FAST|CF_WRITE_PARTIAL|CF_NEVER_WAIT);
s->flags &= ~(SN_DIRECT|SN_ASSIGNED|SN_ADDR_SET|SN_BE_ASSIGNED|SN_FORCE_PRST|SN_IGNORE_PRST);
s->flags &= ~(SN_CURR_SESS|SN_REDIRECTABLE);
s->txn.meth = 0;
@ -3840,8 +3840,8 @@ void http_end_txn_clean_session(struct session *s)
s->req->cons->flags |= SI_FL_INDEP_STR;
if (s->fe->options2 & PR_O2_NODELAY) {
s->req->flags |= BF_NEVER_WAIT;
s->rep->flags |= BF_NEVER_WAIT;
s->req->flags |= CF_NEVER_WAIT;
s->rep->flags |= CF_NEVER_WAIT;
}
/* if the request buffer is not empty, it means we're
@ -3855,7 +3855,7 @@ void http_end_txn_clean_session(struct session *s)
if (s->rep->buf.o &&
!buffer_full(&s->rep->buf, global.tune.maxrewrite) &&
bi_end(&s->rep->buf) <= s->rep->buf.data + s->rep->buf.size - global.tune.maxrewrite)
s->rep->flags |= BF_EXPECT_MORE;
s->rep->flags |= CF_EXPECT_MORE;
}
/* we're removing the analysers, we MUST re-enable events detection */
@ -3927,7 +3927,7 @@ int http_sync_req_state(struct session *s)
if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL) {
/* Server-close mode : queue a connection close to the server */
if (!(buf->flags & (BF_SHUTW|BF_SHUTW_NOW)))
if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW)))
buffer_shutw_now(buf);
}
else if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_CLO) {
@ -3936,7 +3936,7 @@ int http_sync_req_state(struct session *s)
* data to come. The caller knows the session is complete
* once both states are CLOSED.
*/
if (!(buf->flags & (BF_SHUTW|BF_SHUTW_NOW))) {
if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
buffer_shutr_now(buf);
buffer_shutw_now(buf);
}
@ -3951,7 +3951,7 @@ int http_sync_req_state(struct session *s)
txn->req.msg_state = HTTP_MSG_TUNNEL;
}
if (buf->flags & (BF_SHUTW|BF_SHUTW_NOW)) {
if (buf->flags & (CF_SHUTW|CF_SHUTW_NOW)) {
/* if we've just closed an output, let's switch */
buf->cons->flags |= SI_FL_NOLINGER; /* we want to close ASAP */
@ -3976,7 +3976,7 @@ int http_sync_req_state(struct session *s)
txn->req.msg_state = HTTP_MSG_CLOSED;
goto http_msg_closed;
}
else if (buf->flags & BF_SHUTW) {
else if (buf->flags & CF_SHUTW) {
txn->req.msg_state = HTTP_MSG_ERROR;
goto wait_other_side;
}
@ -4050,7 +4050,7 @@ int http_sync_res_state(struct session *s)
* when we're in DONE and the other is in CLOSED and will
* catch that for the final cleanup.
*/
if (!(buf->flags & (BF_SHUTR|BF_SHUTR_NOW)))
if (!(buf->flags & (CF_SHUTR|CF_SHUTR_NOW)))
buffer_shutr_now(buf);
}
else if ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_CLO) {
@ -4059,7 +4059,7 @@ int http_sync_res_state(struct session *s)
* data to come. The caller knows the session is complete
* once both states are CLOSED.
*/
if (!(buf->flags & (BF_SHUTW|BF_SHUTW_NOW))) {
if (!(buf->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
buffer_shutr_now(buf);
buffer_shutw_now(buf);
}
@ -4074,7 +4074,7 @@ int http_sync_res_state(struct session *s)
txn->rsp.msg_state = HTTP_MSG_TUNNEL;
}
if (buf->flags & (BF_SHUTW|BF_SHUTW_NOW)) {
if (buf->flags & (CF_SHUTW|CF_SHUTW_NOW)) {
/* if we've just closed an output, let's switch */
if (!channel_is_empty(buf)) {
txn->rsp.msg_state = HTTP_MSG_CLOSING;
@ -4097,7 +4097,7 @@ int http_sync_res_state(struct session *s)
txn->rsp.msg_state = HTTP_MSG_CLOSED;
goto http_msg_closed;
}
else if (buf->flags & BF_SHUTW) {
else if (buf->flags & CF_SHUTW) {
txn->rsp.msg_state = HTTP_MSG_ERROR;
s->be->be_counters.cli_aborts++;
if (target_srv(&s->target))
@ -4167,7 +4167,7 @@ int http_resync_states(struct session *s)
else if (txn->rsp.msg_state == HTTP_MSG_CLOSED ||
txn->rsp.msg_state == HTTP_MSG_ERROR ||
txn->req.msg_state == HTTP_MSG_ERROR ||
(s->rep->flags & BF_SHUTW)) {
(s->rep->flags & CF_SHUTW)) {
s->rep->analysers = 0;
buffer_auto_close(s->rep);
buffer_auto_read(s->rep);
@ -4210,8 +4210,8 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
if (unlikely(msg->msg_state < HTTP_MSG_BODY))
return 0;
if ((req->flags & (BF_READ_ERROR|BF_READ_TIMEOUT|BF_WRITE_ERROR|BF_WRITE_TIMEOUT)) ||
((req->flags & BF_SHUTW) && (req->to_forward || req->buf.o))) {
if ((req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
((req->flags & CF_SHUTW) && (req->to_forward || req->buf.o))) {
/* Output closed while we were sending data. We must abort and
* wake the other side up.
*/
@ -4327,7 +4327,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
* was disabled too.
*/
if (unlikely(msg->msg_state == HTTP_MSG_ERROR)) {
if (req->flags & BF_SHUTW) {
if (req->flags & CF_SHUTW) {
/* request errors are most likely due to
* the server aborting the transfer.
*/
@ -4365,7 +4365,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
missing_data:
/* stop waiting for data if the input is closed before the end */
if (req->flags & BF_SHUTR) {
if (req->flags & CF_SHUTR) {
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_CLICL;
if (!(s->flags & SN_FINST_MASK)) {
@ -4384,17 +4384,17 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
}
/* waiting for the last bits to leave the buffer */
if (req->flags & BF_SHUTW)
if (req->flags & CF_SHUTW)
goto aborted_xfer;
/* When TE: chunked is used, we need to get there again to parse remaining
* chunks even if the client has closed, so we don't want to set BF_DONTCLOSE.
* chunks even if the client has closed, so we don't want to set CF_DONTCLOSE.
*/
if (msg->flags & HTTP_MSGF_TE_CHNK)
buffer_dont_close(req);
/* We know that more data are expected, but we couldn't send more that
* what we did. So we always set the BF_EXPECT_MORE flag so that the
* what we did. So we always set the CF_EXPECT_MORE flag so that the
* system knows it must not set a PUSH on this first part. Interactive
* modes are already handled by the stream sock layer. We must not do
* this in content-length mode because it could present the MSG_MORE
@ -4402,7 +4402,7 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit
* additional delay to be observed by the receiver.
*/
if (msg->flags & HTTP_MSGF_TE_CHNK)
req->flags |= BF_EXPECT_MORE;
req->flags |= CF_EXPECT_MORE;
http_silent_debug(__LINE__, s);
return 0;
@ -4513,10 +4513,10 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
bi_end(&rep->buf) > rep->buf.data + rep->buf.size - global.tune.maxrewrite)) {
if (rep->buf.o) {
/* some data has still not left the buffer, wake us once that's done */
if (rep->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_WRITE_ERROR|BF_WRITE_TIMEOUT))
if (rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_WRITE_ERROR|CF_WRITE_TIMEOUT))
goto abort_response;
buffer_dont_close(rep);
rep->flags |= BF_READ_DONTWAIT; /* try to get back here ASAP */
rep->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */
return 0;
}
if (rep->buf.i <= rep->buf.size - global.tune.maxrewrite)
@ -4600,7 +4600,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
}
/* read error */
else if (rep->flags & BF_READ_ERROR) {
else if (rep->flags & CF_READ_ERROR) {
if (msg->err_pos >= 0)
http_capture_bad_message(&s->be->invalid_rep, s, msg, msg->msg_state, s->fe);
@ -4625,7 +4625,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
}
/* read timeout : return a 504 to the client. */
else if (rep->flags & BF_READ_TIMEOUT) {
else if (rep->flags & CF_READ_TIMEOUT) {
if (msg->err_pos >= 0)
http_capture_bad_message(&s->be->invalid_rep, s, msg, msg->msg_state, s->fe);
@ -4650,7 +4650,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
}
/* close from server, capture the response if the server has started to respond */
else if (rep->flags & BF_SHUTR) {
else if (rep->flags & CF_SHUTR) {
if (msg->msg_state >= HTTP_MSG_RPVER || msg->err_pos >= 0)
http_capture_bad_message(&s->be->invalid_rep, s, msg, msg->msg_state, s->fe);
@ -4675,7 +4675,7 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit)
}
/* write error to client (we don't send any message then) */
else if (rep->flags & BF_WRITE_ERROR) {
else if (rep->flags & CF_WRITE_ERROR) {
if (msg->err_pos >= 0)
http_capture_bad_message(&s->be->invalid_rep, s, msg, msg->msg_state, s->fe);
@ -5278,8 +5278,8 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
if (unlikely(msg->msg_state < HTTP_MSG_BODY))
return 0;
if ((res->flags & (BF_READ_ERROR|BF_READ_TIMEOUT|BF_WRITE_ERROR|BF_WRITE_TIMEOUT)) ||
((res->flags & BF_SHUTW) && (res->to_forward || res->buf.o)) ||
if ((res->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) ||
((res->flags & CF_SHUTW) && (res->to_forward || res->buf.o)) ||
!s->req->analysers) {
/* Output closed while we were sending data. We must abort and
* wake the other side up.
@ -5386,7 +5386,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
* was disabled too.
*/
if (unlikely(msg->msg_state == HTTP_MSG_ERROR)) {
if (res->flags & BF_SHUTW) {
if (res->flags & CF_SHUTW) {
/* response errors are most likely due to
* the client aborting the transfer.
*/
@ -5404,7 +5404,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
missing_data:
/* stop waiting for data if the input is closed before the end */
if (res->flags & BF_SHUTR) {
if (res->flags & CF_SHUTR) {
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_SRVCL;
s->be->be_counters.srv_aborts++;
@ -5413,7 +5413,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
goto return_bad_res_stats_ok;
}
if (res->flags & BF_SHUTW)
if (res->flags & CF_SHUTW)
goto aborted_xfer;
/* we need to obey the req analyser, so if it leaves, we must too */
@ -5430,7 +5430,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
}
/* When TE: chunked is used, we need to get there again to parse remaining
* chunks even if the server has closed, so we don't want to set BF_DONTCLOSE.
* chunks even if the server has closed, so we don't want to set CF_DONTCLOSE.
* Similarly, with keep-alive on the client side, we don't want to forward a
* close.
*/
@ -5440,7 +5440,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
buffer_dont_close(res);
/* We know that more data are expected, but we couldn't send more that
* what we did. So we always set the BF_EXPECT_MORE flag so that the
* what we did. So we always set the CF_EXPECT_MORE flag so that the
* system knows it must not set a PUSH on this first part. Interactive
* modes are already handled by the stream sock layer. We must not do
* this in content-length mode because it could present the MSG_MORE
@ -5448,7 +5448,7 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi
* additional delay to be observed by the receiver.
*/
if (msg->flags & HTTP_MSGF_TE_CHNK)
res->flags |= BF_EXPECT_MORE;
res->flags |= CF_EXPECT_MORE;
/* the session handler will take care of timeouts and errors */
http_silent_debug(__LINE__, s);
@ -7469,7 +7469,7 @@ void http_reset_txn(struct session *s)
s->pend_pos = NULL;
s->req->flags |= BF_READ_DONTWAIT; /* one read is usually enough */
s->req->flags |= CF_READ_DONTWAIT; /* one read is usually enough */
/* We must trim any excess data from the response buffer, because we
* may have blocked an invalid response from a server that we don't

View File

@ -809,7 +809,7 @@ int tcp_inspect_request(struct session *s, struct channel *req, int an_bit)
* - if one rule returns KO, then return KO
*/
if ((req->flags & BF_SHUTR) || buffer_full(&req->buf, global.tune.maxrewrite) ||
if ((req->flags & CF_SHUTR) || buffer_full(&req->buf, global.tune.maxrewrite) ||
!s->be->tcp_req.inspect_delay || tick_is_expired(req->analyse_exp, now_ms))
partial = SMP_OPT_FINAL;
else
@ -928,7 +928,7 @@ int tcp_inspect_response(struct session *s, struct channel *rep, int an_bit)
* - if one rule returns KO, then return KO
*/
if (rep->flags & BF_SHUTR || tick_is_expired(rep->analyse_exp, now_ms))
if (rep->flags & CF_SHUTR || tick_is_expired(rep->analyse_exp, now_ms))
partial = SMP_OPT_FINAL;
else
partial = 0;

View File

@ -830,8 +830,8 @@ int session_set_backend(struct session *s, struct proxy *be)
}
if (be->options2 & PR_O2_NODELAY) {
s->req->flags |= BF_NEVER_WAIT;
s->rep->flags |= BF_NEVER_WAIT;
s->req->flags |= CF_NEVER_WAIT;
s->rep->flags |= CF_NEVER_WAIT;
}
/* We want to enable the backend-specific analysers except those which

View File

@ -229,7 +229,7 @@ int session_accept(struct listener *l, int cfd, struct sockaddr_storage *addr)
s->req->prod = &s->si[0];
s->req->cons = &s->si[1];
s->si[0].ib = s->si[1].ob = s->req;
s->req->flags |= BF_READ_ATTACHED; /* the producer is already connected */
s->req->flags |= CF_READ_ATTACHED; /* the producer is already connected */
/* activate default analysers enabled for this listener */
s->req->analysers = l->analysers;
@ -249,8 +249,8 @@ int session_accept(struct listener *l, int cfd, struct sockaddr_storage *addr)
s->rep->analysers = 0;
if (s->fe->options2 & PR_O2_NODELAY) {
s->req->flags |= BF_NEVER_WAIT;
s->rep->flags |= BF_NEVER_WAIT;
s->req->flags |= CF_NEVER_WAIT;
s->rep->flags |= CF_NEVER_WAIT;
}
s->rep->rto = TICK_ETERNITY;
@ -561,9 +561,9 @@ static int sess_update_st_con_tcp(struct session *s, struct stream_interface *si
}
/* OK, maybe we want to abort */
if (unlikely((rep->flags & BF_SHUTW) ||
((req->flags & BF_SHUTW_NOW) && /* FIXME: this should not prevent a connection from establishing */
((!(req->flags & BF_WRITE_ACTIVITY) && channel_is_empty(req)) ||
if (unlikely((rep->flags & CF_SHUTW) ||
((req->flags & CF_SHUTW_NOW) && /* FIXME: this should not prevent a connection from establishing */
((!(req->flags & CF_WRITE_ACTIVITY) && channel_is_empty(req)) ||
s->be->options & PR_O_ABRT_CLOSE)))) {
/* give up */
si_shutw(si);
@ -576,7 +576,7 @@ static int sess_update_st_con_tcp(struct session *s, struct stream_interface *si
}
/* we need to wait a bit more if there was no activity either */
if (!(req->flags & BF_WRITE_ACTIVITY))
if (!(req->flags & CF_WRITE_ACTIVITY))
return 1;
/* OK, this means that a connection succeeded. The caller will be
@ -628,8 +628,8 @@ static int sess_update_st_cer(struct session *s, struct stream_interface *si)
/* shutw is enough so stop a connecting socket */
si_shutw(si);
si->ob->flags |= BF_WRITE_ERROR;
si->ib->flags |= BF_READ_ERROR;
si->ob->flags |= CF_WRITE_ERROR;
si->ib->flags |= CF_READ_ERROR;
si->state = SI_ST_CLO;
if (s->srv_error)
@ -705,7 +705,7 @@ static void sess_establish(struct session *s, struct stream_interface *si)
}
rep->analysers |= s->fe->fe_rsp_ana | s->be->be_rsp_ana;
rep->flags |= BF_READ_ATTACHED; /* producer is now attached */
rep->flags |= CF_READ_ATTACHED; /* producer is now attached */
if (si_ctrl(si)) {
/* real connections have timeouts */
req->wto = s->be->timeout.server;
@ -768,7 +768,7 @@ static void sess_update_stream_int(struct session *s, struct stream_interface *s
/* Failed and not retryable. */
si_shutr(si);
si_shutw(si);
si->ob->flags |= BF_WRITE_ERROR;
si->ob->flags |= CF_WRITE_ERROR;
s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
@ -817,7 +817,7 @@ static void sess_update_stream_int(struct session *s, struct stream_interface *s
s->be->be_counters.failed_conns++;
si_shutr(si);
si_shutw(si);
si->ob->flags |= BF_WRITE_TIMEOUT;
si->ob->flags |= CF_WRITE_TIMEOUT;
if (!si->err_type)
si->err_type = SI_ET_QUEUE_TO;
si->state = SI_ST_CLO;
@ -827,8 +827,8 @@ static void sess_update_stream_int(struct session *s, struct stream_interface *s
}
/* Connection remains in queue, check if we have to abort it */
if ((si->ob->flags & (BF_READ_ERROR)) ||
((si->ob->flags & BF_SHUTW_NOW) && /* empty and client aborted */
if ((si->ob->flags & (CF_READ_ERROR)) ||
((si->ob->flags & CF_SHUTW_NOW) && /* empty and client aborted */
(channel_is_empty(si->ob) || s->be->options & PR_O_ABRT_CLOSE))) {
/* give up */
si->exp = TICK_ETERNITY;
@ -847,8 +847,8 @@ static void sess_update_stream_int(struct session *s, struct stream_interface *s
}
else if (si->state == SI_ST_TAR) {
/* Connection request might be aborted */
if ((si->ob->flags & (BF_READ_ERROR)) ||
((si->ob->flags & BF_SHUTW_NOW) && /* empty and client aborted */
if ((si->ob->flags & (CF_READ_ERROR)) ||
((si->ob->flags & CF_SHUTW_NOW) && /* empty and client aborted */
(channel_is_empty(si->ob) || s->be->options & PR_O_ABRT_CLOSE))) {
/* give up */
si->exp = TICK_ETERNITY;
@ -933,7 +933,7 @@ static void sess_prepare_conn_req(struct session *s, struct stream_interface *si
/* we did not get any server, let's check the cause */
si_shutr(si);
si_shutw(si);
si->ob->flags |= BF_WRITE_ERROR;
si->ob->flags |= CF_WRITE_ERROR;
if (!si->err_type)
si->err_type = SI_ET_CONN_OTHER;
si->state = SI_ST_CLO;
@ -1316,11 +1316,11 @@ struct task *process_session(struct task *t)
memset(&s->txn.auth, 0, sizeof(s->txn.auth));
/* This flag must explicitly be set every time */
s->req->flags &= ~BF_READ_NOEXP;
s->req->flags &= ~CF_READ_NOEXP;
/* Keep a copy of req/rep flags so that we can detect shutdowns */
rqf_last = s->req->flags & ~BF_MASK_ANALYSER;
rpf_last = s->rep->flags & ~BF_MASK_ANALYSER;
rqf_last = s->req->flags & ~CF_MASK_ANALYSER;
rpf_last = s->rep->flags & ~CF_MASK_ANALYSER;
/* we don't want the stream interface functions to recursively wake us up */
if (s->req->prod->owner == t)
@ -1343,12 +1343,12 @@ struct task *process_session(struct task *t)
buffer_check_timeouts(s->req);
if (unlikely((s->req->flags & (BF_SHUTW|BF_WRITE_TIMEOUT)) == BF_WRITE_TIMEOUT)) {
if (unlikely((s->req->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
s->req->cons->flags |= SI_FL_NOLINGER;
si_shutw(s->req->cons);
}
if (unlikely((s->req->flags & (BF_SHUTR|BF_READ_TIMEOUT)) == BF_READ_TIMEOUT)) {
if (unlikely((s->req->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
if (s->req->prod->flags & SI_FL_NOHALF)
s->req->prod->flags |= SI_FL_NOLINGER;
si_shutr(s->req->prod);
@ -1356,12 +1356,12 @@ struct task *process_session(struct task *t)
buffer_check_timeouts(s->rep);
if (unlikely((s->rep->flags & (BF_SHUTW|BF_WRITE_TIMEOUT)) == BF_WRITE_TIMEOUT)) {
if (unlikely((s->rep->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
s->rep->cons->flags |= SI_FL_NOLINGER;
si_shutw(s->rep->cons);
}
if (unlikely((s->rep->flags & (BF_SHUTR|BF_READ_TIMEOUT)) == BF_READ_TIMEOUT)) {
if (unlikely((s->rep->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
if (s->rep->prod->flags & SI_FL_NOHALF)
s->rep->prod->flags |= SI_FL_NOLINGER;
si_shutr(s->rep->prod);
@ -1479,8 +1479,8 @@ struct task *process_session(struct task *t)
resync_request:
/* Analyse request */
if (((s->req->flags & ~rqf_last) & BF_MASK_ANALYSER) ||
((s->req->flags ^ rqf_last) & BF_MASK_STATIC) ||
if (((s->req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
((s->req->flags ^ rqf_last) & CF_MASK_STATIC) ||
s->si[0].state != rq_prod_last ||
s->si[1].state != rq_cons_last) {
unsigned int flags = s->req->flags;
@ -1627,10 +1627,10 @@ struct task *process_session(struct task *t)
rq_prod_last = s->si[0].state;
rq_cons_last = s->si[1].state;
s->req->flags &= ~BF_WAKE_ONCE;
s->req->flags &= ~CF_WAKE_ONCE;
rqf_last = s->req->flags;
if ((s->req->flags ^ flags) & BF_MASK_STATIC)
if ((s->req->flags ^ flags) & CF_MASK_STATIC)
goto resync_request;
}
@ -1643,29 +1643,29 @@ struct task *process_session(struct task *t)
resync_response:
/* Analyse response */
if (unlikely(s->rep->flags & BF_HIJACK)) {
if (unlikely(s->rep->flags & CF_HIJACK)) {
/* In inject mode, we wake up everytime something has
* happened on the write side of the buffer.
*/
unsigned int flags = s->rep->flags;
if ((s->rep->flags & (BF_WRITE_PARTIAL|BF_WRITE_ERROR|BF_SHUTW)) &&
if ((s->rep->flags & (CF_WRITE_PARTIAL|CF_WRITE_ERROR|CF_SHUTW)) &&
!channel_full(s->rep)) {
s->rep->hijacker(s, s->rep);
}
if ((s->rep->flags ^ flags) & BF_MASK_STATIC) {
if ((s->rep->flags ^ flags) & CF_MASK_STATIC) {
rpf_last = s->rep->flags;
goto resync_response;
}
}
else if (((s->rep->flags & ~rpf_last) & BF_MASK_ANALYSER) ||
(s->rep->flags ^ rpf_last) & BF_MASK_STATIC ||
else if (((s->rep->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
(s->rep->flags ^ rpf_last) & CF_MASK_STATIC ||
s->si[0].state != rp_cons_last ||
s->si[1].state != rp_prod_last) {
unsigned int flags = s->rep->flags;
if ((s->rep->flags & BF_MASK_ANALYSER) &&
if ((s->rep->flags & CF_MASK_ANALYSER) &&
(s->rep->analysers & AN_REQ_WAIT_HTTP)) {
/* Due to HTTP pipelining, the HTTP request analyser might be waiting
* for some free space in the response buffer, so we might need to call
@@ -1674,7 +1674,7 @@ struct task *process_session(struct task *t)
* be zero due to us returning a flow of redirects!
*/
s->rep->analysers &= ~AN_REQ_WAIT_HTTP;
s->req->flags |= BF_WAKE_ONCE;
s->req->flags |= CF_WAKE_ONCE;
}
if (s->rep->prod->state >= SI_ST_EST) {
@@ -1743,7 +1743,7 @@ struct task *process_session(struct task *t)
rp_prod_last = s->si[1].state;
rpf_last = s->rep->flags;
if ((s->rep->flags ^ flags) & BF_MASK_STATIC)
if ((s->rep->flags ^ flags) & CF_MASK_STATIC)
goto resync_response;
}
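A channel carrying CF_HIJACK is fed by the hijacker callback instead of its normal producer, and as the comment above says, the callback is re-run on every write-side event as long as the channel has room. A simplified sketch of that inversion of control; all names and types here are placeholders, not HAProxy's:

    struct hchan;
    typedef void (*hijack_fn)(struct hchan *c);

    struct hchan {
        unsigned int flags;     /* would carry CF_HIJACK and friends */
        int          full;      /* stand-in for channel_full() */
        hijack_fn    hijacker;  /* substitute producer */
    };

    static void maybe_run_hijacker(struct hchan *c, int write_side_event)
    {
        if (write_side_event && !c->full && c->hijacker)
            c->hijacker(c);     /* let the injector refill the channel */
    }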
@@ -1751,7 +1751,7 @@ struct task *process_session(struct task *t)
if (s->req->analysers & ~req_ana_back)
goto resync_request;
if ((s->req->flags & ~rqf_last) & BF_MASK_ANALYSER)
if ((s->req->flags & ~rqf_last) & CF_MASK_ANALYSER)
goto resync_request;
/* FIXME: here we should call protocol handlers which rely on
@@ -1766,24 +1766,24 @@ struct task *process_session(struct task *t)
*/
srv = target_srv(&s->target);
if (unlikely(!(s->flags & SN_ERR_MASK))) {
if (s->req->flags & (BF_READ_ERROR|BF_READ_TIMEOUT|BF_WRITE_ERROR|BF_WRITE_TIMEOUT)) {
if (s->req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
/* Report it if the client got an error or a read timeout expired */
s->req->analysers = 0;
if (s->req->flags & BF_READ_ERROR) {
if (s->req->flags & CF_READ_ERROR) {
s->be->be_counters.cli_aborts++;
s->fe->fe_counters.cli_aborts++;
if (srv)
srv->counters.cli_aborts++;
s->flags |= SN_ERR_CLICL;
}
else if (s->req->flags & BF_READ_TIMEOUT) {
else if (s->req->flags & CF_READ_TIMEOUT) {
s->be->be_counters.cli_aborts++;
s->fe->fe_counters.cli_aborts++;
if (srv)
srv->counters.cli_aborts++;
s->flags |= SN_ERR_CLITO;
}
else if (s->req->flags & BF_WRITE_ERROR) {
else if (s->req->flags & CF_WRITE_ERROR) {
s->be->be_counters.srv_aborts++;
s->fe->fe_counters.srv_aborts++;
if (srv)
@@ -1799,24 +1799,24 @@ struct task *process_session(struct task *t)
}
sess_set_term_flags(s);
}
else if (s->rep->flags & (BF_READ_ERROR|BF_READ_TIMEOUT|BF_WRITE_ERROR|BF_WRITE_TIMEOUT)) {
else if (s->rep->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
/* Report it if the server got an error or a read timeout expired */
s->rep->analysers = 0;
if (s->rep->flags & BF_READ_ERROR) {
if (s->rep->flags & CF_READ_ERROR) {
s->be->be_counters.srv_aborts++;
s->fe->fe_counters.srv_aborts++;
if (srv)
srv->counters.srv_aborts++;
s->flags |= SN_ERR_SRVCL;
}
else if (s->rep->flags & BF_READ_TIMEOUT) {
else if (s->rep->flags & CF_READ_TIMEOUT) {
s->be->be_counters.srv_aborts++;
s->fe->fe_counters.srv_aborts++;
if (srv)
srv->counters.srv_aborts++;
s->flags |= SN_ERR_SRVTO;
}
else if (s->rep->flags & BF_WRITE_ERROR) {
else if (s->rep->flags & CF_WRITE_ERROR) {
s->be->be_counters.cli_aborts++;
s->fe->fe_counters.cli_aborts++;
if (srv)
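Each branch in the two blocks above attributes the abort to one side (cli_aborts vs srv_aborts) and records a single SN_ERR_* cause, first match wins: read problems on the request channel blame the client, write problems blame the server, and the response channel mirrors this. A compact sketch of the request-side decision table, with invented names and flag values:

    #define CF_READ_ERROR   0x01u  /* illustrative values */
    #define CF_READ_TIMEOUT 0x02u
    #define CF_WRITE_ERROR  0x04u

    enum abort_cause { CAUSE_CLICL, CAUSE_CLITO, CAUSE_SRVCL, CAUSE_SRVTO };

    static enum abort_cause classify_req_abort(unsigned int flags)
    {
        if (flags & CF_READ_ERROR)   return CAUSE_CLICL; /* client error */
        if (flags & CF_READ_TIMEOUT) return CAUSE_CLITO; /* client timed out */
        if (flags & CF_WRITE_ERROR)  return CAUSE_SRVCL; /* server-side error */
        return CAUSE_SRVTO;          /* remaining case: write timeout */
    }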
@@ -1842,13 +1842,13 @@ struct task *process_session(struct task *t)
/* If no one is interested in analysing data, it's time to forward
* everything. We configure the buffer to forward indefinitely.
* Note that we're checking BF_SHUTR_NOW as an indication of a possible
* Note that we're checking CF_SHUTR_NOW as an indication of a possible
* recent call to buffer_abort().
*/
if (!s->req->analysers &&
!(s->req->flags & (BF_HIJACK|BF_SHUTW|BF_SHUTR_NOW)) &&
!(s->req->flags & (CF_HIJACK|CF_SHUTW|CF_SHUTR_NOW)) &&
(s->req->prod->state >= SI_ST_EST) &&
(s->req->to_forward != BUF_INFINITE_FORWARD)) {
(s->req->to_forward != CHN_INFINITE_FORWARD)) {
/* This buffer is freewheeling, there's no analyser nor hijacker
* attached to it. If any data are left in, we'll permit them to
* move.
@@ -1861,20 +1861,20 @@ struct task *process_session(struct task *t)
/* We'll let data flow between the producer (if still connected)
* to the consumer (which might possibly not be connected yet).
*/
if (!(s->req->flags & (BF_SHUTR|BF_SHUTW_NOW)))
buffer_forward(s->req, BUF_INFINITE_FORWARD);
if (!(s->req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
buffer_forward(s->req, CHN_INFINITE_FORWARD);
}
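buffer_forward() with CHN_INFINITE_FORWARD switches the channel to pure pass-through: instead of counting scheduled bytes down to zero, the sentinel means "forward without limit". The byte accounting used later in the receive path reduces to the following sketch (simplified stand-in types; the sentinel value is illustrative):

    #include <limits.h>

    #define CHN_INFINITE_FORWARD UINT_MAX  /* sentinel, illustrative value */

    struct fchan { unsigned int to_forward; };

    /* account for <bytes> that just moved straight from input to output */
    static void consume_forward(struct fchan *c, unsigned int bytes)
    {
        if (c->to_forward == CHN_INFINITE_FORWARD)
            return;                 /* unlimited: nothing to count down */
        if (bytes > c->to_forward)
            bytes = c->to_forward;  /* never underflow the counter */
        c->to_forward -= bytes;
    }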
/* check if it is wise to enable kernel splicing to forward request data */
if (!(s->req->flags & (BF_KERN_SPLICING|BF_SHUTR)) &&
if (!(s->req->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
s->req->to_forward &&
(global.tune.options & GTUNE_USE_SPLICE) &&
(s->si[0].flags & s->si[1].flags & SI_FL_CAP_SPLICE) &&
(pipes_used < global.maxpipes) &&
(((s->fe->options2|s->be->options2) & PR_O2_SPLIC_REQ) ||
(((s->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
(s->req->flags & BF_STREAMER_FAST)))) {
s->req->flags |= BF_KERN_SPLICING;
(s->req->flags & CF_STREAMER_FAST)))) {
s->req->flags |= CF_KERN_SPLICING;
}
/* reflect what the L7 analysers have seen last */
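The splicing test above is a pure predicate: splicing must be globally enabled, both stream interfaces must advertise SI_FL_CAP_SPLICE, a pipe must still be available, and the proxy must ask for it either unconditionally or automatically for fast streamers. Condensed into one function (all parameters are stand-ins for the global and per-proxy state):

    /* non-zero if enabling kernel splicing looks worthwhile */
    static int splice_wanted(int globally_enabled, int both_sides_capable,
                             int pipes_left, int opt_always, int opt_auto,
                             int fast_streamer)
    {
        return globally_enabled && both_sides_capable && pipes_left > 0 &&
               (opt_always || (opt_auto && fast_streamer));
    }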
@@ -1888,22 +1888,22 @@ struct task *process_session(struct task *t)
* happen either because the input is closed or because we want to force a close
* once the server has begun to respond.
*/
if (unlikely((s->req->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_HIJACK|BF_AUTO_CLOSE|BF_SHUTR)) ==
(BF_AUTO_CLOSE|BF_SHUTR)))
if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK|CF_AUTO_CLOSE|CF_SHUTR)) ==
(CF_AUTO_CLOSE|CF_SHUTR)))
buffer_shutw_now(s->req);
/* shutdown(write) pending */
if (unlikely((s->req->flags & (BF_SHUTW|BF_SHUTW_NOW)) == BF_SHUTW_NOW &&
if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
channel_is_empty(s->req)))
si_shutw(s->req->cons);
/* shutdown(write) done on server side, we must stop the client too */
if (unlikely((s->req->flags & (BF_SHUTW|BF_SHUTR|BF_SHUTR_NOW)) == BF_SHUTW &&
if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
!s->req->analysers))
buffer_shutr_now(s->req);
/* shutdown(read) pending */
if (unlikely((s->req->flags & (BF_SHUTR|BF_SHUTR_NOW)) == BF_SHUTR_NOW)) {
if (unlikely((s->req->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
if (s->req->prod->flags & SI_FL_NOHALF)
s->req->prod->flags |= SI_FL_NOLINGER;
si_shutr(s->req->prod);
@@ -1912,11 +1912,11 @@ struct task *process_session(struct task *t)
/* it's possible that an upper layer has requested a connection setup or abort.
* There are 2 situations where we decide to establish a new connection :
* - there are data scheduled for emission in the buffer
* - the BF_AUTO_CONNECT flag is set (active connection)
* - the CF_AUTO_CONNECT flag is set (active connection)
*/
if (s->req->cons->state == SI_ST_INI) {
if (!(s->req->flags & BF_SHUTW)) {
if ((s->req->flags & BF_AUTO_CONNECT) || !channel_is_empty(s->req)) {
if (!(s->req->flags & CF_SHUTW)) {
if ((s->req->flags & CF_AUTO_CONNECT) || !channel_is_empty(s->req)) {
/* If we have an applet without a connect method, we immediately
* switch to the connected state, otherwise we perform a connection
* request.
@@ -1926,7 +1926,7 @@ struct task *process_session(struct task *t)
if (unlikely(s->req->cons->target.type == TARG_TYPE_APPLET &&
!(si_ctrl(s->req->cons) && si_ctrl(s->req->cons)->connect))) {
s->req->cons->state = SI_ST_EST; /* connection established */
s->rep->flags |= BF_READ_ATTACHED; /* producer is now attached */
s->rep->flags |= CF_READ_ATTACHED; /* producer is now attached */
s->req->wex = TICK_ETERNITY;
}
}
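So a stream interface still in SI_ST_INI only starts connecting when there is a reason to: data already queued for the server, or CF_AUTO_CONNECT requesting an active connection, and never once the output is shut. As a sketch (flag values illustrative):

    #define CF_SHUTW        0x01u  /* illustrative values */
    #define CF_AUTO_CONNECT 0x02u

    /* decide whether the server-side connection should be started now */
    static int should_connect(unsigned int flags, int channel_empty)
    {
        if (flags & CF_SHUTW)
            return 0;           /* output already closed: nothing to send */
        return (flags & CF_AUTO_CONNECT) || !channel_empty;
    }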
@@ -1971,20 +1971,20 @@ struct task *process_session(struct task *t)
goto resync_stream_interface;
/* otherwise we want to check if we need to resync the req buffer or not */
if ((s->req->flags ^ rqf_last) & BF_MASK_STATIC)
if ((s->req->flags ^ rqf_last) & CF_MASK_STATIC)
goto resync_request;
/* perform output updates to the response buffer */
/* If no one is interested in analysing data, it's time to forward
* everything. We configure the buffer to forward indefinitely.
* Note that we're checking BF_SHUTR_NOW as an indication of a possible
* Note that we're checking CF_SHUTR_NOW as an indication of a possible
* recent call to buffer_abort().
*/
if (!s->rep->analysers &&
!(s->rep->flags & (BF_HIJACK|BF_SHUTW|BF_SHUTR_NOW)) &&
!(s->rep->flags & (CF_HIJACK|CF_SHUTW|CF_SHUTR_NOW)) &&
(s->rep->prod->state >= SI_ST_EST) &&
(s->rep->to_forward != BUF_INFINITE_FORWARD)) {
(s->rep->to_forward != CHN_INFINITE_FORWARD)) {
/* This buffer is freewheeling, there's no analyser nor hijacker
* attached to it. If any data are left in, we'll permit them to
* move.
@@ -1996,8 +1996,8 @@ struct task *process_session(struct task *t)
/* We'll let data flow between the producer (if still connected)
* to the consumer.
*/
if (!(s->rep->flags & (BF_SHUTR|BF_SHUTW_NOW)))
buffer_forward(s->rep, BUF_INFINITE_FORWARD);
if (!(s->rep->flags & (CF_SHUTR|CF_SHUTW_NOW)))
buffer_forward(s->rep, CHN_INFINITE_FORWARD);
/* if we have no analyser anymore in any direction and have a
* tunnel timeout set, use it now.
@@ -2011,15 +2011,15 @@ struct task *process_session(struct task *t)
}
/* check if it is wise to enable kernel splicing to forward response data */
if (!(s->rep->flags & (BF_KERN_SPLICING|BF_SHUTR)) &&
if (!(s->rep->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
s->rep->to_forward &&
(global.tune.options & GTUNE_USE_SPLICE) &&
(s->si[0].flags & s->si[1].flags & SI_FL_CAP_SPLICE) &&
(pipes_used < global.maxpipes) &&
(((s->fe->options2|s->be->options2) & PR_O2_SPLIC_RTR) ||
(((s->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
(s->rep->flags & BF_STREAMER_FAST)))) {
s->rep->flags |= BF_KERN_SPLICING;
(s->rep->flags & CF_STREAMER_FAST)))) {
s->rep->flags |= CF_KERN_SPLICING;
}
/* reflect what the L7 analysers have seen last */
@@ -2034,22 +2034,22 @@ struct task *process_session(struct task *t)
*/
/* first, let's check if the response buffer needs to shutdown(write) */
if (unlikely((s->rep->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_HIJACK|BF_AUTO_CLOSE|BF_SHUTR)) ==
(BF_AUTO_CLOSE|BF_SHUTR)))
if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK|CF_AUTO_CLOSE|CF_SHUTR)) ==
(CF_AUTO_CLOSE|CF_SHUTR)))
buffer_shutw_now(s->rep);
/* shutdown(write) pending */
if (unlikely((s->rep->flags & (BF_SHUTW|BF_SHUTW_NOW)) == BF_SHUTW_NOW &&
if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
channel_is_empty(s->rep)))
si_shutw(s->rep->cons);
/* shutdown(write) done on the client side, we must stop the server too */
if (unlikely((s->rep->flags & (BF_SHUTW|BF_SHUTR|BF_SHUTR_NOW)) == BF_SHUTW) &&
if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
!s->rep->analysers)
buffer_shutr_now(s->rep);
/* shutdown(read) pending */
if (unlikely((s->rep->flags & (BF_SHUTR|BF_SHUTR_NOW)) == BF_SHUTR_NOW)) {
if (unlikely((s->rep->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
if (s->rep->prod->flags & SI_FL_NOHALF)
s->rep->prod->flags |= SI_FL_NOLINGER;
si_shutr(s->rep->prod);
@@ -2061,7 +2061,7 @@ struct task *process_session(struct task *t)
if (s->req->flags != rqf_last)
goto resync_request;
if ((s->rep->flags ^ rpf_last) & BF_MASK_STATIC)
if ((s->rep->flags ^ rpf_last) & CF_MASK_STATIC)
goto resync_response;
/* we're interested in getting wakeups again */
@@ -2109,8 +2109,8 @@ struct task *process_session(struct task *t)
if (s->req->cons->state == SI_ST_EST && s->req->cons->target.type != TARG_TYPE_APPLET)
si_update(s->req->cons);
s->req->flags &= ~(BF_READ_NULL|BF_READ_PARTIAL|BF_WRITE_NULL|BF_WRITE_PARTIAL|BF_READ_ATTACHED);
s->rep->flags &= ~(BF_READ_NULL|BF_READ_PARTIAL|BF_WRITE_NULL|BF_WRITE_PARTIAL|BF_READ_ATTACHED);
s->req->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
s->rep->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
s->si[0].prev_state = s->si[0].state;
s->si[1].prev_state = s->si[1].state;
s->si[0].flags &= ~(SI_FL_ERR|SI_FL_EXP);
@@ -2124,9 +2124,9 @@ struct task *process_session(struct task *t)
* request timeout is set and the server has not yet sent a response.
*/
if ((s->rep->flags & (BF_AUTO_CLOSE|BF_SHUTR)) == 0 &&
if ((s->rep->flags & (CF_AUTO_CLOSE|CF_SHUTR)) == 0 &&
(tick_isset(s->req->wex) || tick_isset(s->rep->rex))) {
s->req->flags |= BF_READ_NOEXP;
s->req->flags |= CF_READ_NOEXP;
s->req->rex = TICK_ETERNITY;
}
@@ -2310,7 +2310,7 @@ void default_srv_error(struct session *s, struct stream_interface *si)
/* kill a session and set the termination flags to <why> (one of SN_ERR_*) */
void session_shutdown(struct session *session, int why)
{
if (session->req->flags & (BF_SHUTW|BF_SHUTW_NOW))
if (session->req->flags & (CF_SHUTW|CF_SHUTW_NOW))
return;
buffer_shutw_now(session->req);

View File

@@ -93,8 +93,8 @@ void stream_int_report_error(struct stream_interface *si)
if (!si->err_type)
si->err_type = SI_ET_DATA_ERR;
si->ob->flags |= BF_WRITE_ERROR;
si->ib->flags |= BF_READ_ERROR;
si->ob->flags |= CF_WRITE_ERROR;
si->ib->flags |= CF_READ_ERROR;
}
/*
@@ -146,38 +146,38 @@ static void stream_int_update_embedded(struct stream_interface *si)
if (si->state != SI_ST_EST)
return;
if ((si->ob->flags & (BF_SHUTW|BF_HIJACK|BF_SHUTW_NOW)) == BF_SHUTW_NOW &&
if ((si->ob->flags & (CF_SHUTW|CF_HIJACK|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
channel_is_empty(si->ob))
si_shutw(si);
if ((si->ob->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_HIJACK)) == 0 && !channel_full(si->ob))
if ((si->ob->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK)) == 0 && !channel_full(si->ob))
si->flags |= SI_FL_WAIT_DATA;
/* we're almost sure that we need some space if the buffer is not
* empty, even if it's not full, because the applets can't fill it.
*/
if ((si->ib->flags & (BF_SHUTR|BF_DONT_READ)) == 0 && !channel_is_empty(si->ib))
if ((si->ib->flags & (CF_SHUTR|CF_DONT_READ)) == 0 && !channel_is_empty(si->ib))
si->flags |= SI_FL_WAIT_ROOM;
if (si->ob->flags & BF_WRITE_ACTIVITY) {
if (si->ob->flags & CF_WRITE_ACTIVITY) {
if (tick_isset(si->ob->wex))
si->ob->wex = tick_add_ifset(now_ms, si->ob->wto);
}
if (si->ib->flags & BF_READ_ACTIVITY ||
(si->ob->flags & BF_WRITE_ACTIVITY && !(si->flags & SI_FL_INDEP_STR))) {
if (si->ib->flags & CF_READ_ACTIVITY ||
(si->ob->flags & CF_WRITE_ACTIVITY && !(si->flags & SI_FL_INDEP_STR))) {
if (tick_isset(si->ib->rex))
si->ib->rex = tick_add_ifset(now_ms, si->ib->rto);
}
/* save flags to detect changes */
old_flags = si->flags;
if (likely((si->ob->flags & (BF_SHUTW|BF_WRITE_PARTIAL|BF_DONT_READ)) == BF_WRITE_PARTIAL &&
if (likely((si->ob->flags & (CF_SHUTW|CF_WRITE_PARTIAL|CF_DONT_READ)) == CF_WRITE_PARTIAL &&
!channel_full(si->ob) &&
(si->ob->prod->flags & SI_FL_WAIT_ROOM)))
si_chk_rcv(si->ob->prod);
if (((si->ib->flags & BF_READ_PARTIAL) && !channel_is_empty(si->ib)) &&
if (((si->ib->flags & CF_READ_PARTIAL) && !channel_is_empty(si->ib)) &&
(si->ib->cons->flags & SI_FL_WAIT_DATA)) {
si_chk_snd(si->ib->cons);
/* check if the consumer has freed some space */
@@ -196,23 +196,23 @@ static void stream_int_update_embedded(struct stream_interface *si)
((old_flags & ~si->flags) & (SI_FL_WAIT_ROOM|SI_FL_WAIT_DATA)) ||
/* changes on the production side */
(si->ib->flags & (BF_READ_NULL|BF_READ_ERROR)) ||
(si->ib->flags & (CF_READ_NULL|CF_READ_ERROR)) ||
si->state != SI_ST_EST ||
(si->flags & SI_FL_ERR) ||
((si->ib->flags & BF_READ_PARTIAL) &&
((si->ib->flags & CF_READ_PARTIAL) &&
(!si->ib->to_forward || si->ib->cons->state != SI_ST_EST)) ||
/* changes on the consumption side */
(si->ob->flags & (BF_WRITE_NULL|BF_WRITE_ERROR)) ||
((si->ob->flags & BF_WRITE_ACTIVITY) &&
((si->ob->flags & BF_SHUTW) ||
(si->ob->flags & (CF_WRITE_NULL|CF_WRITE_ERROR)) ||
((si->ob->flags & CF_WRITE_ACTIVITY) &&
((si->ob->flags & CF_SHUTW) ||
si->ob->prod->state != SI_ST_EST ||
(channel_is_empty(si->ob) && !si->ob->to_forward)))) {
if (!(si->flags & SI_FL_DONT_WAKE) && si->owner)
task_wakeup(si->owner, TASK_WOKEN_IO);
}
if (si->ib->flags & BF_READ_ACTIVITY)
si->ib->flags &= ~BF_READ_DONTWAIT;
if (si->ib->flags & CF_READ_ACTIVITY)
si->ib->flags &= ~CF_READ_DONTWAIT;
}
/*
@@ -232,17 +232,17 @@ int stream_int_shutr(struct stream_interface *si)
{
struct connection *conn = &si->conn;
si->ib->flags &= ~BF_SHUTR_NOW;
if (si->ib->flags & BF_SHUTR)
si->ib->flags &= ~CF_SHUTR_NOW;
if (si->ib->flags & CF_SHUTR)
return 0;
si->ib->flags |= BF_SHUTR;
si->ib->flags |= CF_SHUTR;
si->ib->rex = TICK_ETERNITY;
si->flags &= ~SI_FL_WAIT_ROOM;
if (si->state != SI_ST_EST && si->state != SI_ST_CON)
return 0;
if (si->ob->flags & BF_SHUTW) {
if (si->ob->flags & CF_SHUTW) {
conn_data_close(&si->conn);
if (conn->ctrl)
fd_delete(si_fd(si));
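stream_int_shutr() is idempotent: the pending CF_SHUTR_NOW request is always consumed, CF_SHUTR is set at most once, and the underlying connection is only torn down when the write side is already shut as well. The ordering can be sketched as follows (illustrative flag values):

    #define CF_SHUTR     0x01u  /* illustrative values */
    #define CF_SHUTR_NOW 0x02u
    #define CF_SHUTW     0x04u

    /* returns 1 when both directions are shut and a full close may follow */
    static int do_shutr(unsigned int *in_flags, unsigned int out_flags)
    {
        *in_flags &= ~CF_SHUTR_NOW;         /* the request is consumed */
        if (*in_flags & CF_SHUTR)
            return 0;                       /* already done, stay idempotent */
        *in_flags |= CF_SHUTR;
        return (out_flags & CF_SHUTW) != 0; /* both sides shut: close */
    }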
@@ -283,10 +283,10 @@ int stream_int_shutw(struct stream_interface *si)
{
struct connection *conn = &si->conn;
si->ob->flags &= ~BF_SHUTW_NOW;
if (si->ob->flags & BF_SHUTW)
si->ob->flags &= ~CF_SHUTW_NOW;
if (si->ob->flags & CF_SHUTW)
return 0;
si->ob->flags |= BF_SHUTW;
si->ob->flags |= CF_SHUTW;
si->ob->wex = TICK_ETERNITY;
si->flags &= ~SI_FL_WAIT_DATA;
@@ -320,7 +320,7 @@ int stream_int_shutw(struct stream_interface *si)
if (conn->ctrl)
shutdown(si_fd(si), SHUT_WR);
if (!(si->ib->flags & (BF_SHUTR|BF_DONT_READ))) {
if (!(si->ib->flags & (CF_SHUTR|CF_DONT_READ))) {
/* OK just a shutw, but we want the caller
* to disable polling on this FD if exists.
*/
@@ -347,7 +347,7 @@ int stream_int_shutw(struct stream_interface *si)
si->release(si);
default:
si->flags &= ~SI_FL_WAIT_ROOM;
si->ib->flags |= BF_SHUTR;
si->ib->flags |= CF_SHUTR;
si->ib->rex = TICK_ETERNITY;
si->exp = TICK_ETERNITY;
}
@@ -367,7 +367,7 @@ static void stream_int_chk_rcv(struct stream_interface *si)
__FUNCTION__,
si, si->state, si->ib->flags, si->ob->flags);
if (unlikely(si->state != SI_ST_EST || (ib->flags & (BF_SHUTR|BF_HIJACK|BF_DONT_READ))))
if (unlikely(si->state != SI_ST_EST || (ib->flags & (CF_SHUTR|CF_HIJACK|CF_DONT_READ))))
return;
if (channel_full(ib)) {
@@ -391,7 +391,7 @@ static void stream_int_chk_snd(struct stream_interface *si)
__FUNCTION__,
si, si->state, si->ib->flags, si->ob->flags);
if (unlikely(si->state != SI_ST_EST || (si->ob->flags & BF_SHUTW)))
if (unlikely(si->state != SI_ST_EST || (si->ob->flags & CF_SHUTW)))
return;
if (!(si->flags & SI_FL_WAIT_DATA) || /* not waiting for data */
@@ -574,24 +574,24 @@ void conn_notify_si(struct connection *conn)
/* check for recent connection establishment */
if (unlikely(!(conn->flags & (CO_FL_WAIT_L4_CONN | CO_FL_WAIT_L6_CONN | CO_FL_CONNECTED)))) {
si->exp = TICK_ETERNITY;
si->ob->flags |= BF_WRITE_NULL;
si->ob->flags |= CF_WRITE_NULL;
}
/* process consumer side */
if (channel_is_empty(si->ob)) {
if (((si->ob->flags & (BF_SHUTW|BF_HIJACK|BF_SHUTW_NOW)) == BF_SHUTW_NOW) &&
if (((si->ob->flags & (CF_SHUTW|CF_HIJACK|CF_SHUTW_NOW)) == CF_SHUTW_NOW) &&
(si->state == SI_ST_EST))
stream_int_shutw(si);
__conn_data_stop_send(conn);
si->ob->wex = TICK_ETERNITY;
}
if ((si->ob->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_HIJACK)) == 0 && !channel_full(si->ob))
if ((si->ob->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK)) == 0 && !channel_full(si->ob))
si->flags |= SI_FL_WAIT_DATA;
if (si->ob->flags & BF_WRITE_ACTIVITY) {
if (si->ob->flags & CF_WRITE_ACTIVITY) {
/* update timeouts if we have written something */
if ((si->ob->flags & (BF_SHUTW|BF_WRITE_PARTIAL)) == BF_WRITE_PARTIAL &&
if ((si->ob->flags & (CF_SHUTW|CF_WRITE_PARTIAL)) == CF_WRITE_PARTIAL &&
!channel_is_empty(si->ob))
if (tick_isset(si->ob->wex))
si->ob->wex = tick_add_ifset(now_ms, si->ob->wto);
@@ -600,7 +600,7 @@ void conn_notify_si(struct connection *conn)
if (tick_isset(si->ib->rex))
si->ib->rex = tick_add_ifset(now_ms, si->ib->rto);
if (likely((si->ob->flags & (BF_SHUTW|BF_WRITE_PARTIAL|BF_DONT_READ)) == BF_WRITE_PARTIAL &&
if (likely((si->ob->flags & (CF_SHUTW|CF_WRITE_PARTIAL|CF_DONT_READ)) == CF_WRITE_PARTIAL &&
!channel_full(si->ob) &&
(si->ob->prod->flags & SI_FL_WAIT_ROOM)))
si_chk_rcv(si->ob->prod);
@@ -613,7 +613,7 @@ void conn_notify_si(struct connection *conn)
* immediately afterwards once the following data is parsed (eg:
* HTTP chunking).
*/
if (((si->ib->flags & BF_READ_PARTIAL) && !channel_is_empty(si->ib)) &&
if (((si->ib->flags & CF_READ_PARTIAL) && !channel_is_empty(si->ib)) &&
(si->ib->pipe /* always try to send spliced data */ ||
(si->ib->buf.i == 0 && (si->ib->cons->flags & SI_FL_WAIT_DATA)))) {
int last_len = si->ib->pipe ? si->ib->pipe->data : 0;
@@ -632,7 +632,7 @@ void conn_notify_si(struct connection *conn)
__conn_data_stop_recv(conn);
si->ib->rex = TICK_ETERNITY;
}
else if ((si->ib->flags & (BF_SHUTR|BF_READ_PARTIAL|BF_DONT_READ|BF_READ_NOEXP)) == BF_READ_PARTIAL &&
else if ((si->ib->flags & (CF_SHUTR|CF_READ_PARTIAL|CF_DONT_READ|CF_READ_NOEXP)) == CF_READ_PARTIAL &&
!channel_full(si->ib)) {
if (tick_isset(si->ib->rex))
si->ib->rex = tick_add_ifset(now_ms, si->ib->rto);
@@ -640,22 +640,22 @@ void conn_notify_si(struct connection *conn)
/* wake the task up only when needed */
if (/* changes on the production side */
(si->ib->flags & (BF_READ_NULL|BF_READ_ERROR)) ||
(si->ib->flags & (CF_READ_NULL|CF_READ_ERROR)) ||
si->state != SI_ST_EST ||
(si->flags & SI_FL_ERR) ||
((si->ib->flags & BF_READ_PARTIAL) &&
((si->ib->flags & CF_READ_PARTIAL) &&
(!si->ib->to_forward || si->ib->cons->state != SI_ST_EST)) ||
/* changes on the consumption side */
(si->ob->flags & (BF_WRITE_NULL|BF_WRITE_ERROR)) ||
((si->ob->flags & BF_WRITE_ACTIVITY) &&
((si->ob->flags & BF_SHUTW) ||
(si->ob->flags & (CF_WRITE_NULL|CF_WRITE_ERROR)) ||
((si->ob->flags & CF_WRITE_ACTIVITY) &&
((si->ob->flags & CF_SHUTW) ||
si->ob->prod->state != SI_ST_EST ||
(channel_is_empty(si->ob) && !si->ob->to_forward)))) {
task_wakeup(si->owner, TASK_WOKEN_IO);
}
if (si->ib->flags & BF_READ_ACTIVITY)
si->ib->flags &= ~BF_READ_DONTWAIT;
if (si->ib->flags & CF_READ_ACTIVITY)
si->ib->flags &= ~CF_READ_DONTWAIT;
}
/*
@@ -676,7 +676,7 @@ static int si_conn_send_loop(struct connection *conn)
if (b->pipe && conn->data->snd_pipe) {
ret = conn->data->snd_pipe(conn, b->pipe);
if (ret > 0)
b->flags |= BF_WRITE_PARTIAL;
b->flags |= CF_WRITE_PARTIAL;
if (!b->pipe->data) {
put_pipe(b->pipe);
@@ -714,10 +714,10 @@
*/
unsigned int send_flag = MSG_DONTWAIT | MSG_NOSIGNAL;
if ((!(b->flags & (BF_NEVER_WAIT|BF_SEND_DONTWAIT)) &&
((b->to_forward && b->to_forward != BUF_INFINITE_FORWARD) ||
(b->flags & BF_EXPECT_MORE))) ||
((b->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_HIJACK)) == BF_SHUTW_NOW))
if ((!(b->flags & (CF_NEVER_WAIT|CF_SEND_DONTWAIT)) &&
((b->to_forward && b->to_forward != CHN_INFINITE_FORWARD) ||
(b->flags & CF_EXPECT_MORE))) ||
((b->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK)) == CF_SHUTW_NOW))
send_flag |= MSG_MORE;
ret = conn->data->snd_buf(conn, &b->buf, send_flag);
@ -727,11 +727,11 @@ static int si_conn_send_loop(struct connection *conn)
if (si->conn.flags & CO_FL_WAIT_L4_CONN)
si->conn.flags &= ~CO_FL_WAIT_L4_CONN;
b->flags |= BF_WRITE_PARTIAL;
b->flags |= CF_WRITE_PARTIAL;
if (!b->buf.o) {
/* Always clear both flags once everything has been sent, they're one-shot */
b->flags &= ~(BF_EXPECT_MORE | BF_SEND_DONTWAIT);
b->flags &= ~(CF_EXPECT_MORE | CF_SEND_DONTWAIT);
break;
}
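MSG_MORE is requested whenever the sender knows more bytes are coming (a bounded to_forward, CF_EXPECT_MORE) or a shutdown will immediately follow the payload, letting the kernel coalesce the tail into fuller segments; CF_EXPECT_MORE and CF_SEND_DONTWAIT are one-shot and cleared once the output empties. The kernel-facing half reduces to a plain socket call (Linux-specific flags; a sketch, not HAProxy's actual send path):

    #include <sys/types.h>
    #include <sys/socket.h>

    /* send <len> bytes, hinting the kernel when more data will follow;
     * more_expected mirrors the to_forward / CF_EXPECT_MORE test above */
    static ssize_t send_hinted(int fd, const void *buf, size_t len,
                               int more_expected)
    {
        int flags = MSG_DONTWAIT | MSG_NOSIGNAL;

        if (more_expected)
            flags |= MSG_MORE;  /* let TCP aggregate into full segments */
        return send(fd, buf, len, flags);
    }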
@@ -769,12 +769,12 @@ void stream_int_update_conn(struct stream_interface *si)
}
/* Check if we need to close the read side */
if (!(ib->flags & BF_SHUTR)) {
if (!(ib->flags & CF_SHUTR)) {
/* Read not closed, update FD status and timeout for reads */
if ((ib->flags & (BF_HIJACK|BF_DONT_READ)) || channel_full(ib)) {
if ((ib->flags & (CF_HIJACK|CF_DONT_READ)) || channel_full(ib)) {
/* stop reading */
if (!(si->flags & SI_FL_WAIT_ROOM)) {
if (!(ib->flags & (BF_HIJACK|BF_DONT_READ))) /* full */
if (!(ib->flags & (CF_HIJACK|CF_DONT_READ))) /* full */
si->flags |= SI_FL_WAIT_ROOM;
conn_data_stop_recv(&si->conn);
ib->rex = TICK_ETERNITY;
@@ -788,18 +788,18 @@ void stream_int_update_conn(struct stream_interface *si)
*/
si->flags &= ~SI_FL_WAIT_ROOM;
conn_data_want_recv(&si->conn);
if (!(ib->flags & (BF_READ_NOEXP|BF_DONT_READ)) && !tick_isset(ib->rex))
if (!(ib->flags & (CF_READ_NOEXP|CF_DONT_READ)) && !tick_isset(ib->rex))
ib->rex = tick_add_ifset(now_ms, ib->rto);
}
}
/* Check if we need to close the write side */
if (!(ob->flags & BF_SHUTW)) {
if (!(ob->flags & CF_SHUTW)) {
/* Write not closed, update FD status and timeout for writes */
if (channel_is_empty(ob)) {
/* stop writing */
if (!(si->flags & SI_FL_WAIT_DATA)) {
if ((ob->flags & (BF_HIJACK|BF_SHUTW_NOW)) == 0)
if ((ob->flags & (CF_HIJACK|CF_SHUTW_NOW)) == 0)
si->flags |= SI_FL_WAIT_DATA;
conn_data_stop_send(&si->conn);
ob->wex = TICK_ETERNITY;
@@ -839,7 +839,7 @@ static void stream_int_chk_rcv_conn(struct stream_interface *si)
{
struct channel *ib = si->ib;
if (unlikely(si->state != SI_ST_EST || (ib->flags & BF_SHUTR)))
if (unlikely(si->state != SI_ST_EST || (ib->flags & CF_SHUTR)))
return;
if (si->conn.flags & CO_FL_HANDSHAKE) {
@@ -847,9 +847,9 @@ static void stream_int_chk_rcv_conn(struct stream_interface *si)
return;
}
if ((ib->flags & (BF_HIJACK|BF_DONT_READ)) || channel_full(ib)) {
if ((ib->flags & (CF_HIJACK|CF_DONT_READ)) || channel_full(ib)) {
/* stop reading */
if (!(ib->flags & (BF_HIJACK|BF_DONT_READ))) /* full */
if (!(ib->flags & (CF_HIJACK|CF_DONT_READ))) /* full */
si->flags |= SI_FL_WAIT_ROOM;
conn_data_stop_recv(&si->conn);
}
@@ -870,7 +870,7 @@ static void stream_int_chk_snd_conn(struct stream_interface *si)
{
struct channel *ob = si->ob;
if (unlikely(si->state != SI_ST_EST || (ob->flags & BF_SHUTW)))
if (unlikely(si->state != SI_ST_EST || (ob->flags & CF_SHUTW)))
return;
/* handshake running on producer */
@@ -907,14 +907,14 @@ static void stream_int_chk_snd_conn(struct stream_interface *si)
* ->o limit was reached. Maybe we just wrote the last
* chunk and need to close.
*/
if (((ob->flags & (BF_SHUTW|BF_HIJACK|BF_AUTO_CLOSE|BF_SHUTW_NOW)) ==
(BF_AUTO_CLOSE|BF_SHUTW_NOW)) &&
if (((ob->flags & (CF_SHUTW|CF_HIJACK|CF_AUTO_CLOSE|CF_SHUTW_NOW)) ==
(CF_AUTO_CLOSE|CF_SHUTW_NOW)) &&
(si->state == SI_ST_EST)) {
si_shutw(si);
goto out_wakeup;
}
if ((ob->flags & (BF_SHUTW|BF_SHUTW_NOW|BF_HIJACK)) == 0)
if ((ob->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_HIJACK)) == 0)
si->flags |= SI_FL_WAIT_DATA;
ob->wex = TICK_ETERNITY;
}
@@ -928,9 +928,9 @@ static void stream_int_chk_snd_conn(struct stream_interface *si)
ob->wex = tick_add_ifset(now_ms, ob->wto);
}
if (likely(ob->flags & BF_WRITE_ACTIVITY)) {
if (likely(ob->flags & CF_WRITE_ACTIVITY)) {
/* update timeout if we have written something */
if ((ob->flags & (BF_SHUTW|BF_WRITE_PARTIAL)) == BF_WRITE_PARTIAL &&
if ((ob->flags & (CF_SHUTW|CF_WRITE_PARTIAL)) == CF_WRITE_PARTIAL &&
!channel_is_empty(ob))
ob->wex = tick_add_ifset(now_ms, ob->wto);
@@ -950,7 +950,7 @@ static void stream_int_chk_snd_conn(struct stream_interface *si)
/* in case of special condition (error, shutdown, end of write...), we
* have to notify the task.
*/
if (likely((ob->flags & (BF_WRITE_NULL|BF_WRITE_ERROR|BF_SHUTW)) ||
if (likely((ob->flags & (CF_WRITE_NULL|CF_WRITE_ERROR|CF_SHUTW)) ||
(channel_is_empty(ob) && !ob->to_forward) ||
si->state != SI_ST_EST)) {
out_wakeup:
@@ -988,7 +988,7 @@ void si_conn_recv_cb(struct connection *conn)
goto out_shutdown_r;
/* maybe we were called immediately after an asynchronous shutr */
if (b->flags & BF_SHUTR)
if (b->flags & CF_SHUTR)
return;
cur_read = 0;
@@ -998,7 +998,7 @@ void si_conn_recv_cb(struct connection *conn)
* using a buffer.
*/
if (conn->data->rcv_pipe &&
b->to_forward >= MIN_SPLICE_FORWARD && b->flags & BF_KERN_SPLICING) {
b->to_forward >= MIN_SPLICE_FORWARD && b->flags & CF_KERN_SPLICING) {
if (buffer_not_empty(&b->buf)) {
/* We're embarrassed, there are already data pending in
* the buffer and we don't want to have them at two
@@ -1010,7 +1010,7 @@ void si_conn_recv_cb(struct connection *conn)
if (unlikely(b->pipe == NULL)) {
if (pipes_used >= global.maxpipes || !(b->pipe = get_pipe())) {
b->flags &= ~BF_KERN_SPLICING;
b->flags &= ~CF_KERN_SPLICING;
goto abort_splice;
}
}
@@ -1018,17 +1018,17 @@ void si_conn_recv_cb(struct connection *conn)
ret = conn->data->rcv_pipe(conn, b->pipe, b->to_forward);
if (ret < 0) {
/* splice not supported on this end, let's disable it */
b->flags &= ~BF_KERN_SPLICING;
b->flags &= ~CF_KERN_SPLICING;
si->flags &= ~SI_FL_CAP_SPLICE;
goto abort_splice;
}
if (ret > 0) {
if (b->to_forward != BUF_INFINITE_FORWARD)
if (b->to_forward != CHN_INFINITE_FORWARD)
b->to_forward -= ret;
b->total += ret;
cur_read += ret;
b->flags |= BF_READ_PARTIAL;
b->flags |= CF_READ_PARTIAL;
}
if (conn_data_read0_pending(conn))
@@ -1062,9 +1062,9 @@ void si_conn_recv_cb(struct connection *conn)
cur_read += ret;
/* if we're allowed to directly forward data, we must update ->o */
if (b->to_forward && !(b->flags & (BF_SHUTW|BF_SHUTW_NOW))) {
if (b->to_forward && !(b->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
unsigned long fwd = ret;
if (b->to_forward != BUF_INFINITE_FORWARD) {
if (b->to_forward != CHN_INFINITE_FORWARD) {
if (fwd > b->to_forward)
fwd = b->to_forward;
b->to_forward -= fwd;
@@ -1075,25 +1075,25 @@ void si_conn_recv_cb(struct connection *conn)
if (conn->flags & CO_FL_WAIT_L4_CONN)
conn->flags &= ~CO_FL_WAIT_L4_CONN;
b->flags |= BF_READ_PARTIAL;
b->flags |= CF_READ_PARTIAL;
b->total += ret;
if (channel_full(b)) {
/* The buffer is now full, there's no point in going through
* the loop again.
*/
if (!(b->flags & BF_STREAMER_FAST) && (cur_read == buffer_len(&b->buf))) {
if (!(b->flags & CF_STREAMER_FAST) && (cur_read == buffer_len(&b->buf))) {
b->xfer_small = 0;
b->xfer_large++;
if (b->xfer_large >= 3) {
/* we call this buffer a fast streamer if it manages
* to be filled in one call 3 consecutive times.
*/
b->flags |= (BF_STREAMER | BF_STREAMER_FAST);
b->flags |= (CF_STREAMER | CF_STREAMER_FAST);
//fputc('+', stderr);
}
}
else if ((b->flags & (BF_STREAMER | BF_STREAMER_FAST)) &&
else if ((b->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
(cur_read <= b->buf.size / 2)) {
b->xfer_large = 0;
b->xfer_small++;
@@ -1102,7 +1102,7 @@ void si_conn_recv_cb(struct connection *conn)
* we receive faster than we send, so at least it
* is not a "fast streamer".
*/
b->flags &= ~BF_STREAMER_FAST;
b->flags &= ~CF_STREAMER_FAST;
//fputc('-', stderr);
}
}
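The streamer detection above is a small hysteresis machine: three consecutive reads that fill the buffer promote the channel to CF_STREAMER|CF_STREAMER_FAST, while repeated reads that stay under half the buffer demote it again. A sketch of that state machine (flag values and the demotion threshold are assumptions; the promotion threshold of 3 comes from the comment above):

    #define CF_STREAMER       0x01u  /* illustrative values */
    #define CF_STREAMER_FAST  0x02u
    #define FULL_READS_NEEDED 3      /* per the comment above */
    #define SMALL_READS_LIMIT 2      /* threshold assumed for the sketch */

    struct rstate { unsigned int flags; int xfer_large; int xfer_small; };

    static void note_read_result(struct rstate *s, int filled_buffer, int was_small)
    {
        if (filled_buffer) {
            s->xfer_small = 0;
            if (++s->xfer_large >= FULL_READS_NEEDED)
                s->flags |= CF_STREAMER | CF_STREAMER_FAST;
        } else if (was_small) {
            s->xfer_large = 0;
            if (++s->xfer_small >= SMALL_READS_LIMIT)
                s->flags &= ~CF_STREAMER_FAST; /* not a fast streamer */
        }
    }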
@@ -1115,7 +1115,7 @@ void si_conn_recv_cb(struct connection *conn)
break;
}
if ((b->flags & BF_READ_DONTWAIT) || --read_poll <= 0)
if ((b->flags & CF_READ_DONTWAIT) || --read_poll <= 0)
break;
/* if too many bytes were missing from last read, it means that
@@ -1123,7 +1123,7 @@ void si_conn_recv_cb(struct connection *conn)
* not have them in buffers.
*/
if (ret < max) {
if ((b->flags & (BF_STREAMER | BF_STREAMER_FAST)) &&
if ((b->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
(cur_read <= b->buf.size / 2)) {
b->xfer_large = 0;
b->xfer_small++;
@ -1132,7 +1132,7 @@ void si_conn_recv_cb(struct connection *conn)
* one pass, and this happened at least 3 times.
* This is definitely not a streamer.
*/
b->flags &= ~(BF_STREAMER | BF_STREAMER_FAST);
b->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
//fputc('!', stderr);
}
}
@@ -1141,7 +1141,7 @@ void si_conn_recv_cb(struct connection *conn)
* have exhausted system buffers. It's not worth trying
* again.
*/
if (b->flags & BF_STREAMER)
if (b->flags & CF_STREAMER)
break;
/* if we read a large block smaller than what we requested,
@@ -1177,8 +1177,8 @@ void si_conn_recv_cb(struct connection *conn)
out_shutdown_r:
/* we received a shutdown */
b->flags |= BF_READ_NULL;
if (b->flags & BF_AUTO_CLOSE)
b->flags |= CF_READ_NULL;
if (b->flags & CF_AUTO_CLOSE)
buffer_shutw_now(b);
stream_sock_read0(si);
conn_data_read0(conn);
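On end-of-input the channel records CF_READ_NULL and, when CF_AUTO_CLOSE is set, immediately schedules the opposite shutdown by raising CF_SHUTW_NOW, which is what buffer_shutw_now() amounts to. As a sketch (illustrative flag values):

    #define CF_READ_NULL  0x01u  /* illustrative values */
    #define CF_AUTO_CLOSE 0x02u
    #define CF_SHUTW_NOW  0x04u

    /* record end-of-input and optionally propagate it as a pending shutw */
    static void on_read0(unsigned int *flags)
    {
        *flags |= CF_READ_NULL;
        if (*flags & CF_AUTO_CLOSE)
            *flags |= CF_SHUTW_NOW;  /* what buffer_shutw_now() does */
    }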
@@ -1208,7 +1208,7 @@ void si_conn_send_cb(struct connection *conn)
return;
/* we might have been called just after an asynchronous shutw */
if (b->flags & BF_SHUTW)
if (b->flags & CF_SHUTW)
return;
/* OK there are data waiting to be sent */
@@ -1233,17 +1233,17 @@
*/
void stream_sock_read0(struct stream_interface *si)
{
si->ib->flags &= ~BF_SHUTR_NOW;
if (si->ib->flags & BF_SHUTR)
si->ib->flags &= ~CF_SHUTR_NOW;
if (si->ib->flags & CF_SHUTR)
return;
si->ib->flags |= BF_SHUTR;
si->ib->flags |= CF_SHUTR;
si->ib->rex = TICK_ETERNITY;
si->flags &= ~SI_FL_WAIT_ROOM;
if (si->state != SI_ST_EST && si->state != SI_ST_CON)
return;
if (si->ob->flags & BF_SHUTW)
if (si->ob->flags & CF_SHUTW)
goto do_close;
if (si->flags & SI_FL_NOHALF) {