mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2025-09-20 21:31:28 +02:00
MINOR: muxes: adjust takeover with buf_wait interaction
The takeover operation takes a <release> argument. It is a boolean which, if set, indicates that connection resources freed during the takeover do not have to be reallocated on the new thread. Typically, it is set to false when takeover is performed to reuse a connection. However, when takeover is used to delete a connection from a different thread, <release> should be set to true.

Previously, <release> was only set in conjunction with the "del server" handler. That operation was performed under thread isolation, which guaranteed that non-thread-safe operations, such as removal from a buf_wait list, could be performed during takeover when <release> was true. Otherwise, the takeover operation would fail.

Recently, the "del server" handler was adjusted so that it no longer cleans up idle connections via takeover. As such, <release> is never set to true in the remaining takeover usages. However, takeover is also used to enforce strict-maxconn on a server. This is performed to delete a connection from any thread, which is the primary reason for setting <release> to true. But for the moment, since takeover implementations assume that thread isolation is active when <release> is set, this is not yet applicable to the strict-maxconn usage.

Thus, the purpose of this patch is to adjust the takeover implementations: remove the assumption that <release> implies thread isolation. Takeover no longer tries to remove a connection from a buf_wait list; if the connection is in such a list, an error is returned in any case.
This commit is contained in:
parent 8a456399db
commit d971d3fed8
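To illustrate the adjusted rule before reading the per-mux diffs below, here is a minimal, self-contained C sketch. It is not HAProxy code: mux_ctx, in_buf_wait_list, tasks_allocated and takeover_sketch() are hypothetical stand-ins for the real mux context, the LIST_INLIST(&...->buf_wait.list) test and the new_task/new_tasklet pre-allocation. The point it captures is that the buf_wait check is now performed before, and independently of, the <release> branch, so no thread-isolation assumption is needed.

/* Standalone sketch (not HAProxy code): hypothetical names only. */
#include <stdbool.h>
#include <stdio.h>

struct mux_ctx {
	bool in_buf_wait_list;  /* connection queued waiting for a buffer */
	bool tasks_allocated;   /* stand-in for new_task/new_tasklet */
};

/* Returns 0 when the takeover is accepted, -1 when it must be refused. */
static int takeover_sketch(struct mux_ctx *ctx, int release)
{
	/* Adjusted rule: refuse whenever the connection sits in a buf_wait
	 * list, whatever <release> says; no thread-isolation assumption. */
	if (ctx->in_buf_wait_list)
		return -1;

	/* Resources are only re-allocated when the connection will be
	 * reused on the new thread (release == 0). */
	if (!release)
		ctx->tasks_allocated = true;

	return 0;
}

int main(void)
{
	struct mux_ctx reusable = { .in_buf_wait_list = false };
	struct mux_ctx waiting  = { .in_buf_wait_list = true };

	printf("reuse (release=0): %d\n", takeover_sketch(&reusable, 0));  /* 0: accepted */
	printf("purge (release=1) while in buf_wait: %d\n",
	       takeover_sketch(&waiting, 1));                              /* -1: refused */
	return 0;
}

The sketch only mirrors the control flow that the patch enforces identically in mux_fcgi, mux_h1, mux_h2 and mux_spop.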
@@ -4435,18 +4435,21 @@ static int fcgi_takeover(struct connection *conn, int orig_tid, int release)
 	struct task *new_task = NULL;
 	struct tasklet *new_tasklet = NULL;
 
-	/* Pre-allocate tasks so that we don't have to roll back after the xprt
-	 * has been migrated.
+	/* If the connection is attached to a buffer_wait (extremely rare),
+	 * it will be woken up at any instant by its own thread and we can't
+	 * undo it anyway, so let's give up on this one. It's not interesting
+	 * at least for reuse anyway since it's not usable right now.
+	 *
+	 * TODO if takeover is used to free conn (release == 1), buf_wait may
+	 * prevent this which is not desirable.
 	 */
-	if (!release) {
-		/* If the connection is attached to a buffer_wait (extremely
-		 * rare), it will be woken up at any instant by its own thread
-		 * and we can't undo it anyway, so let's give up on this one.
-		 * It's not interesting anyway since it's not usable right now.
-		 */
-		if (LIST_INLIST(&fcgi->buf_wait.list))
-			goto fail;
+	if (LIST_INLIST(&fcgi->buf_wait.list))
+		goto fail;
 
+	/* Pre-allocate tasks so that we don't have to roll back after the xprt
+	 * has been migrated.
+	 */
+	if (!release) {
 		new_task = task_new_here();
 		new_tasklet = tasklet_new();
 		if (!new_task || !new_tasklet)
@@ -4503,20 +4506,6 @@ static int fcgi_takeover(struct connection *conn, int orig_tid, int release)
 					   SUB_RETRY_RECV, &fcgi->wait_event);
 	}
 
-	if (release) {
-		/* we're being called for a server deletion and are running
-		 * under thread isolation. That's the only way we can
-		 * unregister a possible subscription of the original
-		 * connection from its owner thread's queue, as this involves
-		 * manipulating thread-unsafe areas. Note that it is not
-		 * possible to just call b_dequeue() here as it would update
-		 * the current thread's bufq_map and not the original one.
-		 */
-		BUG_ON(!thread_isolated());
-		if (LIST_INLIST(&fcgi->buf_wait.list))
-			_b_dequeue(&fcgi->buf_wait, orig_tid);
-	}
-
 	if (new_task)
 		__task_free(new_task);
 	return 0;
33 src/mux_h1.c
@@ -5596,18 +5596,21 @@ static int h1_takeover(struct connection *conn, int orig_tid, int release)
 	struct task *new_task = NULL;
 	struct tasklet *new_tasklet = NULL;
 
-	/* Pre-allocate tasks so that we don't have to roll back after the xprt
-	 * has been migrated.
+	/* If the connection is attached to a buffer_wait (extremely rare),
+	 * it will be woken up at any instant by its own thread and we can't
+	 * undo it anyway, so let's give up on this one. It's not interesting
+	 * at least for reuse anyway since it's not usable right now.
+	 *
+	 * TODO if takeover is used to free conn (release == 1), buf_wait may
+	 * prevent this which is not desirable.
 	 */
-	if (!release) {
-		/* If the connection is attached to a buffer_wait (extremely
-		 * rare), it will be woken up at any instant by its own thread
-		 * and we can't undo it anyway, so let's give up on this one.
-		 * It's not interesting anyway since it's not usable right now.
-		 */
-		if (LIST_INLIST(&h1c->buf_wait.list))
-			goto fail;
+	if (LIST_INLIST(&h1c->buf_wait.list))
+		goto fail;
 
+	/* Pre-allocate tasks so that we don't have to roll back after the xprt
+	 * has been migrated.
+	 */
+	if (!release) {
 		new_task = task_new_here();
 		new_tasklet = tasklet_new();
 		if (!new_task || !new_tasklet)
@@ -5664,20 +5667,6 @@ static int h1_takeover(struct connection *conn, int orig_tid, int release)
 					   SUB_RETRY_RECV, &h1c->wait_event);
 	}
 
-	if (release) {
-		/* we're being called for a server deletion and are running
-		 * under thread isolation. That's the only way we can
-		 * unregister a possible subscription of the original
-		 * connection from its owner thread's queue, as this involves
-		 * manipulating thread-unsafe areas. Note that it is not
-		 * possible to just call b_dequeue() here as it would update
-		 * the current thread's bufq_map and not the original one.
-		 */
-		BUG_ON(!thread_isolated());
-		if (LIST_INLIST(&h1c->buf_wait.list))
-			_b_dequeue(&h1c->buf_wait, orig_tid);
-	}
-
 	if (new_task)
 		__task_free(new_task);
 	return 0;
33 src/mux_h2.c
@@ -8375,18 +8375,21 @@ static int h2_takeover(struct connection *conn, int orig_tid, int release)
 	struct task *new_task = NULL;
 	struct tasklet *new_tasklet = NULL;
 
-	/* Pre-allocate tasks so that we don't have to roll back after the xprt
-	 * has been migrated.
+	/* If the connection is attached to a buffer_wait (extremely rare),
+	 * it will be woken up at any instant by its own thread and we can't
+	 * undo it anyway, so let's give up on this one. It's not interesting
+	 * at least for reuse anyway since it's not usable right now.
+	 *
+	 * TODO if takeover is used to free conn (release == 1), buf_wait may
+	 * prevent this which is not desirable.
 	 */
-	if (!release) {
-		/* If the connection is attached to a buffer_wait (extremely
-		 * rare), it will be woken up at any instant by its own thread
-		 * and we can't undo it anyway, so let's give up on this one.
-		 * It's not interesting anyway since it's not usable right now.
-		 */
-		if (LIST_INLIST(&h2c->buf_wait.list))
-			goto fail;
+	if (LIST_INLIST(&h2c->buf_wait.list))
+		goto fail;
 
+	/* Pre-allocate tasks so that we don't have to roll back after the xprt
+	 * has been migrated.
+	 */
+	if (!release) {
 		new_task = task_new_here();
 		new_tasklet = tasklet_new();
 		if (!new_task || !new_tasklet)
@@ -8443,20 +8446,6 @@ static int h2_takeover(struct connection *conn, int orig_tid, int release)
 					   SUB_RETRY_RECV, &h2c->wait_event);
 	}
 
-	if (release) {
-		/* we're being called for a server deletion and are running
-		 * under thread isolation. That's the only way we can
-		 * unregister a possible subscription of the original
-		 * connection from its owner thread's queue, as this involves
-		 * manipulating thread-unsafe areas. Note that it is not
-		 * possible to just call b_dequeue() here as it would update
-		 * the current thread's bufq_map and not the original one.
-		 */
-		BUG_ON(!thread_isolated());
-		if (LIST_INLIST(&h2c->buf_wait.list))
-			_b_dequeue(&h2c->buf_wait, orig_tid);
-	}
-
 	if (new_task)
 		__task_free(new_task);
 	return 0;
@@ -3633,17 +3633,21 @@ static int spop_takeover(struct connection *conn, int orig_tid, int release)
 	struct task *new_task = NULL;
 	struct tasklet *new_tasklet = NULL;
 
+	/* If the connection is attached to a buffer_wait (extremely rare),
+	 * it will be woken up at any instant by its own thread and we can't
+	 * undo it anyway, so let's give up on this one. It's not interesting
+	 * at least for reuse anyway since it's not usable right now.
+	 *
+	 * TODO if takeover is used to free conn (release == 1), buf_wait may
+	 * prevent this which is not desirable.
+	 */
+	if (LIST_INLIST(&spop_conn->buf_wait.list))
+		goto fail;
+
 	/* Pre-allocate tasks so that we don't have to roll back after the xprt
 	 * has been migrated.
 	 */
 	if (!release) {
-		/* If the connection is attached to a buffer_wait (extremely
-		 * rare), it will be woken up at any instant by its own thread
-		 * and we can't undo it anyway, so let's give up on this one.
-		 * It's not interesting anyway since it's not usable right now.
-		 */
-		if (LIST_INLIST(&spop_conn->buf_wait.list))
-			goto fail;
 
 		new_task = task_new_here();
 		new_tasklet = tasklet_new();
@@ -3701,20 +3705,6 @@ static int spop_takeover(struct connection *conn, int orig_tid, int release)
 					   SUB_RETRY_RECV, &spop_conn->wait_event);
 	}
 
-	if (release) {
-		/* we're being called for a server deletion and are running
-		 * under thread isolation. That's the only way we can
-		 * unregister a possible subscription of the original
-		 * connection from its owner thread's queue, as this involves
-		 * manipulating thread-unsafe areas. Note that it is not
-		 * possible to just call b_dequeue() here as it would update
-		 * the current thread's bufq_map and not the original one.
-		 */
-		BUG_ON(!thread_isolated());
-		if (LIST_INLIST(&spop_conn->buf_wait.list))
-			_b_dequeue(&spop_conn->buf_wait, orig_tid);
-	}
-
 	if (new_task)
 		__task_free(new_task);
 	return 0;