mirror of https://git.haproxy.org/git/haproxy.git/
BUILD: threads: Rename SPIN/RWLOCK macros using HA_ prefix
This removes any name conflicts, especially on Solaris.
parent 7d8e4af46a
commit 2a944ee16b
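The rename is purely mechanical, but the motivation deserves a note: short generic macro names such as SPIN_LOCK or RWLOCK_INIT live in the global preprocessor namespace and can collide with identically named macros pulled in by system headers (the commit message singles out Solaris). Prefixing every lock macro with HA_ moves them into a project-specific namespace. A minimal sketch of the failure mode follows; the system header and its definition are hypothetical, purely for illustration:

    /* hypothetical system header, stands in for a Solaris header */
    #define SPIN_LOCK(l) sys_spin_enter(l)

    /* haproxy's old name now collides: redefining SPIN_LOCK either
     * triggers a redefinition warning or silently picks the wrong
     * expansion, depending on include order.
     */
    #define SPIN_LOCK(lbl, l) do { /* do nothing */ } while(0)

    /* the prefixed name cannot clash with anything the system defines */
    #define HA_SPIN_LOCK(lbl, l) do { /* do nothing */ } while(0)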
@@ -751,13 +751,13 @@ void __offer_buffer(void *from, unsigned int threshold);
 
 static inline void offer_buffers(void *from, unsigned int threshold)
 {
-    SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+    HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
     if (LIST_ISEMPTY(&buffer_wq)) {
-        SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+        HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
         return;
     }
     __offer_buffer(from, threshold);
-    SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+    HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
 }
 
 /*************************************************************************/
@@ -70,20 +70,20 @@ extern THREAD_LOCAL unsigned int tid_bit; /* The bit corresponding to the thread
 #define THREAD_NO_SYNC() ({ 0; })
 #define THREAD_NEED_SYNC() ({ 1; })
 
-#define SPIN_INIT(l) do { /* do nothing */ } while(0)
-#define SPIN_DESTROY(l) do { /* do nothing */ } while(0)
-#define SPIN_LOCK(lbl, l) do { /* do nothing */ } while(0)
-#define SPIN_TRYLOCK(lbl, l) ({ 0; })
-#define SPIN_UNLOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_SPIN_INIT(l) do { /* do nothing */ } while(0)
+#define HA_SPIN_DESTROY(l) do { /* do nothing */ } while(0)
+#define HA_SPIN_LOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_SPIN_TRYLOCK(lbl, l) ({ 0; })
+#define HA_SPIN_UNLOCK(lbl, l) do { /* do nothing */ } while(0)
 
-#define RWLOCK_INIT(l) do { /* do nothing */ } while(0)
-#define RWLOCK_DESTROY(l) do { /* do nothing */ } while(0)
-#define RWLOCK_WRLOCK(lbl, l) do { /* do nothing */ } while(0)
-#define RWLOCK_TRYWRLOCK(lbl, l) ({ 0; })
-#define RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0)
-#define RWLOCK_RDLOCK(lbl, l) do { /* do nothing */ } while(0)
-#define RWLOCK_TRYRDLOCK(lbl, l) ({ 0; })
-#define RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_INIT(l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_DESTROY(l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_WRLOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_TRYWRLOCK(lbl, l) ({ 0; })
+#define HA_RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_RDLOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_TRYRDLOCK(lbl, l) ({ 0; })
+#define HA_RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0)
 
 #else /* USE_THREAD */
 
@@ -208,23 +208,23 @@ extern struct lock_stat lock_stats[LOCK_LABELS];
 
 #define HA_SPINLOCK_T struct ha_spinlock
 
-#define SPIN_INIT(l) __spin_init(l)
-#define SPIN_DESTROY(l) __spin_destroy(l)
+#define HA_SPIN_INIT(l) __spin_init(l)
+#define HA_SPIN_DESTROY(l) __spin_destroy(l)
 
-#define SPIN_LOCK(lbl, l) __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
-#define SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
-#define SPIN_UNLOCK(lbl, l) __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_SPIN_LOCK(lbl, l) __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_SPIN_UNLOCK(lbl, l) __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)
 
 #define HA_RWLOCK_T struct ha_rwlock
 
-#define RWLOCK_INIT(l) __ha_rwlock_init((l))
-#define RWLOCK_DESTROY(l) __ha_rwlock_destroy((l))
-#define RWLOCK_WRLOCK(lbl,l) __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
-#define RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
-#define RWLOCK_WRUNLOCK(lbl,l) __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
-#define RWLOCK_RDLOCK(lbl,l) __ha_rwlock_rdlock(lbl, l)
-#define RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
-#define RWLOCK_RDUNLOCK(lbl,l) __ha_rwlock_rdunlock(lbl, l)
+#define HA_RWLOCK_INIT(l) __ha_rwlock_init((l))
+#define HA_RWLOCK_DESTROY(l) __ha_rwlock_destroy((l))
+#define HA_RWLOCK_WRLOCK(lbl,l) __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_WRUNLOCK(lbl,l) __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_RDLOCK(lbl,l) __ha_rwlock_rdlock(lbl, l)
+#define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
+#define HA_RWLOCK_RDUNLOCK(lbl,l) __ha_rwlock_rdunlock(lbl, l)
 
 struct ha_spinlock {
     __HA_SPINLOCK_T lock;
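Worth noting about the hunk above: in DEBUG_THREAD builds the macros do not expand to the lock operation directly but to helpers (__spin_lock() and friends) that also receive __func__, __FILE__ and __LINE__, so lock contention can be attributed to an exact call site. A self-contained sketch of that wrapper pattern, simplified and using pthreads rather than HAProxy's actual primitives:

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

    /* stands in for __spin_lock(): takes the lock and records the caller */
    static inline void demo_take(const char *func, const char *file, int line)
    {
        pthread_mutex_lock(&demo_lock);
        fprintf(stderr, "lock taken at %s() [%s:%d]\n", func, file, line);
    }

    /* the macro captures the call site, exactly like the debug variant above */
    #define DEMO_LOCK() demo_take(__func__, __FILE__, __LINE__)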
@@ -550,22 +550,22 @@ static inline void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
 
 #define HA_SPINLOCK_T unsigned long
 
-#define SPIN_INIT(l) ({ (*l) = 0; })
-#define SPIN_DESTROY(l) ({ (*l) = 0; })
-#define SPIN_LOCK(lbl, l) pl_take_s(l)
-#define SPIN_TRYLOCK(lbl, l) !pl_try_s(l)
-#define SPIN_UNLOCK(lbl, l) pl_drop_s(l)
+#define HA_SPIN_INIT(l) ({ (*l) = 0; })
+#define HA_SPIN_DESTROY(l) ({ (*l) = 0; })
+#define HA_SPIN_LOCK(lbl, l) pl_take_s(l)
+#define HA_SPIN_TRYLOCK(lbl, l) !pl_try_s(l)
+#define HA_SPIN_UNLOCK(lbl, l) pl_drop_s(l)
 
 #define HA_RWLOCK_T unsigned long
 
-#define RWLOCK_INIT(l) ({ (*l) = 0; })
-#define RWLOCK_DESTROY(l) ({ (*l) = 0; })
-#define RWLOCK_WRLOCK(lbl,l) pl_take_w(l)
-#define RWLOCK_TRYWRLOCK(lbl,l) !pl_try_w(l)
-#define RWLOCK_WRUNLOCK(lbl,l) pl_drop_w(l)
-#define RWLOCK_RDLOCK(lbl,l) pl_take_r(l)
-#define RWLOCK_TRYRDLOCK(lbl,l) !pl_try_r(l)
-#define RWLOCK_RDUNLOCK(lbl,l) pl_drop_r(l)
+#define HA_RWLOCK_INIT(l) ({ (*l) = 0; })
+#define HA_RWLOCK_DESTROY(l) ({ (*l) = 0; })
+#define HA_RWLOCK_WRLOCK(lbl,l) pl_take_w(l)
+#define HA_RWLOCK_TRYWRLOCK(lbl,l) !pl_try_w(l)
+#define HA_RWLOCK_WRUNLOCK(lbl,l) pl_drop_w(l)
+#define HA_RWLOCK_RDLOCK(lbl,l) pl_take_r(l)
+#define HA_RWLOCK_TRYRDLOCK(lbl,l) !pl_try_r(l)
+#define HA_RWLOCK_RDUNLOCK(lbl,l) pl_drop_r(l)
 
 #endif /* DEBUG_THREAD */
 
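In the threaded non-debug build shown above, the macros compile straight down to the plock primitives (pl_take_s()/pl_try_s()/pl_drop_s() for spinlocks, and the _w/_r variants for write and read locks), with the label argument simply ignored. One convention is easy to misread: pl_try_s() returns non-zero on success, so HA_SPIN_TRYLOCK negates it and returns 0 when the lock was acquired. Callers therefore treat a non-zero result as "busy", as in this usage sketch (the same pattern appears verbatim in the fd cache code later in this diff):

    if (HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock))
        goto next;   /* lock is busy: skip this entry for now */
    /* ... critical section ... */
    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);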
@@ -135,9 +135,9 @@ static inline void *pool_get_first(struct pool_head *pool)
 {
     void *ret;
 
-    SPIN_LOCK(POOL_LOCK, &pool->lock);
+    HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
     ret = __pool_get_first(pool);
-    SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+    HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
     return ret;
 }
 /*
@@ -150,10 +150,10 @@ static inline void *pool_alloc_dirty(struct pool_head *pool)
 {
     void *p;
 
-    SPIN_LOCK(POOL_LOCK, &pool->lock);
+    HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
     if ((p = __pool_get_first(pool)) == NULL)
         p = __pool_refill_alloc(pool, 0);
-    SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+    HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
     return p;
 }
 
@@ -169,10 +169,10 @@ static inline void *pool_alloc2(struct pool_head *pool)
     p = pool_alloc_dirty(pool);
 #ifdef DEBUG_MEMORY_POOLS
     if (p) {
-        SPIN_LOCK(POOL_LOCK, &pool->lock);
+        HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
         /* keep track of where the element was allocated from */
         *POOL_LINK(pool, p) = (void *)pool;
-        SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+        HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
     }
 #endif
     if (p && mem_poison_byte >= 0) {
@@ -194,7 +194,7 @@ static inline void *pool_alloc2(struct pool_head *pool)
 static inline void pool_free2(struct pool_head *pool, void *ptr)
 {
     if (likely(ptr != NULL)) {
-        SPIN_LOCK(POOL_LOCK, &pool->lock);
+        HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 #ifdef DEBUG_MEMORY_POOLS
         /* we'll get late corruption if we refill to the wrong pool or double-free */
         if (*POOL_LINK(pool, ptr) != (void *)pool)
@@ -203,7 +203,7 @@ static inline void pool_free2(struct pool_head *pool, void *ptr)
         *POOL_LINK(pool, ptr) = (void *)pool->free_list;
         pool->free_list = (void *)ptr;
         pool->used--;
-        SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+        HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
     }
 }
 #endif /* _COMMON_MEMORY_H */
@@ -88,10 +88,10 @@ static inline void __appctx_free(struct appctx *appctx)
     }
 
     if (!LIST_ISEMPTY(&appctx->buffer_wait.list)) {
-        SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+        HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
         LIST_DEL(&appctx->buffer_wait.list);
         LIST_INIT(&appctx->buffer_wait.list);
-        SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+        HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
     }
 
     pool_free2(pool2_connection, appctx);
@@ -99,14 +99,14 @@ static inline void __appctx_free(struct appctx *appctx)
 }
 static inline void appctx_free(struct appctx *appctx)
 {
-    SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
+    HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
     if (appctx->state & APPLET_RUNNING) {
         appctx->state |= APPLET_WANT_DIE;
-        SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+        HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
         return;
     }
     __appctx_free(appctx);
-    SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+    HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 }
 
 /* wakes up an applet when conditions have changed */
@@ -120,14 +120,14 @@ static inline void __appctx_wakeup(struct appctx *appctx)
 
 static inline void appctx_wakeup(struct appctx *appctx)
 {
-    SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
+    HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
     if (appctx->state & APPLET_RUNNING) {
         appctx->state |= APPLET_WOKEN_UP;
-        SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+        HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
         return;
     }
     __appctx_wakeup(appctx);
-    SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+    HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 }
 
 /* Callback used to wake up an applet when a buffer is available. The applet
@@ -137,18 +137,18 @@ static inline void appctx_wakeup(struct appctx *appctx)
  * requested */
 static inline int appctx_res_wakeup(struct appctx *appctx)
 {
-    SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
+    HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
     if (appctx->state & APPLET_RUNNING) {
         if (appctx->state & APPLET_WOKEN_UP) {
-            SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+            HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
             return 0;
         }
         appctx->state |= APPLET_WOKEN_UP;
-        SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+        HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
         return 1;
     }
     __appctx_wakeup(appctx);
-    SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+    HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
     return 1;
 }
 
@@ -441,9 +441,9 @@ static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *
         return 1;
 
     if (LIST_ISEMPTY(&wait->list)) {
-        SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+        HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
         LIST_ADDQ(&buffer_wq, &wait->list);
-        SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
+        HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
     }
 
     return 0;
@@ -37,15 +37,15 @@ extern struct data_cb check_conn_cb;
  */
 static inline void health_adjust(struct server *s, short status)
 {
-    SPIN_LOCK(SERVER_LOCK, &s->lock);
+    HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
     /* return now if observing nor health check is not enabled */
     if (!s->observe || !s->check.task) {
-        SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+        HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
         return;
     }
 
     __health_adjust(s, status);
-    SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+    HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
 }
 
 const char *init_check(struct check *check, int type);
@@ -113,14 +113,14 @@ static inline void updt_fd_polling(const int fd)
  */
 static inline void fd_alloc_cache_entry(const int fd)
 {
-    RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock);
+    HA_RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock);
     if (fdtab[fd].cache)
         goto end;
     fd_cache_num++;
     fdtab[fd].cache = fd_cache_num;
     fd_cache[fd_cache_num-1] = fd;
 end:
-    RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock);
+    HA_RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock);
 }
 
 /* Removes entry used by fd <fd> from the FD cache and replaces it with the
@@ -131,7 +131,7 @@ static inline void fd_release_cache_entry(int fd)
 {
     unsigned int pos;
 
-    RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock);
+    HA_RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock);
     pos = fdtab[fd].cache;
     if (!pos)
         goto end;
@@ -144,7 +144,7 @@ static inline void fd_release_cache_entry(int fd)
         fdtab[fd].cache = pos;
     }
 end:
-    RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock);
+    HA_RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock);
 }
 
 /* Computes the new polled status based on the active and ready statuses, for
@@ -267,56 +267,56 @@ static inline int fd_active(const int fd)
 /* Disable processing recv events on fd <fd> */
 static inline void fd_stop_recv(int fd)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     if (fd_recv_active(fd)) {
         fdtab[fd].state &= ~FD_EV_ACTIVE_R;
         fd_update_cache(fd); /* need an update entry to change the state */
     }
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Disable processing send events on fd <fd> */
 static inline void fd_stop_send(int fd)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     if (fd_send_active(fd)) {
         fdtab[fd].state &= ~FD_EV_ACTIVE_W;
         fd_update_cache(fd); /* need an update entry to change the state */
     }
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Disable processing of events on fd <fd> for both directions. */
 static inline void fd_stop_both(int fd)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     if (fd_active(fd)) {
         fdtab[fd].state &= ~FD_EV_ACTIVE_RW;
         fd_update_cache(fd); /* need an update entry to change the state */
     }
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD <fd> cannot receive anymore without polling (EAGAIN detected). */
 static inline void fd_cant_recv(const int fd)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     if (fd_recv_ready(fd)) {
         fdtab[fd].state &= ~FD_EV_READY_R;
         fd_update_cache(fd); /* need an update entry to change the state */
     }
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD <fd> can receive anymore without polling. */
 static inline void fd_may_recv(const int fd)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     if (!fd_recv_ready(fd)) {
         fdtab[fd].state |= FD_EV_READY_R;
         fd_update_cache(fd); /* need an update entry to change the state */
     }
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Disable readiness when polled. This is useful to interrupt reading when it
@@ -326,66 +326,66 @@ static inline void fd_may_recv(const int fd)
  */
 static inline void fd_done_recv(const int fd)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     if (fd_recv_polled(fd) && fd_recv_ready(fd)) {
         fdtab[fd].state &= ~FD_EV_READY_R;
         fd_update_cache(fd); /* need an update entry to change the state */
     }
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD <fd> cannot send anymore without polling (EAGAIN detected). */
 static inline void fd_cant_send(const int fd)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     if (fd_send_ready(fd)) {
         fdtab[fd].state &= ~FD_EV_READY_W;
         fd_update_cache(fd); /* need an update entry to change the state */
     }
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Report that FD <fd> can send anymore without polling (EAGAIN detected). */
 static inline void fd_may_send(const int fd)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     if (!fd_send_ready(fd)) {
         fdtab[fd].state |= FD_EV_READY_W;
         fd_update_cache(fd); /* need an update entry to change the state */
     }
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Prepare FD <fd> to try to receive */
 static inline void fd_want_recv(int fd)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     if (!fd_recv_active(fd)) {
         fdtab[fd].state |= FD_EV_ACTIVE_R;
         fd_update_cache(fd); /* need an update entry to change the state */
     }
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Prepare FD <fd> to try to send */
 static inline void fd_want_send(int fd)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     if (!fd_send_active(fd)) {
         fdtab[fd].state |= FD_EV_ACTIVE_W;
         fd_update_cache(fd); /* need an update entry to change the state */
     }
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
 /* Update events seen for FD <fd> and its state if needed. This should be called
  * by the poller to set FD_POLL_* flags. */
 static inline void fd_update_events(int fd, int evts)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     fdtab[fd].ev &= FD_POLL_STICKY;
     fdtab[fd].ev |= evts;
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
     if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
         fd_may_recv(fd);
@@ -397,7 +397,7 @@ static inline void fd_update_events(int fd, int evts)
 /* Prepares <fd> for being polled */
 static inline void fd_insert(int fd, unsigned long thread_mask)
 {
-    SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
     fdtab[fd].ev = 0;
     fdtab[fd].new = 1;
     fdtab[fd].updated = 0;
@@ -405,12 +405,12 @@ static inline void fd_insert(int fd, unsigned long thread_mask)
     fdtab[fd].cloned = 0;
     fdtab[fd].cache = 0;
     fdtab[fd].thread_mask = thread_mask;
-    SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+    HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
-    SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
+    HA_SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
     if (fd + 1 > maxfd)
         maxfd = fd + 1;
-    SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock);
+    HA_SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock);
 }
 
@@ -57,11 +57,11 @@ static inline void session_store_counters(struct session *sess)
 
         ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_CONN_CUR);
         if (ptr) {
-            RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+            HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
             stktable_data_cast(ptr, conn_cur)--;
 
-            RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+            HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
         }
 
         stkctr_set_entry(stkctr, NULL);
@@ -141,7 +141,7 @@ static inline int __stksess_kill_if_expired(struct stktable *t, struct stksess *
 
 static inline void stksess_kill_if_expired(struct stktable *t, struct stksess *ts, int decrefcnt)
 {
-    SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
+    HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
 
     if (decrefcnt)
         ts->ref_cnt--;
@@ -149,7 +149,7 @@ static inline void stksess_kill_if_expired(struct stktable *t, struct stksess *t
     if (t->expire != TICK_ETERNITY && tick_is_expired(ts->expire, now_ms))
         __stksess_kill_if_expired(t, ts);
 
-    SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
+    HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
 }
 
 /* sets the stick counter's entry pointer */
@@ -102,11 +102,11 @@ static inline void stream_store_counters(struct stream *s)
 
         ptr = stktable_data_ptr(s->stkctr[i].table, ts, STKTABLE_DT_CONN_CUR);
         if (ptr) {
-            RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+            HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
             stktable_data_cast(ptr, conn_cur)--;
 
-            RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+            HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
         }
         stkctr_set_entry(&s->stkctr[i], NULL);
         stksess_kill_if_expired(s->stkctr[i].table, ts, 1);
@@ -137,11 +137,11 @@ static inline void stream_stop_content_counters(struct stream *s)
 
         ptr = stktable_data_ptr(s->stkctr[i].table, ts, STKTABLE_DT_CONN_CUR);
         if (ptr) {
-            RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+            HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
             stktable_data_cast(ptr, conn_cur)--;
 
-            RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+            HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
         }
         stkctr_set_entry(&s->stkctr[i], NULL);
         stksess_kill_if_expired(s->stkctr[i].table, ts, 1);
@@ -156,7 +156,7 @@ static inline void stream_start_counters(struct stktable *t, struct stksess *ts)
 {
     void *ptr;
 
-    RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+    HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
     ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_CUR);
     if (ptr)
@@ -173,7 +173,7 @@ static inline void stream_start_counters(struct stktable *t, struct stksess *ts)
     if (tick_isset(t->expire))
         ts->expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
 
-    RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+    HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
 }
 
 /* Enable tracking of stream counters as <stkctr> on stksess <ts>. The caller is
@@ -209,7 +209,7 @@ static void inline stream_inc_http_req_ctr(struct stream *s)
             continue;
         }
 
-        RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+        HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
        ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_REQ_CNT);
         if (ptr)
@@ -220,7 +220,7 @@ static void inline stream_inc_http_req_ctr(struct stream *s)
             update_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
                                    stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
 
-        RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+        HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
     }
 }
 
@@ -243,7 +243,7 @@ static void inline stream_inc_be_http_req_ctr(struct stream *s)
         if (!(stkctr_flags(&s->stkctr[i]) & STKCTR_TRACK_BACKEND))
             continue;
 
-        RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+        HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
         ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_REQ_CNT);
         if (ptr)
@@ -254,7 +254,7 @@ static void inline stream_inc_be_http_req_ctr(struct stream *s)
             update_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
                                    stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
 
-        RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+        HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
     }
 }
 
@@ -281,7 +281,7 @@ static void inline stream_inc_http_err_ctr(struct stream *s)
             continue;
         }
 
-        RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+        HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
 
         ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_ERR_CNT);
         if (ptr)
@@ -292,16 +292,16 @@ static void inline stream_inc_http_err_ctr(struct stream *s)
             update_freq_ctr_period(&stktable_data_cast(ptr, http_err_rate),
                                    stkctr->table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1);
 
-        RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+        HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
     }
 }
 
 static void inline stream_add_srv_conn(struct stream *sess, struct server *srv)
 {
-    SPIN_LOCK(SERVER_LOCK, &srv->lock);
+    HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
     sess->srv_conn = srv;
     LIST_ADD(&srv->actconns, &sess->by_srv);
-    SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+    HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 }
 
 static void inline stream_del_srv_conn(struct stream *sess)
@@ -311,10 +311,10 @@ static void inline stream_del_srv_conn(struct stream *sess)
     if (!srv)
         return;
 
-    SPIN_LOCK(SERVER_LOCK, &srv->lock);
+    HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
     sess->srv_conn = NULL;
     LIST_DEL(&sess->by_srv);
-    SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+    HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
 }
 
 static void inline stream_init_srv_conn(struct stream *sess)
@@ -110,20 +110,20 @@ static inline int task_in_wq(struct task *t)
 struct task *__task_wakeup(struct task *t);
 static inline struct task *task_wakeup(struct task *t, unsigned int f)
 {
-    SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
+    HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
 
     /* If task is running, we postpone the call
      * and backup the state.
      */
     if (unlikely(t->state & TASK_RUNNING)) {
         t->pending_state |= f;
-        SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+        HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
         return t;
     }
     if (likely(!task_in_rq(t)))
         __task_wakeup(t);
     t->state |= f;
-    SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+    HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 
     return t;
 }
@@ -148,10 +148,10 @@ static inline struct task *__task_unlink_wq(struct task *t)
 
 static inline struct task *task_unlink_wq(struct task *t)
 {
-    SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
+    HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
     if (likely(task_in_wq(t)))
         __task_unlink_wq(t);
-    SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
+    HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
     return t;
 }
 
@@ -176,10 +176,10 @@ static inline struct task *__task_unlink_rq(struct task *t)
  */
 static inline struct task *task_unlink_rq(struct task *t)
 {
-    SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
+    HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
     if (likely(task_in_rq(t)))
         __task_unlink_rq(t);
-    SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+    HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
     return t;
 }
 
@@ -256,10 +256,10 @@ static inline void task_queue(struct task *task)
     if (!tick_isset(task->expire))
         return;
 
-    SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
+    HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
     if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
         __task_queue(task);
-    SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
+    HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
 }
 
 /* Ensure <task> will be woken up at most at <when>. If the task is already in
@@ -272,14 +272,14 @@ static inline void task_schedule(struct task *task, int when)
     if (task_in_rq(task))
         return;
 
-    SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
+    HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
     if (task_in_wq(task))
         when = tick_first(when, task->expire);
 
     task->expire = when;
     if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
         __task_queue(task);
-    SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
+    HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
 }
 
 /* This function register a new signal. "lua" is the current lua
@@ -296,7 +296,7 @@ static inline struct notification *notification_new(struct list *purge, struct l
         return NULL;
     LIST_ADDQ(purge, &com->purge_me);
     LIST_ADDQ(event, &com->wake_me);
-    SPIN_INIT(&com->lock);
+    HA_SPIN_INIT(&com->lock);
     com->task = wakeup;
     return com;
 }
@@ -311,15 +311,15 @@ static inline void notification_purge(struct list *purge)
 
     /* Delete all pending communication signals. */
     list_for_each_entry_safe(com, back, purge, purge_me) {
-        SPIN_LOCK(NOTIF_LOCK, &com->lock);
+        HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
         LIST_DEL(&com->purge_me);
         if (!com->task) {
-            SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+            HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
             pool_free2(pool2_notification, com);
             continue;
         }
         com->task = NULL;
-        SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+        HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
     }
 }
 
@@ -333,16 +333,16 @@ static inline void notification_wake(struct list *wake)
 
     /* Wake task and delete all pending communication signals. */
     list_for_each_entry_safe(com, back, wake, wake_me) {
-        SPIN_LOCK(NOTIF_LOCK, &com->lock);
+        HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
         LIST_DEL(&com->wake_me);
         if (!com->task) {
-            SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+            HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
             pool_free2(pool2_notification, com);
             continue;
        }
         task_wakeup(com->task, TASK_WOKEN_MSG);
         com->task = NULL;
-        SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+        HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
     }
 }
 
src/applet.c
@@ -38,7 +38,7 @@ void applet_run_active()
     if (!applets_active_queue)
         return;
 
-    SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
+    HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
 
     curr = LIST_NEXT(&applet_active_queue, typeof(curr), runq);
     while (&curr->runq != &applet_active_queue) {
@@ -52,7 +52,7 @@ void applet_run_active()
         curr = next;
     }
 
-    SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+    HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
 
     /* The list is only scanned from the head. This guarantees that if any
      * applet removes another one, there is no side effect while walking
@@ -84,7 +84,7 @@ void applet_run_active()
             /* curr was left in the list, move it back to the active list */
             LIST_DEL(&curr->runq);
             LIST_INIT(&curr->runq);
-            SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
+            HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
             if (curr->state & APPLET_WANT_DIE) {
                 curr->state = APPLET_SLEEPING;
                 __appctx_free(curr);
@@ -98,7 +98,7 @@ void applet_run_active()
                 curr->state = APPLET_SLEEPING;
             }
         }
-        SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
+        HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
     }
 }
 }
@@ -106,5 +106,5 @@ void applet_run_active()
 __attribute__((constructor))
 static void __applet_init(void)
 {
-    SPIN_INIT(&applet_active_lock);
+    HA_SPIN_INIT(&applet_active_lock);
 }
@@ -75,7 +75,7 @@ int init_buffer()
     if (global.tune.buf_limit)
         pool2_buffer->limit = global.tune.buf_limit;
 
-    SPIN_INIT(&buffer_wq_lock);
+    HA_SPIN_INIT(&buffer_wq_lock);
 
     buffer = pool_refill_alloc(pool2_buffer, pool2_buffer->minavail - 1);
     if (!buffer)
@@ -2108,7 +2108,7 @@ int cfg_parse_peers(const char *file, int linenum, char **args, int kwm)
             newpeer->proto = proto;
             newpeer->xprt = xprt_get(XPRT_RAW);
             newpeer->sock_init_arg = NULL;
-            SPIN_INIT(&newpeer->lock);
+            HA_SPIN_INIT(&newpeer->lock);
 
             if (strcmp(newpeer->id, localpeer) == 0) {
                 /* Current is local peer, it define a frontend */
@@ -2251,7 +2251,7 @@ int cfg_parse_resolvers(const char *file, int linenum, char **args, int kwm)
             LIST_INIT(&curr_resolvers->nameservers);
             LIST_INIT(&curr_resolvers->resolutions.curr);
             LIST_INIT(&curr_resolvers->resolutions.wait);
-            SPIN_INIT(&curr_resolvers->lock);
+            HA_SPIN_INIT(&curr_resolvers->lock);
         }
         else if (strcmp(args[0], "nameserver") == 0) { /* nameserver definition */
             struct sockaddr_storage *sk;
@@ -8505,7 +8505,7 @@ int check_config_validity()
             }
             break;
         }
-        SPIN_INIT(&curproxy->lbprm.lock);
+        HA_SPIN_INIT(&curproxy->lbprm.lock);
 
         if (curproxy->options & PR_O_LOGASAP)
             curproxy->to_log &= ~LW_BYTES;
src/checks.c
@@ -715,7 +715,7 @@ static void event_srv_chk_w(struct conn_stream *cs)
     struct server *s = check->server;
     struct task *t = check->task;
 
-    SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
     if (unlikely(check->result == CHK_RES_FAILED))
         goto out_wakeup;
 
@@ -768,7 +768,7 @@ static void event_srv_chk_w(struct conn_stream *cs)
 out_nowake:
     __cs_stop_send(cs); /* nothing more to write */
 out_unlock:
-    SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
 }
 
 /*
@@ -798,7 +798,7 @@ static void event_srv_chk_r(struct conn_stream *cs)
     int done;
     unsigned short msglen;
 
-    SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
 
     if (unlikely(check->result == CHK_RES_FAILED))
         goto out_wakeup;
@@ -1354,7 +1354,7 @@ static void event_srv_chk_r(struct conn_stream *cs)
 
     task_wakeup(t, TASK_WOKEN_IO);
 out_unlock:
-    SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
     return;
 
 wait_more_data:
@@ -1374,7 +1374,7 @@ static int wake_srv_chk(struct conn_stream *cs)
     struct check *check = cs->data;
     int ret = 0;
 
-    SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
 
     /* we may have to make progress on the TCP checks */
     if (check->type == PR_O2_TCPCHK_CHK) {
@@ -1411,7 +1411,7 @@ static int wake_srv_chk(struct conn_stream *cs)
         ret = -1;
     }
 
-    SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
 
     /* if a connection got replaced, we must absolutely prevent the connection
      * handler from touching its fd, and perform the FD polling updates ourselves
@@ -1647,9 +1647,9 @@ static struct pid_list *pid_list_add(pid_t pid, struct task *t)
     check->curpid = elem;
     LIST_INIT(&elem->list);
 
-    SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
+    HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
     LIST_ADD(&pid_list, &elem->list);
-    SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
+    HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
 
     return elem;
 }
@@ -1661,9 +1661,9 @@ static void pid_list_del(struct pid_list *elem)
     if (!elem)
         return;
 
-    SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
+    HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
     LIST_DEL(&elem->list);
-    SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
+    HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
 
     if (!elem->exited)
         kill(elem->pid, SIGTERM);
@@ -1678,7 +1678,7 @@ static void pid_list_expire(pid_t pid, int status)
 {
     struct pid_list *elem;
 
-    SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
+    HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
     list_for_each_entry(elem, &pid_list, list) {
         if (elem->pid == pid) {
             elem->t->expire = now_ms;
@@ -1688,7 +1688,7 @@ static void pid_list_expire(pid_t pid, int status)
             break;
         }
     }
-    SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
+    HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
 }
 
 static void sigchld_handler(struct sig_handler *sh)
@@ -1719,7 +1719,7 @@ static int init_pid_list(void)
         return 1;
     }
 
-    SPIN_INIT(&pid_list_lock);
+    HA_SPIN_INIT(&pid_list_lock);
 
     return 0;
 }
@@ -1979,7 +1979,7 @@ static struct task *process_chk_proc(struct task *t)
     int ret;
     int expired = tick_is_expired(t->expire, now_ms);
 
-    SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
     if (!(check->state & CHK_ST_INPROGRESS)) {
         /* no check currently running */
         if (!expired) /* woke up too early */
@@ -2092,7 +2092,7 @@ static struct task *process_chk_proc(struct task *t)
         t->expire = tick_add(t->expire, MS_TO_TICKS(check->inter));
 
 out_unlock:
-    SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
     return t;
 }
 
@@ -2113,7 +2113,7 @@ static struct task *process_chk_conn(struct task *t)
     int ret;
     int expired = tick_is_expired(t->expire, now_ms);
 
-    SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
     if (!(check->state & CHK_ST_INPROGRESS)) {
         /* no check currently running */
         if (!expired) /* woke up too early */
@@ -2268,7 +2268,7 @@ static struct task *process_chk_conn(struct task *t)
     while (tick_is_expired(t->expire, now_ms))
         t->expire = tick_add(t->expire, MS_TO_TICKS(check->inter));
 out_unlock:
-    SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
     return t;
 }
 
@@ -2597,7 +2597,7 @@ static int tcpcheck_main(struct check *check)
     struct list *head = check->tcpcheck_rules;
     int retcode = 0;
 
-    SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
 
     /* here, we know that the check is complete or that it failed */
     if (check->result != CHK_RES_UNKNOWN)
@@ -3077,7 +3077,7 @@ static int tcpcheck_main(struct check *check)
     __cs_stop_both(cs);
 
 out_unlock:
-    SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+    HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
     return retcode;
 }
 
@@ -3137,7 +3137,7 @@ static struct task *process_email_alert(struct task *t)
 
     q = container_of(check, typeof(*q), check);
 
-    SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
+    HA_SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
     while (1) {
         if (!(check->state & CHK_ST_ENABLED)) {
             if (LIST_ISEMPTY(&q->email_alerts)) {
@@ -3167,7 +3167,7 @@ static struct task *process_email_alert(struct task *t)
         check->state &= ~CHK_ST_ENABLED;
     }
 end:
-    SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
+    HA_SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
     return t;
 }
 
@@ -3194,7 +3194,7 @@ int init_email_alert(struct mailers *mls, struct proxy *p, char **err)
     struct task *t;
 
     LIST_INIT(&q->email_alerts);
-    SPIN_INIT(&q->lock);
+    HA_SPIN_INIT(&q->lock);
     check->inter = mls->timeout.mail;
     check->rise = DEF_AGENT_RISETIME;
     check->fall = DEF_AGENT_FALLTIME;
@@ -3398,10 +3398,10 @@ static int enqueue_one_email_alert(struct proxy *p, struct server *s,
     if (!add_tcpcheck_expect_str(&alert->tcpcheck_rules, "221 "))
         goto error;
 
-    SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
+    HA_SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
     task_wakeup(check->task, TASK_WOKEN_MSG);
     LIST_ADDQ(&q->email_alerts, &alert->list);
-    SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
+    HA_SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
     return 1;
 
 error:
@@ -160,10 +160,10 @@ static inline int init_comp_ctx(struct comp_ctx **comp_ctx)
 #endif
 
     if (unlikely(pool_comp_ctx == NULL)) {
-        SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+        HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
         if (unlikely(pool_comp_ctx == NULL))
             pool_comp_ctx = create_pool("comp_ctx", sizeof(struct comp_ctx), MEM_F_SHARED);
-        SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+        HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
     }
 
     *comp_ctx = pool_alloc2(pool_comp_ctx);
@@ -412,10 +412,10 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
     switch (round) {
     case 0:
         if (zlib_pool_deflate_state == NULL) {
-            SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+            HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
             if (zlib_pool_deflate_state == NULL)
                 zlib_pool_deflate_state = create_pool("zlib_state", size * items, MEM_F_SHARED);
-            SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+            HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
         }
         pool = zlib_pool_deflate_state;
         ctx->zlib_deflate_state = buf = pool_alloc2(pool);
@@ -423,10 +423,10 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
 
     case 1:
         if (zlib_pool_window == NULL) {
-            SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+            HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
             if (zlib_pool_window == NULL)
                 zlib_pool_window = create_pool("zlib_window", size * items, MEM_F_SHARED);
-            SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+            HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
         }
         pool = zlib_pool_window;
         ctx->zlib_window = buf = pool_alloc2(pool);
@@ -434,10 +434,10 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
 
     case 2:
         if (zlib_pool_prev == NULL) {
-            SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+            HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
             if (zlib_pool_prev == NULL)
                 zlib_pool_prev = create_pool("zlib_prev", size * items, MEM_F_SHARED);
-            SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+            HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
         }
         pool = zlib_pool_prev;
         ctx->zlib_prev = buf = pool_alloc2(pool);
@@ -445,10 +445,10 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
 
     case 3:
         if (zlib_pool_head == NULL) {
-            SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+            HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
             if (zlib_pool_head == NULL)
                 zlib_pool_head = create_pool("zlib_head", size * items, MEM_F_SHARED);
-            SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+            HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
         }
         pool = zlib_pool_head;
         ctx->zlib_head = buf = pool_alloc2(pool);
@@ -456,10 +456,10 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
 
     case 4:
         if (zlib_pool_pending_buf == NULL) {
-            SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+            HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
             if (zlib_pool_pending_buf == NULL)
                 zlib_pool_pending_buf = create_pool("zlib_pending_buf", size * items, MEM_F_SHARED);
-            SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+            HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
         }
         pool = zlib_pool_pending_buf;
         ctx->zlib_pending_buf = buf = pool_alloc2(pool);
@@ -721,7 +721,7 @@ static void __comp_fetch_init(void)
     global.tune.maxzlibmem = DEFAULT_MAXZLIBMEM * 1024U * 1024U,
 #endif
 #ifdef USE_ZLIB
-    SPIN_INIT(&comp_pool_lock);
+    HA_SPIN_INIT(&comp_pool_lock);
     memprintf(&ptr, "Built with zlib version : " ZLIB_VERSION);
     memprintf(&ptr, "%s\nRunning on zlib version : %s", ptr, zlibVersion());
 #elif defined(USE_SLZ)
src/dns.c
@@ -486,7 +486,7 @@ static void dns_check_dns_response(struct dns_resolution *res)
 
             /* Remove any associated server */
             for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) {
-                SPIN_LOCK(SERVER_LOCK, &srv->lock);
+                HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
                 if (srv->srvrq == srvrq && srv->svc_port == item->port &&
                     item->data_len == srv->hostname_dn_len &&
                     !memcmp(srv->hostname_dn, item->target, item->data_len)) {
@@ -498,7 +498,7 @@ static void dns_check_dns_response(struct dns_resolution *res)
                     srv->hostname_dn_len = 0;
                     dns_unlink_resolution(srv->dns_requester);
                 }
-                SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+                HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
             }
         }
 
@@ -518,7 +518,7 @@ static void dns_check_dns_response(struct dns_resolution *res)
 
             /* Check if a server already uses that hostname */
             for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) {
-                SPIN_LOCK(SERVER_LOCK, &srv->lock);
+                HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
                 if (srv->srvrq == srvrq && srv->svc_port == item->port &&
                     item->data_len == srv->hostname_dn_len &&
                     !memcmp(srv->hostname_dn, item->target, item->data_len)) {
@@ -528,20 +528,20 @@ static void dns_check_dns_response(struct dns_resolution *res)
                         snprintf(weight, sizeof(weight), "%d", item->weight);
                         server_parse_weight_change_request(srv, weight);
                     }
-                    SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+                    HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
                     break;
                 }
-                SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+                HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
             }
             if (srv)
                 continue;
 
             /* If not, try to find a server with undefined hostname */
             for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) {
-                SPIN_LOCK(SERVER_LOCK, &srv->lock);
+                HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
                 if (srv->srvrq == srvrq && !srv->hostname_dn)
                     break;
-                SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+                HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
             }
             /* And update this server, if found */
             if (srv) {
@@ -551,7 +551,7 @@ static void dns_check_dns_response(struct dns_resolution *res)
 
                 if (dns_dn_label_to_str(item->target, item->data_len+1,
                                         hostname, DNS_MAX_NAME_SIZE) == -1) {
-                    SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+                    HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
                     continue;
                 }
                 msg = update_server_fqdn(srv, hostname, "SRV record", 1);
@@ -565,7 +565,7 @@ static void dns_check_dns_response(struct dns_resolution *res)
                     srv->check.port = item->port;
                 snprintf(weight, sizeof(weight), "%d", item->weight);
                 server_parse_weight_change_request(srv, weight);
-                SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+                HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
             }
         }
     }
@@ -1348,11 +1348,11 @@ int dns_link_resolution(void *requester, int requester_type, int requester_locke
 
     if (srv) {
         if (!requester_locked)
-            SPIN_LOCK(SERVER_LOCK, &srv->lock);
+            HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
         if (srv->dns_requester == NULL) {
             if ((req = calloc(1, sizeof(*req))) == NULL) {
                 if (!requester_locked)
-                    SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+                    HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
                 goto err;
             }
             req->owner = &srv->obj_type;
@@ -1361,7 +1361,7 @@ int dns_link_resolution(void *requester, int requester_type, int requester_locke
         else
             req = srv->dns_requester;
         if (!requester_locked)
-            SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+            HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
     }
     else if (srvrq) {
         if (srvrq->dns_requester == NULL) {
@@ -1463,7 +1463,7 @@ static void dns_resolve_recv(struct dgram_conn *dgram)
         return;
 
     resolvers = ns->resolvers;
-    SPIN_LOCK(DNS_LOCK, &resolvers->lock);
+    HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
 
     /* process all pending input messages */
     while (1) {
@@ -1617,10 +1617,10 @@ static void dns_resolve_recv(struct dgram_conn *dgram)
                 struct server *s = objt_server(req->owner);
 
                 if (s)
-                    SPIN_LOCK(SERVER_LOCK, &s->lock);
+                    HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
                 req->requester_cb(req, tmpns);
                 if (s)
-                    SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+                    HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
                 tmpns = NULL;
             }
 
@@ -1630,7 +1630,7 @@ static void dns_resolve_recv(struct dgram_conn *dgram)
         continue;
     }
     dns_update_resolvers_timeout(resolvers);
-    SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
+    HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
 }
 
 /* Called when a resolvers network socket is ready to send data */
@@ -1655,7 +1655,7 @@ static void dns_resolve_send(struct dgram_conn *dgram)
         return;
 
     resolvers = ns->resolvers;
-    SPIN_LOCK(DNS_LOCK, &resolvers->lock);
+    HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
 
     list_for_each_entry(res, &resolvers->resolutions.curr, list) {
         int ret;
@@ -1682,7 +1682,7 @@ static void dns_resolve_send(struct dgram_conn *dgram)
         ns->counters.snd_error++;
         res->nb_queries++;
     }
-    SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
+    HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
 }
 
 /* Processes DNS resolution. First, it checks the active list to detect expired
@@ -1695,7 +1695,7 @@ static struct task *dns_process_resolvers(struct task *t)
     struct dns_resolution *res, *resback;
     int exp;
 
-    SPIN_LOCK(DNS_LOCK, &resolvers->lock);
+    HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
 
     /* Handle all expired resolutions from the active list */
     list_for_each_entry_safe(res, resback, &resolvers->resolutions.curr, list) {
@@ -1765,7 +1765,7 @@ static struct task *dns_process_resolvers(struct task *t)
     }
 
     dns_update_resolvers_timeout(resolvers);
-    SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
+    HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
     return t;
 }
 
@@ -71,14 +71,14 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
         if (!fdtab[fd].owner)
             continue;
 
-        SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
         fdtab[fd].updated = 0;
         fdtab[fd].new = 0;
 
         eo = fdtab[fd].state;
         en = fd_compute_new_polled_status(eo);
         fdtab[fd].state = en;
-        SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
         if ((eo ^ en) & FD_EV_POLLED_RW) {
             /* poll status changed */
@@ -50,14 +50,14 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
         if (!fdtab[fd].owner)
             continue;
 
-        SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
         fdtab[fd].updated = 0;
         fdtab[fd].new = 0;
 
         eo = fdtab[fd].state;
         en = fd_compute_new_polled_status(eo);
         fdtab[fd].state = en;
-        SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
         if ((eo ^ en) & FD_EV_POLLED_RW) {
             /* poll status changed */
@@ -50,10 +50,10 @@ static inline void hap_fd_clr(int fd, unsigned int *evts)
 
 REGPRM1 static void __fd_clo(int fd)
 {
-    SPIN_LOCK(POLL_LOCK, &poll_lock);
+    HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
     hap_fd_clr(fd, fd_evts[DIR_RD]);
     hap_fd_clr(fd, fd_evts[DIR_WR]);
-    SPIN_UNLOCK(POLL_LOCK, &poll_lock);
+    HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
 }
 
 /*
@@ -76,18 +76,18 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
         if (!fdtab[fd].owner)
             continue;
 
-        SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
         fdtab[fd].updated = 0;
         fdtab[fd].new = 0;
 
         eo = fdtab[fd].state;
         en = fd_compute_new_polled_status(eo);
         fdtab[fd].state = en;
-        SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
         if ((eo ^ en) & FD_EV_POLLED_RW) {
             /* poll status changed, update the lists */
-            SPIN_LOCK(POLL_LOCK, &poll_lock);
+            HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
             if ((eo & ~en) & FD_EV_POLLED_R)
                 hap_fd_clr(fd, fd_evts[DIR_RD]);
             else if ((en & ~eo) & FD_EV_POLLED_R)
@@ -97,7 +97,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
                 hap_fd_clr(fd, fd_evts[DIR_WR]);
             else if ((en & ~eo) & FD_EV_POLLED_W)
                 hap_fd_set(fd, fd_evts[DIR_WR]);
-            SPIN_UNLOCK(POLL_LOCK, &poll_lock);
+            HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
         }
     }
     fd_nbupdt = 0;
@@ -31,10 +31,10 @@ static THREAD_LOCAL fd_set *tmp_evts[2];
 /* Immediately remove the entry upon close() */
 REGPRM1 static void __fd_clo(int fd)
 {
-    SPIN_LOCK(POLL_LOCK, &poll_lock);
+    HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
     FD_CLR(fd, fd_evts[DIR_RD]);
     FD_CLR(fd, fd_evts[DIR_WR]);
-    SPIN_UNLOCK(POLL_LOCK, &poll_lock);
+    HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
 }
 
 /*
@@ -58,18 +58,18 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
         if (!fdtab[fd].owner)
             continue;
 
-        SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+        HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
         fdtab[fd].updated = 0;
        fdtab[fd].new = 0;
 
         eo = fdtab[fd].state;
         en = fd_compute_new_polled_status(eo);
         fdtab[fd].state = en;
-        SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+        HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
         if ((eo ^ en) & FD_EV_POLLED_RW) {
             /* poll status changed, update the lists */
-            SPIN_LOCK(POLL_LOCK, &poll_lock);
+            HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
             if ((eo & ~en) & FD_EV_POLLED_R)
                 FD_CLR(fd, fd_evts[DIR_RD]);
             else if ((en & ~eo) & FD_EV_POLLED_R)
@@ -79,7 +79,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
                 FD_CLR(fd, fd_evts[DIR_WR]);
             else if ((en & ~eo) & FD_EV_POLLED_W)
                 FD_SET(fd, fd_evts[DIR_WR]);
-            SPIN_UNLOCK(POLL_LOCK, &poll_lock);
+            HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
         }
     }
     fd_nbupdt = 0;
src/fd.c
@ -185,7 +185,7 @@ HA_SPINLOCK_T poll_lock; /* global lock to protect poll info */
|
||||
*/
|
||||
static void fd_dodelete(int fd, int do_close)
|
||||
{
|
||||
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
|
||||
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
|
||||
if (fdtab[fd].linger_risk) {
|
||||
/* this is generally set when connecting to servers */
|
||||
setsockopt(fd, SOL_SOCKET, SO_LINGER,
|
||||
@ -205,12 +205,12 @@ static void fd_dodelete(int fd, int do_close)
|
||||
fdtab[fd].thread_mask = 0;
|
||||
if (do_close)
|
||||
close(fd);
|
||||
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
|
||||
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
|
||||
|
||||
SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
|
||||
HA_SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
|
||||
while ((maxfd-1 >= 0) && !fdtab[maxfd-1].owner)
|
||||
maxfd--;
|
||||
SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock);
|
||||
HA_SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock);
|
||||
}
|
||||
|
||||
/* Deletes an FD from the fdsets, and recomputes the maxfd limit.
|
||||
@ -241,16 +241,16 @@ void fd_process_cached_events()
|
||||
if (!fd_cache_num)
|
||||
return;
|
||||
|
||||
RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock);
|
||||
HA_RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock);
|
||||
for (entry = 0; entry < fd_cache_num; ) {
|
||||
fd = fd_cache[entry];
|
||||
|
||||
if (!(fdtab[fd].thread_mask & tid_bit))
|
||||
goto next;
|
||||
if (SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock))
|
||||
if (HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock))
|
||||
goto next;
|
||||
|
||||
RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock);
|
||||
HA_RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock);
|
||||
|
||||
e = fdtab[fd].state;
|
||||
fdtab[fd].ev &= FD_POLL_STICKY;
|
||||
@ -262,15 +262,15 @@ void fd_process_cached_events()
|
||||
fdtab[fd].ev |= FD_POLL_OUT;
|
||||
|
||||
if (fdtab[fd].iocb && fdtab[fd].owner && fdtab[fd].ev) {
|
||||
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
|
||||
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
|
||||
fdtab[fd].iocb(fd);
|
||||
}
|
||||
else {
|
||||
fd_release_cache_entry(fd);
|
||||
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
|
||||
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
|
||||
}
|
||||
|
||||
RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock);
|
||||
HA_RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock);
|
||||
/* If the fd was removed from the cache, it has been
|
||||
* replaced by the next one that we don't want to skip !
|
||||
*/
|
||||
@ -279,7 +279,7 @@ void fd_process_cached_events()
|
||||
next:
|
||||
entry++;
|
||||
}
|
||||
RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock);
|
||||
HA_RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock);
|
||||
}
|
||||
|
||||
/* disable the specified poller */
|
||||
@ -329,11 +329,11 @@ int init_pollers()
|
||||
hap_register_per_thread_deinit(deinit_pollers_per_thread);
|
||||
|
||||
for (p = 0; p < global.maxsock; p++)
|
||||
SPIN_INIT(&fdtab[p].lock);
|
||||
HA_SPIN_INIT(&fdtab[p].lock);
|
||||
|
||||
SPIN_INIT(&fdtab_lock);
|
||||
RWLOCK_INIT(&fdcache_lock);
|
||||
SPIN_INIT(&poll_lock);
|
||||
HA_SPIN_INIT(&fdtab_lock);
|
||||
HA_RWLOCK_INIT(&fdcache_lock);
|
||||
HA_SPIN_INIT(&poll_lock);
|
||||
do {
|
||||
bp = NULL;
|
||||
for (p = 0; p < nbpollers; p++)
|
||||
@ -367,7 +367,7 @@ void deinit_pollers() {
|
||||
int p;
|
||||
|
||||
for (p = 0; p < global.maxsock; p++)
|
||||
SPIN_DESTROY(&fdtab[p].lock);
|
||||
HA_SPIN_DESTROY(&fdtab[p].lock);
|
||||
|
||||
for (p = 0; p < nbpollers; p++) {
|
||||
bp = &pollers[p];
|
||||
@ -380,9 +380,9 @@ void deinit_pollers() {
|
||||
free(fdinfo); fdinfo = NULL;
|
||||
free(fdtab); fdtab = NULL;
|
||||
|
||||
SPIN_DESTROY(&fdtab_lock);
|
||||
RWLOCK_DESTROY(&fdcache_lock);
|
||||
SPIN_DESTROY(&poll_lock);
|
||||
HA_SPIN_DESTROY(&fdtab_lock);
|
||||
HA_RWLOCK_DESTROY(&fdcache_lock);
|
||||
HA_SPIN_DESTROY(&poll_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
|
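A note on the pattern visible in fd_process_cached_events() above: the scan deliberately never blocks on a busy entry, since HA_SPIN_TRYLOCK() returns non-zero when the lock is already held and the loop simply skips to the next entry. A minimal stand-alone sketch of that trylock-and-skip idiom, written against plain POSIX spinlocks rather than HAProxy's wrappers (the entry type and the scan loop are illustrative assumptions, not HAProxy code):

#include <pthread.h>
#include <stdio.h>

struct entry {
	pthread_spinlock_t lock;
	int pending;
};

/* Scan a table without blocking: a non-zero return from trylock means
 * another thread owns the entry, so skip it and revisit on a later
 * pass, like the HA_SPIN_TRYLOCK() + "goto next" path above. */
static void scan(struct entry *tab, int n)
{
	for (int i = 0; i < n; i++) {
		if (pthread_spin_trylock(&tab[i].lock))
			continue; /* busy: leave it for another pass */
		if (tab[i].pending) {
			tab[i].pending = 0;
			printf("handled entry %d\n", i);
		}
		pthread_spin_unlock(&tab[i].lock);
	}
}

int main(void)
{
	struct entry tab[4];

	for (int i = 0; i < 4; i++) {
		pthread_spin_init(&tab[i].lock, PTHREAD_PROCESS_PRIVATE);
		tab[i].pending = i & 1;
	}
	scan(tab, 4);
	return 0;
}
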
@ -171,7 +171,7 @@ spoe_release_agent(struct spoe_agent *agent)
			spoe_release_group(grp);
	}
	for (i = 0; i < global.nbthread; ++i)
		SPIN_DESTROY(&agent->rt[i].lock);
		HA_SPIN_DESTROY(&agent->rt[i].lock);
	free(agent->rt);
	free(agent);
}
@ -1426,10 +1426,10 @@ spoe_handle_connecting_appctx(struct appctx *appctx)
			 * add the applet in the list of running applets. */
			agent->rt[tid].applets_idle++;
			appctx->st0 = SPOE_APPCTX_ST_IDLE;
			SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
			HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
			LIST_DEL(&SPOE_APPCTX(appctx)->list);
			LIST_ADD(&agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list);
			SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
			HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);

			/* Update runtime agent info */
			HA_ATOMIC_UPDATE_MIN(&agent->rt[tid].frame_size, SPOE_APPCTX(appctx)->max_frame_size);
@ -1710,10 +1710,10 @@ spoe_handle_processing_appctx(struct appctx *appctx)
		agent->rt[tid].applets_idle++;
	}
	if (fpa || (SPOE_APPCTX(appctx)->flags & SPOE_APPCTX_FL_PERSIST)) {
		SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
		HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
		LIST_DEL(&SPOE_APPCTX(appctx)->list);
		LIST_ADD(&agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list);
		SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
		HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
		if (fpa)
			SPOE_APPCTX(appctx)->task->expire =
				tick_add_ifset(now_ms, agent->timeout.idle);
@ -1985,9 +1985,9 @@ spoe_create_appctx(struct spoe_config *conf)
	strm->do_log = NULL;
	strm->res.flags |= CF_READ_DONTWAIT;

	SPIN_LOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
	HA_SPIN_LOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
	LIST_ADDQ(&conf->agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list);
	SPIN_UNLOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
	HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
	conf->agent->rt[tid].applets_act++;

	task_wakeup(SPOE_APPCTX(appctx)->task, TASK_WOKEN_INIT);
@ -2096,10 +2096,10 @@ spoe_queue_context(struct spoe_context *ctx)
		appctx = spoe_appctx->owner;
		if (appctx->st0 == SPOE_APPCTX_ST_IDLE) {
			spoe_wakeup_appctx(appctx);
			SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
			HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
			LIST_DEL(&spoe_appctx->list);
			LIST_ADDQ(&agent->rt[tid].applets, &spoe_appctx->list);
			SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
			HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
			break;
		}
	}
@ -2699,18 +2699,18 @@ spoe_acquire_buffer(struct buffer **buf, struct buffer_wait *buffer_wait)
		return 1;

	if (!LIST_ISEMPTY(&buffer_wait->list)) {
		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		LIST_DEL(&buffer_wait->list);
		LIST_INIT(&buffer_wait->list);
		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
	}

	if (b_alloc_margin(buf, global.tune.reserved_bufs))
		return 1;

	SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
	HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
	LIST_ADDQ(&buffer_wq, &buffer_wait->list);
	SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
	HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
	return 0;
}

@ -2718,10 +2718,10 @@ static void
spoe_release_buffer(struct buffer **buf, struct buffer_wait *buffer_wait)
{
	if (!LIST_ISEMPTY(&buffer_wait->list)) {
		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		LIST_DEL(&buffer_wait->list);
		LIST_INIT(&buffer_wait->list);
		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
	}

	/* Release the buffer if needed */
@ -2813,10 +2813,10 @@ spoe_sig_stop(struct sig_handler *sh)
			agent = conf->agent;

			for (i = 0; i < global.nbthread; ++i) {
				SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
				HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
				list_for_each_entry(spoe_appctx, &agent->rt[i].applets, list)
					spoe_wakeup_appctx(spoe_appctx->owner);
				SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
				HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
			}
		}
		p = p->next;
@ -3221,7 +3221,7 @@ cfg_parse_spoe_agent(const char *file, int linenum, char **args, int kwm)
			LIST_INIT(&curagent->rt[i].applets);
			LIST_INIT(&curagent->rt[i].sending_queue);
			LIST_INIT(&curagent->rt[i].waiting_queue);
			SPIN_INIT(&curagent->rt[i].lock);
			HA_SPIN_INIT(&curagent->rt[i].lock);
		}
	}
	else if (!strcmp(args[0], "use-backend")) {

@ -2075,7 +2075,7 @@ void deinit(void)
			if (xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->destroy_srv)
				xprt_get(XPRT_SSL)->destroy_srv(s);
		}
		SPIN_DESTROY(&s->lock);
		HA_SPIN_DESTROY(&s->lock);
		free(s);
		s = s_next;
	}/* end while(s) */
@ -2124,8 +2124,8 @@ void deinit(void)

		p0 = p;
		p = p->next;
		SPIN_DESTROY(&p0->lbprm.lock);
		SPIN_DESTROY(&p0->lock);
		HA_SPIN_DESTROY(&p0->lbprm.lock);
		HA_SPIN_DESTROY(&p0->lock);
		free(p0);
	}/* end while(p) */

@ -120,7 +120,7 @@ void thread_enter_sync()

	thread_sync_barrier(&barrier);
	if (threads_want_sync & tid_bit)
		SPIN_LOCK(THREAD_SYNC_LOCK, &sync_lock);
		HA_SPIN_LOCK(THREAD_SYNC_LOCK, &sync_lock);
}

/* Exit from the sync point and unlock it if it was previously locked. If the
@ -135,7 +135,7 @@ void thread_exit_sync()
		return;

	if (threads_want_sync & tid_bit)
		SPIN_UNLOCK(THREAD_SYNC_LOCK, &sync_lock);
		HA_SPIN_UNLOCK(THREAD_SYNC_LOCK, &sync_lock);

	if (HA_ATOMIC_AND(&threads_want_sync, ~tid_bit) == 0) {
		char c;
@ -151,7 +151,7 @@ void thread_exit_sync()
__attribute__((constructor))
static void __hathreads_init(void)
{
	SPIN_INIT(&sync_lock);
	HA_SPIN_INIT(&sync_lock);
#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
	memset(lock_stats, 0, sizeof(lock_stats));
#endif
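A side note on the guard visible in thread_enter_sync()/thread_exit_sync() above: each thread owns a single bit (tid_bit), and the sync lock is only taken when that bit is set in threads_want_sync. A tiny single-threaded sketch of the bit-mask bookkeeping (the values are made up purely for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long threads_want_sync = 0;
	unsigned long tid_bit = 1UL << 2;  /* pretend we are thread 2 */

	threads_want_sync |= tid_bit;      /* a sync was requested for us */

	/* the test guarding HA_SPIN_LOCK() in thread_enter_sync() */
	if (threads_want_sync & tid_bit)
		printf("thread 2 takes the sync lock\n");

	/* thread_exit_sync() clears the bit atomically; the thread whose
	 * clear leaves the mask at zero wakes the others up */
	threads_want_sync &= ~tid_bit;
	if (threads_want_sync == 0)
		printf("last thread out releases the sync point\n");
	return 0;
}
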
12	src/hlua.c
@ -125,11 +125,11 @@ static int hlua_panic_ljmp(lua_State *L) { longjmp(safe_ljmp_env, 1); }
#define SET_SAFE_LJMP(__L) \
	({ \
		int ret; \
		SPIN_LOCK(LUA_LOCK, &hlua_global_lock); \
		HA_SPIN_LOCK(LUA_LOCK, &hlua_global_lock); \
		if (setjmp(safe_ljmp_env) != 0) { \
			lua_atpanic(__L, hlua_panic_safe); \
			ret = 0; \
			SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \
			HA_SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \
		} else { \
			lua_atpanic(__L, hlua_panic_ljmp); \
			ret = 1; \
@ -143,7 +143,7 @@ static int hlua_panic_ljmp(lua_State *L) { longjmp(safe_ljmp_env, 1); }
#define RESET_SAFE_LJMP(__L) \
	do { \
		lua_atpanic(__L, hlua_panic_safe); \
		SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \
		HA_SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \
	} while(0)

/* Applet status flags */
@ -994,7 +994,7 @@ static enum hlua_exec hlua_ctx_resume(struct hlua *lua, int yield_allowed)
	/* Lock the whole Lua execution. This lock must be before the
	 * label "resume_execution".
	 */
	SPIN_LOCK(LUA_LOCK, &hlua_global_lock);
	HA_SPIN_LOCK(LUA_LOCK, &hlua_global_lock);

resume_execution:

@ -1154,7 +1154,7 @@ static enum hlua_exec hlua_ctx_resume(struct hlua *lua, int yield_allowed)
	}

	/* This is the main exit point, remove the Lua lock. */
	SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock);
	HA_SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock);

	return ret;
}
@ -7370,7 +7370,7 @@ void hlua_init(void)
	};
#endif

	SPIN_INIT(&hlua_global_lock);
	HA_SPIN_INIT(&hlua_global_lock);

	/* Initialise struct hlua and com signals pool */
	pool2_hlua = create_pool("hlua", sizeof(struct hlua), MEM_F_SHARED);

@ -587,9 +587,9 @@ int hlua_server_set_weight(lua_State *L)
	srv = hlua_check_server(L, 1);
	weight = luaL_checkstring(L, 2);

	SPIN_LOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
	err = server_parse_weight_change_request(srv, weight);
	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	if (!err)
		lua_pushnil(L);
	else
@ -615,9 +615,9 @@ int hlua_server_set_addr(lua_State *L)
	srv = hlua_check_server(L, 1);
	addr = luaL_checkstring(L, 2);

	SPIN_LOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
	err = server_parse_addr_change_request(srv, addr, "Lua script");
	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	if (!err)
		lua_pushnil(L);
	else
@ -630,9 +630,9 @@ int hlua_server_shut_sess(lua_State *L)
	struct server *srv;

	srv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
	srv_shutdown_streams(srv, SF_ERR_KILLED);
	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	return 0;
}

@ -641,9 +641,9 @@ int hlua_server_set_drain(lua_State *L)
	struct server *srv;

	srv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
	srv_adm_set_drain(srv);
	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	return 0;
}

@ -652,9 +652,9 @@ int hlua_server_set_maint(lua_State *L)
	struct server *srv;

	srv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
	srv_adm_set_maint(srv);
	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	return 0;
}

@ -663,9 +663,9 @@ int hlua_server_set_ready(lua_State *L)
	struct server *srv;

	srv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
	srv_adm_set_ready(srv);
	SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
	return 0;
}

@ -674,11 +674,11 @@ int hlua_server_check_enable(lua_State *L)
	struct server *sv;

	sv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
	if (sv->check.state & CHK_ST_CONFIGURED) {
		sv->check.state |= CHK_ST_ENABLED;
	}
	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	return 0;
}

@ -687,11 +687,11 @@ int hlua_server_check_disable(lua_State *L)
	struct server *sv;

	sv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
	if (sv->check.state & CHK_ST_CONFIGURED) {
		sv->check.state &= ~CHK_ST_ENABLED;
	}
	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	return 0;
}

@ -700,12 +700,12 @@ int hlua_server_check_force_up(lua_State *L)
	struct server *sv;

	sv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
	if (!(sv->track)) {
		sv->check.health = sv->check.rise + sv->check.fall - 1;
		srv_set_running(sv, "changed from Lua script", NULL);
	}
	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	return 0;
}

@ -714,12 +714,12 @@ int hlua_server_check_force_nolb(lua_State *L)
	struct server *sv;

	sv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
	if (!(sv->track)) {
		sv->check.health = sv->check.rise + sv->check.fall - 1;
		srv_set_stopping(sv, "changed from Lua script", NULL);
	}
	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	return 0;
}

@ -728,12 +728,12 @@ int hlua_server_check_force_down(lua_State *L)
	struct server *sv;

	sv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
	if (!(sv->track)) {
		sv->check.health = 0;
		srv_set_stopped(sv, "changed from Lua script", NULL);
	}
	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	return 0;
}

@ -742,11 +742,11 @@ int hlua_server_agent_enable(lua_State *L)
	struct server *sv;

	sv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
	if (sv->agent.state & CHK_ST_CONFIGURED) {
		sv->agent.state |= CHK_ST_ENABLED;
	}
	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	return 0;
}

@ -755,11 +755,11 @@ int hlua_server_agent_disable(lua_State *L)
	struct server *sv;

	sv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
	if (sv->agent.state & CHK_ST_CONFIGURED) {
		sv->agent.state &= ~CHK_ST_ENABLED;
	}
	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	return 0;
}

@ -768,12 +768,12 @@ int hlua_server_agent_force_up(lua_State *L)
	struct server *sv;

	sv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
	if (sv->agent.state & CHK_ST_ENABLED) {
		sv->agent.health = sv->agent.rise + sv->agent.fall - 1;
		srv_set_running(sv, "changed from Lua script", NULL);
	}
	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	return 0;
}

@ -782,12 +782,12 @@ int hlua_server_agent_force_down(lua_State *L)
	struct server *sv;

	sv = hlua_check_server(L, 1);
	SPIN_LOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
	if (sv->agent.state & CHK_ST_ENABLED) {
		sv->agent.health = 0;
		srv_set_stopped(sv, "changed from Lua script", NULL);
	}
	SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
	return 0;
}

@ -364,7 +364,7 @@ struct server *chash_get_next_server(struct proxy *p, struct server *srvtoavoid)
	srv = avoided = NULL;
	avoided_node = NULL;

	SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
	if (p->srv_act)
		root = &p->lbprm.chash.act;
	else if (p->lbprm.fbck) {
@ -423,7 +423,7 @@ struct server *chash_get_next_server(struct proxy *p, struct server *srvtoavoid)
	}

 out:
	SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	return srv;
}

@ -64,10 +64,10 @@ static void fas_srv_reposition(struct server *s)
	if (!s->lb_tree)
		return;

	SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
	HA_SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
	fas_dequeue_srv(s);
	fas_queue_srv(s);
	SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
}

/* This function updates the server trees according to server <srv>'s new
@ -277,7 +277,7 @@ struct server *fas_get_next_server(struct proxy *p, struct server *srvtoavoid)

	srv = avoided = NULL;

	SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
	if (p->srv_act)
		node = eb32_first(&p->lbprm.fas.act);
	else if (p->lbprm.fbck) {
@ -313,7 +313,7 @@ struct server *fas_get_next_server(struct proxy *p, struct server *srvtoavoid)
	if (!srv)
		srv = avoided;
 out:
	SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	return srv;
}

@ -56,10 +56,10 @@ static void fwlc_srv_reposition(struct server *s)
	if (!s->lb_tree)
		return;

	SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
	HA_SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
	fwlc_dequeue_srv(s);
	fwlc_queue_srv(s);
	SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
}

/* This function updates the server trees according to server <srv>'s new
@ -269,7 +269,7 @@ struct server *fwlc_get_next_server(struct proxy *p, struct server *srvtoavoid)

	srv = avoided = NULL;

	SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
	if (p->srv_act)
		node = eb32_first(&p->lbprm.fwlc.act);
	else if (p->lbprm.fbck) {
@ -305,7 +305,7 @@ struct server *fwlc_get_next_server(struct proxy *p, struct server *srvtoavoid)
	if (!srv)
		srv = avoided;
 out:
	SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	return srv;
}

@ -470,7 +470,7 @@ struct server *fwrr_get_next_server(struct proxy *p, struct server *srvtoavoid)
	struct fwrr_group *grp;
	int switched;

	SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
	HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
	if (p->srv_act)
		grp = &p->lbprm.fwrr.act;
	else if (p->lbprm.fbck) {
@ -564,7 +564,7 @@ struct server *fwrr_get_next_server(struct proxy *p, struct server *srvtoavoid)
		}
	}
 out:
	SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	return srv;
}

@ -208,7 +208,7 @@ struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid)
	int newidx, avoididx;
	struct server *srv, *avoided;

	SPIN_LOCK(LBPRM_LOCK, &px->lbprm.lock);
	HA_SPIN_LOCK(LBPRM_LOCK, &px->lbprm.lock);
	if (px->lbprm.tot_weight == 0) {
		avoided = NULL;
		goto out;
@ -240,7 +240,7 @@ struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid)
	px->lbprm.map.rr_idx = avoididx;

 out:
	SPIN_UNLOCK(LBPRM_LOCK, &px->lbprm.lock);
	HA_SPIN_UNLOCK(LBPRM_LOCK, &px->lbprm.lock);
	/* return NULL or srvtoavoid if found */
	return avoided;
}

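The round-robin picker above keeps its shared cursor (px->lbprm.map.rr_idx) consistent by performing the whole read-pick-advance sequence under the lbprm lock. A deliberately simplified sketch of that idea, mutex-based and ignoring weights and the map itself (the function name and the one-step skip rule are illustrative assumptions, not the real map semantics):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lb_lock = PTHREAD_MUTEX_INITIALIZER;
static int rr_idx;

/* Pick the next index round-robin, skipping <avoid> once when possible.
 * The shared cursor is read and advanced under one lock, as
 * map_get_server_rr() does with the lbprm lock above. */
static int pick_next(int nbsrv, int avoid)
{
	int picked;

	pthread_mutex_lock(&lb_lock);
	picked = rr_idx % nbsrv;
	if (picked == avoid && nbsrv > 1)
		picked = (picked + 1) % nbsrv;
	rr_idx = picked + 1;
	pthread_mutex_unlock(&lb_lock);
	return picked;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("picked srv%d\n", pick_next(3, 1));
	return 0;
}
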
@ -60,7 +60,7 @@ static void __do_unbind_listener(struct listener *listener, int do_close);
 */
static void enable_listener(struct listener *listener)
{
	SPIN_LOCK(LISTENER_LOCK, &listener->lock);
	HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
	if (listener->state == LI_LISTEN) {
		if ((global.mode & (MODE_DAEMON | MODE_MWORKER)) &&
		    listener->bind_conf->bind_proc &&
@ -83,7 +83,7 @@ static void enable_listener(struct listener *listener)
			listener->state = LI_FULL;
		}
	}
	SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
	HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
}

/* This function removes the specified listener's file descriptor from the
@ -92,19 +92,19 @@ static void enable_listener(struct listener *listener)
 */
static void disable_listener(struct listener *listener)
{
	SPIN_LOCK(LISTENER_LOCK, &listener->lock);
	HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
	if (listener->state < LI_READY)
		goto end;
	if (listener->state == LI_READY)
		fd_stop_recv(listener->fd);
	if (listener->state == LI_LIMITED) {
		SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		LIST_DEL(&listener->wait_queue);
		SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
	}
	listener->state = LI_LISTEN;
  end:
	SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
	HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
}

/* This function tries to temporarily disable a listener, depending on the OS
@ -118,7 +118,7 @@ int pause_listener(struct listener *l)
{
	int ret = 1;

	SPIN_LOCK(LISTENER_LOCK, &l->lock);
	HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);

	if (l->state <= LI_ZOMBIE)
		goto end;
@ -138,15 +138,15 @@ int pause_listener(struct listener *l)
	}

	if (l->state == LI_LIMITED) {
		SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		LIST_DEL(&l->wait_queue);
		SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
	}

	fd_stop_recv(l->fd);
	l->state = LI_PAUSED;
  end:
	SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
	HA_SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
	return ret;
}

@ -164,7 +164,7 @@ static int __resume_listener(struct listener *l)
{
	int ret = 1;

	SPIN_LOCK(LISTENER_LOCK, &l->lock);
	HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);

	if ((global.mode & (MODE_DAEMON | MODE_MWORKER)) &&
	    l->bind_conf->bind_proc &&
@ -213,7 +213,7 @@ static int __resume_listener(struct listener *l)
	fd_want_recv(l->fd);
	l->state = LI_READY;
  end:
	SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
	HA_SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
	return ret;
}

@ -221,9 +221,9 @@ int resume_listener(struct listener *l)
{
	int ret;

	SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
	HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
	ret = __resume_listener(l);
	SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
	HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
	return ret;
}

@ -237,9 +237,9 @@ static void listener_full(struct listener *l)
{
	if (l->state >= LI_READY) {
		if (l->state == LI_LIMITED) {
			SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
			HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
			LIST_DEL(&l->wait_queue);
			SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
			HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		}

		fd_stop_recv(l->fd);
@ -256,9 +256,9 @@ static void listener_full(struct listener *l)
static void limit_listener(struct listener *l, struct list *list)
{
	if (l->state == LI_READY) {
		SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		LIST_ADDQ(list, &l->wait_queue);
		SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		fd_stop_recv(l->fd);
		l->state = LI_LIMITED;
	}
@ -298,7 +298,7 @@ void dequeue_all_listeners(struct list *list)
{
	struct listener *listener, *l_back;

	SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
	HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
	list_for_each_entry_safe(listener, l_back, list, wait_queue) {
		/* This cannot fail because the listeners are by definition in
		 * the LI_LIMITED state. The function also removes the entry
@ -306,7 +306,7 @@ void dequeue_all_listeners(struct list *list)
		 */
		__resume_listener(listener);
	}
	SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
	HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
}

/* must be called with the lock held */
@ -316,9 +316,9 @@ static void __do_unbind_listener(struct listener *listener, int do_close)
	fd_stop_recv(listener->fd);

	if (listener->state == LI_LIMITED) {
		SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		LIST_DEL(&listener->wait_queue);
		SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
		HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
	}

	if (listener->state >= LI_PAUSED) {
@ -334,9 +334,9 @@ static void __do_unbind_listener(struct listener *listener, int do_close)

static void do_unbind_listener(struct listener *listener, int do_close)
{
	SPIN_LOCK(LISTENER_LOCK, &listener->lock);
	HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
	__do_unbind_listener(listener, do_close);
	SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
	HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
}

/* This function closes the listening socket for the specified listener,
@ -406,7 +406,7 @@ int create_listeners(struct bind_conf *bc, const struct sockaddr_storage *ss,

		proto->add(l, port);

		SPIN_INIT(&l->lock);
		HA_SPIN_INIT(&l->lock);
		HA_ATOMIC_ADD(&jobs, 1);
		HA_ATOMIC_ADD(&listeners, 1);
	}
@ -424,13 +424,13 @@ void delete_listener(struct listener *listener)
	if (listener->state != LI_ASSIGNED)
		return;

	SPIN_LOCK(LISTENER_LOCK, &listener->lock);
	HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
	listener->state = LI_INIT;
	LIST_DEL(&listener->proto_list);
	listener->proto->nb_listeners--;
	HA_ATOMIC_SUB(&jobs, 1);
	HA_ATOMIC_SUB(&listeners, 1);
	SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
	HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
}

/* This function is called on a read event from a listening socket, corresponding
@ -449,7 +449,7 @@ void listener_accept(int fd)
	static int accept4_broken;
#endif

	if (SPIN_TRYLOCK(LISTENER_LOCK, &l->lock))
	if (HA_SPIN_TRYLOCK(LISTENER_LOCK, &l->lock))
		return;

	if (unlikely(l->nbconn >= l->maxconn)) {
@ -657,7 +657,7 @@ void listener_accept(int fd)
		limit_listener(l, &global_listener_queue);
		task_schedule(global_listener_queue_task, tick_first(expire, global_listener_queue_task->expire));
 end:
	SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
	HA_SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
}

/* Notify the listener that a connection initiated from it was released. This
@ -1019,7 +1019,7 @@ static void __listener_init(void)
	sample_register_fetches(&smp_kws);
	acl_register_keywords(&acl_kws);
	bind_register_keywords(&bind_kws);
	SPIN_INIT(&lq_lock);
	HA_SPIN_INIT(&lq_lock);
}

/*

52	src/map.c
@ -325,16 +325,16 @@ static int cli_io_handler_pat_list(struct appctx *appctx)
		 * this pointer. We know we have reached the end when this
		 * pointer points back to the head of the streams list.
		 */
		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		LIST_INIT(&appctx->ctx.map.bref.users);
		appctx->ctx.map.bref.ref = appctx->ctx.map.ref->head.n;
		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		appctx->st2 = STAT_ST_LIST;
		/* fall through */

	case STAT_ST_LIST:

		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);

		if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users)) {
			LIST_DEL(&appctx->ctx.map.bref.users);
@ -360,7 +360,7 @@ static int cli_io_handler_pat_list(struct appctx *appctx)
				 * this stream's users so that it can remove us upon termination.
				 */
				LIST_ADDQ(&elt->back_refs, &appctx->ctx.map.bref.users);
				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
				HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
				si_applet_cant_put(si);
				return 0;
			}
@ -368,7 +368,7 @@ static int cli_io_handler_pat_list(struct appctx *appctx)
			/* get next list entry and check the end of the list */
			appctx->ctx.map.bref.ref = elt->list.n;
		}
		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		appctx->st2 = STAT_ST_FIN;
		/* fall through */

@ -456,7 +456,7 @@ static int cli_io_handler_map_lookup(struct appctx *appctx)
		/* fall through */

	case STAT_ST_LIST:
		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		/* for each lookup type */
		while (appctx->ctx.map.expr) {
			/* initialise chunk to build new message */
@ -542,7 +542,7 @@ static int cli_io_handler_map_lookup(struct appctx *appctx)
				/* let's try again later from this stream. We add ourselves into
				 * this stream's users so that it can remove us upon termination.
				 */
				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
				HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
				si_applet_cant_put(si);
				return 0;
			}
@ -551,7 +551,7 @@ static int cli_io_handler_map_lookup(struct appctx *appctx)
			appctx->ctx.map.expr = pat_expr_get_next(appctx->ctx.map.expr,
			                                         &appctx->ctx.map.ref->pat);
		}
		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		appctx->st2 = STAT_ST_FIN;
		/* fall through */

@ -628,10 +628,10 @@ static int cli_parse_get_map(char **args, struct appctx *appctx, void *private)
static void cli_release_show_map(struct appctx *appctx)
{
	if (appctx->st2 == STAT_ST_LIST) {
		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users))
			LIST_DEL(&appctx->ctx.map.bref.users);
		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
	}
}

@ -728,32 +728,32 @@ static int cli_parse_set_map(char **args, struct appctx *appctx, void *private)

			/* Try to delete the entry. */
			err = NULL;
			SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
			HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
			if (!pat_ref_set_by_id(appctx->ctx.map.ref, ref, args[4], &err)) {
				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
				HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
				if (err)
					memprintf(&err, "%s.\n", err);
				appctx->ctx.cli.err = err;
				appctx->st0 = CLI_ST_PRINT_FREE;
				return 1;
			}
			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
			HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		}
		else {
			/* Else, use the entry identifier as pattern
			 * string, and update the value.
			 */
			err = NULL;
			SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
			HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
			if (!pat_ref_set(appctx->ctx.map.ref, args[3], args[4], &err)) {
				SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
				HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
				if (err)
					memprintf(&err, "%s.\n", err);
				appctx->ctx.cli.err = err;
				appctx->st0 = CLI_ST_PRINT_FREE;
				return 1;
			}
			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
			HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		}

		/* The set is done, send message. */
@ -825,12 +825,12 @@ static int cli_parse_add_map(char **args, struct appctx *appctx, void *private)

		/* Add value. */
		err = NULL;
		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		if (appctx->ctx.map.display_flags == PAT_REF_MAP)
			ret = pat_ref_add(appctx->ctx.map.ref, args[3], args[4], &err);
		else
			ret = pat_ref_add(appctx->ctx.map.ref, args[3], NULL, &err);
		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		if (!ret) {
			if (err)
				memprintf(&err, "%s.\n", err);
@ -910,31 +910,31 @@ static int cli_parse_del_map(char **args, struct appctx *appctx, void *private)
		}

		/* Try to delete the entry. */
		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		if (!pat_ref_delete_by_id(appctx->ctx.map.ref, ref)) {
			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
			HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
			/* The entry is not found, send message. */
			appctx->ctx.cli.severity = LOG_ERR;
			appctx->ctx.cli.msg = "Key not found.\n";
			appctx->st0 = CLI_ST_PRINT;
			return 1;
		}
		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
	}
	else {
		/* Else, use the entry identifier as pattern
		 * string and try to delete the entry.
		 */
		SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		if (!pat_ref_delete(appctx->ctx.map.ref, args[3])) {
			SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
			HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
			/* The entry is not found, send message. */
			appctx->ctx.cli.severity = LOG_ERR;
			appctx->ctx.cli.msg = "Key not found.\n";
			appctx->st0 = CLI_ST_PRINT;
			return 1;
		}
		SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
		HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
	}

	/* The deletion is done, send message. */
@ -983,9 +983,9 @@ static int cli_parse_clear_map(char **args, struct appctx *appctx, void *private
	}

	/* Clear all. */
	SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
	HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
	pat_ref_prune(appctx->ctx.map.ref);
	SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
	HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);

	/* return response */
	appctx->st0 = CLI_ST_PROMPT;

20	src/memory.c
@ -93,7 +93,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
		LIST_ADDQ(start, &pool->list);
	}
	pool->users++;
	SPIN_INIT(&pool->lock);
	HA_SPIN_INIT(&pool->lock);
	return pool;
}

@ -143,9 +143,9 @@ void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	SPIN_LOCK(POOL_LOCK, &pool->lock);
	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	ptr = __pool_refill_alloc(pool, avail);
	SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	return ptr;
}
/*
@ -157,7 +157,7 @@ void pool_flush2(struct pool_head *pool)
	if (!pool)
		return;

	SPIN_LOCK(POOL_LOCK, &pool->lock);
	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	next = pool->free_list;
	while (next) {
		temp = next;
@ -166,7 +166,7 @@ void pool_flush2(struct pool_head *pool)
		free(temp);
	}
	pool->free_list = next;
	SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	/* here, we should have pool->allocate == pool->used */
}

@ -192,7 +192,7 @@ void pool_gc2(struct pool_head *pool_ctx)
		void *temp, *next;
		//qfprintf(stderr, "Flushing pool %s\n", entry->name);
		if (entry != pool_ctx)
			SPIN_LOCK(POOL_LOCK, &entry->lock);
			HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
		next = entry->free_list;
		while (next &&
		       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
@ -203,7 +203,7 @@ void pool_gc2(struct pool_head *pool_ctx)
		}
		entry->free_list = next;
		if (entry != pool_ctx)
			SPIN_UNLOCK(POOL_LOCK, &entry->lock);
			HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
	}

	HA_ATOMIC_STORE(&recurse, 0);
@ -225,7 +225,7 @@ void *pool_destroy2(struct pool_head *pool)
		pool->users--;
		if (!pool->users) {
			LIST_DEL(&pool->list);
			SPIN_DESTROY(&pool->lock);
			HA_SPIN_DESTROY(&pool->lock);
			free(pool);
		}
	}
@ -242,7 +242,7 @@ void dump_pools_to_trash()
	allocated = used = nbpools = 0;
	chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
	list_for_each_entry(entry, &pools, list) {
		SPIN_LOCK(POOL_LOCK, &entry->lock);
		HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
		chunk_appendf(&trash, "  - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n",
			      entry->name, entry->size, entry->allocated,
			      entry->size * entry->allocated, entry->used, entry->failed,
@ -251,7 +251,7 @@ void dump_pools_to_trash()
		allocated += entry->allocated * entry->size;
		used += entry->used * entry->size;
		nbpools++;
		SPIN_UNLOCK(POOL_LOCK, &entry->lock);
		HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
	}
	chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
		      nbpools, allocated, used);

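pool_refill_alloc() and pool_flush2() above simply bracket an existing free-list walk with the pool's spinlock. For readers who want the shape of that structure in isolation, here is a self-contained sketch of a locked free-list pool (the struct layout and function names are illustrative assumptions, not HAProxy's pool_head API):

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

/* Minimal locked free-list: get() pops a cached node or mallocs one,
 * put() pushes it back, flush() frees the whole chain, detaching it
 * under the lock first, mirroring the pool_flush2() loop above. */
struct node { struct node *next; };

struct pool {
	pthread_spinlock_t lock;
	struct node *free_list;
	size_t size; /* must be >= sizeof(struct node) */
};

static void *pool_get(struct pool *p)
{
	struct node *n;

	pthread_spin_lock(&p->lock);
	n = p->free_list;
	if (n)
		p->free_list = n->next;
	pthread_spin_unlock(&p->lock);
	return n ? (void *)n : malloc(p->size);
}

static void pool_put(struct pool *p, void *ptr)
{
	struct node *n = ptr;

	pthread_spin_lock(&p->lock);
	n->next = p->free_list;
	p->free_list = n;
	pthread_spin_unlock(&p->lock);
}

static void pool_flush(struct pool *p)
{
	struct node *n, *next;

	pthread_spin_lock(&p->lock);
	next = p->free_list;
	p->free_list = NULL;
	pthread_spin_unlock(&p->lock);
	for (n = next; n; n = next) {
		next = n->next;
		free(n);
	}
}

int main(void)
{
	struct pool p = { .free_list = NULL, .size = 64 };

	pthread_spin_init(&p.lock, PTHREAD_PROCESS_PRIVATE);
	void *a = pool_get(&p);
	pool_put(&p, a);
	pool_flush(&p);
	printf("ok\n");
	return 0;
}
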
16	src/mux_h2.c
@ -233,9 +233,9 @@ static inline struct buffer *h2_get_dbuf(struct h2c *h2c)
	    unlikely((buf = b_alloc_margin(&h2c->dbuf, 0)) == NULL)) {
		h2c->dbuf_wait.target = h2c->conn;
		h2c->dbuf_wait.wakeup_cb = h2_dbuf_available;
		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		LIST_ADDQ(&buffer_wq, &h2c->dbuf_wait.list);
		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		__conn_xprt_stop_recv(h2c->conn);
	}
	return buf;
@ -289,9 +289,9 @@ static inline struct buffer *h2_get_mbuf(struct h2c *h2c)
	    unlikely((buf = b_alloc_margin(&h2c->mbuf, 0)) == NULL)) {
		h2c->mbuf_wait.target = h2c;
		h2c->mbuf_wait.wakeup_cb = h2_mbuf_available;
		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		LIST_ADDQ(&buffer_wq, &h2c->mbuf_wait.list);
		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);

		/* FIXME: we should in fact only block the direction being
		 * currently used. For now it will be enough like this.
@ -425,14 +425,14 @@ static void h2_release(struct connection *conn)
	if (h2c) {
		hpack_dht_free(h2c->ddht);
		h2_release_dbuf(h2c);
		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		LIST_DEL(&h2c->dbuf_wait.list);
		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);

		h2_release_mbuf(h2c);
		SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		LIST_DEL(&h2c->mbuf_wait.list);
		SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
		HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);

		if (h2c->task) {
			task_delete(h2c->task);

@ -489,15 +489,15 @@ struct pattern *pat_match_str(struct sample *smp, struct pattern_expr *expr, int
	if (pat_lru_tree) {
		unsigned long long seed = pat_lru_seed ^ (long)expr;

		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
				pat_lru_tree, expr, expr->revision);
		if (!lru) {
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		}
		else if (lru->domain) {
			ret = lru->data;
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			return ret;
		}
	}
@ -519,7 +519,7 @@ struct pattern *pat_match_str(struct sample *smp, struct pattern_expr *expr, int

	if (lru) {
		lru64_commit(lru, ret, expr, expr->revision, NULL);
		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
	}

	return ret;
@ -536,15 +536,15 @@ struct pattern *pat_match_bin(struct sample *smp, struct pattern_expr *expr, int
	if (pat_lru_tree) {
		unsigned long long seed = pat_lru_seed ^ (long)expr;

		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
				pat_lru_tree, expr, expr->revision);
		if (!lru) {
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		}
		else if (lru->domain) {
			ret = lru->data;
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			return ret;
		}
	}
@ -563,7 +563,7 @@ struct pattern *pat_match_bin(struct sample *smp, struct pattern_expr *expr, int

	if (lru) {
		lru64_commit(lru, ret, expr, expr->revision, NULL);
		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
	}

	return ret;
@ -606,15 +606,15 @@ struct pattern *pat_match_reg(struct sample *smp, struct pattern_expr *expr, int
	if (pat_lru_tree) {
		unsigned long long seed = pat_lru_seed ^ (long)expr;

		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
				pat_lru_tree, expr, expr->revision);
		if (!lru) {
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		}
		else if (lru->domain) {
			ret = lru->data;
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			return ret;
		}
	}
@ -630,7 +630,7 @@ struct pattern *pat_match_reg(struct sample *smp, struct pattern_expr *expr, int

	if (lru) {
		lru64_commit(lru, ret, expr, expr->revision, NULL);
		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
	}

	return ret;
@ -675,15 +675,15 @@ struct pattern *pat_match_beg(struct sample *smp, struct pattern_expr *expr, int
	if (pat_lru_tree) {
		unsigned long long seed = pat_lru_seed ^ (long)expr;

		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
				pat_lru_tree, expr, expr->revision);
		if (!lru) {
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		}
		else if (lru->domain) {
			ret = lru->data;
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			return ret;
		}
	}
@ -705,7 +705,7 @@ struct pattern *pat_match_beg(struct sample *smp, struct pattern_expr *expr, int

	if (lru) {
		lru64_commit(lru, ret, expr, expr->revision, NULL);
		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
	}

	return ret;
@ -723,15 +723,15 @@ struct pattern *pat_match_end(struct sample *smp, struct pattern_expr *expr, int
	if (pat_lru_tree) {
		unsigned long long seed = pat_lru_seed ^ (long)expr;

		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
				pat_lru_tree, expr, expr->revision);
		if (!lru) {
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		}
		else if (lru->domain) {
			ret = lru->data;
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			return ret;
		}
	}
@ -753,7 +753,7 @@ struct pattern *pat_match_end(struct sample *smp, struct pattern_expr *expr, int

	if (lru) {
		lru64_commit(lru, ret, expr, expr->revision, NULL);
		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
	}

	return ret;
@ -775,15 +775,15 @@ struct pattern *pat_match_sub(struct sample *smp, struct pattern_expr *expr, int
	if (pat_lru_tree) {
		unsigned long long seed = pat_lru_seed ^ (long)expr;

		SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
				pat_lru_tree, expr, expr->revision);
		if (!lru) {
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		}
		else if (lru->domain) {
			ret = lru->data;
			SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
			return ret;
		}
	}
@ -819,7 +819,7 @@ struct pattern *pat_match_sub(struct sample *smp, struct pattern_expr *expr, int
 leave:
	if (lru) {
		lru64_commit(lru, ret, expr, expr->revision, NULL);
		SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
		HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
	}

	return ret;
@ -1765,11 +1765,11 @@ static inline int pat_ref_set_elt(struct pat_ref *ref, struct pat_ref_elt *elt,
		if (!expr->pat_head->parse_smp)
			continue;

		RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
		HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
		data = pattern_find_smp(expr, elt);
		if (data && *data && !expr->pat_head->parse_smp(sample, *data))
			*data = NULL;
		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
		HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
	}

	/* free old sample only when all exprs are updated */
@ -1872,7 +1872,7 @@ struct pat_ref *pat_ref_new(const char *reference, const char *display, unsigned

	LIST_INIT(&ref->head);
	LIST_INIT(&ref->pat);
	SPIN_INIT(&ref->lock);
	HA_SPIN_INIT(&ref->lock);
	LIST_ADDQ(&pattern_reference, &ref->list);

	return ref;
@ -1991,14 +1991,14 @@ int pat_ref_push(struct pat_ref_elt *elt, struct pattern_expr *expr,
		return 0;
	}

	RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
	HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
	/* index pattern */
	if (!expr->pat_head->index(expr, &pattern, err)) {
		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
		HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
		free(data);
		return 0;
	}
	RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
	HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);

	return 1;
}
@ -2073,9 +2073,9 @@ void pat_ref_reload(struct pat_ref *ref, struct pat_ref *replace)
	struct pattern pattern;


	SPIN_LOCK(PATREF_LOCK, &ref->lock);
	HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
	list_for_each_entry(expr, &ref->pat, list) {
		RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
		HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
	}

	/* all expr are locked, we can safely remove all pat_ref */
@ -2145,9 +2145,9 @@ void pat_ref_reload(struct pat_ref *ref, struct pat_ref *replace)
				continue;
			}
		}
		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
		HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
	}
	SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
	HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
}

/* This function prune all entries of <ref>. This function
@ -2160,9 +2160,9 @@ void pat_ref_prune(struct pat_ref *ref)
	struct bref *bref, *back;

	list_for_each_entry(expr, &ref->pat, list) {
		RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
		HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
		expr->pat_head->prune(expr);
		RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
		HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
	}

	/* we trash pat_ref_elt in a second time to ensure that data is
@ -2267,7 +2267,7 @@ struct pattern_expr *pattern_new_expr(struct pattern_head *head, struct pat_ref

		expr->ref = ref;

		RWLOCK_INIT(&expr->lock);
		HA_RWLOCK_INIT(&expr->lock);

		/* We must free this pattern if it is no more used. */
		list->do_free = 1;
@ -2579,7 +2579,7 @@ struct pattern *pattern_exec_match(struct pattern_head *head, struct sample *smp
		return NULL;

	list_for_each_entry(list, &head->head, list) {
		RWLOCK_RDLOCK(PATEXP_LOCK, &list->expr->lock);
		HA_RWLOCK_RDLOCK(PATEXP_LOCK, &list->expr->lock);
		pat = head->match(smp, list->expr, fill);
		if (pat) {
			/* We duplicate the pattern cause it could be modified
@ -2610,10 +2610,10 @@ struct pattern *pattern_exec_match(struct pattern_head *head, struct sample *smp
				}
				pat->data = &static_sample_data;
			}
			RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
			HA_RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
			return pat;
		}
		RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
		HA_RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
	}
	return NULL;
}
@ -2627,9 +2627,9 @@ void pattern_prune(struct pattern_head *head)
		LIST_DEL(&list->list);
		if (list->do_free) {
			LIST_DEL(&list->expr->list);
			RWLOCK_WRLOCK(PATEXP_LOCK, &list->expr->lock);
			HA_RWLOCK_WRLOCK(PATEXP_LOCK, &list->expr->lock);
			head->prune(list->expr);
			RWLOCK_WRUNLOCK(PATEXP_LOCK, &list->expr->lock);
			HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &list->expr->lock);
			free(list->expr);
		}
		free(list);
@ -2676,9 +2676,9 @@ struct sample_data **pattern_find_smp(struct pattern_expr *expr, struct pat_ref_
 */
int pattern_delete(struct pattern_expr *expr, struct pat_ref_elt *ref)
{
	RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
	HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
	expr->pat_head->delete(expr, ref);
	RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
	HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
	return 1;
}

@ -2694,7 +2694,7 @@ void pattern_finalize_config(void)
	pat_lru_seed = random();
	if (global.tune.pattern_cache) {
		pat_lru_tree = lru64_new(global.tune.pattern_cache);
		SPIN_INIT(&pat_lru_tree_lock);
		HA_SPIN_INIT(&pat_lru_tree_lock);
	}

	list_for_each_entry(ref, &pattern_reference, list) {

src/peers.c
90
src/peers.c
@ -319,7 +319,7 @@ static int peer_prepare_updatemsg(struct stksess *ts, struct shared_table *st, u
|
||||
cursor += st->table->key_size;
|
||||
}
|
||||
|
||||
RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
/* encode values */
|
||||
for (data_type = 0 ; data_type < STKTABLE_DATA_TYPES ; data_type++) {
|
||||
|
||||
@ -359,7 +359,7 @@ static int peer_prepare_updatemsg(struct stksess *ts, struct shared_table *st, u
|
||||
}
|
||||
}
|
||||
}
|
||||
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
|
||||
/* Compute datalen */
|
||||
datalen = (cursor - datamsg);
|
||||
@ -510,7 +510,7 @@ static void peer_session_release(struct appctx *appctx)
|
||||
|
||||
/* peer session identified */
|
||||
if (peer) {
|
||||
SPIN_LOCK(PEER_LOCK, &peer->lock);
|
||||
HA_SPIN_LOCK(PEER_LOCK, &peer->lock);
|
||||
if (peer->appctx == appctx) {
|
||||
/* Re-init current table pointers to force announcement on re-connect */
|
||||
peer->remote_table = peer->last_local_table = NULL;
|
||||
@ -527,7 +527,7 @@ static void peer_session_release(struct appctx *appctx)
|
||||
peer->flags &= PEER_TEACH_RESET;
|
||||
peer->flags &= PEER_LEARN_RESET;
|
||||
}
|
||||
SPIN_UNLOCK(PEER_LOCK, &peer->lock);
|
||||
HA_SPIN_UNLOCK(PEER_LOCK, &peer->lock);
|
||||
task_wakeup(peers->sync_task, TASK_WOKEN_MSG);
|
||||
}
|
||||
}
|
||||
@ -692,7 +692,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
goto switchstate;
|
||||
}
|
||||
|
||||
SPIN_LOCK(PEER_LOCK, &curpeer->lock);
|
||||
HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
|
||||
if (curpeer->appctx && curpeer->appctx != appctx) {
|
||||
if (curpeer->local) {
|
||||
/* Local connection, reply a retry */
|
||||
@ -726,7 +726,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
|
||||
if (!curpeer) {
|
||||
curpeer = appctx->ctx.peers.ptr;
|
||||
SPIN_LOCK(PEER_LOCK, &curpeer->lock);
|
||||
HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
|
||||
if (curpeer->appctx != appctx) {
|
||||
appctx->st0 = PEER_SESS_ST_END;
|
||||
goto switchstate;
|
||||
@ -787,7 +787,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
|
||||
if (!curpeer) {
|
||||
curpeer = appctx->ctx.peers.ptr;
|
||||
SPIN_LOCK(PEER_LOCK, &curpeer->lock);
|
||||
HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
|
||||
if (curpeer->appctx != appctx) {
|
||||
appctx->st0 = PEER_SESS_ST_END;
|
||||
goto switchstate;
|
||||
@ -826,7 +826,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
|
||||
if (!curpeer) {
|
||||
curpeer = appctx->ctx.peers.ptr;
|
||||
SPIN_LOCK(PEER_LOCK, &curpeer->lock);
|
||||
HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
|
||||
if (curpeer->appctx != appctx) {
|
||||
appctx->st0 = PEER_SESS_ST_END;
|
||||
goto switchstate;
|
||||
@ -913,7 +913,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
|
||||
if (!curpeer) {
|
||||
curpeer = appctx->ctx.peers.ptr;
|
||||
SPIN_LOCK(PEER_LOCK, &curpeer->lock);
|
||||
HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
|
||||
if (curpeer->appctx != appctx) {
|
||||
appctx->st0 = PEER_SESS_ST_END;
|
||||
goto switchstate;
|
||||
@ -1252,7 +1252,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
newts = NULL;
|
||||
}
|
||||
|
||||
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
|
||||
for (data_type = 0 ; data_type < STKTABLE_DATA_TYPES ; data_type++) {
|
||||
|
||||
@ -1264,7 +1264,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
data = intdecode(&msg_cur, msg_end);
|
||||
if (!msg_cur) {
|
||||
/* malformed message */
|
||||
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
stktable_touch_remote(st->table, ts, 1);
|
||||
appctx->st0 = PEER_SESS_ST_ERRPROTO;
|
||||
goto switchstate;
|
||||
@ -1281,7 +1281,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
data = intdecode(&msg_cur, msg_end);
|
||||
if (!msg_cur) {
|
||||
/* malformed message */
|
||||
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
stktable_touch_remote(st->table, ts, 1);
|
||||
appctx->st0 = PEER_SESS_ST_ERRPROTO;
|
||||
goto switchstate;
|
||||
@ -1298,7 +1298,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
data = intdecode(&msg_cur, msg_end);
|
||||
if (!msg_cur) {
|
||||
/* malformed message */
|
||||
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
stktable_touch_remote(st->table, ts, 1);
|
||||
appctx->st0 = PEER_SESS_ST_ERRPROTO;
|
||||
goto switchstate;
|
||||
@ -1320,7 +1320,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
data.curr_tick = tick_add(now_ms, -intdecode(&msg_cur, msg_end)) & ~0x1;
|
||||
if (!msg_cur) {
|
||||
/* malformed message */
|
||||
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
stktable_touch_remote(st->table, ts, 1);
|
||||
appctx->st0 = PEER_SESS_ST_ERRPROTO;
|
||||
goto switchstate;
|
||||
@ -1328,7 +1328,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
data.curr_ctr = intdecode(&msg_cur, msg_end);
|
||||
if (!msg_cur) {
|
||||
/* malformed message */
|
||||
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
stktable_touch_remote(st->table, ts, 1);
|
||||
appctx->st0 = PEER_SESS_ST_ERRPROTO;
|
||||
goto switchstate;
|
||||
@ -1336,7 +1336,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
data.prev_ctr = intdecode(&msg_cur, msg_end);
|
||||
if (!msg_cur) {
|
||||
/* malformed message */
|
||||
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
stktable_touch_remote(st->table, ts, 1);
|
||||
appctx->st0 = PEER_SESS_ST_ERRPROTO;
|
||||
goto switchstate;
|
||||
@ -1351,7 +1351,7 @@ static void peer_io_handler(struct appctx *appctx)
|
||||
}
|
||||
}
|
||||
|
||||
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
|
||||
stktable_touch_remote(st->table, ts, 1);
|
||||
|
||||
}
|
||||
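
Every "malformed message" branch above releases the write lock on the entry before switching to the protocol-error state; returning with the lock held would block every other thread touching that entry. A compact sketch of the same error-path discipline (hypothetical names, plain pthreads):

#include <pthread.h>

static pthread_rwlock_t entry_lock = PTHREAD_RWLOCK_INITIALIZER;

static int apply_update(const char *msg, const char *end)
{
	pthread_rwlock_wrlock(&entry_lock);
	if (msg >= end) {                        /* malformed message */
		pthread_rwlock_unlock(&entry_lock); /* always unlock first */
		return -1;                       /* then report the error */
	}
	/* ... decode and store the values under the lock ... */
	pthread_rwlock_unlock(&entry_lock);
	return 0;
}

int main(void)
{
	char buf[4] = "abc";
	return apply_update(buf, buf + sizeof(buf)) == 0 ? 0 : 1;
}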
@ -1463,7 +1463,7 @@ static void peer_io_handler(struct appctx *appctx)
}

if (!(curpeer->flags & PEER_F_TEACH_PROCESS)) {
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
if (!(curpeer->flags & PEER_F_LEARN_ASSIGN) &&
((int)(st->last_pushed - st->table->localupdate) < 0)) {
struct eb32_node *eb;

@ -1517,14 +1517,14 @@ static void peer_io_handler(struct appctx *appctx)
ts = eb32_entry(eb, struct stksess, upd);
updateid = ts->upd.key;
ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);

msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, 0);
if (!msglen) {
/* internal error: message does not fit in trash */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
appctx->st0 = PEER_SESS_ST_END;
goto switchstate;
}

@ -1533,9 +1533,9 @@ static void peer_io_handler(struct appctx *appctx)
repl = ci_putblk(si_ic(si), trash.str, msglen);
if (repl <= 0) {
/* no more write possible */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
if (repl == -1) {
goto full;
}

@ -1543,7 +1543,7 @@ static void peer_io_handler(struct appctx *appctx)
goto switchstate;
}

SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--;
st->last_pushed = updateid;
if ((int)(st->last_pushed - st->table->commitupdate) > 0)

@ -1552,7 +1552,7 @@ static void peer_io_handler(struct appctx *appctx)
new_pushed = 0;
}
}
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
}
else {
if (!(st->flags & SHTABLE_F_TEACH_STAGE1)) {

@ -1584,7 +1584,7 @@ static void peer_io_handler(struct appctx *appctx)

/* We force new pushed to 1 to force identifier in update message */
new_pushed = 1;
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
while (1) {
uint32_t msglen;
struct stksess *ts;

@ -1604,15 +1604,15 @@ static void peer_io_handler(struct appctx *appctx)
ts = eb32_entry(eb, struct stksess, upd);
updateid = ts->upd.key;
ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);

use_timed = !(curpeer->flags & PEER_F_DWNGRD);
msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, use_timed);
if (!msglen) {
/* internal error: message does not fit in trash */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
appctx->st0 = PEER_SESS_ST_END;
goto switchstate;
}

@ -1621,22 +1621,22 @@ static void peer_io_handler(struct appctx *appctx)
repl = ci_putblk(si_ic(si), trash.str, msglen);
if (repl <= 0) {
/* no more write possible */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
if (repl == -1) {
goto full;
}
appctx->st0 = PEER_SESS_ST_END;
goto switchstate;
}
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--;
st->last_pushed = updateid;
/* identifier may not be needed in next update message */
new_pushed = 0;
}
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
}

if (!(st->flags & SHTABLE_F_TEACH_STAGE2)) {

@ -1668,7 +1668,7 @@ static void peer_io_handler(struct appctx *appctx)

/* We force new pushed to 1 to force identifier in update message */
new_pushed = 1;
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
while (1) {
uint32_t msglen;
struct stksess *ts;

@ -1687,15 +1687,15 @@ static void peer_io_handler(struct appctx *appctx)
ts = eb32_entry(eb, struct stksess, upd);
updateid = ts->upd.key;
ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);

use_timed = !(curpeer->flags & PEER_F_DWNGRD);
msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, use_timed);
if (!msglen) {
/* internal error: message does not fit in trash */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
appctx->st0 = PEER_SESS_ST_END;
goto switchstate;
}

@ -1704,9 +1704,9 @@ static void peer_io_handler(struct appctx *appctx)
repl = ci_putblk(si_ic(si), trash.str, msglen);
if (repl <= 0) {
/* no more write possible */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
if (repl == -1) {
goto full;
}

@ -1714,13 +1714,13 @@ static void peer_io_handler(struct appctx *appctx)
goto switchstate;
}

SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--;
st->last_pushed = updateid;
/* identifier may not be needed in next update message */
new_pushed = 0;
}
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
}
}

@ -1803,7 +1803,7 @@ static void peer_io_handler(struct appctx *appctx)
}
case PEER_SESS_ST_END: {
if (curpeer) {
SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
HA_SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
curpeer = NULL;
}
si_shutw(si);

@ -1817,7 +1817,7 @@ static void peer_io_handler(struct appctx *appctx)
si_oc(si)->flags |= CF_READ_DONTWAIT;

if (curpeer)
SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
HA_SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
return;
full:
si_applet_cant_put(si);

@ -1973,7 +1973,7 @@ static struct task *process_peer_sync(struct task * task)

/* Acquire lock for all peers of the section */
for (ps = peers->remote; ps; ps = ps->next)
SPIN_LOCK(PEER_LOCK, &ps->lock);
HA_SPIN_LOCK(PEER_LOCK, &ps->lock);

if (!stopping) {
/* Normal case (not soft stop) */

@ -2147,7 +2147,7 @@ static struct task *process_peer_sync(struct task * task)

/* Release lock for all peers of the section */
for (ps = peers->remote; ps; ps = ps->next)
SPIN_UNLOCK(PEER_LOCK, &ps->lock);
HA_SPIN_UNLOCK(PEER_LOCK, &ps->lock);

/* Wakeup for re-connect */
return task;
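
The teach loops above never hold the table lock across ci_putblk(): the entry is pinned with ts->ref_cnt++, the lock is dropped while the update message is built and sent, then the lock is retaken to unpin. A reduced sketch of that pin/unlock/work/relock pattern (illustrative types, pthread spinlocks standing in for the HA_SPIN_* macros):

#include <pthread.h>

struct entry {
	int ref_cnt;                     /* pins the entry against purging */
};

static pthread_spinlock_t table_lock;

static int send_update(struct entry *e)
{
	pthread_spin_lock(&table_lock);
	e->ref_cnt++;                    /* pin: entry stays valid unlocked */
	pthread_spin_unlock(&table_lock);

	/* ... build and transmit the message, possibly slowly ... */
	int sent = 1;

	pthread_spin_lock(&table_lock);
	e->ref_cnt--;                    /* unpin on every path, sent or not */
	pthread_spin_unlock(&table_lock);
	return sent;
}

int main(void)
{
	struct entry e = { 0 };

	pthread_spin_init(&table_lock, PTHREAD_PROCESS_PRIVATE);
	return send_update(&e) ? 0 : 1;
}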
@ -2621,9 +2621,9 @@ http_req_get_intercept_rule(struct proxy *px, struct list *rules, struct stream

/* perform update */
/* returned code: 1=ok, 0=ko */
SPIN_LOCK(PATREF_LOCK, &ref->lock);
HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
pat_ref_delete(ref, key->str);
SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);

free_trash_chunk(key);
break;

@ -2649,10 +2649,10 @@ http_req_get_intercept_rule(struct proxy *px, struct list *rules, struct stream

/* perform update */
/* add entry only if it does not already exist */
SPIN_LOCK(PATREF_LOCK, &ref->lock);
HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
if (pat_ref_find_elt(ref, key->str) == NULL)
pat_ref_add(ref, key->str, NULL, NULL);
SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);

free_trash_chunk(key);
break;

@ -2737,7 +2737,7 @@ http_req_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
ptr1 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_CNT);
ptr2 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_RATE);
if (ptr1 || ptr2) {
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);

if (ptr1)
stktable_data_cast(ptr1, http_req_cnt)++;

@ -2746,7 +2746,7 @@ http_req_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
update_freq_ctr_period(&stktable_data_cast(ptr2, http_req_rate),
t->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);

RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
}

stkctr_set_flags(&s->stkctr[trk_idx(rule->action)], STKCTR_TRACK_CONTENT);

@ -2915,9 +2915,9 @@ http_res_get_intercept_rule(struct proxy *px, struct list *rules, struct stream

/* perform update */
/* returned code: 1=ok, 0=ko */
SPIN_LOCK(PATREF_LOCK, &ref->lock);
HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
pat_ref_delete(ref, key->str);
SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);

free_trash_chunk(key);
break;

@ -2980,14 +2980,14 @@ http_res_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
value->str[value->len] = '\0';

/* perform update */
SPIN_LOCK(PATREF_LOCK, &ref->lock);
HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
if (pat_ref_find_elt(ref, key->str) != NULL)
/* update entry if it exists */
pat_ref_set(ref, key->str, value->str, NULL);
else
/* insert a new entry */
pat_ref_add(ref, key->str, value->str, NULL);
SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
free_trash_chunk(key);
free_trash_chunk(value);
break;

@ -3015,7 +3015,7 @@ http_res_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
if (key && (ts = stktable_get_entry(t, key))) {
stream_track_stkctr(&s->stkctr[trk_idx(rule->action)], t, ts);

RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);

/* let's count a new HTTP request as it's the first time we do it */
ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_CNT);

@ -3045,7 +3045,7 @@ http_res_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
t->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1);
}

RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);

stkctr_set_flags(&s->stkctr[trk_idx(rule->action)], STKCTR_TRACK_CONTENT);
if (sess->fe != s->be)

@ -7755,7 +7755,7 @@ void http_capture_bad_message(struct proxy *proxy, struct error_snapshot *es, st
struct channel *chn = msg->chn;
int len1, len2;

SPIN_LOCK(PROXY_LOCK, &proxy->lock);
HA_SPIN_LOCK(PROXY_LOCK, &proxy->lock);
es->len = MIN(chn->buf->i, global.tune.bufsize);
len1 = chn->buf->data + chn->buf->size - chn->buf->p;
len1 = MIN(len1, es->len);

@ -7795,7 +7795,7 @@ void http_capture_bad_message(struct proxy *proxy, struct error_snapshot *es, st
es->b_tot = chn->total;
es->m_clen = msg->chunk_len;
es->m_blen = msg->body_len;
SPIN_UNLOCK(PROXY_LOCK, &proxy->lock);
HA_SPIN_UNLOCK(PROXY_LOCK, &proxy->lock);
}

/* Return in <vptr> and <vlen> the pointer and length of occurrence <occ> of
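
In the rules above, pat_ref_find_elt() and pat_ref_add() are deliberately called under a single PATREF lock acquisition; checking and inserting under two separate acquisitions would let two threads both miss the entry and both insert it. A minimal sketch of the same check-then-act idiom (a mutex and illustrative names, not HAProxy's API):

#include <pthread.h>
#include <string.h>

#define MAX_KEYS 16
static pthread_mutex_t ref_lock = PTHREAD_MUTEX_INITIALIZER;
static char keys[MAX_KEYS][64];
static int nb_keys;

/* caller must hold ref_lock */
static int find_key(const char *k)
{
	for (int i = 0; i < nb_keys; i++)
		if (strcmp(keys[i], k) == 0)
			return 1;
	return 0;
}

static void add_key_once(const char *k)
{
	pthread_mutex_lock(&ref_lock);       /* ~ HA_SPIN_LOCK(PATREF_LOCK, ...) */
	if (!find_key(k) && nb_keys < MAX_KEYS) {
		strncpy(keys[nb_keys], k, 63);   /* insert only if still absent */
		keys[nb_keys++][63] = '\0';
	}
	pthread_mutex_unlock(&ref_lock);     /* ~ HA_SPIN_UNLOCK(PATREF_LOCK, ...) */
}

int main(void)
{
	add_key_once("10.0.0.1");
	add_key_once("10.0.0.1");            /* second call is a no-op */
	return nb_keys == 1 ? 0 : 1;
}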
@ -762,7 +762,7 @@ void init_new_proxy(struct proxy *p)
/* initial uuid is unassigned (-1) */
p->uuid = -1;

SPIN_INIT(&p->lock);
HA_SPIN_INIT(&p->lock);
}

/*
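
init_new_proxy() above initializes the per-proxy lock together with the rest of the structure, so the lock is usable before the object can be reached by any other thread. An illustrative sketch of the same convention (names here are hypothetical):

#include <pthread.h>
#include <stdlib.h>

struct proxy_like {
	int uuid;
	pthread_spinlock_t lock;
};

static struct proxy_like *new_proxy(void)
{
	struct proxy_like *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->uuid = -1;                                          /* unassigned */
	pthread_spin_init(&p->lock, PTHREAD_PROCESS_PRIVATE);  /* ~ HA_SPIN_INIT */
	return p;
}

int main(void)
{
	struct proxy_like *p = new_proxy();

	if (p)
		pthread_spin_destroy(&p->lock);
	free(p);
	return 0;
}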
src/queue.c

@ -142,8 +142,8 @@ void process_srv_queue(struct server *s)
struct proxy *p = s->proxy;
int maxconn;

SPIN_LOCK(PROXY_LOCK, &p->lock);
SPIN_LOCK(SERVER_LOCK, &s->lock);
HA_SPIN_LOCK(PROXY_LOCK, &p->lock);
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);

/* First, check if we can handle some connections queued at the proxy. We
* will take as many as we can handle.

@ -156,8 +156,8 @@ void process_srv_queue(struct server *s)
break;
task_wakeup(strm->task, TASK_WOKEN_RES);
}
SPIN_UNLOCK(SERVER_LOCK, &s->lock);
SPIN_UNLOCK(PROXY_LOCK, &p->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock);
}

/* Adds the stream <strm> to the pending connection list of server <strm>->srv

@ -182,17 +182,17 @@ struct pendconn *pendconn_add(struct stream *strm)

if ((strm->flags & SF_ASSIGNED) && srv) {
p->srv = srv;
SPIN_LOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
LIST_ADDQ(&srv->pendconns, &p->list);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
count = HA_ATOMIC_ADD(&srv->nbpend, 1);
strm->logs.srv_queue_size += count;
HA_ATOMIC_UPDATE_MAX(&srv->counters.nbpend_max, count);
} else {
p->srv = NULL;
SPIN_LOCK(PROXY_LOCK, &strm->be->lock);
HA_SPIN_LOCK(PROXY_LOCK, &strm->be->lock);
LIST_ADDQ(&strm->be->pendconns, &p->list);
SPIN_UNLOCK(PROXY_LOCK, &strm->be->lock);
HA_SPIN_UNLOCK(PROXY_LOCK, &strm->be->lock);
count = HA_ATOMIC_ADD(&strm->be->nbpend, 1);
strm->logs.prx_queue_size += count;
HA_ATOMIC_UPDATE_MAX(&strm->be->be_counters.nbpend_max, count);

@ -209,7 +209,7 @@ int pendconn_redistribute(struct server *s)
struct pendconn *pc, *pc_bck;
int xferred = 0;

SPIN_LOCK(SERVER_LOCK, &s->lock);
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
list_for_each_entry_safe(pc, pc_bck, &s->pendconns, list) {
struct stream *strm = pc->strm;

@ -227,7 +227,7 @@ int pendconn_redistribute(struct server *s)
xferred++;
}
}
SPIN_UNLOCK(SERVER_LOCK, &s->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
return xferred;
}

@ -243,7 +243,7 @@ int pendconn_grab_from_px(struct server *s)
if (!srv_currently_usable(s))
return 0;

SPIN_LOCK(PROXY_LOCK, &s->proxy->lock);
HA_SPIN_LOCK(PROXY_LOCK, &s->proxy->lock);
for (xferred = 0; !s->maxconn || xferred < srv_dynamic_maxconn(s); xferred++) {
struct stream *strm;
struct pendconn *p;

@ -256,7 +256,7 @@ int pendconn_grab_from_px(struct server *s)
__pendconn_free(p);
task_wakeup(strm->task, TASK_WOKEN_RES);
}
SPIN_UNLOCK(PROXY_LOCK, &s->proxy->lock);
HA_SPIN_UNLOCK(PROXY_LOCK, &s->proxy->lock);
return xferred;
}

@ -268,15 +268,15 @@ int pendconn_grab_from_px(struct server *s)
void pendconn_free(struct pendconn *p)
{
if (p->srv) {
SPIN_LOCK(SERVER_LOCK, &p->srv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &p->srv->lock);
LIST_DEL(&p->list);
SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock);
HA_ATOMIC_SUB(&p->srv->nbpend, 1);
}
else {
SPIN_LOCK(SERVER_LOCK, &p->strm->be->lock);
HA_SPIN_LOCK(SERVER_LOCK, &p->strm->be->lock);
LIST_DEL(&p->list);
SPIN_UNLOCK(SERVER_LOCK, &p->strm->be->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &p->strm->be->lock);
HA_ATOMIC_SUB(&p->strm->be->nbpend, 1);
}
p->strm->pend_pos = NULL;
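
process_srv_queue() above acquires the proxy lock and then the server lock, and releases them in the reverse order; keeping one documented acquisition order for nested locks is what rules out lock-order deadlocks between threads that dequeue concurrently. A sketch of the idiom (hypothetical names):

#include <pthread.h>

static pthread_mutex_t proxy_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t server_lock = PTHREAD_MUTEX_INITIALIZER;

static void dequeue(void)
{
	/* order: proxy first, then server -- never the reverse */
	pthread_mutex_lock(&proxy_lock);
	pthread_mutex_lock(&server_lock);
	/* ... move pending connections from proxy queue to server ... */
	pthread_mutex_unlock(&server_lock);
	pthread_mutex_unlock(&proxy_lock);
}

int main(void)
{
	dequeue();
	return 0;
}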
src/server.c

@ -881,9 +881,9 @@ void srv_set_stopped(struct server *s, const char *reason, struct check *check)

srv_register_update(s);
for (srv = s->trackers; srv; srv = srv->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_set_stopped(srv, NULL, NULL);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
}
}

@ -923,9 +923,9 @@ void srv_set_running(struct server *s, const char *reason, struct check *check)

srv_register_update(s);
for (srv = s->trackers; srv; srv = srv->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_set_running(srv, NULL, NULL);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
}
}

@ -964,9 +964,9 @@ void srv_set_stopping(struct server *s, const char *reason, struct check *check)

srv_register_update(s);
for (srv = s->trackers; srv; srv = srv->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_set_stopping(srv, NULL, NULL);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
}
}

@ -1007,9 +1007,9 @@ void srv_set_admin_flag(struct server *s, enum srv_admin mode, const char *cause
mode = SRV_ADMF_IDRAIN;

for (srv = s->trackers; srv; srv = srv->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_set_admin_flag(srv, mode, cause);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
}
}

@ -1045,9 +1045,9 @@ void srv_clr_admin_flag(struct server *s, enum srv_admin mode)
mode = SRV_ADMF_IDRAIN;

for (srv = s->trackers; srv; srv = srv->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_clr_admin_flag(srv, mode);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
}
}

@ -1062,13 +1062,13 @@ static void srv_propagate_admin_state(struct server *srv)
return;

for (srv2 = srv->trackers; srv2; srv2 = srv2->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv2->lock);
HA_SPIN_LOCK(SERVER_LOCK, &srv2->lock);
if (srv->next_admin & (SRV_ADMF_MAINT | SRV_ADMF_CMAINT))
srv_set_admin_flag(srv2, SRV_ADMF_IMAINT, NULL);

if (srv->next_admin & SRV_ADMF_DRAIN)
srv_set_admin_flag(srv2, SRV_ADMF_IDRAIN, NULL);
SPIN_UNLOCK(SERVER_LOCK, &srv2->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &srv2->lock);
}
}

@ -2028,7 +2028,7 @@ int parse_server(const char *file, int linenum, char **args, struct proxy *curpr

/* Copy default server settings to new server settings. */
srv_settings_cpy(newsrv, &curproxy->defsrv, 0);
SPIN_INIT(&newsrv->lock);
HA_SPIN_INIT(&newsrv->lock);
cur_arg++;
} else {
newsrv = &curproxy->defsrv;

@ -2600,10 +2600,10 @@ static void srv_register_update(struct server *srv)
{
if (LIST_ISEMPTY(&srv->update_status)) {
THREAD_WANT_SYNC();
SPIN_LOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock);
HA_SPIN_LOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock);
if (LIST_ISEMPTY(&srv->update_status))
LIST_ADDQ(&updated_servers, &srv->update_status);
SPIN_UNLOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock);
HA_SPIN_UNLOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock);
}
}

@ -2789,7 +2789,7 @@ static void srv_update_state(struct server *srv, int version, char **params)
if (msg->len)
goto out;

SPIN_LOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
/* recover operational state and apply it to this server
* and all servers tracking this one */
switch (srv_op_state) {

@ -2919,7 +2919,7 @@ static void srv_update_state(struct server *srv, int version, char **params)

if (port_str)
srv->svc_port = port;
SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);

break;
default:

@ -3696,9 +3696,9 @@ int snr_resolution_error_cb(struct dns_requester *requester, int error_code)
s = objt_server(requester->owner);
if (!s)
return 1;
SPIN_LOCK(SERVER_LOCK, &s->lock);
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
snr_update_srv_status(s, 0);
SPIN_UNLOCK(SERVER_LOCK, &s->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
return 1;
}

@ -3731,18 +3731,18 @@ struct server *snr_check_ip_callback(struct server *srv, void *ip, unsigned char
* one used for the server found in the backend
* * the server found in the backend is not our current server
*/
SPIN_LOCK(SERVER_LOCK, &tmpsrv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &tmpsrv->lock);
if ((tmpsrv->hostname_dn == NULL) ||
(srv->hostname_dn_len != tmpsrv->hostname_dn_len) ||
(strcmp(srv->hostname_dn, tmpsrv->hostname_dn) != 0) ||
(srv->puid == tmpsrv->puid)) {
SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
continue;
}

/* If the server has been taken down, don't consider it */
if (tmpsrv->next_admin & SRV_ADMF_RMAINT) {
SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
continue;
}

@ -3754,10 +3754,10 @@ struct server *snr_check_ip_callback(struct server *srv, void *ip, unsigned char
memcmp(ip, &((struct sockaddr_in *)&tmpsrv->addr)->sin_addr, 4) == 0) ||
(tmpsrv->addr.ss_family == AF_INET6 &&
memcmp(ip, &((struct sockaddr_in6 *)&tmpsrv->addr)->sin6_addr, 16) == 0))) {
SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
return tmpsrv;
}
SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
}

@ -3789,7 +3789,7 @@ int srv_set_fqdn(struct server *srv, const char *hostname, int dns_locked)
int hostname_len, hostname_dn_len;

if (!dns_locked)
SPIN_LOCK(DNS_LOCK, &srv->resolvers->lock);
HA_SPIN_LOCK(DNS_LOCK, &srv->resolvers->lock);
/* run time DNS resolution was not active for this server
* and we can't enable it at run time for now.
*/

@ -3825,12 +3825,12 @@ int srv_set_fqdn(struct server *srv, const char *hostname, int dns_locked)

end:
if (!dns_locked)
SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
HA_SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
return 0;

err:
if (!dns_locked)
SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
HA_SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
return -1;
}

@ -4053,7 +4053,7 @@ static int cli_parse_set_server(char **args, struct appctx *appctx, void *privat
if (!sv)
return 1;

SPIN_LOCK(SERVER_LOCK, &sv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);

if (strcmp(args[3], "weight") == 0) {
warning = server_parse_weight_change_request(sv, args[4]);

@ -4220,7 +4220,7 @@ static int cli_parse_set_server(char **args, struct appctx *appctx, void *privat
appctx->st0 = CLI_ST_PRINT;
}
out_unlock:
SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 1;
}

@ -4427,7 +4427,7 @@ static struct cli_kw_list cli_kws = {{ },{
__attribute__((constructor))
static void __server_init(void)
{
SPIN_INIT(&updated_servers_lock);
HA_SPIN_INIT(&updated_servers_lock);
cli_register_kw(&cli_kws);
}
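
srv_register_update() above tests LIST_ISEMPTY() once without the lock as a cheap fast path, then re-tests it after taking the lock, since another thread may have registered the server in between. A minimal sketch of this check-lock-recheck idiom (illustrative names):

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int registered;               /* stands in for the list membership */

static void register_once(void)
{
	if (registered)                  /* unlocked fast path: likely accurate */
		return;
	pthread_mutex_lock(&list_lock);
	if (!registered)                 /* recheck now that we hold the lock */
		registered = 1;          /* ~ LIST_ADDQ(&updated_servers, ...) */
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	register_once();
	register_once();                 /* second call takes the fast path */
	return registered ? 0 : 1;
}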
@ -73,7 +73,7 @@ void __signal_process_queue()
struct signal_descriptor *desc;
sigset_t old_sig;

if (SPIN_TRYLOCK(SIGNALS_LOCK, &signals_lock))
if (HA_SPIN_TRYLOCK(SIGNALS_LOCK, &signals_lock))
return;

/* block signal delivery during processing */

@ -102,7 +102,7 @@ void __signal_process_queue()

/* restore signal delivery */
sigprocmask(SIG_SETMASK, &old_sig, NULL);
SPIN_UNLOCK(SIGNALS_LOCK, &signals_lock);
HA_SPIN_UNLOCK(SIGNALS_LOCK, &signals_lock);
}

/* perform minimal initializations, report 0 in case of error, 1 if OK. */

@ -114,7 +114,7 @@ int signal_init()
memset(signal_queue, 0, sizeof(signal_queue));
memset(signal_state, 0, sizeof(signal_state));

SPIN_INIT(&signals_lock);
HA_SPIN_INIT(&signals_lock);

/* Ensure signals are not blocked. Some shells or service managers may
* accidentally block all of our signals unfortunately, causing lots of

@ -150,7 +150,7 @@ void deinit_signals()
pool_free2(pool2_sig_handlers, sh);
}
}
SPIN_DESTROY(&signals_lock);
HA_SPIN_DESTROY(&signals_lock);
}

/* Register a function and an integer argument on a signal. A pointer to the
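
__signal_process_queue() above gives up immediately when the trylock fails: another thread is already draining the signal queue, so the work need not be done twice. Sketch with pthread spinlocks (pthread_spin_trylock(), like the macro it stands in for here, returns non-zero when the lock is busy):

#include <pthread.h>

static pthread_spinlock_t sig_lock;

static void process_queue(void)
{
	if (pthread_spin_trylock(&sig_lock) != 0)
		return;                  /* someone else is processing: give up */
	/* ... drain the pending-signal queue ... */
	pthread_spin_unlock(&sig_lock);
}

int main(void)
{
	pthread_spin_init(&sig_lock, PTHREAD_PROCESS_PRIVATE);
	process_queue();
	pthread_spin_destroy(&sig_lock);
	return 0;
}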
@ -218,15 +218,15 @@ void ssl_locking_function(int mode, int n, const char * file, int line)
{
if (mode & CRYPTO_LOCK) {
if (mode & CRYPTO_READ)
RWLOCK_RDLOCK(SSL_LOCK, &ssl_rwlocks[n]);
HA_RWLOCK_RDLOCK(SSL_LOCK, &ssl_rwlocks[n]);
else
RWLOCK_WRLOCK(SSL_LOCK, &ssl_rwlocks[n]);
HA_RWLOCK_WRLOCK(SSL_LOCK, &ssl_rwlocks[n]);
}
else {
if (mode & CRYPTO_READ)
RWLOCK_RDUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
HA_RWLOCK_RDUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
else
RWLOCK_WRUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
HA_RWLOCK_WRUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
}
}

@ -239,7 +239,7 @@ static int ssl_locking_init(void)
return -1;

for (i = 0 ; i < CRYPTO_num_locks() ; i++)
RWLOCK_INIT(&ssl_rwlocks[i]);
HA_RWLOCK_INIT(&ssl_rwlocks[i]);

CRYPTO_set_id_callback(ssl_id_function);
CRYPTO_set_locking_callback(ssl_locking_function);

@ -1795,15 +1795,15 @@ ssl_sock_assign_generated_cert(unsigned int key, struct bind_conf *bind_conf, SS
struct lru64 *lru = NULL;

if (ssl_ctx_lru_tree) {
RWLOCK_RDLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
HA_RWLOCK_RDLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
lru = lru64_lookup(key, ssl_ctx_lru_tree, bind_conf->ca_sign_cert, 0);
if (lru && lru->domain) {
if (ssl)
SSL_set_SSL_CTX(ssl, (SSL_CTX *)lru->data);
RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
return (SSL_CTX *)lru->data;
}
RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
}
return NULL;
}

@ -1826,16 +1826,16 @@ ssl_sock_set_generated_cert(SSL_CTX *ssl_ctx, unsigned int key, struct bind_conf
struct lru64 *lru = NULL;

if (ssl_ctx_lru_tree) {
RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
lru = lru64_get(key, ssl_ctx_lru_tree, bind_conf->ca_sign_cert, 0);
if (!lru) {
RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
return -1;
}
if (lru->domain && lru->data)
lru->free((SSL_CTX *)lru->data);
lru64_commit(lru, ssl_ctx, bind_conf->ca_sign_cert, 0, (void (*)(void *))SSL_CTX_free);
RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
return 0;
}
return -1;

@ -1861,7 +1861,7 @@ ssl_sock_generate_certificate(const char *servername, struct bind_conf *bind_con

key = ssl_sock_generated_cert_key(servername, strlen(servername));
if (ssl_ctx_lru_tree) {
RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
lru = lru64_get(key, ssl_ctx_lru_tree, cacert, 0);
if (lru && lru->domain)
ssl_ctx = (SSL_CTX *)lru->data;

@ -1870,7 +1870,7 @@ ssl_sock_generate_certificate(const char *servername, struct bind_conf *bind_con
lru64_commit(lru, ssl_ctx, cacert, 0, (void (*)(void *))SSL_CTX_free);
}
SSL_set_SSL_CTX(ssl, ssl_ctx);
RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
return 1;
}
else {

@ -4782,7 +4782,7 @@ ssl_sock_load_ca(struct bind_conf *bind_conf)
#if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES)
if (global_ssl.ctx_cache) {
ssl_ctx_lru_tree = lru64_new(global_ssl.ctx_cache);
RWLOCK_INIT(&ssl_ctx_lru_rwlock);
HA_RWLOCK_INIT(&ssl_ctx_lru_rwlock);
}
ssl_ctx_lru_seed = (unsigned int)time(NULL);
ssl_ctx_serial = now_ms;

@ -8803,7 +8803,7 @@ static void __ssl_sock_deinit(void)
#if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES)
if (ssl_ctx_lru_tree) {
lru64_destroy(ssl_ctx_lru_tree);
RWLOCK_DESTROY(&ssl_ctx_lru_rwlock);
HA_RWLOCK_DESTROY(&ssl_ctx_lru_rwlock);
}
#endif
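
The locking callback above implements the contract that pre-1.1.0 OpenSSL expects from multi-threaded applications: an array of CRYPTO_num_locks() locks, driven by the CRYPTO_LOCK and CRYPTO_READ mode bits. A reduced standalone version of that contract using pthread rwlocks (applies to OpenSSL < 1.1 only, where these callbacks still exist; names are illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <openssl/crypto.h>

static pthread_rwlock_t *ssl_locks;

static void locking_cb(int mode, int n, const char *file, int line)
{
	(void)file; (void)line;
	if (mode & CRYPTO_LOCK) {
		if (mode & CRYPTO_READ)
			pthread_rwlock_rdlock(&ssl_locks[n]);
		else
			pthread_rwlock_wrlock(&ssl_locks[n]);
	}
	else
		pthread_rwlock_unlock(&ssl_locks[n]); /* one unlock serves both */
}

static int init_ssl_locks(void)
{
	int i, nb = CRYPTO_num_locks();

	ssl_locks = calloc(nb, sizeof(*ssl_locks));
	if (!ssl_locks)
		return -1;
	for (i = 0; i < nb; i++)
		pthread_rwlock_init(&ssl_locks[i], NULL);
	CRYPTO_set_locking_callback(locking_cb);
	return 0;
}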
@ -2764,7 +2764,7 @@ static int stats_process_http_post(struct stream_interface *si)
reprocess = 1;
}
else if ((sv = findserver(px, value)) != NULL) {
SPIN_LOCK(SERVER_LOCK, &sv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
switch (action) {
case ST_ADM_ACTION_DISABLE:
if (!(sv->cur_admin & SRV_ADMF_FMAINT)) {

@ -2890,7 +2890,7 @@ static int stats_process_http_post(struct stream_interface *si)
}
break;
}
SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
} else {
/* the server name is unknown or ambiguous (duplicate names) */
total_servers++;
@ -61,9 +61,9 @@ void __stksess_free(struct stktable *t, struct stksess *ts)
*/
void stksess_free(struct stktable *t, struct stksess *ts)
{
SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
__stksess_free(t, ts);
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
}

/*

@ -90,11 +90,11 @@ int stksess_kill(struct stktable *t, struct stksess *ts, int decrefcnt)
{
int ret;

SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
if (decrefcnt)
ts->ref_cnt--;
ret = __stksess_kill(t, ts);
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);

return ret;
}

@ -126,7 +126,7 @@ static struct stksess *__stksess_init(struct stktable *t, struct stksess * ts)
ts->exp.node.leaf_p = NULL;
ts->upd.node.leaf_p = NULL;
ts->expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
RWLOCK_INIT(&ts->lock);
HA_RWLOCK_INIT(&ts->lock);
return ts;
}

@ -201,9 +201,9 @@ int stktable_trash_oldest(struct stktable *t, int to_batch)
{
int ret;

SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
ret = __stktable_trash_oldest(t, to_batch);
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);

return ret;
}

@ -249,9 +249,9 @@ struct stksess *stksess_new(struct stktable *t, struct stktable_key *key)
{
struct stksess *ts;

SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
ts = __stksess_new(t, key);
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);

return ts;
}

@ -287,11 +287,11 @@ struct stksess *stktable_lookup_key(struct stktable *t, struct stktable_key *key
{
struct stksess *ts;

SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
ts = __stktable_lookup_key(t, key);
if (ts)
ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);

return ts;
}

@ -325,11 +325,11 @@ struct stksess *stktable_lookup(struct stktable *t, struct stksess *ts)
{
struct stksess *lts;

SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
lts = __stktable_lookup(t, ts);
if (lts)
lts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);

return lts;
}

@ -389,11 +389,11 @@ void __stktable_touch_with_exp(struct stktable *t, struct stksess *ts, int local
*/
void stktable_touch_remote(struct stktable *t, struct stksess *ts, int decrefcnt)
{
SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
__stktable_touch_with_exp(t, ts, 0, ts->expire);
if (decrefcnt)
ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
}

/* Update the expiration timer for <ts> but do not touch its expiration node.

@ -406,18 +406,18 @@ void stktable_touch_local(struct stktable *t, struct stksess *ts, int decrefcnt)
{
int expire = tick_add(now_ms, MS_TO_TICKS(t->expire));

SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
__stktable_touch_with_exp(t, ts, 1, expire);
if (decrefcnt)
ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
}
/* Just decrease the ref_cnt of the current session */
void stktable_release(struct stktable *t, struct stksess *ts)
{
SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
}

/* Insert new sticky session <ts> in the table. It is assumed that it does not

@ -466,11 +466,11 @@ struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *
{
struct stksess *ts;

SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
ts = __stktable_get_entry(table, key);
if (ts)
ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);

return ts;
}

@ -498,10 +498,10 @@ struct stksess *stktable_set_entry(struct stktable *table, struct stksess *nts)
{
struct stksess *ts;

SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
ts = __stktable_set_entry(table, nts);
ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);

return ts;
}

@ -515,7 +515,7 @@ static int stktable_trash_expired(struct stktable *t)
struct eb32_node *eb;
int looped = 0;

SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
eb = eb32_lookup_ge(&t->exps, now_ms - TIMER_LOOK_BACK);

while (1) {

@ -570,7 +570,7 @@ static int stktable_trash_expired(struct stktable *t)
/* We have found no task to expire in any tree */
t->exp_next = TICK_ETERNITY;
out_unlock:
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
return t->exp_next;
}

@ -593,7 +593,7 @@ int stktable_init(struct stktable *t)
t->keys = EB_ROOT_UNIQUE;
memset(&t->exps, 0, sizeof(t->exps));
t->updates = EB_ROOT_UNIQUE;
SPIN_INIT(&t->lock);
HA_SPIN_INIT(&t->lock);

t->pool = create_pool("sticktables", sizeof(struct stksess) + t->data_size + t->key_size, MEM_F_SHARED);
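
The stick-table API above comes in pairs: the double-underscore variants (__stksess_free(), __stktable_lookup_key(), ...) assume the caller already holds the table lock, while the plain variants are self-locking wrappers around them. A sketch of the same convention with illustrative names:

#include <pthread.h>

struct table {
	pthread_mutex_t lock;
	int nb_entries;
};

/* caller must hold t->lock */
static void __table_remove(struct table *t)
{
	t->nb_entries--;
}

/* self-locking wrapper for callers that do not hold the lock */
static void table_remove(struct table *t)
{
	pthread_mutex_lock(&t->lock);
	__table_remove(t);
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct table t = { PTHREAD_MUTEX_INITIALIZER, 1 };

	table_remove(&t);
	return t.nb_entries;
}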
@ -1546,7 +1546,7 @@ static enum act_return action_inc_gpc0(struct act_rule *rule, struct proxy *px,
ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC0_RATE);
ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC0);
if (ptr1 || ptr2) {
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);

if (ptr1)
update_freq_ctr_period(&stktable_data_cast(ptr1, gpc0_rate),

@ -1555,7 +1555,7 @@ static enum act_return action_inc_gpc0(struct act_rule *rule, struct proxy *px,
if (ptr2)
stktable_data_cast(ptr2, gpc0)++;

RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);

/* If data was modified, we need to touch to re-schedule sync */
stktable_touch_local(stkctr->table, ts, 0);

@ -1628,11 +1628,11 @@ static enum act_return action_set_gpt0(struct act_rule *rule, struct proxy *px,
/* Store the sample in the required sc, and ignore errors. */
ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPT0);
if (ptr) {
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);

stktable_data_cast(ptr, gpt0) = rule->arg.gpt.value;

RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);

stktable_touch_local(stkctr->table, ts, 0);
}

@ -1887,11 +1887,11 @@ smp_fetch_sc_get_gpt0(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = stktable_data_cast(ptr, gpt0);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -1928,11 +1928,11 @@ smp_fetch_sc_get_gpc0(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = stktable_data_cast(ptr, gpc0);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -1968,12 +1968,12 @@ smp_fetch_sc_gpc0_rate(const struct arg *args, struct sample *smp, const char *k
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, gpc0_rate),
stkctr->table->data_arg[STKTABLE_DT_GPC0_RATE].u);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2012,7 +2012,7 @@ smp_fetch_sc_inc_gpc0(const struct arg *args, struct sample *smp, const char *kw
ptr1 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0_RATE);
ptr2 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0);
if (ptr1 || ptr2) {
RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (ptr1) {
update_freq_ctr_period(&stktable_data_cast(ptr1, gpc0_rate),

@ -2023,7 +2023,7 @@ smp_fetch_sc_inc_gpc0(const struct arg *args, struct sample *smp, const char *kw
if (ptr2)
smp->data.u.sint = ++stktable_data_cast(ptr2, gpc0);

RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

/* If data was modified, we need to touch to re-schedule sync */
stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);

@ -2065,12 +2065,12 @@ smp_fetch_sc_clr_gpc0(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */
}

RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = stktable_data_cast(ptr, gpc0);
stktable_data_cast(ptr, gpc0) = 0;

RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

/* If data was modified, we need to touch to re-schedule sync */
stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);

@ -2105,11 +2105,11 @@ smp_fetch_sc_conn_cnt(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = stktable_data_cast(ptr, conn_cnt);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2146,12 +2146,12 @@ smp_fetch_sc_conn_rate(const struct arg *args, struct sample *smp, const char *k
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, conn_rate),
stkctr->table->data_arg[STKTABLE_DT_CONN_RATE].u);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2197,11 +2197,11 @@ smp_fetch_src_updt_conn_cnt(const struct arg *args, struct sample *smp, const ch

smp->data.type = SMP_T_SINT;

RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);

smp->data.u.sint = ++stktable_data_cast(ptr, conn_cnt);

RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);

smp->flags = SMP_F_VOL_TEST;

@ -2238,11 +2238,11 @@ smp_fetch_sc_conn_cur(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = stktable_data_cast(ptr, conn_cur);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2277,11 +2277,11 @@ smp_fetch_sc_sess_cnt(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = stktable_data_cast(ptr, sess_cnt);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2315,12 +2315,12 @@ smp_fetch_sc_sess_rate(const struct arg *args, struct sample *smp, const char *k
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, sess_rate),
stkctr->table->data_arg[STKTABLE_DT_SESS_RATE].u);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2355,11 +2355,11 @@ smp_fetch_sc_http_req_cnt(const struct arg *args, struct sample *smp, const char
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = stktable_data_cast(ptr, http_req_cnt);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2394,12 +2394,12 @@ smp_fetch_sc_http_req_rate(const struct arg *args, struct sample *smp, const cha
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2434,11 +2434,11 @@ smp_fetch_sc_http_err_cnt(const struct arg *args, struct sample *smp, const char
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = stktable_data_cast(ptr, http_err_cnt);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2473,12 +2473,12 @@ smp_fetch_sc_http_err_rate(const struct arg *args, struct sample *smp, const cha
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, http_err_rate),
stkctr->table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2513,11 +2513,11 @@ smp_fetch_sc_kbytes_in(const struct arg *args, struct sample *smp, const char *k
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = stktable_data_cast(ptr, bytes_in_cnt) >> 10;

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2552,12 +2552,12 @@ smp_fetch_sc_bytes_in_rate(const struct arg *args, struct sample *smp, const cha
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, bytes_in_rate),
stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2592,11 +2592,11 @@ smp_fetch_sc_kbytes_out(const struct arg *args, struct sample *smp, const char *
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = stktable_data_cast(ptr, bytes_out_cnt) >> 10;

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2631,12 +2631,12 @@ smp_fetch_sc_bytes_out_rate(const struct arg *args, struct sample *smp, const ch
return 0; /* parameter not stored */
}

RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, bytes_out_rate),
stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u);

RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr));

@ -2875,13 +2875,13 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
stktable_release(&px->table, ts);
return 0;
}
RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
if (!table_dump_entry_to_buffer(&trash, si, px, ts)) {
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_release(&px->table, ts);
return 0;
}
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_release(&px->table, ts);
break;

@ -2910,13 +2910,13 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
return 1;
}

RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
for (cur_arg = 5; *args[cur_arg]; cur_arg += 2) {
if (strncmp(args[cur_arg], "data.", 5) != 0) {
appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "\"data.<type>\" followed by a value expected\n";
appctx->st0 = CLI_ST_PRINT;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(&px->table, ts, 1);
return 1;
}

@ -2926,7 +2926,7 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "Unknown data type\n";
appctx->st0 = CLI_ST_PRINT;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(&px->table, ts, 1);
return 1;
}

@ -2935,7 +2935,7 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "Data type not stored in this table\n";
appctx->st0 = CLI_ST_PRINT;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(&px->table, ts, 1);
return 1;
}

@ -2944,7 +2944,7 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "Require a valid integer value to store\n";
appctx->st0 = CLI_ST_PRINT;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(&px->table, ts, 1);
return 1;
}

@ -2978,7 +2978,7 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
break;
}
}
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(&px->table, ts, 1);
break;

@ -3155,16 +3155,16 @@ static int cli_io_handler_table(struct appctx *appctx)
|
||||
if (appctx->ctx.table.target &&
|
||||
(strm_li(s)->bind_conf->level & ACCESS_LVL_MASK) >= ACCESS_LVL_OPER) {
|
||||
/* dump entries only if table explicitly requested */
|
||||
SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
HA_SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
eb = ebmb_first(&appctx->ctx.table.proxy->table.keys);
|
||||
if (eb) {
|
||||
appctx->ctx.table.entry = ebmb_entry(eb, struct stksess, key);
|
||||
appctx->ctx.table.entry->ref_cnt++;
|
||||
appctx->st2 = STAT_ST_LIST;
|
||||
SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
break;
|
||||
}
|
||||
SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
}
|
||||
}
|
||||
appctx->ctx.table.proxy = appctx->ctx.table.proxy->next;
|
||||
@ -3173,7 +3173,7 @@ static int cli_io_handler_table(struct appctx *appctx)
|
||||
case STAT_ST_LIST:
|
||||
skip_entry = 0;
|
||||
|
||||
RWLOCK_RDLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
|
||||
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
|
||||
|
||||
if (appctx->ctx.table.data_type >= 0) {
|
||||
/* we're filtering on some data contents */
|
||||
@ -3221,13 +3221,13 @@ static int cli_io_handler_table(struct appctx *appctx)
|
||||
|
||||
if (show && !skip_entry &&
|
||||
!table_dump_entry_to_buffer(&trash, si, appctx->ctx.table.proxy, appctx->ctx.table.entry)) {
|
||||
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
|
||||
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
|
||||
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
|
||||
|
||||
SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
HA_SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
appctx->ctx.table.entry->ref_cnt--;
|
||||
|
||||
eb = ebmb_next(&appctx->ctx.table.entry->key);
|
||||
@ -3239,7 +3239,7 @@ static int cli_io_handler_table(struct appctx *appctx)
|
||||
else if (!skip_entry && !appctx->ctx.table.entry->ref_cnt)
|
||||
__stksess_kill(&appctx->ctx.table.proxy->table, old);
|
||||
appctx->ctx.table.entry->ref_cnt++;
|
||||
SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -3249,7 +3249,7 @@ static int cli_io_handler_table(struct appctx *appctx)
|
||||
else if (!skip_entry && !appctx->ctx.table.entry->ref_cnt)
|
||||
__stksess_kill(&appctx->ctx.table.proxy->table, appctx->ctx.table.entry);
|
||||
|
||||
SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
|
||||
|
||||
appctx->ctx.table.proxy = appctx->ctx.table.proxy->next;
|
||||
appctx->st2 = STAT_ST_INFO;
|
||||
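Note: every stktable hunk above is the same mechanical substitution inside one recurring pattern: take the per-entry read lock, read a single datum, drop the lock, then release the table reference. A minimal sketch of that pattern with the renamed macros follows; fetch_one_counter() and its arguments are illustrative stand-ins, not code from this patch, while the macros, the STK_SESS_LOCK label and the stktable helpers are the ones shown above.

/* Sketch: read one stored counter under the entry's read lock.
 * Only the HA_RWLOCK_* macros, the STK_SESS_LOCK label and the
 * stktable helpers come from the patch; the function is made up. */
static long fetch_one_counter(struct stkctr *stkctr, struct stkctr *tmpstkctr, void *ptr)
{
	long val;

	HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
	val = stktable_data_cast(ptr, http_err_cnt);
	HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);

	/* drop the reference taken when the entry was looked up */
	if (stkctr == tmpstkctr)
		stktable_release(stkctr->table, stkctr_entry(stkctr));
	return val;
}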
66  src/stream.c
@ -253,9 +253,9 @@ struct stream *stream_new(struct session *sess, enum obj_type *origin)
s->txn = NULL;
s->hlua = NULL;

SPIN_LOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
LIST_ADDQ(&streams, &s->list);
SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);

if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
goto out_fail_accept;
@ -326,10 +326,10 @@ static void stream_free(struct stream *s)

/* We may still be present in the buffer wait queue */
if (!LIST_ISEMPTY(&s->buffer_wait.list)) {
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_DEL(&s->buffer_wait.list);
LIST_INIT(&s->buffer_wait.list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
}
if (s->req.buf->size || s->res.buf->size) {
b_drop(&s->req.buf);
@ -373,7 +373,7 @@ static void stream_free(struct stream *s)

stream_store_counters(s);

SPIN_LOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
list_for_each_entry_safe(bref, back, &s->back_refs, users) {
/* we have to unlink all watchers. We must not relink them if
* this stream was the last one in the list.
@ -385,7 +385,7 @@ static void stream_free(struct stream *s)
bref->ref = s->list.n;
}
LIST_DEL(&s->list);
SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);

si_release_endpoint(&s->si[1]);
si_release_endpoint(&s->si[0]);
@ -423,18 +423,18 @@ static void stream_free(struct stream *s)
static int stream_alloc_work_buffer(struct stream *s)
{
if (!LIST_ISEMPTY(&s->buffer_wait.list)) {
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_DEL(&s->buffer_wait.list);
LIST_INIT(&s->buffer_wait.list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
}

if (b_alloc_margin(&s->res.buf, 0))
return 1;

SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
return 0;
}

@ -468,7 +468,7 @@ void stream_release_buffers(struct stream *s)
int init_stream()
{
LIST_INIT(&streams);
SPIN_INIT(&streams_lock);
HA_SPIN_INIT(&streams_lock);
pool2_stream = create_pool("stream", sizeof(struct stream), MEM_F_SHARED);
return pool2_stream != NULL;
}
@ -504,7 +504,7 @@ void stream_process_counters(struct stream *s)
continue;
}

RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_IN_CNT);
if (ptr1)
stktable_data_cast(ptr1, bytes_in_cnt) += bytes;
@ -513,7 +513,7 @@ void stream_process_counters(struct stream *s)
if (ptr2)
update_freq_ctr_period(&stktable_data_cast(ptr2, bytes_in_rate),
stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u, bytes);
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);

/* If data was modified, we need to touch to re-schedule sync */
if (ptr1 || ptr2)
@ -544,7 +544,7 @@ void stream_process_counters(struct stream *s)
continue;
}

RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_OUT_CNT);
if (ptr1)
stktable_data_cast(ptr1, bytes_out_cnt) += bytes;
@ -553,7 +553,7 @@ void stream_process_counters(struct stream *s)
if (ptr2)
update_freq_ctr_period(&stktable_data_cast(ptr2, bytes_out_rate),
stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u, bytes);
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);

/* If data was modified, we need to touch to re-schedule sync */
if (ptr1 || ptr2)
@ -1409,10 +1409,10 @@ static int process_sticking_rules(struct stream *s, struct channel *req, int an_
void *ptr;

/* srv found in table */
RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
ptr = stktable_data_ptr(rule->table.t, ts, STKTABLE_DT_SERVER_ID);
node = eb32_lookup(&px->conf.used_server_id, stktable_data_cast(ptr, server_id));
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
if (node) {
struct server *srv;

@ -1536,10 +1536,10 @@ static int process_store_rules(struct stream *s, struct channel *rep, int an_bit
}
s->store[i].ts = NULL;

RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
ptr = stktable_data_ptr(s->store[i].table, ts, STKTABLE_DT_SERVER_ID);
stktable_data_cast(ptr, server_id) = objt_server(s->target)->puid;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(s->store[i].table, ts, 1);
}
s->store_count = 0; /* everything is stored */
@ -2536,12 +2536,12 @@ void stream_update_time_stats(struct stream *s)
swrate_add(&srv->counters.d_time, TIME_STATS_SAMPLES, t_data);
swrate_add(&srv->counters.t_time, TIME_STATS_SAMPLES, t_close);
}
SPIN_LOCK(PROXY_LOCK, &s->be->lock);
HA_SPIN_LOCK(PROXY_LOCK, &s->be->lock);
swrate_add(&s->be->be_counters.q_time, TIME_STATS_SAMPLES, t_queue);
swrate_add(&s->be->be_counters.c_time, TIME_STATS_SAMPLES, t_connect);
swrate_add(&s->be->be_counters.d_time, TIME_STATS_SAMPLES, t_data);
swrate_add(&s->be->be_counters.t_time, TIME_STATS_SAMPLES, t_close);
SPIN_UNLOCK(PROXY_LOCK, &s->be->lock);
HA_SPIN_UNLOCK(PROXY_LOCK, &s->be->lock);
}

/*
@ -3056,14 +3056,14 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
* pointer points back to the head of the streams list.
*/
LIST_INIT(&appctx->ctx.sess.bref.users);
SPIN_LOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
appctx->ctx.sess.bref.ref = streams.n;
SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
appctx->st2 = STAT_ST_LIST;
/* fall through */

case STAT_ST_LIST:
SPIN_LOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
/* first, let's detach the back-ref from a possible previous stream */
if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users)) {
LIST_DEL(&appctx->ctx.sess.bref.users);
@ -3084,7 +3084,7 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
LIST_ADDQ(&curr_strm->back_refs, &appctx->ctx.sess.bref.users);
/* call the proper dump() function and return if we're missing space */
if (!stats_dump_full_strm_to_buffer(si, curr_strm)) {
SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
return 0;
}

@ -3212,7 +3212,7 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
*/
si_applet_cant_put(si);
LIST_ADDQ(&curr_strm->back_refs, &appctx->ctx.sess.bref.users);
SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
return 0;
}

@ -3229,17 +3229,17 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)

if (ci_putchk(si_ic(si), &trash) == -1) {
si_applet_cant_put(si);
SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
return 0;
}

appctx->ctx.sess.target = NULL;
appctx->ctx.sess.uid = 0;
SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
return 1;
}

SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
appctx->st2 = STAT_ST_FIN;
/* fall through */

@ -3252,10 +3252,10 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
static void cli_release_show_sess(struct appctx *appctx)
{
if (appctx->st2 == STAT_ST_LIST) {
SPIN_LOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users))
LIST_DEL(&appctx->ctx.sess.bref.users);
SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
}
}

@ -3308,11 +3308,11 @@ static int cli_parse_shutdown_sessions_server(char **args, struct appctx *appctx
return 1;

/* kill all the stream that are on this server */
SPIN_LOCK(SERVER_LOCK, &sv->lock);
HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
list_for_each_entry_safe(strm, strm_bck, &sv->actconns, by_srv)
if (strm->srv_conn == sv)
stream_shutdown(strm, SF_ERR_KILLED);
SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 1;
}

18  src/task.c
@ -121,7 +121,7 @@ int wake_expired_tasks()
int ret = TICK_ETERNITY;

while (1) {
SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
lookup_next:
eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
if (!eb) {
@ -162,11 +162,11 @@ int wake_expired_tasks()
__task_queue(task);
goto lookup_next;
}
SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
task_wakeup(task, TASK_WOKEN_TIMER);
}

SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
return ret;
}

@ -251,7 +251,7 @@ void process_runnable_tasks()
return;
}

SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
rq_next = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);

do {
@ -289,7 +289,7 @@ void process_runnable_tasks()
if (!local_tasks_count)
break;

SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);

final_tasks_count = 0;
for (i = 0; i < local_tasks_count ; i++) {
@ -305,7 +305,7 @@ void process_runnable_tasks()
local_tasks[final_tasks_count++] = t;
}

SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
for (i = 0; i < final_tasks_count ; i++) {
t = local_tasks[i];
t->state &= ~TASK_RUNNING;
@ -321,7 +321,7 @@ void process_runnable_tasks()
}
} while (max_processed > 0);

SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
}

/* perform minimal intializations, report 0 in case of error, 1 if OK. */
@ -329,8 +329,8 @@ int init_task()
{
memset(&timers, 0, sizeof(timers));
memset(&rqueue, 0, sizeof(rqueue));
SPIN_INIT(&wq_lock);
SPIN_INIT(&rq_lock);
HA_SPIN_INIT(&wq_lock);
HA_SPIN_INIT(&rq_lock);
pool2_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
if (!pool2_task)
return 0;
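Note: the wake_expired_tasks() hunks above also show why unlock placement matters here: the wait-queue lock is released before task_wakeup(), so the woken task's handler never runs with TASK_WQ_LOCK held. A condensed sketch of that loop structure follows; next_expired_task() is an illustrative stand-in for the eb32 timer lookup, while the macros, label and task API are the ones in the patch.

/* Sketch of the loop structure only, not the real lookup logic. */
while (1) {
	HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
	task = next_expired_task();              /* illustrative helper */
	if (!task) {
		HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
		return ret;
	}
	HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);  /* unlock before waking */
	task_wakeup(task, TASK_WOKEN_TIMER);
}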
34  src/vars.c
@ -118,11 +118,11 @@ void vars_prune(struct vars *vars, struct session *sess, struct stream *strm)
struct var *var, *tmp;
unsigned int size = 0;

RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
list_for_each_entry_safe(var, tmp, &vars->head, l) {
size += var_clear(var);
}
RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
var_accounting_diff(vars, sess, strm, -size);
}

@ -134,11 +134,11 @@ void vars_prune_per_sess(struct vars *vars)
struct var *var, *tmp;
unsigned int size = 0;

RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
list_for_each_entry_safe(var, tmp, &vars->head, l) {
size += var_clear(var);
}
RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);

HA_ATOMIC_SUB(&vars->size, size);
HA_ATOMIC_SUB(&global.vars.size, size);
@ -151,7 +151,7 @@ void vars_init(struct vars *vars, enum vars_scope scope)
LIST_INIT(&vars->head);
vars->scope = scope;
vars->size = 0;
RWLOCK_INIT(&vars->rwlock);
HA_RWLOCK_INIT(&vars->rwlock);
}

/* This function declares a new variable name. It returns a pointer
@ -214,9 +214,9 @@ static char *register_name(const char *name, int len, enum vars_scope *scope,
}

if (alloc)
RWLOCK_WRLOCK(VARS_LOCK, &var_names_rwlock);
HA_RWLOCK_WRLOCK(VARS_LOCK, &var_names_rwlock);
else
RWLOCK_RDLOCK(VARS_LOCK, &var_names_rwlock);
HA_RWLOCK_RDLOCK(VARS_LOCK, &var_names_rwlock);


/* Look for existing variable name. */
@ -263,9 +263,9 @@ static char *register_name(const char *name, int len, enum vars_scope *scope,

end:
if (alloc)
RWLOCK_WRUNLOCK(VARS_LOCK, &var_names_rwlock);
HA_RWLOCK_WRUNLOCK(VARS_LOCK, &var_names_rwlock);
else
RWLOCK_RDUNLOCK(VARS_LOCK, &var_names_rwlock);
HA_RWLOCK_RDUNLOCK(VARS_LOCK, &var_names_rwlock);

return res;
}
@ -312,12 +312,12 @@ static int smp_fetch_var(const struct arg *args, struct sample *smp, const char
if (vars->scope != var_desc->scope)
return 0;

RWLOCK_RDLOCK(VARS_LOCK, &vars->rwlock);
HA_RWLOCK_RDLOCK(VARS_LOCK, &vars->rwlock);
var = var_get(vars, var_desc->name);

/* check for the variable avalaibility */
if (!var) {
RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock);
HA_RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock);
return 0;
}

@ -327,7 +327,7 @@ static int smp_fetch_var(const struct arg *args, struct sample *smp, const char
smp_dup(smp);
smp->flags |= SMP_F_CONST;

RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock);
HA_RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock);
return 1;
}

@ -438,9 +438,9 @@ static inline int sample_store_stream(const char *name, enum vars_scope scope, s
if (vars->scope != scope)
return 0;

RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
ret = sample_store(vars, name, smp);
RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
return ret;
}

@ -463,13 +463,13 @@ static inline int sample_clear_stream(const char *name, enum vars_scope scope, s
return 0;

/* Look for existing variable name. */
RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
var = var_get(vars, name);
if (var) {
size = var_clear(var);
var_accounting_diff(vars, smp->sess, smp->strm, -size);
}
RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
return 1;
}

@ -914,5 +914,5 @@ static void __vars_init(void)
http_res_keywords_register(&http_res_kws);
cfg_register_keywords(&cfg_kws);

RWLOCK_INIT(&var_names_rwlock);
HA_RWLOCK_INIT(&var_names_rwlock);
}
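Note: taken together, the patch is a pure rename: every call site keeps its lock label and lock pointer, and only the macro prefix changes. A minimal usage sketch built from the init_stream()/stream_new() hunks above follows; my_init() and my_queue_stream() are illustrative wrappers, while the list, lock, label and macro names all appear in the patch.

/* Assumed to be built inside HAProxy, where struct list, struct stream,
 * HA_SPINLOCK_T, the STRMS_LOCK label and the LIST_* macros are defined. */
static struct list streams;
static HA_SPINLOCK_T streams_lock;

static int my_init(void)
{
	LIST_INIT(&streams);
	HA_SPIN_INIT(&streams_lock);                /* formerly SPIN_INIT() */
	return 1;
}

static void my_queue_stream(struct stream *s)
{
	HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);    /* formerly SPIN_LOCK() */
	LIST_ADDQ(&streams, &s->list);
	HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);  /* formerly SPIN_UNLOCK() */
}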