BUILD: threads: Rename SPIN/RWLOCK macros using HA_ prefix

This removes any name conflicts, especially on Solaris.
This commit is contained in:
Christopher Faulet 2017-11-07 10:42:54 +01:00 committed by Willy Tarreau
parent 7d8e4af46a
commit 2a944ee16b
49 changed files with 701 additions and 701 deletions

View File

@ -751,13 +751,13 @@ void __offer_buffer(void *from, unsigned int threshold);
static inline void offer_buffers(void *from, unsigned int threshold) static inline void offer_buffers(void *from, unsigned int threshold)
{ {
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
if (LIST_ISEMPTY(&buffer_wq)) { if (LIST_ISEMPTY(&buffer_wq)) {
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
return; return;
} }
__offer_buffer(from, threshold); __offer_buffer(from, threshold);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
} }
/*************************************************************************/ /*************************************************************************/

View File

@ -70,20 +70,20 @@ extern THREAD_LOCAL unsigned int tid_bit; /* The bit corresponding to the thread
#define THREAD_NO_SYNC() ({ 0; }) #define THREAD_NO_SYNC() ({ 0; })
#define THREAD_NEED_SYNC() ({ 1; }) #define THREAD_NEED_SYNC() ({ 1; })
#define SPIN_INIT(l) do { /* do nothing */ } while(0) #define HA_SPIN_INIT(l) do { /* do nothing */ } while(0)
#define SPIN_DESTROY(l) do { /* do nothing */ } while(0) #define HA_SPIN_DESTROY(l) do { /* do nothing */ } while(0)
#define SPIN_LOCK(lbl, l) do { /* do nothing */ } while(0) #define HA_SPIN_LOCK(lbl, l) do { /* do nothing */ } while(0)
#define SPIN_TRYLOCK(lbl, l) ({ 0; }) #define HA_SPIN_TRYLOCK(lbl, l) ({ 0; })
#define SPIN_UNLOCK(lbl, l) do { /* do nothing */ } while(0) #define HA_SPIN_UNLOCK(lbl, l) do { /* do nothing */ } while(0)
#define RWLOCK_INIT(l) do { /* do nothing */ } while(0) #define HA_RWLOCK_INIT(l) do { /* do nothing */ } while(0)
#define RWLOCK_DESTROY(l) do { /* do nothing */ } while(0) #define HA_RWLOCK_DESTROY(l) do { /* do nothing */ } while(0)
#define RWLOCK_WRLOCK(lbl, l) do { /* do nothing */ } while(0) #define HA_RWLOCK_WRLOCK(lbl, l) do { /* do nothing */ } while(0)
#define RWLOCK_TRYWRLOCK(lbl, l) ({ 0; }) #define HA_RWLOCK_TRYWRLOCK(lbl, l) ({ 0; })
#define RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0) #define HA_RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0)
#define RWLOCK_RDLOCK(lbl, l) do { /* do nothing */ } while(0) #define HA_RWLOCK_RDLOCK(lbl, l) do { /* do nothing */ } while(0)
#define RWLOCK_TRYRDLOCK(lbl, l) ({ 0; }) #define HA_RWLOCK_TRYRDLOCK(lbl, l) ({ 0; })
#define RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0) #define HA_RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0)
#else /* USE_THREAD */ #else /* USE_THREAD */
@ -208,23 +208,23 @@ extern struct lock_stat lock_stats[LOCK_LABELS];
#define HA_SPINLOCK_T struct ha_spinlock #define HA_SPINLOCK_T struct ha_spinlock
#define SPIN_INIT(l) __spin_init(l) #define HA_SPIN_INIT(l) __spin_init(l)
#define SPIN_DESTROY(l) __spin_destroy(l) #define HA_SPIN_DESTROY(l) __spin_destroy(l)
#define SPIN_LOCK(lbl, l) __spin_lock(lbl, l, __func__, __FILE__, __LINE__) #define HA_SPIN_LOCK(lbl, l) __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
#define SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__) #define HA_SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
#define SPIN_UNLOCK(lbl, l) __spin_unlock(lbl, l, __func__, __FILE__, __LINE__) #define HA_SPIN_UNLOCK(lbl, l) __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_T struct ha_rwlock #define HA_RWLOCK_T struct ha_rwlock
#define RWLOCK_INIT(l) __ha_rwlock_init((l)) #define HA_RWLOCK_INIT(l) __ha_rwlock_init((l))
#define RWLOCK_DESTROY(l) __ha_rwlock_destroy((l)) #define HA_RWLOCK_DESTROY(l) __ha_rwlock_destroy((l))
#define RWLOCK_WRLOCK(lbl,l) __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__) #define HA_RWLOCK_WRLOCK(lbl,l) __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
#define RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__) #define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
#define RWLOCK_WRUNLOCK(lbl,l) __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__) #define HA_RWLOCK_WRUNLOCK(lbl,l) __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
#define RWLOCK_RDLOCK(lbl,l) __ha_rwlock_rdlock(lbl, l) #define HA_RWLOCK_RDLOCK(lbl,l) __ha_rwlock_rdlock(lbl, l)
#define RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l) #define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
#define RWLOCK_RDUNLOCK(lbl,l) __ha_rwlock_rdunlock(lbl, l) #define HA_RWLOCK_RDUNLOCK(lbl,l) __ha_rwlock_rdunlock(lbl, l)
struct ha_spinlock { struct ha_spinlock {
__HA_SPINLOCK_T lock; __HA_SPINLOCK_T lock;
@ -550,22 +550,22 @@ static inline void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
#define HA_SPINLOCK_T unsigned long #define HA_SPINLOCK_T unsigned long
#define SPIN_INIT(l) ({ (*l) = 0; }) #define HA_SPIN_INIT(l) ({ (*l) = 0; })
#define SPIN_DESTROY(l) ({ (*l) = 0; }) #define HA_SPIN_DESTROY(l) ({ (*l) = 0; })
#define SPIN_LOCK(lbl, l) pl_take_s(l) #define HA_SPIN_LOCK(lbl, l) pl_take_s(l)
#define SPIN_TRYLOCK(lbl, l) !pl_try_s(l) #define HA_SPIN_TRYLOCK(lbl, l) !pl_try_s(l)
#define SPIN_UNLOCK(lbl, l) pl_drop_s(l) #define HA_SPIN_UNLOCK(lbl, l) pl_drop_s(l)
#define HA_RWLOCK_T unsigned long #define HA_RWLOCK_T unsigned long
#define RWLOCK_INIT(l) ({ (*l) = 0; }) #define HA_RWLOCK_INIT(l) ({ (*l) = 0; })
#define RWLOCK_DESTROY(l) ({ (*l) = 0; }) #define HA_RWLOCK_DESTROY(l) ({ (*l) = 0; })
#define RWLOCK_WRLOCK(lbl,l) pl_take_w(l) #define HA_RWLOCK_WRLOCK(lbl,l) pl_take_w(l)
#define RWLOCK_TRYWRLOCK(lbl,l) !pl_try_w(l) #define HA_RWLOCK_TRYWRLOCK(lbl,l) !pl_try_w(l)
#define RWLOCK_WRUNLOCK(lbl,l) pl_drop_w(l) #define HA_RWLOCK_WRUNLOCK(lbl,l) pl_drop_w(l)
#define RWLOCK_RDLOCK(lbl,l) pl_take_r(l) #define HA_RWLOCK_RDLOCK(lbl,l) pl_take_r(l)
#define RWLOCK_TRYRDLOCK(lbl,l) !pl_try_r(l) #define HA_RWLOCK_TRYRDLOCK(lbl,l) !pl_try_r(l)
#define RWLOCK_RDUNLOCK(lbl,l) pl_drop_r(l) #define HA_RWLOCK_RDUNLOCK(lbl,l) pl_drop_r(l)
#endif /* DEBUG_THREAD */ #endif /* DEBUG_THREAD */

View File

@ -135,9 +135,9 @@ static inline void *pool_get_first(struct pool_head *pool)
{ {
void *ret; void *ret;
SPIN_LOCK(POOL_LOCK, &pool->lock); HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
ret = __pool_get_first(pool); ret = __pool_get_first(pool);
SPIN_UNLOCK(POOL_LOCK, &pool->lock); HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
return ret; return ret;
} }
/* /*
@ -150,10 +150,10 @@ static inline void *pool_alloc_dirty(struct pool_head *pool)
{ {
void *p; void *p;
SPIN_LOCK(POOL_LOCK, &pool->lock); HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
if ((p = __pool_get_first(pool)) == NULL) if ((p = __pool_get_first(pool)) == NULL)
p = __pool_refill_alloc(pool, 0); p = __pool_refill_alloc(pool, 0);
SPIN_UNLOCK(POOL_LOCK, &pool->lock); HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
return p; return p;
} }
@ -169,10 +169,10 @@ static inline void *pool_alloc2(struct pool_head *pool)
p = pool_alloc_dirty(pool); p = pool_alloc_dirty(pool);
#ifdef DEBUG_MEMORY_POOLS #ifdef DEBUG_MEMORY_POOLS
if (p) { if (p) {
SPIN_LOCK(POOL_LOCK, &pool->lock); HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
/* keep track of where the element was allocated from */ /* keep track of where the element was allocated from */
*POOL_LINK(pool, p) = (void *)pool; *POOL_LINK(pool, p) = (void *)pool;
SPIN_UNLOCK(POOL_LOCK, &pool->lock); HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
} }
#endif #endif
if (p && mem_poison_byte >= 0) { if (p && mem_poison_byte >= 0) {
@ -194,7 +194,7 @@ static inline void *pool_alloc2(struct pool_head *pool)
static inline void pool_free2(struct pool_head *pool, void *ptr) static inline void pool_free2(struct pool_head *pool, void *ptr)
{ {
if (likely(ptr != NULL)) { if (likely(ptr != NULL)) {
SPIN_LOCK(POOL_LOCK, &pool->lock); HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
#ifdef DEBUG_MEMORY_POOLS #ifdef DEBUG_MEMORY_POOLS
/* we'll get late corruption if we refill to the wrong pool or double-free */ /* we'll get late corruption if we refill to the wrong pool or double-free */
if (*POOL_LINK(pool, ptr) != (void *)pool) if (*POOL_LINK(pool, ptr) != (void *)pool)
@ -203,7 +203,7 @@ static inline void pool_free2(struct pool_head *pool, void *ptr)
*POOL_LINK(pool, ptr) = (void *)pool->free_list; *POOL_LINK(pool, ptr) = (void *)pool->free_list;
pool->free_list = (void *)ptr; pool->free_list = (void *)ptr;
pool->used--; pool->used--;
SPIN_UNLOCK(POOL_LOCK, &pool->lock); HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
} }
} }
#endif /* _COMMON_MEMORY_H */ #endif /* _COMMON_MEMORY_H */

View File

@ -88,10 +88,10 @@ static inline void __appctx_free(struct appctx *appctx)
} }
if (!LIST_ISEMPTY(&appctx->buffer_wait.list)) { if (!LIST_ISEMPTY(&appctx->buffer_wait.list)) {
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_DEL(&appctx->buffer_wait.list); LIST_DEL(&appctx->buffer_wait.list);
LIST_INIT(&appctx->buffer_wait.list); LIST_INIT(&appctx->buffer_wait.list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
} }
pool_free2(pool2_connection, appctx); pool_free2(pool2_connection, appctx);
@ -99,14 +99,14 @@ static inline void __appctx_free(struct appctx *appctx)
} }
static inline void appctx_free(struct appctx *appctx) static inline void appctx_free(struct appctx *appctx)
{ {
SPIN_LOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
if (appctx->state & APPLET_RUNNING) { if (appctx->state & APPLET_RUNNING) {
appctx->state |= APPLET_WANT_DIE; appctx->state |= APPLET_WANT_DIE;
SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
return; return;
} }
__appctx_free(appctx); __appctx_free(appctx);
SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
} }
/* wakes up an applet when conditions have changed */ /* wakes up an applet when conditions have changed */
@ -120,14 +120,14 @@ static inline void __appctx_wakeup(struct appctx *appctx)
static inline void appctx_wakeup(struct appctx *appctx) static inline void appctx_wakeup(struct appctx *appctx)
{ {
SPIN_LOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
if (appctx->state & APPLET_RUNNING) { if (appctx->state & APPLET_RUNNING) {
appctx->state |= APPLET_WOKEN_UP; appctx->state |= APPLET_WOKEN_UP;
SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
return; return;
} }
__appctx_wakeup(appctx); __appctx_wakeup(appctx);
SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
} }
/* Callback used to wake up an applet when a buffer is available. The applet /* Callback used to wake up an applet when a buffer is available. The applet
@ -137,18 +137,18 @@ static inline void appctx_wakeup(struct appctx *appctx)
* requested */ * requested */
static inline int appctx_res_wakeup(struct appctx *appctx) static inline int appctx_res_wakeup(struct appctx *appctx)
{ {
SPIN_LOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
if (appctx->state & APPLET_RUNNING) { if (appctx->state & APPLET_RUNNING) {
if (appctx->state & APPLET_WOKEN_UP) { if (appctx->state & APPLET_WOKEN_UP) {
SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
return 0; return 0;
} }
appctx->state |= APPLET_WOKEN_UP; appctx->state |= APPLET_WOKEN_UP;
SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
return 1; return 1;
} }
__appctx_wakeup(appctx); __appctx_wakeup(appctx);
SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
return 1; return 1;
} }

View File

@ -441,9 +441,9 @@ static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *
return 1; return 1;
if (LIST_ISEMPTY(&wait->list)) { if (LIST_ISEMPTY(&wait->list)) {
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_ADDQ(&buffer_wq, &wait->list); LIST_ADDQ(&buffer_wq, &wait->list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
} }
return 0; return 0;

View File

@ -37,15 +37,15 @@ extern struct data_cb check_conn_cb;
*/ */
static inline void health_adjust(struct server *s, short status) static inline void health_adjust(struct server *s, short status)
{ {
SPIN_LOCK(SERVER_LOCK, &s->lock); HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
/* return now if observing nor health check is not enabled */ /* return now if observing nor health check is not enabled */
if (!s->observe || !s->check.task) { if (!s->observe || !s->check.task) {
SPIN_UNLOCK(SERVER_LOCK, &s->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
return; return;
} }
__health_adjust(s, status); __health_adjust(s, status);
SPIN_UNLOCK(SERVER_LOCK, &s->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
} }
const char *init_check(struct check *check, int type); const char *init_check(struct check *check, int type);

View File

@ -113,14 +113,14 @@ static inline void updt_fd_polling(const int fd)
*/ */
static inline void fd_alloc_cache_entry(const int fd) static inline void fd_alloc_cache_entry(const int fd)
{ {
RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock); HA_RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock);
if (fdtab[fd].cache) if (fdtab[fd].cache)
goto end; goto end;
fd_cache_num++; fd_cache_num++;
fdtab[fd].cache = fd_cache_num; fdtab[fd].cache = fd_cache_num;
fd_cache[fd_cache_num-1] = fd; fd_cache[fd_cache_num-1] = fd;
end: end:
RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock); HA_RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock);
} }
/* Removes entry used by fd <fd> from the FD cache and replaces it with the /* Removes entry used by fd <fd> from the FD cache and replaces it with the
@ -131,7 +131,7 @@ static inline void fd_release_cache_entry(int fd)
{ {
unsigned int pos; unsigned int pos;
RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock); HA_RWLOCK_WRLOCK(FDCACHE_LOCK, &fdcache_lock);
pos = fdtab[fd].cache; pos = fdtab[fd].cache;
if (!pos) if (!pos)
goto end; goto end;
@ -144,7 +144,7 @@ static inline void fd_release_cache_entry(int fd)
fdtab[fd].cache = pos; fdtab[fd].cache = pos;
} }
end: end:
RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock); HA_RWLOCK_WRUNLOCK(FDCACHE_LOCK, &fdcache_lock);
} }
/* Computes the new polled status based on the active and ready statuses, for /* Computes the new polled status based on the active and ready statuses, for
@ -267,56 +267,56 @@ static inline int fd_active(const int fd)
/* Disable processing recv events on fd <fd> */ /* Disable processing recv events on fd <fd> */
static inline void fd_stop_recv(int fd) static inline void fd_stop_recv(int fd)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (fd_recv_active(fd)) { if (fd_recv_active(fd)) {
fdtab[fd].state &= ~FD_EV_ACTIVE_R; fdtab[fd].state &= ~FD_EV_ACTIVE_R;
fd_update_cache(fd); /* need an update entry to change the state */ fd_update_cache(fd); /* need an update entry to change the state */
} }
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
} }
/* Disable processing send events on fd <fd> */ /* Disable processing send events on fd <fd> */
static inline void fd_stop_send(int fd) static inline void fd_stop_send(int fd)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (fd_send_active(fd)) { if (fd_send_active(fd)) {
fdtab[fd].state &= ~FD_EV_ACTIVE_W; fdtab[fd].state &= ~FD_EV_ACTIVE_W;
fd_update_cache(fd); /* need an update entry to change the state */ fd_update_cache(fd); /* need an update entry to change the state */
} }
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
} }
/* Disable processing of events on fd <fd> for both directions. */ /* Disable processing of events on fd <fd> for both directions. */
static inline void fd_stop_both(int fd) static inline void fd_stop_both(int fd)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (fd_active(fd)) { if (fd_active(fd)) {
fdtab[fd].state &= ~FD_EV_ACTIVE_RW; fdtab[fd].state &= ~FD_EV_ACTIVE_RW;
fd_update_cache(fd); /* need an update entry to change the state */ fd_update_cache(fd); /* need an update entry to change the state */
} }
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
} }
/* Report that FD <fd> cannot receive anymore without polling (EAGAIN detected). */ /* Report that FD <fd> cannot receive anymore without polling (EAGAIN detected). */
static inline void fd_cant_recv(const int fd) static inline void fd_cant_recv(const int fd)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (fd_recv_ready(fd)) { if (fd_recv_ready(fd)) {
fdtab[fd].state &= ~FD_EV_READY_R; fdtab[fd].state &= ~FD_EV_READY_R;
fd_update_cache(fd); /* need an update entry to change the state */ fd_update_cache(fd); /* need an update entry to change the state */
} }
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
} }
/* Report that FD <fd> can receive anymore without polling. */ /* Report that FD <fd> can receive anymore without polling. */
static inline void fd_may_recv(const int fd) static inline void fd_may_recv(const int fd)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (!fd_recv_ready(fd)) { if (!fd_recv_ready(fd)) {
fdtab[fd].state |= FD_EV_READY_R; fdtab[fd].state |= FD_EV_READY_R;
fd_update_cache(fd); /* need an update entry to change the state */ fd_update_cache(fd); /* need an update entry to change the state */
} }
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
} }
/* Disable readiness when polled. This is useful to interrupt reading when it /* Disable readiness when polled. This is useful to interrupt reading when it
@ -326,66 +326,66 @@ static inline void fd_may_recv(const int fd)
*/ */
static inline void fd_done_recv(const int fd) static inline void fd_done_recv(const int fd)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (fd_recv_polled(fd) && fd_recv_ready(fd)) { if (fd_recv_polled(fd) && fd_recv_ready(fd)) {
fdtab[fd].state &= ~FD_EV_READY_R; fdtab[fd].state &= ~FD_EV_READY_R;
fd_update_cache(fd); /* need an update entry to change the state */ fd_update_cache(fd); /* need an update entry to change the state */
} }
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
} }
/* Report that FD <fd> cannot send anymore without polling (EAGAIN detected). */ /* Report that FD <fd> cannot send anymore without polling (EAGAIN detected). */
static inline void fd_cant_send(const int fd) static inline void fd_cant_send(const int fd)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (fd_send_ready(fd)) { if (fd_send_ready(fd)) {
fdtab[fd].state &= ~FD_EV_READY_W; fdtab[fd].state &= ~FD_EV_READY_W;
fd_update_cache(fd); /* need an update entry to change the state */ fd_update_cache(fd); /* need an update entry to change the state */
} }
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
} }
/* Report that FD <fd> can send anymore without polling (EAGAIN detected). */ /* Report that FD <fd> can send anymore without polling (EAGAIN detected). */
static inline void fd_may_send(const int fd) static inline void fd_may_send(const int fd)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (!fd_send_ready(fd)) { if (!fd_send_ready(fd)) {
fdtab[fd].state |= FD_EV_READY_W; fdtab[fd].state |= FD_EV_READY_W;
fd_update_cache(fd); /* need an update entry to change the state */ fd_update_cache(fd); /* need an update entry to change the state */
} }
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
} }
/* Prepare FD <fd> to try to receive */ /* Prepare FD <fd> to try to receive */
static inline void fd_want_recv(int fd) static inline void fd_want_recv(int fd)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (!fd_recv_active(fd)) { if (!fd_recv_active(fd)) {
fdtab[fd].state |= FD_EV_ACTIVE_R; fdtab[fd].state |= FD_EV_ACTIVE_R;
fd_update_cache(fd); /* need an update entry to change the state */ fd_update_cache(fd); /* need an update entry to change the state */
} }
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
} }
/* Prepare FD <fd> to try to send */ /* Prepare FD <fd> to try to send */
static inline void fd_want_send(int fd) static inline void fd_want_send(int fd)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (!fd_send_active(fd)) { if (!fd_send_active(fd)) {
fdtab[fd].state |= FD_EV_ACTIVE_W; fdtab[fd].state |= FD_EV_ACTIVE_W;
fd_update_cache(fd); /* need an update entry to change the state */ fd_update_cache(fd); /* need an update entry to change the state */
} }
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
} }
/* Update events seen for FD <fd> and its state if needed. This should be called /* Update events seen for FD <fd> and its state if needed. This should be called
* by the poller to set FD_POLL_* flags. */ * by the poller to set FD_POLL_* flags. */
static inline void fd_update_events(int fd, int evts) static inline void fd_update_events(int fd, int evts)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].ev &= FD_POLL_STICKY; fdtab[fd].ev &= FD_POLL_STICKY;
fdtab[fd].ev |= evts; fdtab[fd].ev |= evts;
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR)) if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
fd_may_recv(fd); fd_may_recv(fd);
@ -397,7 +397,7 @@ static inline void fd_update_events(int fd, int evts)
/* Prepares <fd> for being polled */ /* Prepares <fd> for being polled */
static inline void fd_insert(int fd, unsigned long thread_mask) static inline void fd_insert(int fd, unsigned long thread_mask)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].ev = 0; fdtab[fd].ev = 0;
fdtab[fd].new = 1; fdtab[fd].new = 1;
fdtab[fd].updated = 0; fdtab[fd].updated = 0;
@ -405,12 +405,12 @@ static inline void fd_insert(int fd, unsigned long thread_mask)
fdtab[fd].cloned = 0; fdtab[fd].cloned = 0;
fdtab[fd].cache = 0; fdtab[fd].cache = 0;
fdtab[fd].thread_mask = thread_mask; fdtab[fd].thread_mask = thread_mask;
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
SPIN_LOCK(FDTAB_LOCK, &fdtab_lock); HA_SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
if (fd + 1 > maxfd) if (fd + 1 > maxfd)
maxfd = fd + 1; maxfd = fd + 1;
SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock); HA_SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock);
} }

View File

@ -57,11 +57,11 @@ static inline void session_store_counters(struct session *sess)
ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_CONN_CUR); ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_CONN_CUR);
if (ptr) { if (ptr) {
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
stktable_data_cast(ptr, conn_cur)--; stktable_data_cast(ptr, conn_cur)--;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
} }
stkctr_set_entry(stkctr, NULL); stkctr_set_entry(stkctr, NULL);

View File

@ -141,7 +141,7 @@ static inline int __stksess_kill_if_expired(struct stktable *t, struct stksess *
static inline void stksess_kill_if_expired(struct stktable *t, struct stksess *ts, int decrefcnt) static inline void stksess_kill_if_expired(struct stktable *t, struct stksess *ts, int decrefcnt)
{ {
SPIN_LOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
if (decrefcnt) if (decrefcnt)
ts->ref_cnt--; ts->ref_cnt--;
@ -149,7 +149,7 @@ static inline void stksess_kill_if_expired(struct stktable *t, struct stksess *t
if (t->expire != TICK_ETERNITY && tick_is_expired(ts->expire, now_ms)) if (t->expire != TICK_ETERNITY && tick_is_expired(ts->expire, now_ms))
__stksess_kill_if_expired(t, ts); __stksess_kill_if_expired(t, ts);
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
} }
/* sets the stick counter's entry pointer */ /* sets the stick counter's entry pointer */

View File

@ -102,11 +102,11 @@ static inline void stream_store_counters(struct stream *s)
ptr = stktable_data_ptr(s->stkctr[i].table, ts, STKTABLE_DT_CONN_CUR); ptr = stktable_data_ptr(s->stkctr[i].table, ts, STKTABLE_DT_CONN_CUR);
if (ptr) { if (ptr) {
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
stktable_data_cast(ptr, conn_cur)--; stktable_data_cast(ptr, conn_cur)--;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
} }
stkctr_set_entry(&s->stkctr[i], NULL); stkctr_set_entry(&s->stkctr[i], NULL);
stksess_kill_if_expired(s->stkctr[i].table, ts, 1); stksess_kill_if_expired(s->stkctr[i].table, ts, 1);
@ -137,11 +137,11 @@ static inline void stream_stop_content_counters(struct stream *s)
ptr = stktable_data_ptr(s->stkctr[i].table, ts, STKTABLE_DT_CONN_CUR); ptr = stktable_data_ptr(s->stkctr[i].table, ts, STKTABLE_DT_CONN_CUR);
if (ptr) { if (ptr) {
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
stktable_data_cast(ptr, conn_cur)--; stktable_data_cast(ptr, conn_cur)--;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
} }
stkctr_set_entry(&s->stkctr[i], NULL); stkctr_set_entry(&s->stkctr[i], NULL);
stksess_kill_if_expired(s->stkctr[i].table, ts, 1); stksess_kill_if_expired(s->stkctr[i].table, ts, 1);
@ -156,7 +156,7 @@ static inline void stream_start_counters(struct stktable *t, struct stksess *ts)
{ {
void *ptr; void *ptr;
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_CUR); ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_CUR);
if (ptr) if (ptr)
@ -173,7 +173,7 @@ static inline void stream_start_counters(struct stktable *t, struct stksess *ts)
if (tick_isset(t->expire)) if (tick_isset(t->expire))
ts->expire = tick_add(now_ms, MS_TO_TICKS(t->expire)); ts->expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
} }
/* Enable tracking of stream counters as <stkctr> on stksess <ts>. The caller is /* Enable tracking of stream counters as <stkctr> on stksess <ts>. The caller is
@ -209,7 +209,7 @@ static void inline stream_inc_http_req_ctr(struct stream *s)
continue; continue;
} }
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_REQ_CNT); ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_REQ_CNT);
if (ptr) if (ptr)
@ -220,7 +220,7 @@ static void inline stream_inc_http_req_ctr(struct stream *s)
update_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate), update_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1); stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
} }
} }
@ -243,7 +243,7 @@ static void inline stream_inc_be_http_req_ctr(struct stream *s)
if (!(stkctr_flags(&s->stkctr[i]) & STKCTR_TRACK_BACKEND)) if (!(stkctr_flags(&s->stkctr[i]) & STKCTR_TRACK_BACKEND))
continue; continue;
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_REQ_CNT); ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_REQ_CNT);
if (ptr) if (ptr)
@ -254,7 +254,7 @@ static void inline stream_inc_be_http_req_ctr(struct stream *s)
update_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate), update_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1); stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
} }
} }
@ -281,7 +281,7 @@ static void inline stream_inc_http_err_ctr(struct stream *s)
continue; continue;
} }
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_ERR_CNT); ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_ERR_CNT);
if (ptr) if (ptr)
@ -292,16 +292,16 @@ static void inline stream_inc_http_err_ctr(struct stream *s)
update_freq_ctr_period(&stktable_data_cast(ptr, http_err_rate), update_freq_ctr_period(&stktable_data_cast(ptr, http_err_rate),
stkctr->table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1); stkctr->table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1);
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
} }
} }
static void inline stream_add_srv_conn(struct stream *sess, struct server *srv) static void inline stream_add_srv_conn(struct stream *sess, struct server *srv)
{ {
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
sess->srv_conn = srv; sess->srv_conn = srv;
LIST_ADD(&srv->actconns, &sess->by_srv); LIST_ADD(&srv->actconns, &sess->by_srv);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
} }
static void inline stream_del_srv_conn(struct stream *sess) static void inline stream_del_srv_conn(struct stream *sess)
@ -311,10 +311,10 @@ static void inline stream_del_srv_conn(struct stream *sess)
if (!srv) if (!srv)
return; return;
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
sess->srv_conn = NULL; sess->srv_conn = NULL;
LIST_DEL(&sess->by_srv); LIST_DEL(&sess->by_srv);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
} }
static void inline stream_init_srv_conn(struct stream *sess) static void inline stream_init_srv_conn(struct stream *sess)

View File

@ -110,20 +110,20 @@ static inline int task_in_wq(struct task *t)
struct task *__task_wakeup(struct task *t); struct task *__task_wakeup(struct task *t);
static inline struct task *task_wakeup(struct task *t, unsigned int f) static inline struct task *task_wakeup(struct task *t, unsigned int f)
{ {
SPIN_LOCK(TASK_RQ_LOCK, &rq_lock); HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
/* If task is running, we postpone the call /* If task is running, we postpone the call
* and backup the state. * and backup the state.
*/ */
if (unlikely(t->state & TASK_RUNNING)) { if (unlikely(t->state & TASK_RUNNING)) {
t->pending_state |= f; t->pending_state |= f;
SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock); HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
return t; return t;
} }
if (likely(!task_in_rq(t))) if (likely(!task_in_rq(t)))
__task_wakeup(t); __task_wakeup(t);
t->state |= f; t->state |= f;
SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock); HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
return t; return t;
} }
@ -148,10 +148,10 @@ static inline struct task *__task_unlink_wq(struct task *t)
static inline struct task *task_unlink_wq(struct task *t) static inline struct task *task_unlink_wq(struct task *t)
{ {
SPIN_LOCK(TASK_WQ_LOCK, &wq_lock); HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
if (likely(task_in_wq(t))) if (likely(task_in_wq(t)))
__task_unlink_wq(t); __task_unlink_wq(t);
SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock); HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
return t; return t;
} }
@ -176,10 +176,10 @@ static inline struct task *__task_unlink_rq(struct task *t)
*/ */
static inline struct task *task_unlink_rq(struct task *t) static inline struct task *task_unlink_rq(struct task *t)
{ {
SPIN_LOCK(TASK_RQ_LOCK, &rq_lock); HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
if (likely(task_in_rq(t))) if (likely(task_in_rq(t)))
__task_unlink_rq(t); __task_unlink_rq(t);
SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock); HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
return t; return t;
} }
@ -256,10 +256,10 @@ static inline void task_queue(struct task *task)
if (!tick_isset(task->expire)) if (!tick_isset(task->expire))
return; return;
SPIN_LOCK(TASK_WQ_LOCK, &wq_lock); HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
__task_queue(task); __task_queue(task);
SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock); HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
} }
/* Ensure <task> will be woken up at most at <when>. If the task is already in /* Ensure <task> will be woken up at most at <when>. If the task is already in
@ -272,14 +272,14 @@ static inline void task_schedule(struct task *task, int when)
if (task_in_rq(task)) if (task_in_rq(task))
return; return;
SPIN_LOCK(TASK_WQ_LOCK, &wq_lock); HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
if (task_in_wq(task)) if (task_in_wq(task))
when = tick_first(when, task->expire); when = tick_first(when, task->expire);
task->expire = when; task->expire = when;
if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
__task_queue(task); __task_queue(task);
SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock); HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
} }
/* This function register a new signal. "lua" is the current lua /* This function register a new signal. "lua" is the current lua
@ -296,7 +296,7 @@ static inline struct notification *notification_new(struct list *purge, struct l
return NULL; return NULL;
LIST_ADDQ(purge, &com->purge_me); LIST_ADDQ(purge, &com->purge_me);
LIST_ADDQ(event, &com->wake_me); LIST_ADDQ(event, &com->wake_me);
SPIN_INIT(&com->lock); HA_SPIN_INIT(&com->lock);
com->task = wakeup; com->task = wakeup;
return com; return com;
} }
@ -311,15 +311,15 @@ static inline void notification_purge(struct list *purge)
/* Delete all pending communication signals. */ /* Delete all pending communication signals. */
list_for_each_entry_safe(com, back, purge, purge_me) { list_for_each_entry_safe(com, back, purge, purge_me) {
SPIN_LOCK(NOTIF_LOCK, &com->lock); HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
LIST_DEL(&com->purge_me); LIST_DEL(&com->purge_me);
if (!com->task) { if (!com->task) {
SPIN_UNLOCK(NOTIF_LOCK, &com->lock); HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
pool_free2(pool2_notification, com); pool_free2(pool2_notification, com);
continue; continue;
} }
com->task = NULL; com->task = NULL;
SPIN_UNLOCK(NOTIF_LOCK, &com->lock); HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
} }
} }
@ -333,16 +333,16 @@ static inline void notification_wake(struct list *wake)
/* Wake task and delete all pending communication signals. */ /* Wake task and delete all pending communication signals. */
list_for_each_entry_safe(com, back, wake, wake_me) { list_for_each_entry_safe(com, back, wake, wake_me) {
SPIN_LOCK(NOTIF_LOCK, &com->lock); HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
LIST_DEL(&com->wake_me); LIST_DEL(&com->wake_me);
if (!com->task) { if (!com->task) {
SPIN_UNLOCK(NOTIF_LOCK, &com->lock); HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
pool_free2(pool2_notification, com); pool_free2(pool2_notification, com);
continue; continue;
} }
task_wakeup(com->task, TASK_WOKEN_MSG); task_wakeup(com->task, TASK_WOKEN_MSG);
com->task = NULL; com->task = NULL;
SPIN_UNLOCK(NOTIF_LOCK, &com->lock); HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
} }
} }

View File

@ -38,7 +38,7 @@ void applet_run_active()
if (!applets_active_queue) if (!applets_active_queue)
return; return;
SPIN_LOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
curr = LIST_NEXT(&applet_active_queue, typeof(curr), runq); curr = LIST_NEXT(&applet_active_queue, typeof(curr), runq);
while (&curr->runq != &applet_active_queue) { while (&curr->runq != &applet_active_queue) {
@ -52,7 +52,7 @@ void applet_run_active()
curr = next; curr = next;
} }
SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
/* The list is only scanned from the head. This guarantees that if any /* The list is only scanned from the head. This guarantees that if any
* applet removes another one, there is no side effect while walking * applet removes another one, there is no side effect while walking
@ -84,7 +84,7 @@ void applet_run_active()
/* curr was left in the list, move it back to the active list */ /* curr was left in the list, move it back to the active list */
LIST_DEL(&curr->runq); LIST_DEL(&curr->runq);
LIST_INIT(&curr->runq); LIST_INIT(&curr->runq);
SPIN_LOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_LOCK(APPLETS_LOCK, &applet_active_lock);
if (curr->state & APPLET_WANT_DIE) { if (curr->state & APPLET_WANT_DIE) {
curr->state = APPLET_SLEEPING; curr->state = APPLET_SLEEPING;
__appctx_free(curr); __appctx_free(curr);
@ -98,7 +98,7 @@ void applet_run_active()
curr->state = APPLET_SLEEPING; curr->state = APPLET_SLEEPING;
} }
} }
SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock); HA_SPIN_UNLOCK(APPLETS_LOCK, &applet_active_lock);
} }
} }
} }
@ -106,5 +106,5 @@ void applet_run_active()
__attribute__((constructor)) __attribute__((constructor))
static void __applet_init(void) static void __applet_init(void)
{ {
SPIN_INIT(&applet_active_lock); HA_SPIN_INIT(&applet_active_lock);
} }

View File

@ -75,7 +75,7 @@ int init_buffer()
if (global.tune.buf_limit) if (global.tune.buf_limit)
pool2_buffer->limit = global.tune.buf_limit; pool2_buffer->limit = global.tune.buf_limit;
SPIN_INIT(&buffer_wq_lock); HA_SPIN_INIT(&buffer_wq_lock);
buffer = pool_refill_alloc(pool2_buffer, pool2_buffer->minavail - 1); buffer = pool_refill_alloc(pool2_buffer, pool2_buffer->minavail - 1);
if (!buffer) if (!buffer)

View File

@ -2108,7 +2108,7 @@ int cfg_parse_peers(const char *file, int linenum, char **args, int kwm)
newpeer->proto = proto; newpeer->proto = proto;
newpeer->xprt = xprt_get(XPRT_RAW); newpeer->xprt = xprt_get(XPRT_RAW);
newpeer->sock_init_arg = NULL; newpeer->sock_init_arg = NULL;
SPIN_INIT(&newpeer->lock); HA_SPIN_INIT(&newpeer->lock);
if (strcmp(newpeer->id, localpeer) == 0) { if (strcmp(newpeer->id, localpeer) == 0) {
/* Current is local peer, it define a frontend */ /* Current is local peer, it define a frontend */
@ -2251,7 +2251,7 @@ int cfg_parse_resolvers(const char *file, int linenum, char **args, int kwm)
LIST_INIT(&curr_resolvers->nameservers); LIST_INIT(&curr_resolvers->nameservers);
LIST_INIT(&curr_resolvers->resolutions.curr); LIST_INIT(&curr_resolvers->resolutions.curr);
LIST_INIT(&curr_resolvers->resolutions.wait); LIST_INIT(&curr_resolvers->resolutions.wait);
SPIN_INIT(&curr_resolvers->lock); HA_SPIN_INIT(&curr_resolvers->lock);
} }
else if (strcmp(args[0], "nameserver") == 0) { /* nameserver definition */ else if (strcmp(args[0], "nameserver") == 0) { /* nameserver definition */
struct sockaddr_storage *sk; struct sockaddr_storage *sk;
@ -8505,7 +8505,7 @@ int check_config_validity()
} }
break; break;
} }
SPIN_INIT(&curproxy->lbprm.lock); HA_SPIN_INIT(&curproxy->lbprm.lock);
if (curproxy->options & PR_O_LOGASAP) if (curproxy->options & PR_O_LOGASAP)
curproxy->to_log &= ~LW_BYTES; curproxy->to_log &= ~LW_BYTES;

View File

@ -715,7 +715,7 @@ static void event_srv_chk_w(struct conn_stream *cs)
struct server *s = check->server; struct server *s = check->server;
struct task *t = check->task; struct task *t = check->task;
SPIN_LOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
if (unlikely(check->result == CHK_RES_FAILED)) if (unlikely(check->result == CHK_RES_FAILED))
goto out_wakeup; goto out_wakeup;
@ -768,7 +768,7 @@ static void event_srv_chk_w(struct conn_stream *cs)
out_nowake: out_nowake:
__cs_stop_send(cs); /* nothing more to write */ __cs_stop_send(cs); /* nothing more to write */
out_unlock: out_unlock:
SPIN_UNLOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
} }
/* /*
@ -798,7 +798,7 @@ static void event_srv_chk_r(struct conn_stream *cs)
int done; int done;
unsigned short msglen; unsigned short msglen;
SPIN_LOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
if (unlikely(check->result == CHK_RES_FAILED)) if (unlikely(check->result == CHK_RES_FAILED))
goto out_wakeup; goto out_wakeup;
@ -1354,7 +1354,7 @@ static void event_srv_chk_r(struct conn_stream *cs)
task_wakeup(t, TASK_WOKEN_IO); task_wakeup(t, TASK_WOKEN_IO);
out_unlock: out_unlock:
SPIN_UNLOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
return; return;
wait_more_data: wait_more_data:
@ -1374,7 +1374,7 @@ static int wake_srv_chk(struct conn_stream *cs)
struct check *check = cs->data; struct check *check = cs->data;
int ret = 0; int ret = 0;
SPIN_LOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
/* we may have to make progress on the TCP checks */ /* we may have to make progress on the TCP checks */
if (check->type == PR_O2_TCPCHK_CHK) { if (check->type == PR_O2_TCPCHK_CHK) {
@ -1411,7 +1411,7 @@ static int wake_srv_chk(struct conn_stream *cs)
ret = -1; ret = -1;
} }
SPIN_UNLOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
/* if a connection got replaced, we must absolutely prevent the connection /* if a connection got replaced, we must absolutely prevent the connection
* handler from touching its fd, and perform the FD polling updates ourselves * handler from touching its fd, and perform the FD polling updates ourselves
@ -1647,9 +1647,9 @@ static struct pid_list *pid_list_add(pid_t pid, struct task *t)
check->curpid = elem; check->curpid = elem;
LIST_INIT(&elem->list); LIST_INIT(&elem->list);
SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock); HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
LIST_ADD(&pid_list, &elem->list); LIST_ADD(&pid_list, &elem->list);
SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock); HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
return elem; return elem;
} }
@ -1661,9 +1661,9 @@ static void pid_list_del(struct pid_list *elem)
if (!elem) if (!elem)
return; return;
SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock); HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
LIST_DEL(&elem->list); LIST_DEL(&elem->list);
SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock); HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
if (!elem->exited) if (!elem->exited)
kill(elem->pid, SIGTERM); kill(elem->pid, SIGTERM);
@ -1678,7 +1678,7 @@ static void pid_list_expire(pid_t pid, int status)
{ {
struct pid_list *elem; struct pid_list *elem;
SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock); HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
list_for_each_entry(elem, &pid_list, list) { list_for_each_entry(elem, &pid_list, list) {
if (elem->pid == pid) { if (elem->pid == pid) {
elem->t->expire = now_ms; elem->t->expire = now_ms;
@ -1688,7 +1688,7 @@ static void pid_list_expire(pid_t pid, int status)
break; break;
} }
} }
SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock); HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
} }
static void sigchld_handler(struct sig_handler *sh) static void sigchld_handler(struct sig_handler *sh)
@ -1719,7 +1719,7 @@ static int init_pid_list(void)
return 1; return 1;
} }
SPIN_INIT(&pid_list_lock); HA_SPIN_INIT(&pid_list_lock);
return 0; return 0;
} }
@ -1979,7 +1979,7 @@ static struct task *process_chk_proc(struct task *t)
int ret; int ret;
int expired = tick_is_expired(t->expire, now_ms); int expired = tick_is_expired(t->expire, now_ms);
SPIN_LOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
if (!(check->state & CHK_ST_INPROGRESS)) { if (!(check->state & CHK_ST_INPROGRESS)) {
/* no check currently running */ /* no check currently running */
if (!expired) /* woke up too early */ if (!expired) /* woke up too early */
@ -2092,7 +2092,7 @@ static struct task *process_chk_proc(struct task *t)
t->expire = tick_add(t->expire, MS_TO_TICKS(check->inter)); t->expire = tick_add(t->expire, MS_TO_TICKS(check->inter));
out_unlock: out_unlock:
SPIN_UNLOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
return t; return t;
} }
@ -2113,7 +2113,7 @@ static struct task *process_chk_conn(struct task *t)
int ret; int ret;
int expired = tick_is_expired(t->expire, now_ms); int expired = tick_is_expired(t->expire, now_ms);
SPIN_LOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
if (!(check->state & CHK_ST_INPROGRESS)) { if (!(check->state & CHK_ST_INPROGRESS)) {
/* no check currently running */ /* no check currently running */
if (!expired) /* woke up too early */ if (!expired) /* woke up too early */
@ -2268,7 +2268,7 @@ static struct task *process_chk_conn(struct task *t)
while (tick_is_expired(t->expire, now_ms)) while (tick_is_expired(t->expire, now_ms))
t->expire = tick_add(t->expire, MS_TO_TICKS(check->inter)); t->expire = tick_add(t->expire, MS_TO_TICKS(check->inter));
out_unlock: out_unlock:
SPIN_UNLOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
return t; return t;
} }
@ -2597,7 +2597,7 @@ static int tcpcheck_main(struct check *check)
struct list *head = check->tcpcheck_rules; struct list *head = check->tcpcheck_rules;
int retcode = 0; int retcode = 0;
SPIN_LOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
/* here, we know that the check is complete or that it failed */ /* here, we know that the check is complete or that it failed */
if (check->result != CHK_RES_UNKNOWN) if (check->result != CHK_RES_UNKNOWN)
@ -3077,7 +3077,7 @@ static int tcpcheck_main(struct check *check)
__cs_stop_both(cs); __cs_stop_both(cs);
out_unlock: out_unlock:
SPIN_UNLOCK(SERVER_LOCK, &check->server->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
return retcode; return retcode;
} }
@ -3137,7 +3137,7 @@ static struct task *process_email_alert(struct task *t)
q = container_of(check, typeof(*q), check); q = container_of(check, typeof(*q), check);
SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock); HA_SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
while (1) { while (1) {
if (!(check->state & CHK_ST_ENABLED)) { if (!(check->state & CHK_ST_ENABLED)) {
if (LIST_ISEMPTY(&q->email_alerts)) { if (LIST_ISEMPTY(&q->email_alerts)) {
@ -3167,7 +3167,7 @@ static struct task *process_email_alert(struct task *t)
check->state &= ~CHK_ST_ENABLED; check->state &= ~CHK_ST_ENABLED;
} }
end: end:
SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock); HA_SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
return t; return t;
} }
@ -3194,7 +3194,7 @@ int init_email_alert(struct mailers *mls, struct proxy *p, char **err)
struct task *t; struct task *t;
LIST_INIT(&q->email_alerts); LIST_INIT(&q->email_alerts);
SPIN_INIT(&q->lock); HA_SPIN_INIT(&q->lock);
check->inter = mls->timeout.mail; check->inter = mls->timeout.mail;
check->rise = DEF_AGENT_RISETIME; check->rise = DEF_AGENT_RISETIME;
check->fall = DEF_AGENT_FALLTIME; check->fall = DEF_AGENT_FALLTIME;
@ -3398,10 +3398,10 @@ static int enqueue_one_email_alert(struct proxy *p, struct server *s,
if (!add_tcpcheck_expect_str(&alert->tcpcheck_rules, "221 ")) if (!add_tcpcheck_expect_str(&alert->tcpcheck_rules, "221 "))
goto error; goto error;
SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock); HA_SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
task_wakeup(check->task, TASK_WOKEN_MSG); task_wakeup(check->task, TASK_WOKEN_MSG);
LIST_ADDQ(&q->email_alerts, &alert->list); LIST_ADDQ(&q->email_alerts, &alert->list);
SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock); HA_SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
return 1; return 1;
error: error:

View File

@ -160,10 +160,10 @@ static inline int init_comp_ctx(struct comp_ctx **comp_ctx)
#endif #endif
if (unlikely(pool_comp_ctx == NULL)) { if (unlikely(pool_comp_ctx == NULL)) {
SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
if (unlikely(pool_comp_ctx == NULL)) if (unlikely(pool_comp_ctx == NULL))
pool_comp_ctx = create_pool("comp_ctx", sizeof(struct comp_ctx), MEM_F_SHARED); pool_comp_ctx = create_pool("comp_ctx", sizeof(struct comp_ctx), MEM_F_SHARED);
SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
} }
*comp_ctx = pool_alloc2(pool_comp_ctx); *comp_ctx = pool_alloc2(pool_comp_ctx);
@ -412,10 +412,10 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
switch (round) { switch (round) {
case 0: case 0:
if (zlib_pool_deflate_state == NULL) { if (zlib_pool_deflate_state == NULL) {
SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
if (zlib_pool_deflate_state == NULL) if (zlib_pool_deflate_state == NULL)
zlib_pool_deflate_state = create_pool("zlib_state", size * items, MEM_F_SHARED); zlib_pool_deflate_state = create_pool("zlib_state", size * items, MEM_F_SHARED);
SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
} }
pool = zlib_pool_deflate_state; pool = zlib_pool_deflate_state;
ctx->zlib_deflate_state = buf = pool_alloc2(pool); ctx->zlib_deflate_state = buf = pool_alloc2(pool);
@ -423,10 +423,10 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
case 1: case 1:
if (zlib_pool_window == NULL) { if (zlib_pool_window == NULL) {
SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
if (zlib_pool_window == NULL) if (zlib_pool_window == NULL)
zlib_pool_window = create_pool("zlib_window", size * items, MEM_F_SHARED); zlib_pool_window = create_pool("zlib_window", size * items, MEM_F_SHARED);
SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
} }
pool = zlib_pool_window; pool = zlib_pool_window;
ctx->zlib_window = buf = pool_alloc2(pool); ctx->zlib_window = buf = pool_alloc2(pool);
@ -434,10 +434,10 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
case 2: case 2:
if (zlib_pool_prev == NULL) { if (zlib_pool_prev == NULL) {
SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
if (zlib_pool_prev == NULL) if (zlib_pool_prev == NULL)
zlib_pool_prev = create_pool("zlib_prev", size * items, MEM_F_SHARED); zlib_pool_prev = create_pool("zlib_prev", size * items, MEM_F_SHARED);
SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
} }
pool = zlib_pool_prev; pool = zlib_pool_prev;
ctx->zlib_prev = buf = pool_alloc2(pool); ctx->zlib_prev = buf = pool_alloc2(pool);
@ -445,10 +445,10 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
case 3: case 3:
if (zlib_pool_head == NULL) { if (zlib_pool_head == NULL) {
SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
if (zlib_pool_head == NULL) if (zlib_pool_head == NULL)
zlib_pool_head = create_pool("zlib_head", size * items, MEM_F_SHARED); zlib_pool_head = create_pool("zlib_head", size * items, MEM_F_SHARED);
SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
} }
pool = zlib_pool_head; pool = zlib_pool_head;
ctx->zlib_head = buf = pool_alloc2(pool); ctx->zlib_head = buf = pool_alloc2(pool);
@ -456,10 +456,10 @@ static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
case 4: case 4:
if (zlib_pool_pending_buf == NULL) { if (zlib_pool_pending_buf == NULL) {
SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
if (zlib_pool_pending_buf == NULL) if (zlib_pool_pending_buf == NULL)
zlib_pool_pending_buf = create_pool("zlib_pending_buf", size * items, MEM_F_SHARED); zlib_pool_pending_buf = create_pool("zlib_pending_buf", size * items, MEM_F_SHARED);
SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock); HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
} }
pool = zlib_pool_pending_buf; pool = zlib_pool_pending_buf;
ctx->zlib_pending_buf = buf = pool_alloc2(pool); ctx->zlib_pending_buf = buf = pool_alloc2(pool);
@ -721,7 +721,7 @@ static void __comp_fetch_init(void)
global.tune.maxzlibmem = DEFAULT_MAXZLIBMEM * 1024U * 1024U, global.tune.maxzlibmem = DEFAULT_MAXZLIBMEM * 1024U * 1024U,
#endif #endif
#ifdef USE_ZLIB #ifdef USE_ZLIB
SPIN_INIT(&comp_pool_lock); HA_SPIN_INIT(&comp_pool_lock);
memprintf(&ptr, "Built with zlib version : " ZLIB_VERSION); memprintf(&ptr, "Built with zlib version : " ZLIB_VERSION);
memprintf(&ptr, "%s\nRunning on zlib version : %s", ptr, zlibVersion()); memprintf(&ptr, "%s\nRunning on zlib version : %s", ptr, zlibVersion());
#elif defined(USE_SLZ) #elif defined(USE_SLZ)

View File

@ -486,7 +486,7 @@ static void dns_check_dns_response(struct dns_resolution *res)
/* Remove any associated server */ /* Remove any associated server */
for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) { for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) {
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
if (srv->srvrq == srvrq && srv->svc_port == item->port && if (srv->srvrq == srvrq && srv->svc_port == item->port &&
item->data_len == srv->hostname_dn_len && item->data_len == srv->hostname_dn_len &&
!memcmp(srv->hostname_dn, item->target, item->data_len)) { !memcmp(srv->hostname_dn, item->target, item->data_len)) {
@ -498,7 +498,7 @@ static void dns_check_dns_response(struct dns_resolution *res)
srv->hostname_dn_len = 0; srv->hostname_dn_len = 0;
dns_unlink_resolution(srv->dns_requester); dns_unlink_resolution(srv->dns_requester);
} }
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
} }
} }
@ -518,7 +518,7 @@ static void dns_check_dns_response(struct dns_resolution *res)
/* Check if a server already uses that hostname */ /* Check if a server already uses that hostname */
for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) { for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) {
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
if (srv->srvrq == srvrq && srv->svc_port == item->port && if (srv->srvrq == srvrq && srv->svc_port == item->port &&
item->data_len == srv->hostname_dn_len && item->data_len == srv->hostname_dn_len &&
!memcmp(srv->hostname_dn, item->target, item->data_len)) { !memcmp(srv->hostname_dn, item->target, item->data_len)) {
@ -528,20 +528,20 @@ static void dns_check_dns_response(struct dns_resolution *res)
snprintf(weight, sizeof(weight), "%d", item->weight); snprintf(weight, sizeof(weight), "%d", item->weight);
server_parse_weight_change_request(srv, weight); server_parse_weight_change_request(srv, weight);
} }
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
break; break;
} }
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
} }
if (srv) if (srv)
continue; continue;
/* If not, try to find a server with undefined hostname */ /* If not, try to find a server with undefined hostname */
for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) { for (srv = srvrq->proxy->srv; srv != NULL; srv = srv->next) {
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
if (srv->srvrq == srvrq && !srv->hostname_dn) if (srv->srvrq == srvrq && !srv->hostname_dn)
break; break;
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
} }
/* And update this server, if found */ /* And update this server, if found */
if (srv) { if (srv) {
@ -551,7 +551,7 @@ static void dns_check_dns_response(struct dns_resolution *res)
if (dns_dn_label_to_str(item->target, item->data_len+1, if (dns_dn_label_to_str(item->target, item->data_len+1,
hostname, DNS_MAX_NAME_SIZE) == -1) { hostname, DNS_MAX_NAME_SIZE) == -1) {
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
continue; continue;
} }
msg = update_server_fqdn(srv, hostname, "SRV record", 1); msg = update_server_fqdn(srv, hostname, "SRV record", 1);
@ -565,7 +565,7 @@ static void dns_check_dns_response(struct dns_resolution *res)
srv->check.port = item->port; srv->check.port = item->port;
snprintf(weight, sizeof(weight), "%d", item->weight); snprintf(weight, sizeof(weight), "%d", item->weight);
server_parse_weight_change_request(srv, weight); server_parse_weight_change_request(srv, weight);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
} }
} }
} }
@ -1348,11 +1348,11 @@ int dns_link_resolution(void *requester, int requester_type, int requester_locke
if (srv) { if (srv) {
if (!requester_locked) if (!requester_locked)
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
if (srv->dns_requester == NULL) { if (srv->dns_requester == NULL) {
if ((req = calloc(1, sizeof(*req))) == NULL) { if ((req = calloc(1, sizeof(*req))) == NULL) {
if (!requester_locked) if (!requester_locked)
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
goto err; goto err;
} }
req->owner = &srv->obj_type; req->owner = &srv->obj_type;
@ -1361,7 +1361,7 @@ int dns_link_resolution(void *requester, int requester_type, int requester_locke
else else
req = srv->dns_requester; req = srv->dns_requester;
if (!requester_locked) if (!requester_locked)
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
} }
else if (srvrq) { else if (srvrq) {
if (srvrq->dns_requester == NULL) { if (srvrq->dns_requester == NULL) {
@ -1463,7 +1463,7 @@ static void dns_resolve_recv(struct dgram_conn *dgram)
return; return;
resolvers = ns->resolvers; resolvers = ns->resolvers;
SPIN_LOCK(DNS_LOCK, &resolvers->lock); HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
/* process all pending input messages */ /* process all pending input messages */
while (1) { while (1) {
@ -1617,10 +1617,10 @@ static void dns_resolve_recv(struct dgram_conn *dgram)
struct server *s = objt_server(req->owner); struct server *s = objt_server(req->owner);
if (s) if (s)
SPIN_LOCK(SERVER_LOCK, &s->lock); HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
req->requester_cb(req, tmpns); req->requester_cb(req, tmpns);
if (s) if (s)
SPIN_UNLOCK(SERVER_LOCK, &s->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
tmpns = NULL; tmpns = NULL;
} }
@ -1630,7 +1630,7 @@ static void dns_resolve_recv(struct dgram_conn *dgram)
continue; continue;
} }
dns_update_resolvers_timeout(resolvers); dns_update_resolvers_timeout(resolvers);
SPIN_UNLOCK(DNS_LOCK, &resolvers->lock); HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
} }
/* Called when a resolvers network socket is ready to send data */ /* Called when a resolvers network socket is ready to send data */
@ -1655,7 +1655,7 @@ static void dns_resolve_send(struct dgram_conn *dgram)
return; return;
resolvers = ns->resolvers; resolvers = ns->resolvers;
SPIN_LOCK(DNS_LOCK, &resolvers->lock); HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
list_for_each_entry(res, &resolvers->resolutions.curr, list) { list_for_each_entry(res, &resolvers->resolutions.curr, list) {
int ret; int ret;
@ -1682,7 +1682,7 @@ static void dns_resolve_send(struct dgram_conn *dgram)
ns->counters.snd_error++; ns->counters.snd_error++;
res->nb_queries++; res->nb_queries++;
} }
SPIN_UNLOCK(DNS_LOCK, &resolvers->lock); HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
} }
/* Processes DNS resolution. First, it checks the active list to detect expired /* Processes DNS resolution. First, it checks the active list to detect expired
@ -1695,7 +1695,7 @@ static struct task *dns_process_resolvers(struct task *t)
struct dns_resolution *res, *resback; struct dns_resolution *res, *resback;
int exp; int exp;
SPIN_LOCK(DNS_LOCK, &resolvers->lock); HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
/* Handle all expired resolutions from the active list */ /* Handle all expired resolutions from the active list */
list_for_each_entry_safe(res, resback, &resolvers->resolutions.curr, list) { list_for_each_entry_safe(res, resback, &resolvers->resolutions.curr, list) {
@ -1765,7 +1765,7 @@ static struct task *dns_process_resolvers(struct task *t)
} }
dns_update_resolvers_timeout(resolvers); dns_update_resolvers_timeout(resolvers);
SPIN_UNLOCK(DNS_LOCK, &resolvers->lock); HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
return t; return t;
} }

View File

@ -71,14 +71,14 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
if (!fdtab[fd].owner) if (!fdtab[fd].owner)
continue; continue;
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].updated = 0; fdtab[fd].updated = 0;
fdtab[fd].new = 0; fdtab[fd].new = 0;
eo = fdtab[fd].state; eo = fdtab[fd].state;
en = fd_compute_new_polled_status(eo); en = fd_compute_new_polled_status(eo);
fdtab[fd].state = en; fdtab[fd].state = en;
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if ((eo ^ en) & FD_EV_POLLED_RW) { if ((eo ^ en) & FD_EV_POLLED_RW) {
/* poll status changed */ /* poll status changed */

View File

@ -50,14 +50,14 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
if (!fdtab[fd].owner) if (!fdtab[fd].owner)
continue; continue;
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].updated = 0; fdtab[fd].updated = 0;
fdtab[fd].new = 0; fdtab[fd].new = 0;
eo = fdtab[fd].state; eo = fdtab[fd].state;
en = fd_compute_new_polled_status(eo); en = fd_compute_new_polled_status(eo);
fdtab[fd].state = en; fdtab[fd].state = en;
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if ((eo ^ en) & FD_EV_POLLED_RW) { if ((eo ^ en) & FD_EV_POLLED_RW) {
/* poll status changed */ /* poll status changed */

View File

@ -50,10 +50,10 @@ static inline void hap_fd_clr(int fd, unsigned int *evts)
REGPRM1 static void __fd_clo(int fd) REGPRM1 static void __fd_clo(int fd)
{ {
SPIN_LOCK(POLL_LOCK, &poll_lock); HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
hap_fd_clr(fd, fd_evts[DIR_RD]); hap_fd_clr(fd, fd_evts[DIR_RD]);
hap_fd_clr(fd, fd_evts[DIR_WR]); hap_fd_clr(fd, fd_evts[DIR_WR]);
SPIN_UNLOCK(POLL_LOCK, &poll_lock); HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
} }
/* /*
@ -76,18 +76,18 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
if (!fdtab[fd].owner) if (!fdtab[fd].owner)
continue; continue;
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].updated = 0; fdtab[fd].updated = 0;
fdtab[fd].new = 0; fdtab[fd].new = 0;
eo = fdtab[fd].state; eo = fdtab[fd].state;
en = fd_compute_new_polled_status(eo); en = fd_compute_new_polled_status(eo);
fdtab[fd].state = en; fdtab[fd].state = en;
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if ((eo ^ en) & FD_EV_POLLED_RW) { if ((eo ^ en) & FD_EV_POLLED_RW) {
/* poll status changed, update the lists */ /* poll status changed, update the lists */
SPIN_LOCK(POLL_LOCK, &poll_lock); HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
if ((eo & ~en) & FD_EV_POLLED_R) if ((eo & ~en) & FD_EV_POLLED_R)
hap_fd_clr(fd, fd_evts[DIR_RD]); hap_fd_clr(fd, fd_evts[DIR_RD]);
else if ((en & ~eo) & FD_EV_POLLED_R) else if ((en & ~eo) & FD_EV_POLLED_R)
@ -97,7 +97,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
hap_fd_clr(fd, fd_evts[DIR_WR]); hap_fd_clr(fd, fd_evts[DIR_WR]);
else if ((en & ~eo) & FD_EV_POLLED_W) else if ((en & ~eo) & FD_EV_POLLED_W)
hap_fd_set(fd, fd_evts[DIR_WR]); hap_fd_set(fd, fd_evts[DIR_WR]);
SPIN_UNLOCK(POLL_LOCK, &poll_lock); HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
} }
} }
fd_nbupdt = 0; fd_nbupdt = 0;

View File

@ -31,10 +31,10 @@ static THREAD_LOCAL fd_set *tmp_evts[2];
/* Immediately remove the entry upon close() */ /* Immediately remove the entry upon close() */
REGPRM1 static void __fd_clo(int fd) REGPRM1 static void __fd_clo(int fd)
{ {
SPIN_LOCK(POLL_LOCK, &poll_lock); HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
FD_CLR(fd, fd_evts[DIR_RD]); FD_CLR(fd, fd_evts[DIR_RD]);
FD_CLR(fd, fd_evts[DIR_WR]); FD_CLR(fd, fd_evts[DIR_WR]);
SPIN_UNLOCK(POLL_LOCK, &poll_lock); HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
} }
/* /*
@ -58,18 +58,18 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
if (!fdtab[fd].owner) if (!fdtab[fd].owner)
continue; continue;
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].updated = 0; fdtab[fd].updated = 0;
fdtab[fd].new = 0; fdtab[fd].new = 0;
eo = fdtab[fd].state; eo = fdtab[fd].state;
en = fd_compute_new_polled_status(eo); en = fd_compute_new_polled_status(eo);
fdtab[fd].state = en; fdtab[fd].state = en;
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if ((eo ^ en) & FD_EV_POLLED_RW) { if ((eo ^ en) & FD_EV_POLLED_RW) {
/* poll status changed, update the lists */ /* poll status changed, update the lists */
SPIN_LOCK(POLL_LOCK, &poll_lock); HA_SPIN_LOCK(POLL_LOCK, &poll_lock);
if ((eo & ~en) & FD_EV_POLLED_R) if ((eo & ~en) & FD_EV_POLLED_R)
FD_CLR(fd, fd_evts[DIR_RD]); FD_CLR(fd, fd_evts[DIR_RD]);
else if ((en & ~eo) & FD_EV_POLLED_R) else if ((en & ~eo) & FD_EV_POLLED_R)
@ -79,7 +79,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
FD_CLR(fd, fd_evts[DIR_WR]); FD_CLR(fd, fd_evts[DIR_WR]);
else if ((en & ~eo) & FD_EV_POLLED_W) else if ((en & ~eo) & FD_EV_POLLED_W)
FD_SET(fd, fd_evts[DIR_WR]); FD_SET(fd, fd_evts[DIR_WR]);
SPIN_UNLOCK(POLL_LOCK, &poll_lock); HA_SPIN_UNLOCK(POLL_LOCK, &poll_lock);
} }
} }
fd_nbupdt = 0; fd_nbupdt = 0;

View File

@ -185,7 +185,7 @@ HA_SPINLOCK_T poll_lock; /* global lock to protect poll info */
*/ */
static void fd_dodelete(int fd, int do_close) static void fd_dodelete(int fd, int do_close)
{ {
SPIN_LOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (fdtab[fd].linger_risk) { if (fdtab[fd].linger_risk) {
/* this is generally set when connecting to servers */ /* this is generally set when connecting to servers */
setsockopt(fd, SOL_SOCKET, SO_LINGER, setsockopt(fd, SOL_SOCKET, SO_LINGER,
@ -205,12 +205,12 @@ static void fd_dodelete(int fd, int do_close)
fdtab[fd].thread_mask = 0; fdtab[fd].thread_mask = 0;
if (do_close) if (do_close)
close(fd); close(fd);
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
SPIN_LOCK(FDTAB_LOCK, &fdtab_lock); HA_SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
while ((maxfd-1 >= 0) && !fdtab[maxfd-1].owner) while ((maxfd-1 >= 0) && !fdtab[maxfd-1].owner)
maxfd--; maxfd--;
SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock); HA_SPIN_UNLOCK(FDTAB_LOCK, &fdtab_lock);
} }
/* Deletes an FD from the fdsets, and recomputes the maxfd limit. /* Deletes an FD from the fdsets, and recomputes the maxfd limit.
@ -241,16 +241,16 @@ void fd_process_cached_events()
if (!fd_cache_num) if (!fd_cache_num)
return; return;
RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock); HA_RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock);
for (entry = 0; entry < fd_cache_num; ) { for (entry = 0; entry < fd_cache_num; ) {
fd = fd_cache[entry]; fd = fd_cache[entry];
if (!(fdtab[fd].thread_mask & tid_bit)) if (!(fdtab[fd].thread_mask & tid_bit))
goto next; goto next;
if (SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) if (HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock))
goto next; goto next;
RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock); HA_RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock);
e = fdtab[fd].state; e = fdtab[fd].state;
fdtab[fd].ev &= FD_POLL_STICKY; fdtab[fd].ev &= FD_POLL_STICKY;
@ -262,15 +262,15 @@ void fd_process_cached_events()
fdtab[fd].ev |= FD_POLL_OUT; fdtab[fd].ev |= FD_POLL_OUT;
if (fdtab[fd].iocb && fdtab[fd].owner && fdtab[fd].ev) { if (fdtab[fd].iocb && fdtab[fd].owner && fdtab[fd].ev) {
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].iocb(fd); fdtab[fd].iocb(fd);
} }
else { else {
fd_release_cache_entry(fd); fd_release_cache_entry(fd);
SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock); HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
} }
RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock); HA_RWLOCK_RDLOCK(FDCACHE_LOCK, &fdcache_lock);
/* If the fd was removed from the cache, it has been /* If the fd was removed from the cache, it has been
* replaced by the next one that we don't want to skip ! * replaced by the next one that we don't want to skip !
*/ */
@ -279,7 +279,7 @@ void fd_process_cached_events()
next: next:
entry++; entry++;
} }
RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock); HA_RWLOCK_RDUNLOCK(FDCACHE_LOCK, &fdcache_lock);
} }
/* disable the specified poller */ /* disable the specified poller */
@ -329,11 +329,11 @@ int init_pollers()
hap_register_per_thread_deinit(deinit_pollers_per_thread); hap_register_per_thread_deinit(deinit_pollers_per_thread);
for (p = 0; p < global.maxsock; p++) for (p = 0; p < global.maxsock; p++)
SPIN_INIT(&fdtab[p].lock); HA_SPIN_INIT(&fdtab[p].lock);
SPIN_INIT(&fdtab_lock); HA_SPIN_INIT(&fdtab_lock);
RWLOCK_INIT(&fdcache_lock); HA_RWLOCK_INIT(&fdcache_lock);
SPIN_INIT(&poll_lock); HA_SPIN_INIT(&poll_lock);
do { do {
bp = NULL; bp = NULL;
for (p = 0; p < nbpollers; p++) for (p = 0; p < nbpollers; p++)
@ -367,7 +367,7 @@ void deinit_pollers() {
int p; int p;
for (p = 0; p < global.maxsock; p++) for (p = 0; p < global.maxsock; p++)
SPIN_DESTROY(&fdtab[p].lock); HA_SPIN_DESTROY(&fdtab[p].lock);
for (p = 0; p < nbpollers; p++) { for (p = 0; p < nbpollers; p++) {
bp = &pollers[p]; bp = &pollers[p];
@ -380,9 +380,9 @@ void deinit_pollers() {
free(fdinfo); fdinfo = NULL; free(fdinfo); fdinfo = NULL;
free(fdtab); fdtab = NULL; free(fdtab); fdtab = NULL;
SPIN_DESTROY(&fdtab_lock); HA_SPIN_DESTROY(&fdtab_lock);
RWLOCK_DESTROY(&fdcache_lock); HA_RWLOCK_DESTROY(&fdcache_lock);
SPIN_DESTROY(&poll_lock); HA_SPIN_DESTROY(&poll_lock);
} }
/* /*

View File

@ -171,7 +171,7 @@ spoe_release_agent(struct spoe_agent *agent)
spoe_release_group(grp); spoe_release_group(grp);
} }
for (i = 0; i < global.nbthread; ++i) for (i = 0; i < global.nbthread; ++i)
SPIN_DESTROY(&agent->rt[i].lock); HA_SPIN_DESTROY(&agent->rt[i].lock);
free(agent->rt); free(agent->rt);
free(agent); free(agent);
} }
@ -1426,10 +1426,10 @@ spoe_handle_connecting_appctx(struct appctx *appctx)
* add the applet in the list of running applets. */ * add the applet in the list of running applets. */
agent->rt[tid].applets_idle++; agent->rt[tid].applets_idle++;
appctx->st0 = SPOE_APPCTX_ST_IDLE; appctx->st0 = SPOE_APPCTX_ST_IDLE;
SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock); HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
LIST_DEL(&SPOE_APPCTX(appctx)->list); LIST_DEL(&SPOE_APPCTX(appctx)->list);
LIST_ADD(&agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list); LIST_ADD(&agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list);
SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock); HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
/* Update runtinme agent info */ /* Update runtinme agent info */
HA_ATOMIC_UPDATE_MIN(&agent->rt[tid].frame_size, SPOE_APPCTX(appctx)->max_frame_size); HA_ATOMIC_UPDATE_MIN(&agent->rt[tid].frame_size, SPOE_APPCTX(appctx)->max_frame_size);
@ -1710,10 +1710,10 @@ spoe_handle_processing_appctx(struct appctx *appctx)
agent->rt[tid].applets_idle++; agent->rt[tid].applets_idle++;
} }
if (fpa || (SPOE_APPCTX(appctx)->flags & SPOE_APPCTX_FL_PERSIST)) { if (fpa || (SPOE_APPCTX(appctx)->flags & SPOE_APPCTX_FL_PERSIST)) {
SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock); HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
LIST_DEL(&SPOE_APPCTX(appctx)->list); LIST_DEL(&SPOE_APPCTX(appctx)->list);
LIST_ADD(&agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list); LIST_ADD(&agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list);
SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock); HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
if (fpa) if (fpa)
SPOE_APPCTX(appctx)->task->expire = SPOE_APPCTX(appctx)->task->expire =
tick_add_ifset(now_ms, agent->timeout.idle); tick_add_ifset(now_ms, agent->timeout.idle);
@ -1985,9 +1985,9 @@ spoe_create_appctx(struct spoe_config *conf)
strm->do_log = NULL; strm->do_log = NULL;
strm->res.flags |= CF_READ_DONTWAIT; strm->res.flags |= CF_READ_DONTWAIT;
SPIN_LOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock); HA_SPIN_LOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
LIST_ADDQ(&conf->agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list); LIST_ADDQ(&conf->agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list);
SPIN_UNLOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock); HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
conf->agent->rt[tid].applets_act++; conf->agent->rt[tid].applets_act++;
task_wakeup(SPOE_APPCTX(appctx)->task, TASK_WOKEN_INIT); task_wakeup(SPOE_APPCTX(appctx)->task, TASK_WOKEN_INIT);
@ -2096,10 +2096,10 @@ spoe_queue_context(struct spoe_context *ctx)
appctx = spoe_appctx->owner; appctx = spoe_appctx->owner;
if (appctx->st0 == SPOE_APPCTX_ST_IDLE) { if (appctx->st0 == SPOE_APPCTX_ST_IDLE) {
spoe_wakeup_appctx(appctx); spoe_wakeup_appctx(appctx);
SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock); HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
LIST_DEL(&spoe_appctx->list); LIST_DEL(&spoe_appctx->list);
LIST_ADDQ(&agent->rt[tid].applets, &spoe_appctx->list); LIST_ADDQ(&agent->rt[tid].applets, &spoe_appctx->list);
SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock); HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
break; break;
} }
} }
@ -2699,18 +2699,18 @@ spoe_acquire_buffer(struct buffer **buf, struct buffer_wait *buffer_wait)
return 1; return 1;
if (!LIST_ISEMPTY(&buffer_wait->list)) { if (!LIST_ISEMPTY(&buffer_wait->list)) {
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_DEL(&buffer_wait->list); LIST_DEL(&buffer_wait->list);
LIST_INIT(&buffer_wait->list); LIST_INIT(&buffer_wait->list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
} }
if (b_alloc_margin(buf, global.tune.reserved_bufs)) if (b_alloc_margin(buf, global.tune.reserved_bufs))
return 1; return 1;
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_ADDQ(&buffer_wq, &buffer_wait->list); LIST_ADDQ(&buffer_wq, &buffer_wait->list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
return 0; return 0;
} }
@ -2718,10 +2718,10 @@ static void
spoe_release_buffer(struct buffer **buf, struct buffer_wait *buffer_wait) spoe_release_buffer(struct buffer **buf, struct buffer_wait *buffer_wait)
{ {
if (!LIST_ISEMPTY(&buffer_wait->list)) { if (!LIST_ISEMPTY(&buffer_wait->list)) {
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_DEL(&buffer_wait->list); LIST_DEL(&buffer_wait->list);
LIST_INIT(&buffer_wait->list); LIST_INIT(&buffer_wait->list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
} }
/* Release the buffer if needed */ /* Release the buffer if needed */
@ -2813,10 +2813,10 @@ spoe_sig_stop(struct sig_handler *sh)
agent = conf->agent; agent = conf->agent;
for (i = 0; i < global.nbthread; ++i) { for (i = 0; i < global.nbthread; ++i) {
SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock); HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
list_for_each_entry(spoe_appctx, &agent->rt[i].applets, list) list_for_each_entry(spoe_appctx, &agent->rt[i].applets, list)
spoe_wakeup_appctx(spoe_appctx->owner); spoe_wakeup_appctx(spoe_appctx->owner);
SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock); HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
} }
} }
p = p->next; p = p->next;
@ -3221,7 +3221,7 @@ cfg_parse_spoe_agent(const char *file, int linenum, char **args, int kwm)
LIST_INIT(&curagent->rt[i].applets); LIST_INIT(&curagent->rt[i].applets);
LIST_INIT(&curagent->rt[i].sending_queue); LIST_INIT(&curagent->rt[i].sending_queue);
LIST_INIT(&curagent->rt[i].waiting_queue); LIST_INIT(&curagent->rt[i].waiting_queue);
SPIN_INIT(&curagent->rt[i].lock); HA_SPIN_INIT(&curagent->rt[i].lock);
} }
} }
else if (!strcmp(args[0], "use-backend")) { else if (!strcmp(args[0], "use-backend")) {

View File

@ -2075,7 +2075,7 @@ void deinit(void)
if (xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->destroy_srv) if (xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->destroy_srv)
xprt_get(XPRT_SSL)->destroy_srv(s); xprt_get(XPRT_SSL)->destroy_srv(s);
} }
SPIN_DESTROY(&s->lock); HA_SPIN_DESTROY(&s->lock);
free(s); free(s);
s = s_next; s = s_next;
}/* end while(s) */ }/* end while(s) */
@ -2124,8 +2124,8 @@ void deinit(void)
p0 = p; p0 = p;
p = p->next; p = p->next;
SPIN_DESTROY(&p0->lbprm.lock); HA_SPIN_DESTROY(&p0->lbprm.lock);
SPIN_DESTROY(&p0->lock); HA_SPIN_DESTROY(&p0->lock);
free(p0); free(p0);
}/* end while(p) */ }/* end while(p) */

View File

@ -120,7 +120,7 @@ void thread_enter_sync()
thread_sync_barrier(&barrier); thread_sync_barrier(&barrier);
if (threads_want_sync & tid_bit) if (threads_want_sync & tid_bit)
SPIN_LOCK(THREAD_SYNC_LOCK, &sync_lock); HA_SPIN_LOCK(THREAD_SYNC_LOCK, &sync_lock);
} }
/* Exit from the sync point and unlock it if it was previously locked. If the /* Exit from the sync point and unlock it if it was previously locked. If the
@ -135,7 +135,7 @@ void thread_exit_sync()
return; return;
if (threads_want_sync & tid_bit) if (threads_want_sync & tid_bit)
SPIN_UNLOCK(THREAD_SYNC_LOCK, &sync_lock); HA_SPIN_UNLOCK(THREAD_SYNC_LOCK, &sync_lock);
if (HA_ATOMIC_AND(&threads_want_sync, ~tid_bit) == 0) { if (HA_ATOMIC_AND(&threads_want_sync, ~tid_bit) == 0) {
char c; char c;
@ -151,7 +151,7 @@ void thread_exit_sync()
__attribute__((constructor)) __attribute__((constructor))
static void __hathreads_init(void) static void __hathreads_init(void)
{ {
SPIN_INIT(&sync_lock); HA_SPIN_INIT(&sync_lock);
#if defined(DEBUG_THREAD) || defined(DEBUG_FULL) #if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
memset(lock_stats, 0, sizeof(lock_stats)); memset(lock_stats, 0, sizeof(lock_stats));
#endif #endif

View File

@ -125,11 +125,11 @@ static int hlua_panic_ljmp(lua_State *L) { longjmp(safe_ljmp_env, 1); }
#define SET_SAFE_LJMP(__L) \ #define SET_SAFE_LJMP(__L) \
({ \ ({ \
int ret; \ int ret; \
SPIN_LOCK(LUA_LOCK, &hlua_global_lock); \ HA_SPIN_LOCK(LUA_LOCK, &hlua_global_lock); \
if (setjmp(safe_ljmp_env) != 0) { \ if (setjmp(safe_ljmp_env) != 0) { \
lua_atpanic(__L, hlua_panic_safe); \ lua_atpanic(__L, hlua_panic_safe); \
ret = 0; \ ret = 0; \
SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \ HA_SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \
} else { \ } else { \
lua_atpanic(__L, hlua_panic_ljmp); \ lua_atpanic(__L, hlua_panic_ljmp); \
ret = 1; \ ret = 1; \
@ -143,7 +143,7 @@ static int hlua_panic_ljmp(lua_State *L) { longjmp(safe_ljmp_env, 1); }
#define RESET_SAFE_LJMP(__L) \ #define RESET_SAFE_LJMP(__L) \
do { \ do { \
lua_atpanic(__L, hlua_panic_safe); \ lua_atpanic(__L, hlua_panic_safe); \
SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \ HA_SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); \
} while(0) } while(0)
/* Applet status flags */ /* Applet status flags */
@ -994,7 +994,7 @@ static enum hlua_exec hlua_ctx_resume(struct hlua *lua, int yield_allowed)
/* Lock the whole Lua execution. This lock must be before the /* Lock the whole Lua execution. This lock must be before the
* label "resume_execution". * label "resume_execution".
*/ */
SPIN_LOCK(LUA_LOCK, &hlua_global_lock); HA_SPIN_LOCK(LUA_LOCK, &hlua_global_lock);
resume_execution: resume_execution:
@ -1154,7 +1154,7 @@ static enum hlua_exec hlua_ctx_resume(struct hlua *lua, int yield_allowed)
} }
/* This is the main exit point, remove the Lua lock. */ /* This is the main exit point, remove the Lua lock. */
SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock); HA_SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock);
return ret; return ret;
} }
@ -7370,7 +7370,7 @@ void hlua_init(void)
}; };
#endif #endif
SPIN_INIT(&hlua_global_lock); HA_SPIN_INIT(&hlua_global_lock);
/* Initialise struct hlua and com signals pool */ /* Initialise struct hlua and com signals pool */
pool2_hlua = create_pool("hlua", sizeof(struct hlua), MEM_F_SHARED); pool2_hlua = create_pool("hlua", sizeof(struct hlua), MEM_F_SHARED);

View File

@ -587,9 +587,9 @@ int hlua_server_set_weight(lua_State *L)
srv = hlua_check_server(L, 1); srv = hlua_check_server(L, 1);
weight = luaL_checkstring(L, 2); weight = luaL_checkstring(L, 2);
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
err = server_parse_weight_change_request(srv, weight); err = server_parse_weight_change_request(srv, weight);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
if (!err) if (!err)
lua_pushnil(L); lua_pushnil(L);
else else
@ -615,9 +615,9 @@ int hlua_server_set_addr(lua_State *L)
srv = hlua_check_server(L, 1); srv = hlua_check_server(L, 1);
addr = luaL_checkstring(L, 2); addr = luaL_checkstring(L, 2);
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
err = server_parse_addr_change_request(srv, addr, "Lua script"); err = server_parse_addr_change_request(srv, addr, "Lua script");
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
if (!err) if (!err)
lua_pushnil(L); lua_pushnil(L);
else else
@ -630,9 +630,9 @@ int hlua_server_shut_sess(lua_State *L)
struct server *srv; struct server *srv;
srv = hlua_check_server(L, 1); srv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_shutdown_streams(srv, SF_ERR_KILLED); srv_shutdown_streams(srv, SF_ERR_KILLED);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
return 0; return 0;
} }
@ -641,9 +641,9 @@ int hlua_server_set_drain(lua_State *L)
struct server *srv; struct server *srv;
srv = hlua_check_server(L, 1); srv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_adm_set_drain(srv); srv_adm_set_drain(srv);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
return 0; return 0;
} }
@ -652,9 +652,9 @@ int hlua_server_set_maint(lua_State *L)
struct server *srv; struct server *srv;
srv = hlua_check_server(L, 1); srv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_adm_set_maint(srv); srv_adm_set_maint(srv);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
return 0; return 0;
} }
@ -663,9 +663,9 @@ int hlua_server_set_ready(lua_State *L)
struct server *srv; struct server *srv;
srv = hlua_check_server(L, 1); srv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_adm_set_ready(srv); srv_adm_set_ready(srv);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
return 0; return 0;
} }
@ -674,11 +674,11 @@ int hlua_server_check_enable(lua_State *L)
struct server *sv; struct server *sv;
sv = hlua_check_server(L, 1); sv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
if (sv->check.state & CHK_ST_CONFIGURED) { if (sv->check.state & CHK_ST_CONFIGURED) {
sv->check.state |= CHK_ST_ENABLED; sv->check.state |= CHK_ST_ENABLED;
} }
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 0; return 0;
} }
@ -687,11 +687,11 @@ int hlua_server_check_disable(lua_State *L)
struct server *sv; struct server *sv;
sv = hlua_check_server(L, 1); sv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
if (sv->check.state & CHK_ST_CONFIGURED) { if (sv->check.state & CHK_ST_CONFIGURED) {
sv->check.state &= ~CHK_ST_ENABLED; sv->check.state &= ~CHK_ST_ENABLED;
} }
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 0; return 0;
} }
@ -700,12 +700,12 @@ int hlua_server_check_force_up(lua_State *L)
struct server *sv; struct server *sv;
sv = hlua_check_server(L, 1); sv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
if (!(sv->track)) { if (!(sv->track)) {
sv->check.health = sv->check.rise + sv->check.fall - 1; sv->check.health = sv->check.rise + sv->check.fall - 1;
srv_set_running(sv, "changed from Lua script", NULL); srv_set_running(sv, "changed from Lua script", NULL);
} }
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 0; return 0;
} }
@ -714,12 +714,12 @@ int hlua_server_check_force_nolb(lua_State *L)
struct server *sv; struct server *sv;
sv = hlua_check_server(L, 1); sv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
if (!(sv->track)) { if (!(sv->track)) {
sv->check.health = sv->check.rise + sv->check.fall - 1; sv->check.health = sv->check.rise + sv->check.fall - 1;
srv_set_stopping(sv, "changed from Lua script", NULL); srv_set_stopping(sv, "changed from Lua script", NULL);
} }
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 0; return 0;
} }
@ -728,12 +728,12 @@ int hlua_server_check_force_down(lua_State *L)
struct server *sv; struct server *sv;
sv = hlua_check_server(L, 1); sv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
if (!(sv->track)) { if (!(sv->track)) {
sv->check.health = 0; sv->check.health = 0;
srv_set_stopped(sv, "changed from Lua script", NULL); srv_set_stopped(sv, "changed from Lua script", NULL);
} }
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 0; return 0;
} }
@ -742,11 +742,11 @@ int hlua_server_agent_enable(lua_State *L)
struct server *sv; struct server *sv;
sv = hlua_check_server(L, 1); sv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
if (sv->agent.state & CHK_ST_CONFIGURED) { if (sv->agent.state & CHK_ST_CONFIGURED) {
sv->agent.state |= CHK_ST_ENABLED; sv->agent.state |= CHK_ST_ENABLED;
} }
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 0; return 0;
} }
@ -755,11 +755,11 @@ int hlua_server_agent_disable(lua_State *L)
struct server *sv; struct server *sv;
sv = hlua_check_server(L, 1); sv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
if (sv->agent.state & CHK_ST_CONFIGURED) { if (sv->agent.state & CHK_ST_CONFIGURED) {
sv->agent.state &= ~CHK_ST_ENABLED; sv->agent.state &= ~CHK_ST_ENABLED;
} }
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 0; return 0;
} }
@ -768,12 +768,12 @@ int hlua_server_agent_force_up(lua_State *L)
struct server *sv; struct server *sv;
sv = hlua_check_server(L, 1); sv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
if (sv->agent.state & CHK_ST_ENABLED) { if (sv->agent.state & CHK_ST_ENABLED) {
sv->agent.health = sv->agent.rise + sv->agent.fall - 1; sv->agent.health = sv->agent.rise + sv->agent.fall - 1;
srv_set_running(sv, "changed from Lua script", NULL); srv_set_running(sv, "changed from Lua script", NULL);
} }
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 0; return 0;
} }
@ -782,12 +782,12 @@ int hlua_server_agent_force_down(lua_State *L)
struct server *sv; struct server *sv;
sv = hlua_check_server(L, 1); sv = hlua_check_server(L, 1);
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
if (sv->agent.state & CHK_ST_ENABLED) { if (sv->agent.state & CHK_ST_ENABLED) {
sv->agent.health = 0; sv->agent.health = 0;
srv_set_stopped(sv, "changed from Lua script", NULL); srv_set_stopped(sv, "changed from Lua script", NULL);
} }
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 0; return 0;
} }

View File

@ -364,7 +364,7 @@ struct server *chash_get_next_server(struct proxy *p, struct server *srvtoavoid)
srv = avoided = NULL; srv = avoided = NULL;
avoided_node = NULL; avoided_node = NULL;
SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock); HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act) if (p->srv_act)
root = &p->lbprm.chash.act; root = &p->lbprm.chash.act;
else if (p->lbprm.fbck) { else if (p->lbprm.fbck) {
@ -423,7 +423,7 @@ struct server *chash_get_next_server(struct proxy *p, struct server *srvtoavoid)
} }
out: out:
SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock); HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv; return srv;
} }

View File

@ -64,10 +64,10 @@ static void fas_srv_reposition(struct server *s)
if (!s->lb_tree) if (!s->lb_tree)
return; return;
SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock); HA_SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
fas_dequeue_srv(s); fas_dequeue_srv(s);
fas_queue_srv(s); fas_queue_srv(s);
SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock); HA_SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
} }
/* This function updates the server trees according to server <srv>'s new /* This function updates the server trees according to server <srv>'s new
@ -277,7 +277,7 @@ struct server *fas_get_next_server(struct proxy *p, struct server *srvtoavoid)
srv = avoided = NULL; srv = avoided = NULL;
SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock); HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act) if (p->srv_act)
node = eb32_first(&p->lbprm.fas.act); node = eb32_first(&p->lbprm.fas.act);
else if (p->lbprm.fbck) { else if (p->lbprm.fbck) {
@ -313,7 +313,7 @@ struct server *fas_get_next_server(struct proxy *p, struct server *srvtoavoid)
if (!srv) if (!srv)
srv = avoided; srv = avoided;
out: out:
SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock); HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv; return srv;
} }

View File

@ -56,10 +56,10 @@ static void fwlc_srv_reposition(struct server *s)
if (!s->lb_tree) if (!s->lb_tree)
return; return;
SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock); HA_SPIN_LOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
fwlc_dequeue_srv(s); fwlc_dequeue_srv(s);
fwlc_queue_srv(s); fwlc_queue_srv(s);
SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock); HA_SPIN_UNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
} }
/* This function updates the server trees according to server <srv>'s new /* This function updates the server trees according to server <srv>'s new
@ -269,7 +269,7 @@ struct server *fwlc_get_next_server(struct proxy *p, struct server *srvtoavoid)
srv = avoided = NULL; srv = avoided = NULL;
SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock); HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act) if (p->srv_act)
node = eb32_first(&p->lbprm.fwlc.act); node = eb32_first(&p->lbprm.fwlc.act);
else if (p->lbprm.fbck) { else if (p->lbprm.fbck) {
@ -305,7 +305,7 @@ struct server *fwlc_get_next_server(struct proxy *p, struct server *srvtoavoid)
if (!srv) if (!srv)
srv = avoided; srv = avoided;
out: out:
SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock); HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv; return srv;
} }

View File

@ -470,7 +470,7 @@ struct server *fwrr_get_next_server(struct proxy *p, struct server *srvtoavoid)
struct fwrr_group *grp; struct fwrr_group *grp;
int switched; int switched;
SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock); HA_SPIN_LOCK(LBPRM_LOCK, &p->lbprm.lock);
if (p->srv_act) if (p->srv_act)
grp = &p->lbprm.fwrr.act; grp = &p->lbprm.fwrr.act;
else if (p->lbprm.fbck) { else if (p->lbprm.fbck) {
@ -564,7 +564,7 @@ struct server *fwrr_get_next_server(struct proxy *p, struct server *srvtoavoid)
} }
} }
out: out:
SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock); HA_SPIN_UNLOCK(LBPRM_LOCK, &p->lbprm.lock);
return srv; return srv;
} }

View File

@ -208,7 +208,7 @@ struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid)
int newidx, avoididx; int newidx, avoididx;
struct server *srv, *avoided; struct server *srv, *avoided;
SPIN_LOCK(LBPRM_LOCK, &px->lbprm.lock); HA_SPIN_LOCK(LBPRM_LOCK, &px->lbprm.lock);
if (px->lbprm.tot_weight == 0) { if (px->lbprm.tot_weight == 0) {
avoided = NULL; avoided = NULL;
goto out; goto out;
@ -240,7 +240,7 @@ struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid)
px->lbprm.map.rr_idx = avoididx; px->lbprm.map.rr_idx = avoididx;
out: out:
SPIN_UNLOCK(LBPRM_LOCK, &px->lbprm.lock); HA_SPIN_UNLOCK(LBPRM_LOCK, &px->lbprm.lock);
/* return NULL or srvtoavoid if found */ /* return NULL or srvtoavoid if found */
return avoided; return avoided;
} }

View File

@ -60,7 +60,7 @@ static void __do_unbind_listener(struct listener *listener, int do_close);
*/ */
static void enable_listener(struct listener *listener) static void enable_listener(struct listener *listener)
{ {
SPIN_LOCK(LISTENER_LOCK, &listener->lock); HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
if (listener->state == LI_LISTEN) { if (listener->state == LI_LISTEN) {
if ((global.mode & (MODE_DAEMON | MODE_MWORKER)) && if ((global.mode & (MODE_DAEMON | MODE_MWORKER)) &&
listener->bind_conf->bind_proc && listener->bind_conf->bind_proc &&
@ -83,7 +83,7 @@ static void enable_listener(struct listener *listener)
listener->state = LI_FULL; listener->state = LI_FULL;
} }
} }
SPIN_UNLOCK(LISTENER_LOCK, &listener->lock); HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
} }
/* This function removes the specified listener's file descriptor from the /* This function removes the specified listener's file descriptor from the
@ -92,19 +92,19 @@ static void enable_listener(struct listener *listener)
*/ */
static void disable_listener(struct listener *listener) static void disable_listener(struct listener *listener)
{ {
SPIN_LOCK(LISTENER_LOCK, &listener->lock); HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
if (listener->state < LI_READY) if (listener->state < LI_READY)
goto end; goto end;
if (listener->state == LI_READY) if (listener->state == LI_READY)
fd_stop_recv(listener->fd); fd_stop_recv(listener->fd);
if (listener->state == LI_LIMITED) { if (listener->state == LI_LIMITED) {
SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
LIST_DEL(&listener->wait_queue); LIST_DEL(&listener->wait_queue);
SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
} }
listener->state = LI_LISTEN; listener->state = LI_LISTEN;
end: end:
SPIN_UNLOCK(LISTENER_LOCK, &listener->lock); HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
} }
/* This function tries to temporarily disable a listener, depending on the OS /* This function tries to temporarily disable a listener, depending on the OS
@ -118,7 +118,7 @@ int pause_listener(struct listener *l)
{ {
int ret = 1; int ret = 1;
SPIN_LOCK(LISTENER_LOCK, &l->lock); HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);
if (l->state <= LI_ZOMBIE) if (l->state <= LI_ZOMBIE)
goto end; goto end;
@ -138,15 +138,15 @@ int pause_listener(struct listener *l)
} }
if (l->state == LI_LIMITED) { if (l->state == LI_LIMITED) {
SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
LIST_DEL(&l->wait_queue); LIST_DEL(&l->wait_queue);
SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
} }
fd_stop_recv(l->fd); fd_stop_recv(l->fd);
l->state = LI_PAUSED; l->state = LI_PAUSED;
end: end:
SPIN_UNLOCK(LISTENER_LOCK, &l->lock); HA_SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
return ret; return ret;
} }
@ -164,7 +164,7 @@ static int __resume_listener(struct listener *l)
{ {
int ret = 1; int ret = 1;
SPIN_LOCK(LISTENER_LOCK, &l->lock); HA_SPIN_LOCK(LISTENER_LOCK, &l->lock);
if ((global.mode & (MODE_DAEMON | MODE_MWORKER)) && if ((global.mode & (MODE_DAEMON | MODE_MWORKER)) &&
l->bind_conf->bind_proc && l->bind_conf->bind_proc &&
@ -213,7 +213,7 @@ static int __resume_listener(struct listener *l)
fd_want_recv(l->fd); fd_want_recv(l->fd);
l->state = LI_READY; l->state = LI_READY;
end: end:
SPIN_UNLOCK(LISTENER_LOCK, &l->lock); HA_SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
return ret; return ret;
} }
@ -221,9 +221,9 @@ int resume_listener(struct listener *l)
{ {
int ret; int ret;
SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
ret = __resume_listener(l); ret = __resume_listener(l);
SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
return ret; return ret;
} }
@ -237,9 +237,9 @@ static void listener_full(struct listener *l)
{ {
if (l->state >= LI_READY) { if (l->state >= LI_READY) {
if (l->state == LI_LIMITED) { if (l->state == LI_LIMITED) {
SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
LIST_DEL(&l->wait_queue); LIST_DEL(&l->wait_queue);
SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
} }
fd_stop_recv(l->fd); fd_stop_recv(l->fd);
@ -256,9 +256,9 @@ static void listener_full(struct listener *l)
static void limit_listener(struct listener *l, struct list *list) static void limit_listener(struct listener *l, struct list *list)
{ {
if (l->state == LI_READY) { if (l->state == LI_READY) {
SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
LIST_ADDQ(list, &l->wait_queue); LIST_ADDQ(list, &l->wait_queue);
SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
fd_stop_recv(l->fd); fd_stop_recv(l->fd);
l->state = LI_LIMITED; l->state = LI_LIMITED;
} }
@ -298,7 +298,7 @@ void dequeue_all_listeners(struct list *list)
{ {
struct listener *listener, *l_back; struct listener *listener, *l_back;
SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
list_for_each_entry_safe(listener, l_back, list, wait_queue) { list_for_each_entry_safe(listener, l_back, list, wait_queue) {
/* This cannot fail because the listeners are by definition in /* This cannot fail because the listeners are by definition in
* the LI_LIMITED state. The function also removes the entry * the LI_LIMITED state. The function also removes the entry
@ -306,7 +306,7 @@ void dequeue_all_listeners(struct list *list)
*/ */
__resume_listener(listener); __resume_listener(listener);
} }
SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
} }
/* must be called with the lock held */ /* must be called with the lock held */
@ -316,9 +316,9 @@ static void __do_unbind_listener(struct listener *listener, int do_close)
fd_stop_recv(listener->fd); fd_stop_recv(listener->fd);
if (listener->state == LI_LIMITED) { if (listener->state == LI_LIMITED) {
SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_LOCK(LISTENER_QUEUE_LOCK, &lq_lock);
LIST_DEL(&listener->wait_queue); LIST_DEL(&listener->wait_queue);
SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock); HA_SPIN_UNLOCK(LISTENER_QUEUE_LOCK, &lq_lock);
} }
if (listener->state >= LI_PAUSED) { if (listener->state >= LI_PAUSED) {
@ -334,9 +334,9 @@ static void __do_unbind_listener(struct listener *listener, int do_close)
static void do_unbind_listener(struct listener *listener, int do_close) static void do_unbind_listener(struct listener *listener, int do_close)
{ {
SPIN_LOCK(LISTENER_LOCK, &listener->lock); HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
__do_unbind_listener(listener, do_close); __do_unbind_listener(listener, do_close);
SPIN_UNLOCK(LISTENER_LOCK, &listener->lock); HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
} }
/* This function closes the listening socket for the specified listener, /* This function closes the listening socket for the specified listener,
@ -406,7 +406,7 @@ int create_listeners(struct bind_conf *bc, const struct sockaddr_storage *ss,
proto->add(l, port); proto->add(l, port);
SPIN_INIT(&l->lock); HA_SPIN_INIT(&l->lock);
HA_ATOMIC_ADD(&jobs, 1); HA_ATOMIC_ADD(&jobs, 1);
HA_ATOMIC_ADD(&listeners, 1); HA_ATOMIC_ADD(&listeners, 1);
} }
@ -424,13 +424,13 @@ void delete_listener(struct listener *listener)
if (listener->state != LI_ASSIGNED) if (listener->state != LI_ASSIGNED)
return; return;
SPIN_LOCK(LISTENER_LOCK, &listener->lock); HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
listener->state = LI_INIT; listener->state = LI_INIT;
LIST_DEL(&listener->proto_list); LIST_DEL(&listener->proto_list);
listener->proto->nb_listeners--; listener->proto->nb_listeners--;
HA_ATOMIC_SUB(&jobs, 1); HA_ATOMIC_SUB(&jobs, 1);
HA_ATOMIC_SUB(&listeners, 1); HA_ATOMIC_SUB(&listeners, 1);
SPIN_UNLOCK(LISTENER_LOCK, &listener->lock); HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
} }
/* This function is called on a read event from a listening socket, corresponding /* This function is called on a read event from a listening socket, corresponding
@ -449,7 +449,7 @@ void listener_accept(int fd)
static int accept4_broken; static int accept4_broken;
#endif #endif
if (SPIN_TRYLOCK(LISTENER_LOCK, &l->lock)) if (HA_SPIN_TRYLOCK(LISTENER_LOCK, &l->lock))
return; return;
if (unlikely(l->nbconn >= l->maxconn)) { if (unlikely(l->nbconn >= l->maxconn)) {
@ -657,7 +657,7 @@ void listener_accept(int fd)
limit_listener(l, &global_listener_queue); limit_listener(l, &global_listener_queue);
task_schedule(global_listener_queue_task, tick_first(expire, global_listener_queue_task->expire)); task_schedule(global_listener_queue_task, tick_first(expire, global_listener_queue_task->expire));
end: end:
SPIN_UNLOCK(LISTENER_LOCK, &l->lock); HA_SPIN_UNLOCK(LISTENER_LOCK, &l->lock);
} }
/* Notify the listener that a connection initiated from it was released. This /* Notify the listener that a connection initiated from it was released. This
@ -1019,7 +1019,7 @@ static void __listener_init(void)
sample_register_fetches(&smp_kws); sample_register_fetches(&smp_kws);
acl_register_keywords(&acl_kws); acl_register_keywords(&acl_kws);
bind_register_keywords(&bind_kws); bind_register_keywords(&bind_kws);
SPIN_INIT(&lq_lock); HA_SPIN_INIT(&lq_lock);
} }
/* /*

View File

@ -325,16 +325,16 @@ static int cli_io_handler_pat_list(struct appctx *appctx)
* this pointer. We know we have reached the end when this * this pointer. We know we have reached the end when this
* pointer points back to the head of the streams list. * pointer points back to the head of the streams list.
*/ */
SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
LIST_INIT(&appctx->ctx.map.bref.users); LIST_INIT(&appctx->ctx.map.bref.users);
appctx->ctx.map.bref.ref = appctx->ctx.map.ref->head.n; appctx->ctx.map.bref.ref = appctx->ctx.map.ref->head.n;
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
appctx->st2 = STAT_ST_LIST; appctx->st2 = STAT_ST_LIST;
/* fall through */ /* fall through */
case STAT_ST_LIST: case STAT_ST_LIST:
SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users)) { if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users)) {
LIST_DEL(&appctx->ctx.map.bref.users); LIST_DEL(&appctx->ctx.map.bref.users);
@ -360,7 +360,7 @@ static int cli_io_handler_pat_list(struct appctx *appctx)
* this stream's users so that it can remove us upon termination. * this stream's users so that it can remove us upon termination.
*/ */
LIST_ADDQ(&elt->back_refs, &appctx->ctx.map.bref.users); LIST_ADDQ(&elt->back_refs, &appctx->ctx.map.bref.users);
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
si_applet_cant_put(si); si_applet_cant_put(si);
return 0; return 0;
} }
@ -368,7 +368,7 @@ static int cli_io_handler_pat_list(struct appctx *appctx)
/* get next list entry and check the end of the list */ /* get next list entry and check the end of the list */
appctx->ctx.map.bref.ref = elt->list.n; appctx->ctx.map.bref.ref = elt->list.n;
} }
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
appctx->st2 = STAT_ST_FIN; appctx->st2 = STAT_ST_FIN;
/* fall through */ /* fall through */
@ -456,7 +456,7 @@ static int cli_io_handler_map_lookup(struct appctx *appctx)
/* fall through */ /* fall through */
case STAT_ST_LIST: case STAT_ST_LIST:
SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
/* for each lookup type */ /* for each lookup type */
while (appctx->ctx.map.expr) { while (appctx->ctx.map.expr) {
/* initialise chunk to build new message */ /* initialise chunk to build new message */
@ -542,7 +542,7 @@ static int cli_io_handler_map_lookup(struct appctx *appctx)
/* let's try again later from this stream. We add ourselves into /* let's try again later from this stream. We add ourselves into
* this stream's users so that it can remove us upon termination. * this stream's users so that it can remove us upon termination.
*/ */
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
si_applet_cant_put(si); si_applet_cant_put(si);
return 0; return 0;
} }
@ -551,7 +551,7 @@ static int cli_io_handler_map_lookup(struct appctx *appctx)
appctx->ctx.map.expr = pat_expr_get_next(appctx->ctx.map.expr, appctx->ctx.map.expr = pat_expr_get_next(appctx->ctx.map.expr,
&appctx->ctx.map.ref->pat); &appctx->ctx.map.ref->pat);
} }
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
appctx->st2 = STAT_ST_FIN; appctx->st2 = STAT_ST_FIN;
/* fall through */ /* fall through */
@ -628,10 +628,10 @@ static int cli_parse_get_map(char **args, struct appctx *appctx, void *private)
static void cli_release_show_map(struct appctx *appctx) static void cli_release_show_map(struct appctx *appctx)
{ {
if (appctx->st2 == STAT_ST_LIST) { if (appctx->st2 == STAT_ST_LIST) {
SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users)) if (!LIST_ISEMPTY(&appctx->ctx.map.bref.users))
LIST_DEL(&appctx->ctx.map.bref.users); LIST_DEL(&appctx->ctx.map.bref.users);
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
} }
} }
@ -728,32 +728,32 @@ static int cli_parse_set_map(char **args, struct appctx *appctx, void *private)
/* Try to delete the entry. */ /* Try to delete the entry. */
err = NULL; err = NULL;
SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!pat_ref_set_by_id(appctx->ctx.map.ref, ref, args[4], &err)) { if (!pat_ref_set_by_id(appctx->ctx.map.ref, ref, args[4], &err)) {
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (err) if (err)
memprintf(&err, "%s.\n", err); memprintf(&err, "%s.\n", err);
appctx->ctx.cli.err = err; appctx->ctx.cli.err = err;
appctx->st0 = CLI_ST_PRINT_FREE; appctx->st0 = CLI_ST_PRINT_FREE;
return 1; return 1;
} }
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
} }
else { else {
/* Else, use the entry identifier as pattern /* Else, use the entry identifier as pattern
* string, and update the value. * string, and update the value.
*/ */
err = NULL; err = NULL;
SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!pat_ref_set(appctx->ctx.map.ref, args[3], args[4], &err)) { if (!pat_ref_set(appctx->ctx.map.ref, args[3], args[4], &err)) {
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (err) if (err)
memprintf(&err, "%s.\n", err); memprintf(&err, "%s.\n", err);
appctx->ctx.cli.err = err; appctx->ctx.cli.err = err;
appctx->st0 = CLI_ST_PRINT_FREE; appctx->st0 = CLI_ST_PRINT_FREE;
return 1; return 1;
} }
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
} }
/* The set is done, send message. */ /* The set is done, send message. */
@ -825,12 +825,12 @@ static int cli_parse_add_map(char **args, struct appctx *appctx, void *private)
/* Add value. */ /* Add value. */
err = NULL; err = NULL;
SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (appctx->ctx.map.display_flags == PAT_REF_MAP) if (appctx->ctx.map.display_flags == PAT_REF_MAP)
ret = pat_ref_add(appctx->ctx.map.ref, args[3], args[4], &err); ret = pat_ref_add(appctx->ctx.map.ref, args[3], args[4], &err);
else else
ret = pat_ref_add(appctx->ctx.map.ref, args[3], NULL, &err); ret = pat_ref_add(appctx->ctx.map.ref, args[3], NULL, &err);
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!ret) { if (!ret) {
if (err) if (err)
memprintf(&err, "%s.\n", err); memprintf(&err, "%s.\n", err);
@ -910,31 +910,31 @@ static int cli_parse_del_map(char **args, struct appctx *appctx, void *private)
} }
/* Try to delete the entry. */ /* Try to delete the entry. */
SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!pat_ref_delete_by_id(appctx->ctx.map.ref, ref)) { if (!pat_ref_delete_by_id(appctx->ctx.map.ref, ref)) {
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
/* The entry is not found, send message. */ /* The entry is not found, send message. */
appctx->ctx.cli.severity = LOG_ERR; appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "Key not found.\n"; appctx->ctx.cli.msg = "Key not found.\n";
appctx->st0 = CLI_ST_PRINT; appctx->st0 = CLI_ST_PRINT;
return 1; return 1;
} }
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
} }
else { else {
/* Else, use the entry identifier as pattern /* Else, use the entry identifier as pattern
* string and try to delete the entry. * string and try to delete the entry.
*/ */
SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
if (!pat_ref_delete(appctx->ctx.map.ref, args[3])) { if (!pat_ref_delete(appctx->ctx.map.ref, args[3])) {
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
/* The entry is not found, send message. */ /* The entry is not found, send message. */
appctx->ctx.cli.severity = LOG_ERR; appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "Key not found.\n"; appctx->ctx.cli.msg = "Key not found.\n";
appctx->st0 = CLI_ST_PRINT; appctx->st0 = CLI_ST_PRINT;
return 1; return 1;
} }
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
} }
/* The deletion is done, send message. */ /* The deletion is done, send message. */
@ -983,9 +983,9 @@ static int cli_parse_clear_map(char **args, struct appctx *appctx, void *private
} }
/* Clear all. */ /* Clear all. */
SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
pat_ref_prune(appctx->ctx.map.ref); pat_ref_prune(appctx->ctx.map.ref);
SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &appctx->ctx.map.ref->lock);
/* return response */ /* return response */
appctx->st0 = CLI_ST_PROMPT; appctx->st0 = CLI_ST_PROMPT;

View File

@ -93,7 +93,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
LIST_ADDQ(start, &pool->list); LIST_ADDQ(start, &pool->list);
} }
pool->users++; pool->users++;
SPIN_INIT(&pool->lock); HA_SPIN_INIT(&pool->lock);
return pool; return pool;
} }
@ -143,9 +143,9 @@ void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{ {
void *ptr; void *ptr;
SPIN_LOCK(POOL_LOCK, &pool->lock); HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
ptr = __pool_refill_alloc(pool, avail); ptr = __pool_refill_alloc(pool, avail);
SPIN_UNLOCK(POOL_LOCK, &pool->lock); HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
return ptr; return ptr;
} }
/* /*
@ -157,7 +157,7 @@ void pool_flush2(struct pool_head *pool)
if (!pool) if (!pool)
return; return;
SPIN_LOCK(POOL_LOCK, &pool->lock); HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
next = pool->free_list; next = pool->free_list;
while (next) { while (next) {
temp = next; temp = next;
@ -166,7 +166,7 @@ void pool_flush2(struct pool_head *pool)
free(temp); free(temp);
} }
pool->free_list = next; pool->free_list = next;
SPIN_UNLOCK(POOL_LOCK, &pool->lock); HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
/* here, we should have pool->allocate == pool->used */ /* here, we should have pool->allocate == pool->used */
} }
@ -192,7 +192,7 @@ void pool_gc2(struct pool_head *pool_ctx)
void *temp, *next; void *temp, *next;
//qfprintf(stderr, "Flushing pool %s\n", entry->name); //qfprintf(stderr, "Flushing pool %s\n", entry->name);
if (entry != pool_ctx) if (entry != pool_ctx)
SPIN_LOCK(POOL_LOCK, &entry->lock); HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
next = entry->free_list; next = entry->free_list;
while (next && while (next &&
(int)(entry->allocated - entry->used) > (int)entry->minavail) { (int)(entry->allocated - entry->used) > (int)entry->minavail) {
@ -203,7 +203,7 @@ void pool_gc2(struct pool_head *pool_ctx)
} }
entry->free_list = next; entry->free_list = next;
if (entry != pool_ctx) if (entry != pool_ctx)
SPIN_UNLOCK(POOL_LOCK, &entry->lock); HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
} }
HA_ATOMIC_STORE(&recurse, 0); HA_ATOMIC_STORE(&recurse, 0);
@ -225,7 +225,7 @@ void *pool_destroy2(struct pool_head *pool)
pool->users--; pool->users--;
if (!pool->users) { if (!pool->users) {
LIST_DEL(&pool->list); LIST_DEL(&pool->list);
SPIN_DESTROY(&pool->lock); HA_SPIN_DESTROY(&pool->lock);
free(pool); free(pool);
} }
} }
@ -242,7 +242,7 @@ void dump_pools_to_trash()
allocated = used = nbpools = 0; allocated = used = nbpools = 0;
chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n"); chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
list_for_each_entry(entry, &pools, list) { list_for_each_entry(entry, &pools, list) {
SPIN_LOCK(POOL_LOCK, &entry->lock); HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
chunk_appendf(&trash, " - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n", chunk_appendf(&trash, " - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n",
entry->name, entry->size, entry->allocated, entry->name, entry->size, entry->allocated,
entry->size * entry->allocated, entry->used, entry->failed, entry->size * entry->allocated, entry->used, entry->failed,
@ -251,7 +251,7 @@ void dump_pools_to_trash()
allocated += entry->allocated * entry->size; allocated += entry->allocated * entry->size;
used += entry->used * entry->size; used += entry->used * entry->size;
nbpools++; nbpools++;
SPIN_UNLOCK(POOL_LOCK, &entry->lock); HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
} }
chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n", chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
nbpools, allocated, used); nbpools, allocated, used);

View File

@ -233,9 +233,9 @@ static inline struct buffer *h2_get_dbuf(struct h2c *h2c)
unlikely((buf = b_alloc_margin(&h2c->dbuf, 0)) == NULL)) { unlikely((buf = b_alloc_margin(&h2c->dbuf, 0)) == NULL)) {
h2c->dbuf_wait.target = h2c->conn; h2c->dbuf_wait.target = h2c->conn;
h2c->dbuf_wait.wakeup_cb = h2_dbuf_available; h2c->dbuf_wait.wakeup_cb = h2_dbuf_available;
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_ADDQ(&buffer_wq, &h2c->dbuf_wait.list); LIST_ADDQ(&buffer_wq, &h2c->dbuf_wait.list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
__conn_xprt_stop_recv(h2c->conn); __conn_xprt_stop_recv(h2c->conn);
} }
return buf; return buf;
@ -289,9 +289,9 @@ static inline struct buffer *h2_get_mbuf(struct h2c *h2c)
unlikely((buf = b_alloc_margin(&h2c->mbuf, 0)) == NULL)) { unlikely((buf = b_alloc_margin(&h2c->mbuf, 0)) == NULL)) {
h2c->mbuf_wait.target = h2c; h2c->mbuf_wait.target = h2c;
h2c->mbuf_wait.wakeup_cb = h2_mbuf_available; h2c->mbuf_wait.wakeup_cb = h2_mbuf_available;
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_ADDQ(&buffer_wq, &h2c->mbuf_wait.list); LIST_ADDQ(&buffer_wq, &h2c->mbuf_wait.list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
/* FIXME: we should in fact only block the direction being /* FIXME: we should in fact only block the direction being
* currently used. For now it will be enough like this. * currently used. For now it will be enough like this.
@ -425,14 +425,14 @@ static void h2_release(struct connection *conn)
if (h2c) { if (h2c) {
hpack_dht_free(h2c->ddht); hpack_dht_free(h2c->ddht);
h2_release_dbuf(h2c); h2_release_dbuf(h2c);
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_DEL(&h2c->dbuf_wait.list); LIST_DEL(&h2c->dbuf_wait.list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
h2_release_mbuf(h2c); h2_release_mbuf(h2c);
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_DEL(&h2c->mbuf_wait.list); LIST_DEL(&h2c->mbuf_wait.list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
if (h2c->task) { if (h2c->task) {
task_delete(h2c->task); task_delete(h2c->task);

View File

@ -489,15 +489,15 @@ struct pattern *pat_match_str(struct sample *smp, struct pattern_expr *expr, int
if (pat_lru_tree) { if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr; unsigned long long seed = pat_lru_seed ^ (long)expr;
SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed), lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision); pat_lru_tree, expr, expr->revision);
if (!lru) { if (!lru) {
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
else if (lru->domain) { else if (lru->domain) {
ret = lru->data; ret = lru->data;
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
return ret; return ret;
} }
} }
@ -519,7 +519,7 @@ struct pattern *pat_match_str(struct sample *smp, struct pattern_expr *expr, int
if (lru) { if (lru) {
lru64_commit(lru, ret, expr, expr->revision, NULL); lru64_commit(lru, ret, expr, expr->revision, NULL);
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
return ret; return ret;
@ -536,15 +536,15 @@ struct pattern *pat_match_bin(struct sample *smp, struct pattern_expr *expr, int
if (pat_lru_tree) { if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr; unsigned long long seed = pat_lru_seed ^ (long)expr;
SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed), lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision); pat_lru_tree, expr, expr->revision);
if (!lru) { if (!lru) {
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
else if (lru->domain) { else if (lru->domain) {
ret = lru->data; ret = lru->data;
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
return ret; return ret;
} }
} }
@ -563,7 +563,7 @@ struct pattern *pat_match_bin(struct sample *smp, struct pattern_expr *expr, int
if (lru) { if (lru) {
lru64_commit(lru, ret, expr, expr->revision, NULL); lru64_commit(lru, ret, expr, expr->revision, NULL);
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
return ret; return ret;
@ -606,15 +606,15 @@ struct pattern *pat_match_reg(struct sample *smp, struct pattern_expr *expr, int
if (pat_lru_tree) { if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr; unsigned long long seed = pat_lru_seed ^ (long)expr;
SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed), lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision); pat_lru_tree, expr, expr->revision);
if (!lru) { if (!lru) {
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
else if (lru->domain) { else if (lru->domain) {
ret = lru->data; ret = lru->data;
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
return ret; return ret;
} }
} }
@ -630,7 +630,7 @@ struct pattern *pat_match_reg(struct sample *smp, struct pattern_expr *expr, int
if (lru) { if (lru) {
lru64_commit(lru, ret, expr, expr->revision, NULL); lru64_commit(lru, ret, expr, expr->revision, NULL);
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
return ret; return ret;
@ -675,15 +675,15 @@ struct pattern *pat_match_beg(struct sample *smp, struct pattern_expr *expr, int
if (pat_lru_tree) { if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr; unsigned long long seed = pat_lru_seed ^ (long)expr;
SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed), lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision); pat_lru_tree, expr, expr->revision);
if (!lru) { if (!lru) {
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
else if (lru->domain) { else if (lru->domain) {
ret = lru->data; ret = lru->data;
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
return ret; return ret;
} }
} }
@ -705,7 +705,7 @@ struct pattern *pat_match_beg(struct sample *smp, struct pattern_expr *expr, int
if (lru) { if (lru) {
lru64_commit(lru, ret, expr, expr->revision, NULL); lru64_commit(lru, ret, expr, expr->revision, NULL);
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
return ret; return ret;
@ -723,15 +723,15 @@ struct pattern *pat_match_end(struct sample *smp, struct pattern_expr *expr, int
if (pat_lru_tree) { if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr; unsigned long long seed = pat_lru_seed ^ (long)expr;
SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed), lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision); pat_lru_tree, expr, expr->revision);
if (!lru) { if (!lru) {
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
else if (lru->domain) { else if (lru->domain) {
ret = lru->data; ret = lru->data;
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
return ret; return ret;
} }
} }
@ -753,7 +753,7 @@ struct pattern *pat_match_end(struct sample *smp, struct pattern_expr *expr, int
if (lru) { if (lru) {
lru64_commit(lru, ret, expr, expr->revision, NULL); lru64_commit(lru, ret, expr, expr->revision, NULL);
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
return ret; return ret;
@ -775,15 +775,15 @@ struct pattern *pat_match_sub(struct sample *smp, struct pattern_expr *expr, int
if (pat_lru_tree) { if (pat_lru_tree) {
unsigned long long seed = pat_lru_seed ^ (long)expr; unsigned long long seed = pat_lru_seed ^ (long)expr;
SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_LOCK(PATLRU_LOCK, &pat_lru_tree_lock);
lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed), lru = lru64_get(XXH64(smp->data.u.str.str, smp->data.u.str.len, seed),
pat_lru_tree, expr, expr->revision); pat_lru_tree, expr, expr->revision);
if (!lru) { if (!lru) {
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
else if (lru->domain) { else if (lru->domain) {
ret = lru->data; ret = lru->data;
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
return ret; return ret;
} }
} }
@ -819,7 +819,7 @@ struct pattern *pat_match_sub(struct sample *smp, struct pattern_expr *expr, int
leave: leave:
if (lru) { if (lru) {
lru64_commit(lru, ret, expr, expr->revision, NULL); lru64_commit(lru, ret, expr, expr->revision, NULL);
SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock); HA_SPIN_UNLOCK(PATLRU_LOCK, &pat_lru_tree_lock);
} }
return ret; return ret;
@ -1765,11 +1765,11 @@ static inline int pat_ref_set_elt(struct pat_ref *ref, struct pat_ref_elt *elt,
if (!expr->pat_head->parse_smp) if (!expr->pat_head->parse_smp)
continue; continue;
RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock); HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
data = pattern_find_smp(expr, elt); data = pattern_find_smp(expr, elt);
if (data && *data && !expr->pat_head->parse_smp(sample, *data)) if (data && *data && !expr->pat_head->parse_smp(sample, *data))
*data = NULL; *data = NULL;
RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock); HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
} }
/* free old sample only when all exprs are updated */ /* free old sample only when all exprs are updated */
@ -1872,7 +1872,7 @@ struct pat_ref *pat_ref_new(const char *reference, const char *display, unsigned
LIST_INIT(&ref->head); LIST_INIT(&ref->head);
LIST_INIT(&ref->pat); LIST_INIT(&ref->pat);
SPIN_INIT(&ref->lock); HA_SPIN_INIT(&ref->lock);
LIST_ADDQ(&pattern_reference, &ref->list); LIST_ADDQ(&pattern_reference, &ref->list);
return ref; return ref;
@ -1991,14 +1991,14 @@ int pat_ref_push(struct pat_ref_elt *elt, struct pattern_expr *expr,
return 0; return 0;
} }
RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock); HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
/* index pattern */ /* index pattern */
if (!expr->pat_head->index(expr, &pattern, err)) { if (!expr->pat_head->index(expr, &pattern, err)) {
RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock); HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
free(data); free(data);
return 0; return 0;
} }
RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock); HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
return 1; return 1;
} }
@ -2073,9 +2073,9 @@ void pat_ref_reload(struct pat_ref *ref, struct pat_ref *replace)
struct pattern pattern; struct pattern pattern;
SPIN_LOCK(PATREF_LOCK, &ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
list_for_each_entry(expr, &ref->pat, list) { list_for_each_entry(expr, &ref->pat, list) {
RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock); HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
} }
/* all expr are locked, we can safely remove all pat_ref */ /* all expr are locked, we can safely remove all pat_ref */
@ -2145,9 +2145,9 @@ void pat_ref_reload(struct pat_ref *ref, struct pat_ref *replace)
continue; continue;
} }
} }
RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock); HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
} }
SPIN_UNLOCK(PATREF_LOCK, &ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
} }
/* This function prune all entries of <ref>. This function /* This function prune all entries of <ref>. This function
@ -2160,9 +2160,9 @@ void pat_ref_prune(struct pat_ref *ref)
struct bref *bref, *back; struct bref *bref, *back;
list_for_each_entry(expr, &ref->pat, list) { list_for_each_entry(expr, &ref->pat, list) {
RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock); HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
expr->pat_head->prune(expr); expr->pat_head->prune(expr);
RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock); HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
} }
/* we trash pat_ref_elt in a second time to ensure that data is /* we trash pat_ref_elt in a second time to ensure that data is
@ -2267,7 +2267,7 @@ struct pattern_expr *pattern_new_expr(struct pattern_head *head, struct pat_ref
expr->ref = ref; expr->ref = ref;
RWLOCK_INIT(&expr->lock); HA_RWLOCK_INIT(&expr->lock);
/* We must free this pattern if it is no more used. */ /* We must free this pattern if it is no more used. */
list->do_free = 1; list->do_free = 1;
@ -2579,7 +2579,7 @@ struct pattern *pattern_exec_match(struct pattern_head *head, struct sample *smp
return NULL; return NULL;
list_for_each_entry(list, &head->head, list) { list_for_each_entry(list, &head->head, list) {
RWLOCK_RDLOCK(PATEXP_LOCK, &list->expr->lock); HA_RWLOCK_RDLOCK(PATEXP_LOCK, &list->expr->lock);
pat = head->match(smp, list->expr, fill); pat = head->match(smp, list->expr, fill);
if (pat) { if (pat) {
/* We duplicate the pattern cause it could be modified /* We duplicate the pattern cause it could be modified
@ -2610,10 +2610,10 @@ struct pattern *pattern_exec_match(struct pattern_head *head, struct sample *smp
} }
pat->data = &static_sample_data; pat->data = &static_sample_data;
} }
RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock); HA_RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
return pat; return pat;
} }
RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock); HA_RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
} }
return NULL; return NULL;
} }
@ -2627,9 +2627,9 @@ void pattern_prune(struct pattern_head *head)
LIST_DEL(&list->list); LIST_DEL(&list->list);
if (list->do_free) { if (list->do_free) {
LIST_DEL(&list->expr->list); LIST_DEL(&list->expr->list);
RWLOCK_WRLOCK(PATEXP_LOCK, &list->expr->lock); HA_RWLOCK_WRLOCK(PATEXP_LOCK, &list->expr->lock);
head->prune(list->expr); head->prune(list->expr);
RWLOCK_WRUNLOCK(PATEXP_LOCK, &list->expr->lock); HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &list->expr->lock);
free(list->expr); free(list->expr);
} }
free(list); free(list);
@ -2676,9 +2676,9 @@ struct sample_data **pattern_find_smp(struct pattern_expr *expr, struct pat_ref_
*/ */
int pattern_delete(struct pattern_expr *expr, struct pat_ref_elt *ref) int pattern_delete(struct pattern_expr *expr, struct pat_ref_elt *ref)
{ {
RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock); HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
expr->pat_head->delete(expr, ref); expr->pat_head->delete(expr, ref);
RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock); HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
return 1; return 1;
} }
@ -2694,7 +2694,7 @@ void pattern_finalize_config(void)
pat_lru_seed = random(); pat_lru_seed = random();
if (global.tune.pattern_cache) { if (global.tune.pattern_cache) {
pat_lru_tree = lru64_new(global.tune.pattern_cache); pat_lru_tree = lru64_new(global.tune.pattern_cache);
SPIN_INIT(&pat_lru_tree_lock); HA_SPIN_INIT(&pat_lru_tree_lock);
} }
list_for_each_entry(ref, &pattern_reference, list) { list_for_each_entry(ref, &pattern_reference, list) {

View File

@ -319,7 +319,7 @@ static int peer_prepare_updatemsg(struct stksess *ts, struct shared_table *st, u
cursor += st->table->key_size; cursor += st->table->key_size;
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
/* encode values */ /* encode values */
for (data_type = 0 ; data_type < STKTABLE_DATA_TYPES ; data_type++) { for (data_type = 0 ; data_type < STKTABLE_DATA_TYPES ; data_type++) {
@ -359,7 +359,7 @@ static int peer_prepare_updatemsg(struct stksess *ts, struct shared_table *st, u
} }
} }
} }
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
/* Compute datalen */ /* Compute datalen */
datalen = (cursor - datamsg); datalen = (cursor - datamsg);
@ -510,7 +510,7 @@ static void peer_session_release(struct appctx *appctx)
/* peer session identified */ /* peer session identified */
if (peer) { if (peer) {
SPIN_LOCK(PEER_LOCK, &peer->lock); HA_SPIN_LOCK(PEER_LOCK, &peer->lock);
if (peer->appctx == appctx) { if (peer->appctx == appctx) {
/* Re-init current table pointers to force announcement on re-connect */ /* Re-init current table pointers to force announcement on re-connect */
peer->remote_table = peer->last_local_table = NULL; peer->remote_table = peer->last_local_table = NULL;
@ -527,7 +527,7 @@ static void peer_session_release(struct appctx *appctx)
peer->flags &= PEER_TEACH_RESET; peer->flags &= PEER_TEACH_RESET;
peer->flags &= PEER_LEARN_RESET; peer->flags &= PEER_LEARN_RESET;
} }
SPIN_UNLOCK(PEER_LOCK, &peer->lock); HA_SPIN_UNLOCK(PEER_LOCK, &peer->lock);
task_wakeup(peers->sync_task, TASK_WOKEN_MSG); task_wakeup(peers->sync_task, TASK_WOKEN_MSG);
} }
} }
@ -692,7 +692,7 @@ static void peer_io_handler(struct appctx *appctx)
goto switchstate; goto switchstate;
} }
SPIN_LOCK(PEER_LOCK, &curpeer->lock); HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
if (curpeer->appctx && curpeer->appctx != appctx) { if (curpeer->appctx && curpeer->appctx != appctx) {
if (curpeer->local) { if (curpeer->local) {
/* Local connection, reply a retry */ /* Local connection, reply a retry */
@ -726,7 +726,7 @@ static void peer_io_handler(struct appctx *appctx)
if (!curpeer) { if (!curpeer) {
curpeer = appctx->ctx.peers.ptr; curpeer = appctx->ctx.peers.ptr;
SPIN_LOCK(PEER_LOCK, &curpeer->lock); HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
if (curpeer->appctx != appctx) { if (curpeer->appctx != appctx) {
appctx->st0 = PEER_SESS_ST_END; appctx->st0 = PEER_SESS_ST_END;
goto switchstate; goto switchstate;
@ -787,7 +787,7 @@ static void peer_io_handler(struct appctx *appctx)
if (!curpeer) { if (!curpeer) {
curpeer = appctx->ctx.peers.ptr; curpeer = appctx->ctx.peers.ptr;
SPIN_LOCK(PEER_LOCK, &curpeer->lock); HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
if (curpeer->appctx != appctx) { if (curpeer->appctx != appctx) {
appctx->st0 = PEER_SESS_ST_END; appctx->st0 = PEER_SESS_ST_END;
goto switchstate; goto switchstate;
@ -826,7 +826,7 @@ static void peer_io_handler(struct appctx *appctx)
if (!curpeer) { if (!curpeer) {
curpeer = appctx->ctx.peers.ptr; curpeer = appctx->ctx.peers.ptr;
SPIN_LOCK(PEER_LOCK, &curpeer->lock); HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
if (curpeer->appctx != appctx) { if (curpeer->appctx != appctx) {
appctx->st0 = PEER_SESS_ST_END; appctx->st0 = PEER_SESS_ST_END;
goto switchstate; goto switchstate;
@ -913,7 +913,7 @@ static void peer_io_handler(struct appctx *appctx)
if (!curpeer) { if (!curpeer) {
curpeer = appctx->ctx.peers.ptr; curpeer = appctx->ctx.peers.ptr;
SPIN_LOCK(PEER_LOCK, &curpeer->lock); HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
if (curpeer->appctx != appctx) { if (curpeer->appctx != appctx) {
appctx->st0 = PEER_SESS_ST_END; appctx->st0 = PEER_SESS_ST_END;
goto switchstate; goto switchstate;
@ -1252,7 +1252,7 @@ static void peer_io_handler(struct appctx *appctx)
newts = NULL; newts = NULL;
} }
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
for (data_type = 0 ; data_type < STKTABLE_DATA_TYPES ; data_type++) { for (data_type = 0 ; data_type < STKTABLE_DATA_TYPES ; data_type++) {
@ -1264,7 +1264,7 @@ static void peer_io_handler(struct appctx *appctx)
data = intdecode(&msg_cur, msg_end); data = intdecode(&msg_cur, msg_end);
if (!msg_cur) { if (!msg_cur) {
/* malformed message */ /* malformed message */
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_remote(st->table, ts, 1); stktable_touch_remote(st->table, ts, 1);
appctx->st0 = PEER_SESS_ST_ERRPROTO; appctx->st0 = PEER_SESS_ST_ERRPROTO;
goto switchstate; goto switchstate;
@ -1281,7 +1281,7 @@ static void peer_io_handler(struct appctx *appctx)
data = intdecode(&msg_cur, msg_end); data = intdecode(&msg_cur, msg_end);
if (!msg_cur) { if (!msg_cur) {
/* malformed message */ /* malformed message */
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_remote(st->table, ts, 1); stktable_touch_remote(st->table, ts, 1);
appctx->st0 = PEER_SESS_ST_ERRPROTO; appctx->st0 = PEER_SESS_ST_ERRPROTO;
goto switchstate; goto switchstate;
@ -1298,7 +1298,7 @@ static void peer_io_handler(struct appctx *appctx)
data = intdecode(&msg_cur, msg_end); data = intdecode(&msg_cur, msg_end);
if (!msg_cur) { if (!msg_cur) {
/* malformed message */ /* malformed message */
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_remote(st->table, ts, 1); stktable_touch_remote(st->table, ts, 1);
appctx->st0 = PEER_SESS_ST_ERRPROTO; appctx->st0 = PEER_SESS_ST_ERRPROTO;
goto switchstate; goto switchstate;
@ -1320,7 +1320,7 @@ static void peer_io_handler(struct appctx *appctx)
data.curr_tick = tick_add(now_ms, -intdecode(&msg_cur, msg_end)) & ~0x1; data.curr_tick = tick_add(now_ms, -intdecode(&msg_cur, msg_end)) & ~0x1;
if (!msg_cur) { if (!msg_cur) {
/* malformed message */ /* malformed message */
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_remote(st->table, ts, 1); stktable_touch_remote(st->table, ts, 1);
appctx->st0 = PEER_SESS_ST_ERRPROTO; appctx->st0 = PEER_SESS_ST_ERRPROTO;
goto switchstate; goto switchstate;
@ -1328,7 +1328,7 @@ static void peer_io_handler(struct appctx *appctx)
data.curr_ctr = intdecode(&msg_cur, msg_end); data.curr_ctr = intdecode(&msg_cur, msg_end);
if (!msg_cur) { if (!msg_cur) {
/* malformed message */ /* malformed message */
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_remote(st->table, ts, 1); stktable_touch_remote(st->table, ts, 1);
appctx->st0 = PEER_SESS_ST_ERRPROTO; appctx->st0 = PEER_SESS_ST_ERRPROTO;
goto switchstate; goto switchstate;
@ -1336,7 +1336,7 @@ static void peer_io_handler(struct appctx *appctx)
data.prev_ctr = intdecode(&msg_cur, msg_end); data.prev_ctr = intdecode(&msg_cur, msg_end);
if (!msg_cur) { if (!msg_cur) {
/* malformed message */ /* malformed message */
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_remote(st->table, ts, 1); stktable_touch_remote(st->table, ts, 1);
appctx->st0 = PEER_SESS_ST_ERRPROTO; appctx->st0 = PEER_SESS_ST_ERRPROTO;
goto switchstate; goto switchstate;
@ -1351,7 +1351,7 @@ static void peer_io_handler(struct appctx *appctx)
} }
} }
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_remote(st->table, ts, 1); stktable_touch_remote(st->table, ts, 1);
} }
@ -1463,7 +1463,7 @@ static void peer_io_handler(struct appctx *appctx)
} }
if (!(curpeer->flags & PEER_F_TEACH_PROCESS)) { if (!(curpeer->flags & PEER_F_TEACH_PROCESS)) {
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
if (!(curpeer->flags & PEER_F_LEARN_ASSIGN) && if (!(curpeer->flags & PEER_F_LEARN_ASSIGN) &&
((int)(st->last_pushed - st->table->localupdate) < 0)) { ((int)(st->last_pushed - st->table->localupdate) < 0)) {
struct eb32_node *eb; struct eb32_node *eb;
@ -1517,14 +1517,14 @@ static void peer_io_handler(struct appctx *appctx)
ts = eb32_entry(eb, struct stksess, upd); ts = eb32_entry(eb, struct stksess, upd);
updateid = ts->upd.key; updateid = ts->upd.key;
ts->ref_cnt++; ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, 0); msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, 0);
if (!msglen) { if (!msglen) {
/* internal error: message does not fit in trash */ /* internal error: message does not fit in trash */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--; ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
appctx->st0 = PEER_SESS_ST_END; appctx->st0 = PEER_SESS_ST_END;
goto switchstate; goto switchstate;
} }
@ -1533,9 +1533,9 @@ static void peer_io_handler(struct appctx *appctx)
repl = ci_putblk(si_ic(si), trash.str, msglen); repl = ci_putblk(si_ic(si), trash.str, msglen);
if (repl <= 0) { if (repl <= 0) {
/* no more write possible */ /* no more write possible */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--; ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
if (repl == -1) { if (repl == -1) {
goto full; goto full;
} }
@ -1543,7 +1543,7 @@ static void peer_io_handler(struct appctx *appctx)
goto switchstate; goto switchstate;
} }
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--; ts->ref_cnt--;
st->last_pushed = updateid; st->last_pushed = updateid;
if ((int)(st->last_pushed - st->table->commitupdate) > 0) if ((int)(st->last_pushed - st->table->commitupdate) > 0)
@ -1552,7 +1552,7 @@ static void peer_io_handler(struct appctx *appctx)
new_pushed = 0; new_pushed = 0;
} }
} }
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
} }
else { else {
if (!(st->flags & SHTABLE_F_TEACH_STAGE1)) { if (!(st->flags & SHTABLE_F_TEACH_STAGE1)) {
@ -1584,7 +1584,7 @@ static void peer_io_handler(struct appctx *appctx)
/* We force new pushed to 1 to force identifier in update message */ /* We force new pushed to 1 to force identifier in update message */
new_pushed = 1; new_pushed = 1;
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
while (1) { while (1) {
uint32_t msglen; uint32_t msglen;
struct stksess *ts; struct stksess *ts;
@ -1604,15 +1604,15 @@ static void peer_io_handler(struct appctx *appctx)
ts = eb32_entry(eb, struct stksess, upd); ts = eb32_entry(eb, struct stksess, upd);
updateid = ts->upd.key; updateid = ts->upd.key;
ts->ref_cnt++; ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
use_timed = !(curpeer->flags & PEER_F_DWNGRD); use_timed = !(curpeer->flags & PEER_F_DWNGRD);
msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, use_timed); msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, use_timed);
if (!msglen) { if (!msglen) {
/* internal error: message does not fit in trash */ /* internal error: message does not fit in trash */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--; ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
appctx->st0 = PEER_SESS_ST_END; appctx->st0 = PEER_SESS_ST_END;
goto switchstate; goto switchstate;
} }
@ -1621,22 +1621,22 @@ static void peer_io_handler(struct appctx *appctx)
repl = ci_putblk(si_ic(si), trash.str, msglen); repl = ci_putblk(si_ic(si), trash.str, msglen);
if (repl <= 0) { if (repl <= 0) {
/* no more write possible */ /* no more write possible */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--; ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
if (repl == -1) { if (repl == -1) {
goto full; goto full;
} }
appctx->st0 = PEER_SESS_ST_END; appctx->st0 = PEER_SESS_ST_END;
goto switchstate; goto switchstate;
} }
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--; ts->ref_cnt--;
st->last_pushed = updateid; st->last_pushed = updateid;
/* identifier may not needed in next update message */ /* identifier may not needed in next update message */
new_pushed = 0; new_pushed = 0;
} }
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
} }
if (!(st->flags & SHTABLE_F_TEACH_STAGE2)) { if (!(st->flags & SHTABLE_F_TEACH_STAGE2)) {
@ -1668,7 +1668,7 @@ static void peer_io_handler(struct appctx *appctx)
/* We force new pushed to 1 to force identifier in update message */ /* We force new pushed to 1 to force identifier in update message */
new_pushed = 1; new_pushed = 1;
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
while (1) { while (1) {
uint32_t msglen; uint32_t msglen;
struct stksess *ts; struct stksess *ts;
@ -1687,15 +1687,15 @@ static void peer_io_handler(struct appctx *appctx)
ts = eb32_entry(eb, struct stksess, upd); ts = eb32_entry(eb, struct stksess, upd);
updateid = ts->upd.key; updateid = ts->upd.key;
ts->ref_cnt++; ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
use_timed = !(curpeer->flags & PEER_F_DWNGRD); use_timed = !(curpeer->flags & PEER_F_DWNGRD);
msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, use_timed); msglen = peer_prepare_updatemsg(ts, st, updateid, trash.str, trash.size, new_pushed, use_timed);
if (!msglen) { if (!msglen) {
/* internal error: message does not fit in trash */ /* internal error: message does not fit in trash */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--; ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
appctx->st0 = PEER_SESS_ST_END; appctx->st0 = PEER_SESS_ST_END;
goto switchstate; goto switchstate;
} }
@ -1704,9 +1704,9 @@ static void peer_io_handler(struct appctx *appctx)
repl = ci_putblk(si_ic(si), trash.str, msglen); repl = ci_putblk(si_ic(si), trash.str, msglen);
if (repl <= 0) { if (repl <= 0) {
/* no more write possible */ /* no more write possible */
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--; ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
if (repl == -1) { if (repl == -1) {
goto full; goto full;
} }
@ -1714,13 +1714,13 @@ static void peer_io_handler(struct appctx *appctx)
goto switchstate; goto switchstate;
} }
SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &st->table->lock);
ts->ref_cnt--; ts->ref_cnt--;
st->last_pushed = updateid; st->last_pushed = updateid;
/* identifier may not needed in next update message */ /* identifier may not needed in next update message */
new_pushed = 0; new_pushed = 0;
} }
SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &st->table->lock);
} }
} }
@ -1803,7 +1803,7 @@ static void peer_io_handler(struct appctx *appctx)
} }
case PEER_SESS_ST_END: { case PEER_SESS_ST_END: {
if (curpeer) { if (curpeer) {
SPIN_UNLOCK(PEER_LOCK, &curpeer->lock); HA_SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
curpeer = NULL; curpeer = NULL;
} }
si_shutw(si); si_shutw(si);
@ -1817,7 +1817,7 @@ static void peer_io_handler(struct appctx *appctx)
si_oc(si)->flags |= CF_READ_DONTWAIT; si_oc(si)->flags |= CF_READ_DONTWAIT;
if (curpeer) if (curpeer)
SPIN_UNLOCK(PEER_LOCK, &curpeer->lock); HA_SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
return; return;
full: full:
si_applet_cant_put(si); si_applet_cant_put(si);
@ -1973,7 +1973,7 @@ static struct task *process_peer_sync(struct task * task)
/* Acquire lock for all peers of the section */ /* Acquire lock for all peers of the section */
for (ps = peers->remote; ps; ps = ps->next) for (ps = peers->remote; ps; ps = ps->next)
SPIN_LOCK(PEER_LOCK, &ps->lock); HA_SPIN_LOCK(PEER_LOCK, &ps->lock);
if (!stopping) { if (!stopping) {
/* Normal case (not soft stop)*/ /* Normal case (not soft stop)*/
@ -2147,7 +2147,7 @@ static struct task *process_peer_sync(struct task * task)
/* Release lock for all peers of the section */ /* Release lock for all peers of the section */
for (ps = peers->remote; ps; ps = ps->next) for (ps = peers->remote; ps; ps = ps->next)
SPIN_UNLOCK(PEER_LOCK, &ps->lock); HA_SPIN_UNLOCK(PEER_LOCK, &ps->lock);
/* Wakeup for re-connect */ /* Wakeup for re-connect */
return task; return task;

View File

@ -2621,9 +2621,9 @@ http_req_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
/* perform update */ /* perform update */
/* returned code: 1=ok, 0=ko */ /* returned code: 1=ok, 0=ko */
SPIN_LOCK(PATREF_LOCK, &ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
pat_ref_delete(ref, key->str); pat_ref_delete(ref, key->str);
SPIN_UNLOCK(PATREF_LOCK, &ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
free_trash_chunk(key); free_trash_chunk(key);
break; break;
@ -2649,10 +2649,10 @@ http_req_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
/* perform update */ /* perform update */
/* add entry only if it does not already exist */ /* add entry only if it does not already exist */
SPIN_LOCK(PATREF_LOCK, &ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
if (pat_ref_find_elt(ref, key->str) == NULL) if (pat_ref_find_elt(ref, key->str) == NULL)
pat_ref_add(ref, key->str, NULL, NULL); pat_ref_add(ref, key->str, NULL, NULL);
SPIN_UNLOCK(PATREF_LOCK, &ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
free_trash_chunk(key); free_trash_chunk(key);
break; break;
@ -2737,7 +2737,7 @@ http_req_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
ptr1 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_CNT); ptr1 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_CNT);
ptr2 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_RATE); ptr2 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_RATE);
if (ptr1 || ptr2) { if (ptr1 || ptr2) {
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
if (ptr1) if (ptr1)
stktable_data_cast(ptr1, http_req_cnt)++; stktable_data_cast(ptr1, http_req_cnt)++;
@ -2746,7 +2746,7 @@ http_req_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
update_freq_ctr_period(&stktable_data_cast(ptr2, http_req_rate), update_freq_ctr_period(&stktable_data_cast(ptr2, http_req_rate),
t->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1); t->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
} }
stkctr_set_flags(&s->stkctr[trk_idx(rule->action)], STKCTR_TRACK_CONTENT); stkctr_set_flags(&s->stkctr[trk_idx(rule->action)], STKCTR_TRACK_CONTENT);
@ -2915,9 +2915,9 @@ http_res_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
/* perform update */ /* perform update */
/* returned code: 1=ok, 0=ko */ /* returned code: 1=ok, 0=ko */
SPIN_LOCK(PATREF_LOCK, &ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
pat_ref_delete(ref, key->str); pat_ref_delete(ref, key->str);
SPIN_UNLOCK(PATREF_LOCK, &ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
free_trash_chunk(key); free_trash_chunk(key);
break; break;
@ -2980,14 +2980,14 @@ http_res_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
value->str[value->len] = '\0'; value->str[value->len] = '\0';
/* perform update */ /* perform update */
SPIN_LOCK(PATREF_LOCK, &ref->lock); HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
if (pat_ref_find_elt(ref, key->str) != NULL) if (pat_ref_find_elt(ref, key->str) != NULL)
/* update entry if it exists */ /* update entry if it exists */
pat_ref_set(ref, key->str, value->str, NULL); pat_ref_set(ref, key->str, value->str, NULL);
else else
/* insert a new entry */ /* insert a new entry */
pat_ref_add(ref, key->str, value->str, NULL); pat_ref_add(ref, key->str, value->str, NULL);
SPIN_UNLOCK(PATREF_LOCK, &ref->lock); HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
free_trash_chunk(key); free_trash_chunk(key);
free_trash_chunk(value); free_trash_chunk(value);
break; break;
@ -3015,7 +3015,7 @@ http_res_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
if (key && (ts = stktable_get_entry(t, key))) { if (key && (ts = stktable_get_entry(t, key))) {
stream_track_stkctr(&s->stkctr[trk_idx(rule->action)], t, ts); stream_track_stkctr(&s->stkctr[trk_idx(rule->action)], t, ts);
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
/* let's count a new HTTP request as it's the first time we do it */ /* let's count a new HTTP request as it's the first time we do it */
ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_CNT); ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_CNT);
@ -3045,7 +3045,7 @@ http_res_get_intercept_rule(struct proxy *px, struct list *rules, struct stream
t->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1); t->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1);
} }
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stkctr_set_flags(&s->stkctr[trk_idx(rule->action)], STKCTR_TRACK_CONTENT); stkctr_set_flags(&s->stkctr[trk_idx(rule->action)], STKCTR_TRACK_CONTENT);
if (sess->fe != s->be) if (sess->fe != s->be)
@ -7755,7 +7755,7 @@ void http_capture_bad_message(struct proxy *proxy, struct error_snapshot *es, st
struct channel *chn = msg->chn; struct channel *chn = msg->chn;
int len1, len2; int len1, len2;
SPIN_LOCK(PROXY_LOCK, &proxy->lock); HA_SPIN_LOCK(PROXY_LOCK, &proxy->lock);
es->len = MIN(chn->buf->i, global.tune.bufsize); es->len = MIN(chn->buf->i, global.tune.bufsize);
len1 = chn->buf->data + chn->buf->size - chn->buf->p; len1 = chn->buf->data + chn->buf->size - chn->buf->p;
len1 = MIN(len1, es->len); len1 = MIN(len1, es->len);
@ -7795,7 +7795,7 @@ void http_capture_bad_message(struct proxy *proxy, struct error_snapshot *es, st
es->b_tot = chn->total; es->b_tot = chn->total;
es->m_clen = msg->chunk_len; es->m_clen = msg->chunk_len;
es->m_blen = msg->body_len; es->m_blen = msg->body_len;
SPIN_UNLOCK(PROXY_LOCK, &proxy->lock); HA_SPIN_UNLOCK(PROXY_LOCK, &proxy->lock);
} }
/* Return in <vptr> and <vlen> the pointer and length of occurrence <occ> of /* Return in <vptr> and <vlen> the pointer and length of occurrence <occ> of

View File

@ -762,7 +762,7 @@ void init_new_proxy(struct proxy *p)
/* initial uuid is unassigned (-1) */ /* initial uuid is unassigned (-1) */
p->uuid = -1; p->uuid = -1;
SPIN_INIT(&p->lock); HA_SPIN_INIT(&p->lock);
} }
/* /*

View File

@ -142,8 +142,8 @@ void process_srv_queue(struct server *s)
struct proxy *p = s->proxy; struct proxy *p = s->proxy;
int maxconn; int maxconn;
SPIN_LOCK(PROXY_LOCK, &p->lock); HA_SPIN_LOCK(PROXY_LOCK, &p->lock);
SPIN_LOCK(SERVER_LOCK, &s->lock); HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
/* First, check if we can handle some connections queued at the proxy. We /* First, check if we can handle some connections queued at the proxy. We
* will take as many as we can handle. * will take as many as we can handle.
@ -156,8 +156,8 @@ void process_srv_queue(struct server *s)
break; break;
task_wakeup(strm->task, TASK_WOKEN_RES); task_wakeup(strm->task, TASK_WOKEN_RES);
} }
SPIN_UNLOCK(SERVER_LOCK, &s->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
SPIN_UNLOCK(PROXY_LOCK, &p->lock); HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock);
} }
/* Adds the stream <strm> to the pending connection list of server <strm>->srv /* Adds the stream <strm> to the pending connection list of server <strm>->srv
@ -182,17 +182,17 @@ struct pendconn *pendconn_add(struct stream *strm)
if ((strm->flags & SF_ASSIGNED) && srv) { if ((strm->flags & SF_ASSIGNED) && srv) {
p->srv = srv; p->srv = srv;
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
LIST_ADDQ(&srv->pendconns, &p->list); LIST_ADDQ(&srv->pendconns, &p->list);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
count = HA_ATOMIC_ADD(&srv->nbpend, 1); count = HA_ATOMIC_ADD(&srv->nbpend, 1);
strm->logs.srv_queue_size += count; strm->logs.srv_queue_size += count;
HA_ATOMIC_UPDATE_MAX(&srv->counters.nbpend_max, count); HA_ATOMIC_UPDATE_MAX(&srv->counters.nbpend_max, count);
} else { } else {
p->srv = NULL; p->srv = NULL;
SPIN_LOCK(PROXY_LOCK, &strm->be->lock); HA_SPIN_LOCK(PROXY_LOCK, &strm->be->lock);
LIST_ADDQ(&strm->be->pendconns, &p->list); LIST_ADDQ(&strm->be->pendconns, &p->list);
SPIN_UNLOCK(PROXY_LOCK, &strm->be->lock); HA_SPIN_UNLOCK(PROXY_LOCK, &strm->be->lock);
count = HA_ATOMIC_ADD(&strm->be->nbpend, 1); count = HA_ATOMIC_ADD(&strm->be->nbpend, 1);
strm->logs.prx_queue_size += count; strm->logs.prx_queue_size += count;
HA_ATOMIC_UPDATE_MAX(&strm->be->be_counters.nbpend_max, count); HA_ATOMIC_UPDATE_MAX(&strm->be->be_counters.nbpend_max, count);
@ -209,7 +209,7 @@ int pendconn_redistribute(struct server *s)
struct pendconn *pc, *pc_bck; struct pendconn *pc, *pc_bck;
int xferred = 0; int xferred = 0;
SPIN_LOCK(SERVER_LOCK, &s->lock); HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
list_for_each_entry_safe(pc, pc_bck, &s->pendconns, list) { list_for_each_entry_safe(pc, pc_bck, &s->pendconns, list) {
struct stream *strm = pc->strm; struct stream *strm = pc->strm;
@ -227,7 +227,7 @@ int pendconn_redistribute(struct server *s)
xferred++; xferred++;
} }
} }
SPIN_UNLOCK(SERVER_LOCK, &s->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
return xferred; return xferred;
} }
@ -243,7 +243,7 @@ int pendconn_grab_from_px(struct server *s)
if (!srv_currently_usable(s)) if (!srv_currently_usable(s))
return 0; return 0;
SPIN_LOCK(PROXY_LOCK, &s->proxy->lock); HA_SPIN_LOCK(PROXY_LOCK, &s->proxy->lock);
for (xferred = 0; !s->maxconn || xferred < srv_dynamic_maxconn(s); xferred++) { for (xferred = 0; !s->maxconn || xferred < srv_dynamic_maxconn(s); xferred++) {
struct stream *strm; struct stream *strm;
struct pendconn *p; struct pendconn *p;
@ -256,7 +256,7 @@ int pendconn_grab_from_px(struct server *s)
__pendconn_free(p); __pendconn_free(p);
task_wakeup(strm->task, TASK_WOKEN_RES); task_wakeup(strm->task, TASK_WOKEN_RES);
} }
SPIN_UNLOCK(PROXY_LOCK, &s->proxy->lock); HA_SPIN_UNLOCK(PROXY_LOCK, &s->proxy->lock);
return xferred; return xferred;
} }
@ -268,15 +268,15 @@ int pendconn_grab_from_px(struct server *s)
void pendconn_free(struct pendconn *p) void pendconn_free(struct pendconn *p)
{ {
if (p->srv) { if (p->srv) {
SPIN_LOCK(SERVER_LOCK, &p->srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &p->srv->lock);
LIST_DEL(&p->list); LIST_DEL(&p->list);
SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock);
HA_ATOMIC_SUB(&p->srv->nbpend, 1); HA_ATOMIC_SUB(&p->srv->nbpend, 1);
} }
else { else {
SPIN_LOCK(SERVER_LOCK, &p->strm->be->lock); HA_SPIN_LOCK(SERVER_LOCK, &p->strm->be->lock);
LIST_DEL(&p->list); LIST_DEL(&p->list);
SPIN_UNLOCK(SERVER_LOCK, &p->strm->be->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &p->strm->be->lock);
HA_ATOMIC_SUB(&p->strm->be->nbpend, 1); HA_ATOMIC_SUB(&p->strm->be->nbpend, 1);
} }
p->strm->pend_pos = NULL; p->strm->pend_pos = NULL;

View File

@ -881,9 +881,9 @@ void srv_set_stopped(struct server *s, const char *reason, struct check *check)
srv_register_update(s); srv_register_update(s);
for (srv = s->trackers; srv; srv = srv->tracknext) { for (srv = s->trackers; srv; srv = srv->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_set_stopped(srv, NULL, NULL); srv_set_stopped(srv, NULL, NULL);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
} }
} }
@ -923,9 +923,9 @@ void srv_set_running(struct server *s, const char *reason, struct check *check)
srv_register_update(s); srv_register_update(s);
for (srv = s->trackers; srv; srv = srv->tracknext) { for (srv = s->trackers; srv; srv = srv->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_set_running(srv, NULL, NULL); srv_set_running(srv, NULL, NULL);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
} }
} }
@ -964,9 +964,9 @@ void srv_set_stopping(struct server *s, const char *reason, struct check *check)
srv_register_update(s); srv_register_update(s);
for (srv = s->trackers; srv; srv = srv->tracknext) { for (srv = s->trackers; srv; srv = srv->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_set_stopping(srv, NULL, NULL); srv_set_stopping(srv, NULL, NULL);
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
} }
} }
@ -1007,9 +1007,9 @@ void srv_set_admin_flag(struct server *s, enum srv_admin mode, const char *cause
mode = SRV_ADMF_IDRAIN; mode = SRV_ADMF_IDRAIN;
for (srv = s->trackers; srv; srv = srv->tracknext) { for (srv = s->trackers; srv; srv = srv->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_set_admin_flag(srv, mode, cause); srv_set_admin_flag(srv, mode, cause);
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
} }
} }
@ -1045,9 +1045,9 @@ void srv_clr_admin_flag(struct server *s, enum srv_admin mode)
mode = SRV_ADMF_IDRAIN; mode = SRV_ADMF_IDRAIN;
for (srv = s->trackers; srv; srv = srv->tracknext) { for (srv = s->trackers; srv; srv = srv->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
srv_clr_admin_flag(srv, mode); srv_clr_admin_flag(srv, mode);
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
} }
} }
@ -1062,13 +1062,13 @@ static void srv_propagate_admin_state(struct server *srv)
return; return;
for (srv2 = srv->trackers; srv2; srv2 = srv2->tracknext) { for (srv2 = srv->trackers; srv2; srv2 = srv2->tracknext) {
SPIN_LOCK(SERVER_LOCK, &srv2->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv2->lock);
if (srv->next_admin & (SRV_ADMF_MAINT | SRV_ADMF_CMAINT)) if (srv->next_admin & (SRV_ADMF_MAINT | SRV_ADMF_CMAINT))
srv_set_admin_flag(srv2, SRV_ADMF_IMAINT, NULL); srv_set_admin_flag(srv2, SRV_ADMF_IMAINT, NULL);
if (srv->next_admin & SRV_ADMF_DRAIN) if (srv->next_admin & SRV_ADMF_DRAIN)
srv_set_admin_flag(srv2, SRV_ADMF_IDRAIN, NULL); srv_set_admin_flag(srv2, SRV_ADMF_IDRAIN, NULL);
SPIN_UNLOCK(SERVER_LOCK, &srv2->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv2->lock);
} }
} }
@ -2028,7 +2028,7 @@ int parse_server(const char *file, int linenum, char **args, struct proxy *curpr
/* Copy default server settings to new server settings. */ /* Copy default server settings to new server settings. */
srv_settings_cpy(newsrv, &curproxy->defsrv, 0); srv_settings_cpy(newsrv, &curproxy->defsrv, 0);
SPIN_INIT(&newsrv->lock); HA_SPIN_INIT(&newsrv->lock);
cur_arg++; cur_arg++;
} else { } else {
newsrv = &curproxy->defsrv; newsrv = &curproxy->defsrv;
@ -2600,10 +2600,10 @@ static void srv_register_update(struct server *srv)
{ {
if (LIST_ISEMPTY(&srv->update_status)) { if (LIST_ISEMPTY(&srv->update_status)) {
THREAD_WANT_SYNC(); THREAD_WANT_SYNC();
SPIN_LOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock); HA_SPIN_LOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock);
if (LIST_ISEMPTY(&srv->update_status)) if (LIST_ISEMPTY(&srv->update_status))
LIST_ADDQ(&updated_servers, &srv->update_status); LIST_ADDQ(&updated_servers, &srv->update_status);
SPIN_UNLOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock); HA_SPIN_UNLOCK(UPDATED_SERVERS_LOCK, &updated_servers_lock);
} }
} }
@ -2789,7 +2789,7 @@ static void srv_update_state(struct server *srv, int version, char **params)
if (msg->len) if (msg->len)
goto out; goto out;
SPIN_LOCK(SERVER_LOCK, &srv->lock); HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
/* recover operational state and apply it to this server /* recover operational state and apply it to this server
* and all servers tracking this one */ * and all servers tracking this one */
switch (srv_op_state) { switch (srv_op_state) {
@ -2919,7 +2919,7 @@ static void srv_update_state(struct server *srv, int version, char **params)
if (port_str) if (port_str)
srv->svc_port = port; srv->svc_port = port;
SPIN_UNLOCK(SERVER_LOCK, &srv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
break; break;
default: default:
@ -3696,9 +3696,9 @@ int snr_resolution_error_cb(struct dns_requester *requester, int error_code)
s = objt_server(requester->owner); s = objt_server(requester->owner);
if (!s) if (!s)
return 1; return 1;
SPIN_LOCK(SERVER_LOCK, &s->lock); HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
snr_update_srv_status(s, 0); snr_update_srv_status(s, 0);
SPIN_UNLOCK(SERVER_LOCK, &s->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
return 1; return 1;
} }
@ -3731,18 +3731,18 @@ struct server *snr_check_ip_callback(struct server *srv, void *ip, unsigned char
* one used for the server found in the backend * one used for the server found in the backend
* * the server found in the backend is not our current server * * the server found in the backend is not our current server
*/ */
SPIN_LOCK(SERVER_LOCK, &tmpsrv->lock); HA_SPIN_LOCK(SERVER_LOCK, &tmpsrv->lock);
if ((tmpsrv->hostname_dn == NULL) || if ((tmpsrv->hostname_dn == NULL) ||
(srv->hostname_dn_len != tmpsrv->hostname_dn_len) || (srv->hostname_dn_len != tmpsrv->hostname_dn_len) ||
(strcmp(srv->hostname_dn, tmpsrv->hostname_dn) != 0) || (strcmp(srv->hostname_dn, tmpsrv->hostname_dn) != 0) ||
(srv->puid == tmpsrv->puid)) { (srv->puid == tmpsrv->puid)) {
SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
continue; continue;
} }
/* If the server has been taken down, don't consider it */ /* If the server has been taken down, don't consider it */
if (tmpsrv->next_admin & SRV_ADMF_RMAINT) { if (tmpsrv->next_admin & SRV_ADMF_RMAINT) {
SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
continue; continue;
} }
@ -3754,10 +3754,10 @@ struct server *snr_check_ip_callback(struct server *srv, void *ip, unsigned char
memcmp(ip, &((struct sockaddr_in *)&tmpsrv->addr)->sin_addr, 4) == 0) || memcmp(ip, &((struct sockaddr_in *)&tmpsrv->addr)->sin_addr, 4) == 0) ||
(tmpsrv->addr.ss_family == AF_INET6 && (tmpsrv->addr.ss_family == AF_INET6 &&
memcmp(ip, &((struct sockaddr_in6 *)&tmpsrv->addr)->sin6_addr, 16) == 0))) { memcmp(ip, &((struct sockaddr_in6 *)&tmpsrv->addr)->sin6_addr, 16) == 0))) {
SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
return tmpsrv; return tmpsrv;
} }
SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
} }
@ -3789,7 +3789,7 @@ int srv_set_fqdn(struct server *srv, const char *hostname, int dns_locked)
int hostname_len, hostname_dn_len; int hostname_len, hostname_dn_len;
if (!dns_locked) if (!dns_locked)
SPIN_LOCK(DNS_LOCK, &srv->resolvers->lock); HA_SPIN_LOCK(DNS_LOCK, &srv->resolvers->lock);
/* run time DNS resolution was not active for this server /* run time DNS resolution was not active for this server
* and we can't enable it at run time for now. * and we can't enable it at run time for now.
*/ */
@ -3825,12 +3825,12 @@ int srv_set_fqdn(struct server *srv, const char *hostname, int dns_locked)
end: end:
if (!dns_locked) if (!dns_locked)
SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock); HA_SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
return 0; return 0;
err: err:
if (!dns_locked) if (!dns_locked)
SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock); HA_SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
return -1; return -1;
} }
@ -4053,7 +4053,7 @@ static int cli_parse_set_server(char **args, struct appctx *appctx, void *privat
if (!sv) if (!sv)
return 1; return 1;
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
if (strcmp(args[3], "weight") == 0) { if (strcmp(args[3], "weight") == 0) {
warning = server_parse_weight_change_request(sv, args[4]); warning = server_parse_weight_change_request(sv, args[4]);
@ -4220,7 +4220,7 @@ static int cli_parse_set_server(char **args, struct appctx *appctx, void *privat
appctx->st0 = CLI_ST_PRINT; appctx->st0 = CLI_ST_PRINT;
} }
out_unlock: out_unlock:
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 1; return 1;
} }
@ -4427,7 +4427,7 @@ static struct cli_kw_list cli_kws = {{ },{
__attribute__((constructor)) __attribute__((constructor))
static void __server_init(void) static void __server_init(void)
{ {
SPIN_INIT(&updated_servers_lock); HA_SPIN_INIT(&updated_servers_lock);
cli_register_kw(&cli_kws); cli_register_kw(&cli_kws);
} }

View File

@ -73,7 +73,7 @@ void __signal_process_queue()
struct signal_descriptor *desc; struct signal_descriptor *desc;
sigset_t old_sig; sigset_t old_sig;
if (SPIN_TRYLOCK(SIGNALS_LOCK, &signals_lock)) if (HA_SPIN_TRYLOCK(SIGNALS_LOCK, &signals_lock))
return; return;
/* block signal delivery during processing */ /* block signal delivery during processing */
@ -102,7 +102,7 @@ void __signal_process_queue()
/* restore signal delivery */ /* restore signal delivery */
sigprocmask(SIG_SETMASK, &old_sig, NULL); sigprocmask(SIG_SETMASK, &old_sig, NULL);
SPIN_UNLOCK(SIGNALS_LOCK, &signals_lock); HA_SPIN_UNLOCK(SIGNALS_LOCK, &signals_lock);
} }
/* perform minimal intializations, report 0 in case of error, 1 if OK. */ /* perform minimal intializations, report 0 in case of error, 1 if OK. */
@ -114,7 +114,7 @@ int signal_init()
memset(signal_queue, 0, sizeof(signal_queue)); memset(signal_queue, 0, sizeof(signal_queue));
memset(signal_state, 0, sizeof(signal_state)); memset(signal_state, 0, sizeof(signal_state));
SPIN_INIT(&signals_lock); HA_SPIN_INIT(&signals_lock);
/* Ensure signals are not blocked. Some shells or service managers may /* Ensure signals are not blocked. Some shells or service managers may
* accidently block all of our signals unfortunately, causing lots of * accidently block all of our signals unfortunately, causing lots of
@ -150,7 +150,7 @@ void deinit_signals()
pool_free2(pool2_sig_handlers, sh); pool_free2(pool2_sig_handlers, sh);
} }
} }
SPIN_DESTROY(&signals_lock); HA_SPIN_DESTROY(&signals_lock);
} }
/* Register a function and an integer argument on a signal. A pointer to the /* Register a function and an integer argument on a signal. A pointer to the

View File

@ -218,15 +218,15 @@ void ssl_locking_function(int mode, int n, const char * file, int line)
{ {
if (mode & CRYPTO_LOCK) { if (mode & CRYPTO_LOCK) {
if (mode & CRYPTO_READ) if (mode & CRYPTO_READ)
RWLOCK_RDLOCK(SSL_LOCK, &ssl_rwlocks[n]); HA_RWLOCK_RDLOCK(SSL_LOCK, &ssl_rwlocks[n]);
else else
RWLOCK_WRLOCK(SSL_LOCK, &ssl_rwlocks[n]); HA_RWLOCK_WRLOCK(SSL_LOCK, &ssl_rwlocks[n]);
} }
else { else {
if (mode & CRYPTO_READ) if (mode & CRYPTO_READ)
RWLOCK_RDUNLOCK(SSL_LOCK, &ssl_rwlocks[n]); HA_RWLOCK_RDUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
else else
RWLOCK_WRUNLOCK(SSL_LOCK, &ssl_rwlocks[n]); HA_RWLOCK_WRUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
} }
} }
@ -239,7 +239,7 @@ static int ssl_locking_init(void)
return -1; return -1;
for (i = 0 ; i < CRYPTO_num_locks() ; i++) for (i = 0 ; i < CRYPTO_num_locks() ; i++)
RWLOCK_INIT(&ssl_rwlocks[i]); HA_RWLOCK_INIT(&ssl_rwlocks[i]);
CRYPTO_set_id_callback(ssl_id_function); CRYPTO_set_id_callback(ssl_id_function);
CRYPTO_set_locking_callback(ssl_locking_function); CRYPTO_set_locking_callback(ssl_locking_function);
@ -1795,15 +1795,15 @@ ssl_sock_assign_generated_cert(unsigned int key, struct bind_conf *bind_conf, SS
struct lru64 *lru = NULL; struct lru64 *lru = NULL;
if (ssl_ctx_lru_tree) { if (ssl_ctx_lru_tree) {
RWLOCK_RDLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); HA_RWLOCK_RDLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
lru = lru64_lookup(key, ssl_ctx_lru_tree, bind_conf->ca_sign_cert, 0); lru = lru64_lookup(key, ssl_ctx_lru_tree, bind_conf->ca_sign_cert, 0);
if (lru && lru->domain) { if (lru && lru->domain) {
if (ssl) if (ssl)
SSL_set_SSL_CTX(ssl, (SSL_CTX *)lru->data); SSL_set_SSL_CTX(ssl, (SSL_CTX *)lru->data);
RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
return (SSL_CTX *)lru->data; return (SSL_CTX *)lru->data;
} }
RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); HA_RWLOCK_RDUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
} }
return NULL; return NULL;
} }
@ -1826,16 +1826,16 @@ ssl_sock_set_generated_cert(SSL_CTX *ssl_ctx, unsigned int key, struct bind_conf
struct lru64 *lru = NULL; struct lru64 *lru = NULL;
if (ssl_ctx_lru_tree) { if (ssl_ctx_lru_tree) {
RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
lru = lru64_get(key, ssl_ctx_lru_tree, bind_conf->ca_sign_cert, 0); lru = lru64_get(key, ssl_ctx_lru_tree, bind_conf->ca_sign_cert, 0);
if (!lru) { if (!lru) {
RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
return -1; return -1;
} }
if (lru->domain && lru->data) if (lru->domain && lru->data)
lru->free((SSL_CTX *)lru->data); lru->free((SSL_CTX *)lru->data);
lru64_commit(lru, ssl_ctx, bind_conf->ca_sign_cert, 0, (void (*)(void *))SSL_CTX_free); lru64_commit(lru, ssl_ctx, bind_conf->ca_sign_cert, 0, (void (*)(void *))SSL_CTX_free);
RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
return 0; return 0;
} }
return -1; return -1;
@ -1861,7 +1861,7 @@ ssl_sock_generate_certificate(const char *servername, struct bind_conf *bind_con
key = ssl_sock_generated_cert_key(servername, strlen(servername)); key = ssl_sock_generated_cert_key(servername, strlen(servername));
if (ssl_ctx_lru_tree) { if (ssl_ctx_lru_tree) {
RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
lru = lru64_get(key, ssl_ctx_lru_tree, cacert, 0); lru = lru64_get(key, ssl_ctx_lru_tree, cacert, 0);
if (lru && lru->domain) if (lru && lru->domain)
ssl_ctx = (SSL_CTX *)lru->data; ssl_ctx = (SSL_CTX *)lru->data;
@ -1870,7 +1870,7 @@ ssl_sock_generate_certificate(const char *servername, struct bind_conf *bind_con
lru64_commit(lru, ssl_ctx, cacert, 0, (void (*)(void *))SSL_CTX_free); lru64_commit(lru, ssl_ctx, cacert, 0, (void (*)(void *))SSL_CTX_free);
} }
SSL_set_SSL_CTX(ssl, ssl_ctx); SSL_set_SSL_CTX(ssl, ssl_ctx);
RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock); HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
return 1; return 1;
} }
else { else {
@ -4782,7 +4782,7 @@ ssl_sock_load_ca(struct bind_conf *bind_conf)
#if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES) #if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES)
if (global_ssl.ctx_cache) { if (global_ssl.ctx_cache) {
ssl_ctx_lru_tree = lru64_new(global_ssl.ctx_cache); ssl_ctx_lru_tree = lru64_new(global_ssl.ctx_cache);
RWLOCK_INIT(&ssl_ctx_lru_rwlock); HA_RWLOCK_INIT(&ssl_ctx_lru_rwlock);
} }
ssl_ctx_lru_seed = (unsigned int)time(NULL); ssl_ctx_lru_seed = (unsigned int)time(NULL);
ssl_ctx_serial = now_ms; ssl_ctx_serial = now_ms;
@ -8803,7 +8803,7 @@ static void __ssl_sock_deinit(void)
#if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES) #if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES)
if (ssl_ctx_lru_tree) { if (ssl_ctx_lru_tree) {
lru64_destroy(ssl_ctx_lru_tree); lru64_destroy(ssl_ctx_lru_tree);
RWLOCK_DESTROY(&ssl_ctx_lru_rwlock); HA_RWLOCK_DESTROY(&ssl_ctx_lru_rwlock);
} }
#endif #endif

View File

@ -2764,7 +2764,7 @@ static int stats_process_http_post(struct stream_interface *si)
reprocess = 1; reprocess = 1;
} }
else if ((sv = findserver(px, value)) != NULL) { else if ((sv = findserver(px, value)) != NULL) {
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
switch (action) { switch (action) {
case ST_ADM_ACTION_DISABLE: case ST_ADM_ACTION_DISABLE:
if (!(sv->cur_admin & SRV_ADMF_FMAINT)) { if (!(sv->cur_admin & SRV_ADMF_FMAINT)) {
@ -2890,7 +2890,7 @@ static int stats_process_http_post(struct stream_interface *si)
} }
break; break;
} }
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
} else { } else {
/* the server name is unknown or ambiguous (duplicate names) */ /* the server name is unknown or ambiguous (duplicate names) */
total_servers++; total_servers++;

View File

@ -61,9 +61,9 @@ void __stksess_free(struct stktable *t, struct stksess *ts)
*/ */
void stksess_free(struct stktable *t, struct stksess *ts) void stksess_free(struct stktable *t, struct stksess *ts)
{ {
SPIN_LOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
__stksess_free(t, ts); __stksess_free(t, ts);
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
} }
/* /*
@ -90,11 +90,11 @@ int stksess_kill(struct stktable *t, struct stksess *ts, int decrefcnt)
{ {
int ret; int ret;
SPIN_LOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
if (decrefcnt) if (decrefcnt)
ts->ref_cnt--; ts->ref_cnt--;
ret = __stksess_kill(t, ts); ret = __stksess_kill(t, ts);
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
return ret; return ret;
} }
@ -126,7 +126,7 @@ static struct stksess *__stksess_init(struct stktable *t, struct stksess * ts)
ts->exp.node.leaf_p = NULL; ts->exp.node.leaf_p = NULL;
ts->upd.node.leaf_p = NULL; ts->upd.node.leaf_p = NULL;
ts->expire = tick_add(now_ms, MS_TO_TICKS(t->expire)); ts->expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
RWLOCK_INIT(&ts->lock); HA_RWLOCK_INIT(&ts->lock);
return ts; return ts;
} }
@ -201,9 +201,9 @@ int stktable_trash_oldest(struct stktable *t, int to_batch)
{ {
int ret; int ret;
SPIN_LOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
ret = __stktable_trash_oldest(t, to_batch); ret = __stktable_trash_oldest(t, to_batch);
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
return ret; return ret;
} }
@ -249,9 +249,9 @@ struct stksess *stksess_new(struct stktable *t, struct stktable_key *key)
{ {
struct stksess *ts; struct stksess *ts;
SPIN_LOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
ts = __stksess_new(t, key); ts = __stksess_new(t, key);
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
return ts; return ts;
} }
@ -287,11 +287,11 @@ struct stksess *stktable_lookup_key(struct stktable *t, struct stktable_key *key
{ {
struct stksess *ts; struct stksess *ts;
SPIN_LOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
ts = __stktable_lookup_key(t, key); ts = __stktable_lookup_key(t, key);
if (ts) if (ts)
ts->ref_cnt++; ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
return ts; return ts;
} }
@ -325,11 +325,11 @@ struct stksess *stktable_lookup(struct stktable *t, struct stksess *ts)
{ {
struct stksess *lts; struct stksess *lts;
SPIN_LOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
lts = __stktable_lookup(t, ts); lts = __stktable_lookup(t, ts);
if (lts) if (lts)
lts->ref_cnt++; lts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
return lts; return lts;
} }
@ -389,11 +389,11 @@ void __stktable_touch_with_exp(struct stktable *t, struct stksess *ts, int local
*/ */
void stktable_touch_remote(struct stktable *t, struct stksess *ts, int decrefcnt) void stktable_touch_remote(struct stktable *t, struct stksess *ts, int decrefcnt)
{ {
SPIN_LOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
__stktable_touch_with_exp(t, ts, 0, ts->expire); __stktable_touch_with_exp(t, ts, 0, ts->expire);
if (decrefcnt) if (decrefcnt)
ts->ref_cnt--; ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
} }
/* Update the expiration timer for <ts> but do not touch its expiration node. /* Update the expiration timer for <ts> but do not touch its expiration node.
@ -406,18 +406,18 @@ void stktable_touch_local(struct stktable *t, struct stksess *ts, int decrefcnt)
{ {
int expire = tick_add(now_ms, MS_TO_TICKS(t->expire)); int expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
SPIN_LOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
__stktable_touch_with_exp(t, ts, 1, expire); __stktable_touch_with_exp(t, ts, 1, expire);
if (decrefcnt) if (decrefcnt)
ts->ref_cnt--; ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
} }
/* Just decrease the ref_cnt of the current session */ /* Just decrease the ref_cnt of the current session */
void stktable_release(struct stktable *t, struct stksess *ts) void stktable_release(struct stktable *t, struct stksess *ts)
{ {
SPIN_LOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
ts->ref_cnt--; ts->ref_cnt--;
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
} }
/* Insert new sticky session <ts> in the table. It is assumed that it does not /* Insert new sticky session <ts> in the table. It is assumed that it does not
@ -466,11 +466,11 @@ struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *
{ {
struct stksess *ts; struct stksess *ts;
SPIN_LOCK(STK_TABLE_LOCK, &table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
ts = __stktable_get_entry(table, key); ts = __stktable_get_entry(table, key);
if (ts) if (ts)
ts->ref_cnt++; ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);
return ts; return ts;
} }
@ -498,10 +498,10 @@ struct stksess *stktable_set_entry(struct stktable *table, struct stksess *nts)
{ {
struct stksess *ts; struct stksess *ts;
SPIN_LOCK(STK_TABLE_LOCK, &table->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &table->lock);
ts = __stktable_set_entry(table, nts); ts = __stktable_set_entry(table, nts);
ts->ref_cnt++; ts->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &table->lock);
return ts; return ts;
} }
@ -515,7 +515,7 @@ static int stktable_trash_expired(struct stktable *t)
struct eb32_node *eb; struct eb32_node *eb;
int looped = 0; int looped = 0;
SPIN_LOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &t->lock);
eb = eb32_lookup_ge(&t->exps, now_ms - TIMER_LOOK_BACK); eb = eb32_lookup_ge(&t->exps, now_ms - TIMER_LOOK_BACK);
while (1) { while (1) {
@ -570,7 +570,7 @@ static int stktable_trash_expired(struct stktable *t)
/* We have found no task to expire in any tree */ /* We have found no task to expire in any tree */
t->exp_next = TICK_ETERNITY; t->exp_next = TICK_ETERNITY;
out_unlock: out_unlock:
SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &t->lock);
return t->exp_next; return t->exp_next;
} }
@ -593,7 +593,7 @@ int stktable_init(struct stktable *t)
t->keys = EB_ROOT_UNIQUE; t->keys = EB_ROOT_UNIQUE;
memset(&t->exps, 0, sizeof(t->exps)); memset(&t->exps, 0, sizeof(t->exps));
t->updates = EB_ROOT_UNIQUE; t->updates = EB_ROOT_UNIQUE;
SPIN_INIT(&t->lock); HA_SPIN_INIT(&t->lock);
t->pool = create_pool("sticktables", sizeof(struct stksess) + t->data_size + t->key_size, MEM_F_SHARED); t->pool = create_pool("sticktables", sizeof(struct stksess) + t->data_size + t->key_size, MEM_F_SHARED);
@ -1546,7 +1546,7 @@ static enum act_return action_inc_gpc0(struct act_rule *rule, struct proxy *px,
ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC0_RATE); ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC0_RATE);
ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC0); ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC0);
if (ptr1 || ptr2) { if (ptr1 || ptr2) {
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
if (ptr1) if (ptr1)
update_freq_ctr_period(&stktable_data_cast(ptr1, gpc0_rate), update_freq_ctr_period(&stktable_data_cast(ptr1, gpc0_rate),
@ -1555,7 +1555,7 @@ static enum act_return action_inc_gpc0(struct act_rule *rule, struct proxy *px,
if (ptr2) if (ptr2)
stktable_data_cast(ptr2, gpc0)++; stktable_data_cast(ptr2, gpc0)++;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
/* If data was modified, we need to touch to re-schedule sync */ /* If data was modified, we need to touch to re-schedule sync */
stktable_touch_local(stkctr->table, ts, 0); stktable_touch_local(stkctr->table, ts, 0);
@ -1628,11 +1628,11 @@ static enum act_return action_set_gpt0(struct act_rule *rule, struct proxy *px,
/* Store the sample in the required sc, and ignore errors. */ /* Store the sample in the required sc, and ignore errors. */
ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPT0); ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPT0);
if (ptr) { if (ptr) {
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
stktable_data_cast(ptr, gpt0) = rule->arg.gpt.value; stktable_data_cast(ptr, gpt0) = rule->arg.gpt.value;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(stkctr->table, ts, 0); stktable_touch_local(stkctr->table, ts, 0);
} }
@ -1887,11 +1887,11 @@ smp_fetch_sc_get_gpt0(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = stktable_data_cast(ptr, gpt0); smp->data.u.sint = stktable_data_cast(ptr, gpt0);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -1928,11 +1928,11 @@ smp_fetch_sc_get_gpc0(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = stktable_data_cast(ptr, gpc0); smp->data.u.sint = stktable_data_cast(ptr, gpc0);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -1968,12 +1968,12 @@ smp_fetch_sc_gpc0_rate(const struct arg *args, struct sample *smp, const char *k
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, gpc0_rate), smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, gpc0_rate),
stkctr->table->data_arg[STKTABLE_DT_GPC0_RATE].u); stkctr->table->data_arg[STKTABLE_DT_GPC0_RATE].u);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2012,7 +2012,7 @@ smp_fetch_sc_inc_gpc0(const struct arg *args, struct sample *smp, const char *kw
ptr1 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0_RATE); ptr1 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0_RATE);
ptr2 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0); ptr2 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0);
if (ptr1 || ptr2) { if (ptr1 || ptr2) {
RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (ptr1) { if (ptr1) {
update_freq_ctr_period(&stktable_data_cast(ptr1, gpc0_rate), update_freq_ctr_period(&stktable_data_cast(ptr1, gpc0_rate),
@ -2023,7 +2023,7 @@ smp_fetch_sc_inc_gpc0(const struct arg *args, struct sample *smp, const char *kw
if (ptr2) if (ptr2)
smp->data.u.sint = ++stktable_data_cast(ptr2, gpc0); smp->data.u.sint = ++stktable_data_cast(ptr2, gpc0);
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
/* If data was modified, we need to touch to re-schedule sync */ /* If data was modified, we need to touch to re-schedule sync */
stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0); stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);
@ -2065,12 +2065,12 @@ smp_fetch_sc_clr_gpc0(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = stktable_data_cast(ptr, gpc0); smp->data.u.sint = stktable_data_cast(ptr, gpc0);
stktable_data_cast(ptr, gpc0) = 0; stktable_data_cast(ptr, gpc0) = 0;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
/* If data was modified, we need to touch to re-schedule sync */ /* If data was modified, we need to touch to re-schedule sync */
stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0); stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);
@ -2105,11 +2105,11 @@ smp_fetch_sc_conn_cnt(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = stktable_data_cast(ptr, conn_cnt); smp->data.u.sint = stktable_data_cast(ptr, conn_cnt);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2146,12 +2146,12 @@ smp_fetch_sc_conn_rate(const struct arg *args, struct sample *smp, const char *k
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, conn_rate), smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, conn_rate),
stkctr->table->data_arg[STKTABLE_DT_CONN_RATE].u); stkctr->table->data_arg[STKTABLE_DT_CONN_RATE].u);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2197,11 +2197,11 @@ smp_fetch_src_updt_conn_cnt(const struct arg *args, struct sample *smp, const ch
smp->data.type = SMP_T_SINT; smp->data.type = SMP_T_SINT;
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
smp->data.u.sint = ++stktable_data_cast(ptr, conn_cnt); smp->data.u.sint = ++stktable_data_cast(ptr, conn_cnt);
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
smp->flags = SMP_F_VOL_TEST; smp->flags = SMP_F_VOL_TEST;
@ -2238,11 +2238,11 @@ smp_fetch_sc_conn_cur(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = stktable_data_cast(ptr, conn_cur); smp->data.u.sint = stktable_data_cast(ptr, conn_cur);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2277,11 +2277,11 @@ smp_fetch_sc_sess_cnt(const struct arg *args, struct sample *smp, const char *kw
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = stktable_data_cast(ptr, sess_cnt); smp->data.u.sint = stktable_data_cast(ptr, sess_cnt);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2315,12 +2315,12 @@ smp_fetch_sc_sess_rate(const struct arg *args, struct sample *smp, const char *k
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, sess_rate), smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, sess_rate),
stkctr->table->data_arg[STKTABLE_DT_SESS_RATE].u); stkctr->table->data_arg[STKTABLE_DT_SESS_RATE].u);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2355,11 +2355,11 @@ smp_fetch_sc_http_req_cnt(const struct arg *args, struct sample *smp, const char
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = stktable_data_cast(ptr, http_req_cnt); smp->data.u.sint = stktable_data_cast(ptr, http_req_cnt);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2394,12 +2394,12 @@ smp_fetch_sc_http_req_rate(const struct arg *args, struct sample *smp, const cha
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate), smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u); stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2434,11 +2434,11 @@ smp_fetch_sc_http_err_cnt(const struct arg *args, struct sample *smp, const char
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = stktable_data_cast(ptr, http_err_cnt); smp->data.u.sint = stktable_data_cast(ptr, http_err_cnt);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2473,12 +2473,12 @@ smp_fetch_sc_http_err_rate(const struct arg *args, struct sample *smp, const cha
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, http_err_rate), smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, http_err_rate),
stkctr->table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u); stkctr->table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2513,11 +2513,11 @@ smp_fetch_sc_kbytes_in(const struct arg *args, struct sample *smp, const char *k
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = stktable_data_cast(ptr, bytes_in_cnt) >> 10; smp->data.u.sint = stktable_data_cast(ptr, bytes_in_cnt) >> 10;
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2552,12 +2552,12 @@ smp_fetch_sc_bytes_in_rate(const struct arg *args, struct sample *smp, const cha
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, bytes_in_rate), smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, bytes_in_rate),
stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u); stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2592,11 +2592,11 @@ smp_fetch_sc_kbytes_out(const struct arg *args, struct sample *smp, const char *
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = stktable_data_cast(ptr, bytes_out_cnt) >> 10; smp->data.u.sint = stktable_data_cast(ptr, bytes_out_cnt) >> 10;
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2631,12 +2631,12 @@ smp_fetch_sc_bytes_out_rate(const struct arg *args, struct sample *smp, const ch
return 0; /* parameter not stored */ return 0; /* parameter not stored */
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, bytes_out_rate), smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, bytes_out_rate),
stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u); stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u);
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
if (stkctr == &tmpstkctr) if (stkctr == &tmpstkctr)
stktable_release(stkctr->table, stkctr_entry(stkctr)); stktable_release(stkctr->table, stkctr_entry(stkctr));
@ -2875,13 +2875,13 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
stktable_release(&px->table, ts); stktable_release(&px->table, ts);
return 0; return 0;
} }
RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
if (!table_dump_entry_to_buffer(&trash, si, px, ts)) { if (!table_dump_entry_to_buffer(&trash, si, px, ts)) {
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_release(&px->table, ts); stktable_release(&px->table, ts);
return 0; return 0;
} }
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_release(&px->table, ts); stktable_release(&px->table, ts);
break; break;
@ -2910,13 +2910,13 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
return 1; return 1;
} }
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
for (cur_arg = 5; *args[cur_arg]; cur_arg += 2) { for (cur_arg = 5; *args[cur_arg]; cur_arg += 2) {
if (strncmp(args[cur_arg], "data.", 5) != 0) { if (strncmp(args[cur_arg], "data.", 5) != 0) {
appctx->ctx.cli.severity = LOG_ERR; appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "\"data.<type>\" followed by a value expected\n"; appctx->ctx.cli.msg = "\"data.<type>\" followed by a value expected\n";
appctx->st0 = CLI_ST_PRINT; appctx->st0 = CLI_ST_PRINT;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(&px->table, ts, 1); stktable_touch_local(&px->table, ts, 1);
return 1; return 1;
} }
@ -2926,7 +2926,7 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
appctx->ctx.cli.severity = LOG_ERR; appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "Unknown data type\n"; appctx->ctx.cli.msg = "Unknown data type\n";
appctx->st0 = CLI_ST_PRINT; appctx->st0 = CLI_ST_PRINT;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(&px->table, ts, 1); stktable_touch_local(&px->table, ts, 1);
return 1; return 1;
} }
@ -2935,7 +2935,7 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
appctx->ctx.cli.severity = LOG_ERR; appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "Data type not stored in this table\n"; appctx->ctx.cli.msg = "Data type not stored in this table\n";
appctx->st0 = CLI_ST_PRINT; appctx->st0 = CLI_ST_PRINT;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(&px->table, ts, 1); stktable_touch_local(&px->table, ts, 1);
return 1; return 1;
} }
@ -2944,7 +2944,7 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
appctx->ctx.cli.severity = LOG_ERR; appctx->ctx.cli.severity = LOG_ERR;
appctx->ctx.cli.msg = "Require a valid integer value to store\n"; appctx->ctx.cli.msg = "Require a valid integer value to store\n";
appctx->st0 = CLI_ST_PRINT; appctx->st0 = CLI_ST_PRINT;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(&px->table, ts, 1); stktable_touch_local(&px->table, ts, 1);
return 1; return 1;
} }
@ -2978,7 +2978,7 @@ static int table_process_entry_per_key(struct appctx *appctx, char **args)
break; break;
} }
} }
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(&px->table, ts, 1); stktable_touch_local(&px->table, ts, 1);
break; break;
@ -3155,16 +3155,16 @@ static int cli_io_handler_table(struct appctx *appctx)
if (appctx->ctx.table.target && if (appctx->ctx.table.target &&
(strm_li(s)->bind_conf->level & ACCESS_LVL_MASK) >= ACCESS_LVL_OPER) { (strm_li(s)->bind_conf->level & ACCESS_LVL_MASK) >= ACCESS_LVL_OPER) {
/* dump entries only if table explicitly requested */ /* dump entries only if table explicitly requested */
SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
eb = ebmb_first(&appctx->ctx.table.proxy->table.keys); eb = ebmb_first(&appctx->ctx.table.proxy->table.keys);
if (eb) { if (eb) {
appctx->ctx.table.entry = ebmb_entry(eb, struct stksess, key); appctx->ctx.table.entry = ebmb_entry(eb, struct stksess, key);
appctx->ctx.table.entry->ref_cnt++; appctx->ctx.table.entry->ref_cnt++;
appctx->st2 = STAT_ST_LIST; appctx->st2 = STAT_ST_LIST;
SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
break; break;
} }
SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
} }
} }
appctx->ctx.table.proxy = appctx->ctx.table.proxy->next; appctx->ctx.table.proxy = appctx->ctx.table.proxy->next;
@ -3173,7 +3173,7 @@ static int cli_io_handler_table(struct appctx *appctx)
case STAT_ST_LIST: case STAT_ST_LIST:
skip_entry = 0; skip_entry = 0;
RWLOCK_RDLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
if (appctx->ctx.table.data_type >= 0) { if (appctx->ctx.table.data_type >= 0) {
/* we're filtering on some data contents */ /* we're filtering on some data contents */
@ -3221,13 +3221,13 @@ static int cli_io_handler_table(struct appctx *appctx)
if (show && !skip_entry && if (show && !skip_entry &&
!table_dump_entry_to_buffer(&trash, si, appctx->ctx.table.proxy, appctx->ctx.table.entry)) { !table_dump_entry_to_buffer(&trash, si, appctx->ctx.table.proxy, appctx->ctx.table.entry)) {
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
return 0; return 0;
} }
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &appctx->ctx.table.entry->lock);
SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock); HA_SPIN_LOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
appctx->ctx.table.entry->ref_cnt--; appctx->ctx.table.entry->ref_cnt--;
eb = ebmb_next(&appctx->ctx.table.entry->key); eb = ebmb_next(&appctx->ctx.table.entry->key);
@ -3239,7 +3239,7 @@ static int cli_io_handler_table(struct appctx *appctx)
else if (!skip_entry && !appctx->ctx.table.entry->ref_cnt) else if (!skip_entry && !appctx->ctx.table.entry->ref_cnt)
__stksess_kill(&appctx->ctx.table.proxy->table, old); __stksess_kill(&appctx->ctx.table.proxy->table, old);
appctx->ctx.table.entry->ref_cnt++; appctx->ctx.table.entry->ref_cnt++;
SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
break; break;
} }
@ -3249,7 +3249,7 @@ static int cli_io_handler_table(struct appctx *appctx)
else if (!skip_entry && !appctx->ctx.table.entry->ref_cnt) else if (!skip_entry && !appctx->ctx.table.entry->ref_cnt)
__stksess_kill(&appctx->ctx.table.proxy->table, appctx->ctx.table.entry); __stksess_kill(&appctx->ctx.table.proxy->table, appctx->ctx.table.entry);
SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock); HA_SPIN_UNLOCK(STK_TABLE_LOCK, &appctx->ctx.table.proxy->table.lock);
appctx->ctx.table.proxy = appctx->ctx.table.proxy->next; appctx->ctx.table.proxy = appctx->ctx.table.proxy->next;
appctx->st2 = STAT_ST_INFO; appctx->st2 = STAT_ST_INFO;

View File

@ -253,9 +253,9 @@ struct stream *stream_new(struct session *sess, enum obj_type *origin)
s->txn = NULL; s->txn = NULL;
s->hlua = NULL; s->hlua = NULL;
SPIN_LOCK(STRMS_LOCK, &streams_lock); HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
LIST_ADDQ(&streams, &s->list); LIST_ADDQ(&streams, &s->list);
SPIN_UNLOCK(STRMS_LOCK, &streams_lock); HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0) if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
goto out_fail_accept; goto out_fail_accept;
@ -326,10 +326,10 @@ static void stream_free(struct stream *s)
/* We may still be present in the buffer wait queue */ /* We may still be present in the buffer wait queue */
if (!LIST_ISEMPTY(&s->buffer_wait.list)) { if (!LIST_ISEMPTY(&s->buffer_wait.list)) {
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_DEL(&s->buffer_wait.list); LIST_DEL(&s->buffer_wait.list);
LIST_INIT(&s->buffer_wait.list); LIST_INIT(&s->buffer_wait.list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
} }
if (s->req.buf->size || s->res.buf->size) { if (s->req.buf->size || s->res.buf->size) {
b_drop(&s->req.buf); b_drop(&s->req.buf);
@ -373,7 +373,7 @@ static void stream_free(struct stream *s)
stream_store_counters(s); stream_store_counters(s);
SPIN_LOCK(STRMS_LOCK, &streams_lock); HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
list_for_each_entry_safe(bref, back, &s->back_refs, users) { list_for_each_entry_safe(bref, back, &s->back_refs, users) {
/* we have to unlink all watchers. We must not relink them if /* we have to unlink all watchers. We must not relink them if
* this stream was the last one in the list. * this stream was the last one in the list.
@ -385,7 +385,7 @@ static void stream_free(struct stream *s)
bref->ref = s->list.n; bref->ref = s->list.n;
} }
LIST_DEL(&s->list); LIST_DEL(&s->list);
SPIN_UNLOCK(STRMS_LOCK, &streams_lock); HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
si_release_endpoint(&s->si[1]); si_release_endpoint(&s->si[1]);
si_release_endpoint(&s->si[0]); si_release_endpoint(&s->si[0]);
@ -423,18 +423,18 @@ static void stream_free(struct stream *s)
static int stream_alloc_work_buffer(struct stream *s) static int stream_alloc_work_buffer(struct stream *s)
{ {
if (!LIST_ISEMPTY(&s->buffer_wait.list)) { if (!LIST_ISEMPTY(&s->buffer_wait.list)) {
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_DEL(&s->buffer_wait.list); LIST_DEL(&s->buffer_wait.list);
LIST_INIT(&s->buffer_wait.list); LIST_INIT(&s->buffer_wait.list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
} }
if (b_alloc_margin(&s->res.buf, 0)) if (b_alloc_margin(&s->res.buf, 0))
return 1; return 1;
SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
LIST_ADDQ(&buffer_wq, &s->buffer_wait.list); LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock); HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
return 0; return 0;
} }
@ -468,7 +468,7 @@ void stream_release_buffers(struct stream *s)
int init_stream() int init_stream()
{ {
LIST_INIT(&streams); LIST_INIT(&streams);
SPIN_INIT(&streams_lock); HA_SPIN_INIT(&streams_lock);
pool2_stream = create_pool("stream", sizeof(struct stream), MEM_F_SHARED); pool2_stream = create_pool("stream", sizeof(struct stream), MEM_F_SHARED);
return pool2_stream != NULL; return pool2_stream != NULL;
} }
@ -504,7 +504,7 @@ void stream_process_counters(struct stream *s)
continue; continue;
} }
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_IN_CNT); ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_IN_CNT);
if (ptr1) if (ptr1)
stktable_data_cast(ptr1, bytes_in_cnt) += bytes; stktable_data_cast(ptr1, bytes_in_cnt) += bytes;
@ -513,7 +513,7 @@ void stream_process_counters(struct stream *s)
if (ptr2) if (ptr2)
update_freq_ctr_period(&stktable_data_cast(ptr2, bytes_in_rate), update_freq_ctr_period(&stktable_data_cast(ptr2, bytes_in_rate),
stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u, bytes); stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u, bytes);
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
/* If data was modified, we need to touch to re-schedule sync */ /* If data was modified, we need to touch to re-schedule sync */
if (ptr1 || ptr2) if (ptr1 || ptr2)
@ -544,7 +544,7 @@ void stream_process_counters(struct stream *s)
continue; continue;
} }
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_OUT_CNT); ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_OUT_CNT);
if (ptr1) if (ptr1)
stktable_data_cast(ptr1, bytes_out_cnt) += bytes; stktable_data_cast(ptr1, bytes_out_cnt) += bytes;
@ -553,7 +553,7 @@ void stream_process_counters(struct stream *s)
if (ptr2) if (ptr2)
update_freq_ctr_period(&stktable_data_cast(ptr2, bytes_out_rate), update_freq_ctr_period(&stktable_data_cast(ptr2, bytes_out_rate),
stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u, bytes); stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u, bytes);
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
/* If data was modified, we need to touch to re-schedule sync */ /* If data was modified, we need to touch to re-schedule sync */
if (ptr1 || ptr2) if (ptr1 || ptr2)
@ -1409,10 +1409,10 @@ static int process_sticking_rules(struct stream *s, struct channel *req, int an_
void *ptr; void *ptr;
/* srv found in table */ /* srv found in table */
RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
ptr = stktable_data_ptr(rule->table.t, ts, STKTABLE_DT_SERVER_ID); ptr = stktable_data_ptr(rule->table.t, ts, STKTABLE_DT_SERVER_ID);
node = eb32_lookup(&px->conf.used_server_id, stktable_data_cast(ptr, server_id)); node = eb32_lookup(&px->conf.used_server_id, stktable_data_cast(ptr, server_id));
RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
if (node) { if (node) {
struct server *srv; struct server *srv;
@ -1536,10 +1536,10 @@ static int process_store_rules(struct stream *s, struct channel *rep, int an_bit
} }
s->store[i].ts = NULL; s->store[i].ts = NULL;
RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
ptr = stktable_data_ptr(s->store[i].table, ts, STKTABLE_DT_SERVER_ID); ptr = stktable_data_ptr(s->store[i].table, ts, STKTABLE_DT_SERVER_ID);
stktable_data_cast(ptr, server_id) = objt_server(s->target)->puid; stktable_data_cast(ptr, server_id) = objt_server(s->target)->puid;
RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
stktable_touch_local(s->store[i].table, ts, 1); stktable_touch_local(s->store[i].table, ts, 1);
} }
s->store_count = 0; /* everything is stored */ s->store_count = 0; /* everything is stored */
@ -2536,12 +2536,12 @@ void stream_update_time_stats(struct stream *s)
swrate_add(&srv->counters.d_time, TIME_STATS_SAMPLES, t_data); swrate_add(&srv->counters.d_time, TIME_STATS_SAMPLES, t_data);
swrate_add(&srv->counters.t_time, TIME_STATS_SAMPLES, t_close); swrate_add(&srv->counters.t_time, TIME_STATS_SAMPLES, t_close);
} }
SPIN_LOCK(PROXY_LOCK, &s->be->lock); HA_SPIN_LOCK(PROXY_LOCK, &s->be->lock);
swrate_add(&s->be->be_counters.q_time, TIME_STATS_SAMPLES, t_queue); swrate_add(&s->be->be_counters.q_time, TIME_STATS_SAMPLES, t_queue);
swrate_add(&s->be->be_counters.c_time, TIME_STATS_SAMPLES, t_connect); swrate_add(&s->be->be_counters.c_time, TIME_STATS_SAMPLES, t_connect);
swrate_add(&s->be->be_counters.d_time, TIME_STATS_SAMPLES, t_data); swrate_add(&s->be->be_counters.d_time, TIME_STATS_SAMPLES, t_data);
swrate_add(&s->be->be_counters.t_time, TIME_STATS_SAMPLES, t_close); swrate_add(&s->be->be_counters.t_time, TIME_STATS_SAMPLES, t_close);
SPIN_UNLOCK(PROXY_LOCK, &s->be->lock); HA_SPIN_UNLOCK(PROXY_LOCK, &s->be->lock);
} }
/* /*
@ -3056,14 +3056,14 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
* pointer points back to the head of the streams list. * pointer points back to the head of the streams list.
*/ */
LIST_INIT(&appctx->ctx.sess.bref.users); LIST_INIT(&appctx->ctx.sess.bref.users);
SPIN_LOCK(STRMS_LOCK, &streams_lock); HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
appctx->ctx.sess.bref.ref = streams.n; appctx->ctx.sess.bref.ref = streams.n;
SPIN_UNLOCK(STRMS_LOCK, &streams_lock); HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
appctx->st2 = STAT_ST_LIST; appctx->st2 = STAT_ST_LIST;
/* fall through */ /* fall through */
case STAT_ST_LIST: case STAT_ST_LIST:
SPIN_LOCK(STRMS_LOCK, &streams_lock); HA_SPIN_LOCK(STRMS_LOCK, &streams_lock);
/* first, let's detach the back-ref from a possible previous stream */ /* first, let's detach the back-ref from a possible previous stream */
if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users)) { if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users)) {
LIST_DEL(&appctx->ctx.sess.bref.users); LIST_DEL(&appctx->ctx.sess.bref.users);
@ -3084,7 +3084,7 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
LIST_ADDQ(&curr_strm->back_refs, &appctx->ctx.sess.bref.users); LIST_ADDQ(&curr_strm->back_refs, &appctx->ctx.sess.bref.users);
/* call the proper dump() function and return if we're missing space */ /* call the proper dump() function and return if we're missing space */
if (!stats_dump_full_strm_to_buffer(si, curr_strm)) { if (!stats_dump_full_strm_to_buffer(si, curr_strm)) {
SPIN_UNLOCK(STRMS_LOCK, &streams_lock); HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
return 0; return 0;
} }
@ -3212,7 +3212,7 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
*/ */
si_applet_cant_put(si); si_applet_cant_put(si);
LIST_ADDQ(&curr_strm->back_refs, &appctx->ctx.sess.bref.users); LIST_ADDQ(&curr_strm->back_refs, &appctx->ctx.sess.bref.users);
SPIN_UNLOCK(STRMS_LOCK, &streams_lock); HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
return 0; return 0;
} }
@ -3229,17 +3229,17 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
if (ci_putchk(si_ic(si), &trash) == -1) { if (ci_putchk(si_ic(si), &trash) == -1) {
si_applet_cant_put(si); si_applet_cant_put(si);
SPIN_UNLOCK(STRMS_LOCK, &streams_lock); HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
return 0; return 0;
} }
appctx->ctx.sess.target = NULL; appctx->ctx.sess.target = NULL;
appctx->ctx.sess.uid = 0; appctx->ctx.sess.uid = 0;
SPIN_UNLOCK(STRMS_LOCK, &streams_lock); HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
return 1; return 1;
} }
SPIN_UNLOCK(STRMS_LOCK, &streams_lock); HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
appctx->st2 = STAT_ST_FIN; appctx->st2 = STAT_ST_FIN;
/* fall through */ /* fall through */
@ -3252,10 +3252,10 @@ static int cli_io_handler_dump_sess(struct appctx *appctx)
static void cli_release_show_sess(struct appctx *appctx) static void cli_release_show_sess(struct appctx *appctx)
{ {
if (appctx->st2 == STAT_ST_LIST) { if (appctx->st2 == STAT_ST_LIST) {
SPIN_UNLOCK(STRMS_LOCK, &streams_lock); HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users)) if (!LIST_ISEMPTY(&appctx->ctx.sess.bref.users))
LIST_DEL(&appctx->ctx.sess.bref.users); LIST_DEL(&appctx->ctx.sess.bref.users);
SPIN_UNLOCK(STRMS_LOCK, &streams_lock); HA_SPIN_UNLOCK(STRMS_LOCK, &streams_lock);
} }
} }
@ -3308,11 +3308,11 @@ static int cli_parse_shutdown_sessions_server(char **args, struct appctx *appctx
return 1; return 1;
/* kill all the stream that are on this server */ /* kill all the stream that are on this server */
SPIN_LOCK(SERVER_LOCK, &sv->lock); HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
list_for_each_entry_safe(strm, strm_bck, &sv->actconns, by_srv) list_for_each_entry_safe(strm, strm_bck, &sv->actconns, by_srv)
if (strm->srv_conn == sv) if (strm->srv_conn == sv)
stream_shutdown(strm, SF_ERR_KILLED); stream_shutdown(strm, SF_ERR_KILLED);
SPIN_UNLOCK(SERVER_LOCK, &sv->lock); HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
return 1; return 1;
} }

View File

@ -121,7 +121,7 @@ int wake_expired_tasks()
int ret = TICK_ETERNITY; int ret = TICK_ETERNITY;
while (1) { while (1) {
SPIN_LOCK(TASK_WQ_LOCK, &wq_lock); HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
lookup_next: lookup_next:
eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK); eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
if (!eb) { if (!eb) {
@ -162,11 +162,11 @@ int wake_expired_tasks()
__task_queue(task); __task_queue(task);
goto lookup_next; goto lookup_next;
} }
SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock); HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
task_wakeup(task, TASK_WOKEN_TIMER); task_wakeup(task, TASK_WOKEN_TIMER);
} }
SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock); HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
return ret; return ret;
} }
@ -251,7 +251,7 @@ void process_runnable_tasks()
return; return;
} }
SPIN_LOCK(TASK_RQ_LOCK, &rq_lock); HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
rq_next = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit); rq_next = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
do { do {
@ -289,7 +289,7 @@ void process_runnable_tasks()
if (!local_tasks_count) if (!local_tasks_count)
break; break;
SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock); HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
final_tasks_count = 0; final_tasks_count = 0;
for (i = 0; i < local_tasks_count ; i++) { for (i = 0; i < local_tasks_count ; i++) {
@ -305,7 +305,7 @@ void process_runnable_tasks()
local_tasks[final_tasks_count++] = t; local_tasks[final_tasks_count++] = t;
} }
SPIN_LOCK(TASK_RQ_LOCK, &rq_lock); HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
for (i = 0; i < final_tasks_count ; i++) { for (i = 0; i < final_tasks_count ; i++) {
t = local_tasks[i]; t = local_tasks[i];
t->state &= ~TASK_RUNNING; t->state &= ~TASK_RUNNING;
@ -321,7 +321,7 @@ void process_runnable_tasks()
} }
} while (max_processed > 0); } while (max_processed > 0);
SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock); HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
} }
/* perform minimal intializations, report 0 in case of error, 1 if OK. */ /* perform minimal intializations, report 0 in case of error, 1 if OK. */
@ -329,8 +329,8 @@ int init_task()
{ {
memset(&timers, 0, sizeof(timers)); memset(&timers, 0, sizeof(timers));
memset(&rqueue, 0, sizeof(rqueue)); memset(&rqueue, 0, sizeof(rqueue));
SPIN_INIT(&wq_lock); HA_SPIN_INIT(&wq_lock);
SPIN_INIT(&rq_lock); HA_SPIN_INIT(&rq_lock);
pool2_task = create_pool("task", sizeof(struct task), MEM_F_SHARED); pool2_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
if (!pool2_task) if (!pool2_task)
return 0; return 0;

View File

@ -118,11 +118,11 @@ void vars_prune(struct vars *vars, struct session *sess, struct stream *strm)
struct var *var, *tmp; struct var *var, *tmp;
unsigned int size = 0; unsigned int size = 0;
RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock); HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
list_for_each_entry_safe(var, tmp, &vars->head, l) { list_for_each_entry_safe(var, tmp, &vars->head, l) {
size += var_clear(var); size += var_clear(var);
} }
RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock); HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
var_accounting_diff(vars, sess, strm, -size); var_accounting_diff(vars, sess, strm, -size);
} }
@ -134,11 +134,11 @@ void vars_prune_per_sess(struct vars *vars)
struct var *var, *tmp; struct var *var, *tmp;
unsigned int size = 0; unsigned int size = 0;
RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock); HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
list_for_each_entry_safe(var, tmp, &vars->head, l) { list_for_each_entry_safe(var, tmp, &vars->head, l) {
size += var_clear(var); size += var_clear(var);
} }
RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock); HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
HA_ATOMIC_SUB(&vars->size, size); HA_ATOMIC_SUB(&vars->size, size);
HA_ATOMIC_SUB(&global.vars.size, size); HA_ATOMIC_SUB(&global.vars.size, size);
@ -151,7 +151,7 @@ void vars_init(struct vars *vars, enum vars_scope scope)
LIST_INIT(&vars->head); LIST_INIT(&vars->head);
vars->scope = scope; vars->scope = scope;
vars->size = 0; vars->size = 0;
RWLOCK_INIT(&vars->rwlock); HA_RWLOCK_INIT(&vars->rwlock);
} }
/* This function declares a new variable name. It returns a pointer /* This function declares a new variable name. It returns a pointer
@ -214,9 +214,9 @@ static char *register_name(const char *name, int len, enum vars_scope *scope,
} }
if (alloc) if (alloc)
RWLOCK_WRLOCK(VARS_LOCK, &var_names_rwlock); HA_RWLOCK_WRLOCK(VARS_LOCK, &var_names_rwlock);
else else
RWLOCK_RDLOCK(VARS_LOCK, &var_names_rwlock); HA_RWLOCK_RDLOCK(VARS_LOCK, &var_names_rwlock);
/* Look for existing variable name. */ /* Look for existing variable name. */
@ -263,9 +263,9 @@ static char *register_name(const char *name, int len, enum vars_scope *scope,
end: end:
if (alloc) if (alloc)
RWLOCK_WRUNLOCK(VARS_LOCK, &var_names_rwlock); HA_RWLOCK_WRUNLOCK(VARS_LOCK, &var_names_rwlock);
else else
RWLOCK_RDUNLOCK(VARS_LOCK, &var_names_rwlock); HA_RWLOCK_RDUNLOCK(VARS_LOCK, &var_names_rwlock);
return res; return res;
} }
@ -312,12 +312,12 @@ static int smp_fetch_var(const struct arg *args, struct sample *smp, const char
if (vars->scope != var_desc->scope) if (vars->scope != var_desc->scope)
return 0; return 0;
RWLOCK_RDLOCK(VARS_LOCK, &vars->rwlock); HA_RWLOCK_RDLOCK(VARS_LOCK, &vars->rwlock);
var = var_get(vars, var_desc->name); var = var_get(vars, var_desc->name);
/* check for the variable avalaibility */ /* check for the variable avalaibility */
if (!var) { if (!var) {
RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock); HA_RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock);
return 0; return 0;
} }
@ -327,7 +327,7 @@ static int smp_fetch_var(const struct arg *args, struct sample *smp, const char
smp_dup(smp); smp_dup(smp);
smp->flags |= SMP_F_CONST; smp->flags |= SMP_F_CONST;
RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock); HA_RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock);
return 1; return 1;
} }
@ -438,9 +438,9 @@ static inline int sample_store_stream(const char *name, enum vars_scope scope, s
if (vars->scope != scope) if (vars->scope != scope)
return 0; return 0;
RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock); HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
ret = sample_store(vars, name, smp); ret = sample_store(vars, name, smp);
RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock); HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
return ret; return ret;
} }
@ -463,13 +463,13 @@ static inline int sample_clear_stream(const char *name, enum vars_scope scope, s
return 0; return 0;
/* Look for existing variable name. */ /* Look for existing variable name. */
RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock); HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
var = var_get(vars, name); var = var_get(vars, name);
if (var) { if (var) {
size = var_clear(var); size = var_clear(var);
var_accounting_diff(vars, smp->sess, smp->strm, -size); var_accounting_diff(vars, smp->sess, smp->strm, -size);
} }
RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock); HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
return 1; return 1;
} }
@ -914,5 +914,5 @@ static void __vars_init(void)
http_res_keywords_register(&http_res_kws); http_res_keywords_register(&http_res_kws);
cfg_register_keywords(&cfg_kws); cfg_register_keywords(&cfg_kws);
RWLOCK_INIT(&var_names_rwlock); HA_RWLOCK_INIT(&var_names_rwlock);
} }