WIP/MINOR: stick-tables: Rename stksess shards to use buckets

Christopher Faulet 2025-10-15 14:34:02 +02:00
parent 02607a97d5
commit cd363c95bd
4 changed files with 134 additions and 134 deletions

View File

@@ -224,7 +224,7 @@ struct stktable {
struct eb_root keys; /* head of sticky session tree */
struct eb_root exps; /* head of sticky session expiration tree */
__decl_thread(HA_RWLOCK_T sh_lock); /* for the trees above */
} shards[CONFIG_HAP_TBL_BUCKETS];
} buckets[CONFIG_HAP_TBL_BUCKETS];
unsigned int refcnt; /* number of local peer over all peers sections
attached to this table */
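The hunk above is the heart of the rename: a stick table is split into CONFIG_HAP_TBL_BUCKETS independent buckets, each owning its key tree, its expiration tree and its own rwlock. For illustration only, a minimal self-contained sketch of that layout, with pthread and placeholder roots standing in for HAProxy's HA_RWLOCK_T and eb_root types (NBUCKETS and the struct names are hypothetical):

#include <pthread.h>

#define NBUCKETS 16                        /* stand-in for CONFIG_HAP_TBL_BUCKETS */

struct tree_root { void *root; };          /* placeholder for struct eb_root */

struct bucketed_table {
    struct {
        struct tree_root keys;             /* sticky sessions indexed by key */
        struct tree_root exps;             /* the same sessions ordered by expiration */
        pthread_rwlock_t sh_lock;          /* protects the two trees above */
    } buckets[NBUCKETS];
    unsigned int refcnt;
};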

View File

@@ -193,11 +193,11 @@ static inline void *stktable_data_ptr_idx(struct stktable *t, struct stksess *ts
return __stktable_data_ptr(t, ts, type) + idx*stktable_type_size(stktable_data_types[type].std_type);
}
/* return a shard number for key <key> of len <len> present in table <t>, for
/* return a bucket number for key <key> of len <len> present in table <t>, for
* use with the tree indexing. The value will be from 0 to
* CONFIG_HAP_TBL_BUCKETS-1.
*/
static inline uint stktable_calc_shard_num(const struct stktable *t, const void *key, size_t len)
static inline uint stktable_calc_bucket_num(const struct stktable *t, const void *key, size_t len)
{
#if CONFIG_HAP_TBL_BUCKETS > 1
return XXH32(key, len, t->hash_seed) % CONFIG_HAP_TBL_BUCKETS;
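The helper above derives the bucket from an xxHash of the key. A standalone sketch of the same mapping, assuming only the xxHash library (the calc_bucket() name and parameters are illustrative; HAProxy seeds the hash with t->hash_seed):

#include <stddef.h>
#include <stdint.h>
#include <xxhash.h>                        /* provides XXH32() */

/* Map a key of <len> bytes to a bucket in [0, nbuckets-1], the same way
 * stktable_calc_bucket_num() does with t->hash_seed as the seed. */
static unsigned int calc_bucket(const void *key, size_t len,
                                uint32_t seed, unsigned int nbuckets)
{
    if (nbuckets <= 1)
        return 0;                          /* single-bucket builds skip the hash */
    return XXH32(key, len, seed) % nbuckets;
}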
@@ -219,13 +219,13 @@ static inline int __stksess_kill_if_expired(struct stktable *t, struct stksess *
* Decrease the refcount of a stksess and release it if the refcount falls to 0
* _AND_ if the session expired. Note, the refcount is always decremented.
*
* This function locks the corresponding table shard to proceed. When this
* This function locks the corresponding table bucket to proceed. When this
* function is called, the caller must be sure it owns a reference on the
* stksess (refcount >= 1).
*/
static inline void stksess_kill_if_expired(struct stktable *t, struct stksess *ts)
{
uint shard;
uint bucket;
size_t len;
if (t->expire != TICK_ETERNITY && tick_is_expired(ts->expire, now_ms)) {
@@ -234,15 +234,15 @@ static inline void stksess_kill_if_expired(struct stktable *t, struct stksess *t
else
len = t->key_size;
shard = stktable_calc_shard_num(t, ts->key.key, len);
bucket = stktable_calc_bucket_num(t, ts->key.key, len);
/* make the compiler happy when shard is not used without threads */
ALREADY_CHECKED(shard);
/* make the compiler happy when bucket is not used without threads */
ALREADY_CHECKED(bucket);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
if (!HA_ATOMIC_SUB_FETCH(&ts->ref_cnt, 1))
__stksess_kill_if_expired(t, ts);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
}
else
HA_ATOMIC_SUB_FETCH(&ts->ref_cnt, 1);
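The function above follows a fixed pattern: hash the key to find the bucket, take that bucket's write lock, drop one reference, and only release the entry when the count reaches zero on an already-expired session. A self-contained sketch of that pattern, assuming the bucket has already been computed as shown earlier (the entry/bucket types and the free_entry callback are hypothetical stand-ins for __stksess_kill_if_expired()):

#include <pthread.h>
#include <stdatomic.h>

struct entry {
    atomic_int ref_cnt;
    int        expired;                    /* stands in for the tick_is_expired() test */
};

struct bucket {
    pthread_rwlock_t sh_lock;              /* key and expiration trees omitted */
};

/* Drop one reference on <e>; if it was the last one and the entry already
 * expired, free it under its bucket's write lock. */
static void release_if_expired(struct bucket *b, struct entry *e,
                               void (*free_entry)(struct entry *))
{
    if (e->expired) {
        pthread_rwlock_wrlock(&b->sh_lock);
        if (atomic_fetch_sub(&e->ref_cnt, 1) == 1)
            free_entry(e);                 /* last reference on an expired entry */
        pthread_rwlock_unlock(&b->sh_lock);
    }
    else {
        atomic_fetch_sub(&e->ref_cnt, 1);  /* not expired: just drop the reference */
    }
}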

View File

@@ -969,7 +969,7 @@ int hlua_stktable_dump(lua_State *L)
int i;
int skip_entry;
void *ptr;
int shard = 0; // FIXME: this should be stored in the context and iterate to scan the table
int bucket = 0; // FIXME: this should be stored in the context and iterate to scan the table
t = hlua_check_stktable(L, 1);
type = lua_type(L, 2);
@@ -1030,17 +1030,17 @@ int hlua_stktable_dump(lua_State *L)
lua_newtable(L);
next_shard:
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
eb = ebmb_first(&t->shards[shard].keys);
next_bucket:
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
eb = ebmb_first(&t->buckets[bucket].keys);
for (n = eb; n; n = ebmb_next(n)) {
ts = ebmb_entry(n, struct stksess, key);
if (!ts) {
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
goto done;
}
HA_ATOMIC_INC(&ts->ref_cnt);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
/* multi condition/value filter */
skip_entry = 0;
@@ -1079,7 +1079,7 @@ int hlua_stktable_dump(lua_State *L)
}
if (skip_entry) {
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
HA_ATOMIC_DEC(&ts->ref_cnt);
continue;
}
@@ -1103,14 +1103,14 @@ int hlua_stktable_dump(lua_State *L)
lua_newtable(L);
hlua_stktable_entry(L, t, ts);
lua_settable(L, -3);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
HA_ATOMIC_DEC(&ts->ref_cnt);
}
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
done:
shard++;
if (shard < CONFIG_HAP_TBL_BUCKETS)
goto next_shard;
bucket++;
if (bucket < CONFIG_HAP_TBL_BUCKETS)
goto next_bucket;
return 1;
}
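The dump above walks the table one bucket at a time: lock the bucket, pick the next entry, take a reference so the lock can be dropped while the entry is serialized to Lua, then re-lock to drop the reference and advance. A condensed sketch of that walk, with a plain linked list and pthread rwlock standing in for the ebtree and HA_RWLOCK primitives (dump_all() and visit() are hypothetical):

#include <pthread.h>
#include <stdatomic.h>

#define NBUCKETS 16

struct entry  { atomic_int ref_cnt; struct entry *next; /* data omitted */ };
struct bucket { pthread_rwlock_t sh_lock; struct entry *first; };

/* Visit every entry of every bucket, calling visit() with the bucket lock
 * released; a plain linked list stands in for the bucket's key tree. */
static void dump_all(struct bucket buckets[NBUCKETS], void (*visit)(struct entry *))
{
    for (unsigned int b = 0; b < NBUCKETS; b++) {
        pthread_rwlock_wrlock(&buckets[b].sh_lock);
        for (struct entry *e = buckets[b].first; e; e = e->next) {
            atomic_fetch_add(&e->ref_cnt, 1);            /* pin so the entry survives */
            pthread_rwlock_unlock(&buckets[b].sh_lock);

            visit(e);                                    /* may block; lock is released */

            pthread_rwlock_wrlock(&buckets[b].sh_lock);
            atomic_fetch_sub(&e->ref_cnt, 1);            /* unpin before advancing */
        }
        pthread_rwlock_unlock(&buckets[b].sh_lock);
    }
}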

View File

@@ -99,7 +99,7 @@ void __stksess_free(struct stktable *t, struct stksess *ts)
*/
void stksess_free(struct stktable *t, struct stksess *ts)
{
uint shard;
uint bucket;
size_t len;
void *data;
@@ -114,10 +114,10 @@ void stksess_free(struct stktable *t, struct stksess *ts)
else
len = t->key_size;
shard = stktable_calc_shard_num(t, ts->key.key, len);
bucket = stktable_calc_bucket_num(t, ts->key.key, len);
/* make the compiler happy when shard is not used without threads */
ALREADY_CHECKED(shard);
/* make the compiler happy when bucket is not used without threads */
ALREADY_CHECKED(bucket);
__stksess_free(t, ts);
}
@@ -155,13 +155,13 @@ int __stksess_kill(struct stktable *t, struct stksess *ts)
* Decrease the refcount of a stksess and release it if the refcount falls to 0.
* Returns non-zero if deleted, zero otherwise.
*
* This function locks the corresponding table shard to proceed. When this
* This function locks the corresponding table bucket to proceed. When this
* function is called, the caller must be sure it owns a reference on the
* stksess (refcount >= 1).
*/
int stksess_kill(struct stktable *t, struct stksess *ts)
{
uint shard;
uint bucket;
size_t len;
int ret = 0;
@@ -170,15 +170,15 @@ int stksess_kill(struct stktable *t, struct stksess *ts)
else
len = t->key_size;
shard = stktable_calc_shard_num(t, ts->key.key, len);
bucket = stktable_calc_bucket_num(t, ts->key.key, len);
/* make the compiler happy when shard is not used without threads */
ALREADY_CHECKED(shard);
/* make the compiler happy when bucket is not used without threads */
ALREADY_CHECKED(bucket);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
if (!HA_ATOMIC_SUB_FETCH(&ts->ref_cnt, 1))
ret = __stksess_kill(t, ts);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
return ret;
}
@@ -280,38 +280,38 @@ int stktable_trash_oldest(struct stktable *t)
struct eb32_node *eb;
struct mt_list link;
int max_search; // no more than 50% misses
int max_per_shard;
int done_per_shard;
int max_per_bucket;
int done_per_bucket;
int batched = 0;
int to_batch;
int failed_once = 0;
int looped;
int shard;
int init_shard;
int bucket;
int init_bucket;
/* start from a random shard number to avoid starvation in the last ones */
shard = init_shard = statistical_prng_range(CONFIG_HAP_TBL_BUCKETS - 1);
/* start from a random bucket number to avoid starvation in the last ones */
bucket = init_bucket = statistical_prng_range(CONFIG_HAP_TBL_BUCKETS - 1);
to_batch = STKTABLE_MAX_UPDATES_AT_ONCE;
max_search = to_batch * 2; // no more than 50% misses
max_per_shard = (to_batch + CONFIG_HAP_TBL_BUCKETS - 1) / CONFIG_HAP_TBL_BUCKETS;
max_per_bucket = (to_batch + CONFIG_HAP_TBL_BUCKETS - 1) / CONFIG_HAP_TBL_BUCKETS;
do {
done_per_shard = 0;
done_per_bucket = 0;
looped = 0;
if (HA_RWLOCK_TRYWRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock) != 0) {
if (HA_RWLOCK_TRYWRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock) != 0) {
if (batched)
break; // no point insisting, we have or made some room
if (failed_once)
break; // already waited once, that's enough
failed_once = 1;
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
}
eb = eb32_lookup_ge(&t->shards[shard].exps, now_ms - TIMER_LOOK_BACK);
while (batched < to_batch && done_per_shard < max_per_shard) {
eb = eb32_lookup_ge(&t->buckets[bucket].exps, now_ms - TIMER_LOOK_BACK);
while (batched < to_batch && done_per_bucket < max_per_bucket) {
if (unlikely(!eb)) {
/* we might have reached the end of the tree, typically because
* <now_ms> is in the first half and we're first scanning the last
@@ -321,7 +321,7 @@ int stktable_trash_oldest(struct stktable *t)
if (looped)
break;
looped = 1;
eb = eb32_first(&t->shards[shard].exps);
eb = eb32_first(&t->buckets[bucket].exps);
if (likely(!eb))
break;
}
@@ -349,7 +349,7 @@ int stktable_trash_oldest(struct stktable *t)
}
ts->exp.key = ts->expire;
eb32_insert(&t->shards[shard].exps, &ts->exp);
eb32_insert(&t->buckets[bucket].exps, &ts->exp);
/* the update might have jumped beyond the next element,
* possibly causing a wrapping. We need to check whether
@@ -360,7 +360,7 @@ int stktable_trash_oldest(struct stktable *t)
* use the current one.
*/
if (!eb)
eb = eb32_first(&t->shards[shard].exps);
eb = eb32_first(&t->buckets[bucket].exps);
if (!eb || tick_is_lt(ts->exp.key, eb->key))
eb = &ts->exp;
@@ -390,19 +390,19 @@ int stktable_trash_oldest(struct stktable *t)
ebmb_delete(&ts->key);
__stksess_free(t, ts);
batched++;
done_per_shard++;
done_per_bucket++;
/* don't waste more time here if we're not alone */
if (failed_once)
break;
}
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
shard++;
if (shard >= CONFIG_HAP_TBL_BUCKETS)
shard = 0;
} while (max_search > 0 && shard != init_shard);
bucket++;
if (bucket >= CONFIG_HAP_TBL_BUCKETS)
bucket = 0;
} while (max_search > 0 && bucket != init_bucket);
return batched;
}
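stktable_trash_oldest() spreads its eviction work across buckets: it starts at a random bucket so the last buckets are never systematically starved, caps the work done per bucket, and wraps around until the batch budget is reached or it is back at the starting bucket. A sketch of that scan order only, with rand() standing in for statistical_prng_range() and the per-bucket eviction and miss budget elided (scan_buckets() and process() are hypothetical):

#include <stdlib.h>

#define NBUCKETS 16

/* Visit every bucket at most once, starting at a random one and wrapping
 * around, and stop early once <budget> units of work have been done.
 * process() is a per-bucket worker returning how much work it performed. */
static int scan_buckets(int budget, int (*process)(int bucket, int max))
{
    int per_bucket = (budget + NBUCKETS - 1) / NBUCKETS;  /* even split, rounded up */
    int init_bucket = rand() % NBUCKETS;                  /* random starting point */
    int bucket = init_bucket;
    int done = 0;

    do {
        done += process(bucket, per_bucket);
        if (done >= budget)
            break;
        bucket = (bucket + 1) % NBUCKETS;
    } while (bucket != init_bucket);

    return done;
}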
@@ -450,17 +450,17 @@ struct stksess *stksess_new(struct stktable *t, struct stktable_key *key)
}
/*
* Looks in table <t> for a sticky session matching key <key> in shard <shard>.
* Looks in table <t> for a sticky session matching key <key> in bucket <bucket>.
* Returns pointer on requested sticky session or NULL if none was found.
*/
struct stksess *__stktable_lookup_key(struct stktable *t, struct stktable_key *key, uint shard)
struct stksess *__stktable_lookup_key(struct stktable *t, struct stktable_key *key, uint bucket)
{
struct ebmb_node *eb;
if (t->type == SMP_T_STR)
eb = ebst_lookup_len(&t->shards[shard].keys, key->key, key->key_len + 1 < t->key_size ? key->key_len : t->key_size - 1);
eb = ebst_lookup_len(&t->buckets[bucket].keys, key->key, key->key_len + 1 < t->key_size ? key->key_len : t->key_size - 1);
else
eb = ebmb_lookup(&t->shards[shard].keys, key->key, t->key_size);
eb = ebmb_lookup(&t->buckets[bucket].keys, key->key, t->key_size);
if (unlikely(!eb)) {
/* no session found */
@@ -479,7 +479,7 @@ struct stksess *__stktable_lookup_key(struct stktable *t, struct stktable_key *k
struct stksess *stktable_lookup_key(struct stktable *t, struct stktable_key *key)
{
struct stksess *ts;
uint shard;
uint bucket;
size_t len;
if (t->type == SMP_T_STR)
@@ -487,13 +487,13 @@ struct stksess *stktable_lookup_key(struct stktable *t, struct stktable_key *key
else
len = t->key_size;
shard = stktable_calc_shard_num(t, key->key, len);
bucket = stktable_calc_bucket_num(t, key->key, len);
HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
ts = __stktable_lookup_key(t, key, shard);
HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
ts = __stktable_lookup_key(t, key, bucket);
if (ts)
HA_ATOMIC_INC(&ts->ref_cnt);
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
return ts;
}
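stktable_lookup_key() is the read path: hash the key to its bucket, take that bucket's read lock, look the key up in the bucket's key tree, bump the refcount while still holding the lock, then release it. A self-contained sketch of the same sequence, assuming the xxHash library (find_in_bucket() and the types are hypothetical stand-ins for the ebtree lookup):

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <xxhash.h>

struct entry  { atomic_int ref_cnt; /* key, data and tree nodes omitted */ };
struct bucket { pthread_rwlock_t sh_lock; /* key tree omitted */ };

/* Hypothetical stand-in for the ebtree lookup done by __stktable_lookup_key(). */
struct entry *find_in_bucket(struct bucket *b, const void *key, size_t len);

/* Hash the key to its bucket, look it up under that bucket's read lock and
 * return it pinned (the caller must drop the reference). <nbuckets> stands
 * in for CONFIG_HAP_TBL_BUCKETS. */
static struct entry *lookup_pinned(struct bucket *buckets, unsigned int nbuckets,
                                   const void *key, size_t len, uint32_t seed)
{
    struct bucket *b = &buckets[XXH32(key, len, seed) % nbuckets];
    struct entry *e;

    pthread_rwlock_rdlock(&b->sh_lock);
    e = find_in_bucket(b, key, len);
    if (e)
        atomic_fetch_add(&e->ref_cnt, 1);  /* pin under the lock so it cannot be freed */
    pthread_rwlock_unlock(&b->sh_lock);
    return e;
}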
@@ -508,15 +508,15 @@ struct stksess *stktable_lookup_ptr(struct stktable *t, void *ptr)
{
struct stksess *ts = NULL;
struct ebmb_node *eb;
int shard;
int bucket;
for (shard = 0; shard < CONFIG_HAP_TBL_BUCKETS; shard++) {
HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
for (bucket = 0; bucket < CONFIG_HAP_TBL_BUCKETS; bucket++) {
HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
/* linear search is performed, this could be optimized by adding
* an eb node dedicated to ptr lookups into stksess struct to
* leverage eb_lookup function instead.
*/
eb = ebmb_first(&t->shards[shard].keys);
eb = ebmb_first(&t->buckets[bucket].keys);
while (eb) {
struct stksess *cur;
@@ -529,7 +529,7 @@ struct stksess *stktable_lookup_ptr(struct stktable *t, void *ptr)
}
if (ts)
HA_ATOMIC_INC(&ts->ref_cnt);
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
if (ts)
return ts;
}
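With only a pointer and no key to hash, stktable_lookup_ptr() has to read-lock and linearly scan every bucket until the address matches, as the in-tree comment notes. A condensed sketch of that scan, again with a linked list standing in for the key tree (lookup_by_ptr() is hypothetical):

#include <pthread.h>
#include <stdatomic.h>

#define NBUCKETS 16

struct entry  { atomic_int ref_cnt; struct entry *next; /* data omitted */ };
struct bucket { pthread_rwlock_t sh_lock; struct entry *first; };

/* Find the entry whose address is <ptr> by scanning bucket after bucket.
 * Returns it pinned, or NULL if it is not present in any bucket. */
static struct entry *lookup_by_ptr(struct bucket buckets[NBUCKETS], const void *ptr)
{
    for (int b = 0; b < NBUCKETS; b++) {
        struct entry *found = NULL;

        pthread_rwlock_rdlock(&buckets[b].sh_lock);
        for (struct entry *e = buckets[b].first; e; e = e->next) {
            if ((const void *)e == ptr) {
                found = e;
                atomic_fetch_add(&found->ref_cnt, 1);    /* pin before unlocking */
                break;
            }
        }
        pthread_rwlock_unlock(&buckets[b].sh_lock);
        if (found)
            return found;
    }
    return NULL;
}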
@@ -544,14 +544,14 @@ struct stksess *stktable_lookup_ptr(struct stktable *t, void *ptr)
* <ts> must originate from a table with same key type and length than <t>,
* else it is undefined behavior.
*/
struct stksess *__stktable_lookup(struct stktable *t, struct stksess *ts, uint shard)
struct stksess *__stktable_lookup(struct stktable *t, struct stksess *ts, uint bucket)
{
struct ebmb_node *eb;
if (t->type == SMP_T_STR)
eb = ebst_lookup(&t->shards[shard].keys, (char *)ts->key.key);
eb = ebst_lookup(&t->buckets[bucket].keys, (char *)ts->key.key);
else
eb = ebmb_lookup(&t->shards[shard].keys, ts->key.key, t->key_size);
eb = ebmb_lookup(&t->buckets[bucket].keys, ts->key.key, t->key_size);
if (unlikely(!eb))
return NULL;
@@ -571,7 +571,7 @@ struct stksess *__stktable_lookup(struct stktable *t, struct stksess *ts, uint s
struct stksess *stktable_lookup(struct stktable *t, struct stksess *ts)
{
struct stksess *lts;
uint shard;
uint bucket;
size_t len;
if (t->type == SMP_T_STR)
@@ -579,13 +579,13 @@ struct stksess *stktable_lookup(struct stktable *t, struct stksess *ts)
else
len = t->key_size;
shard = stktable_calc_shard_num(t, ts->key.key, len);
bucket = stktable_calc_bucket_num(t, ts->key.key, len);
HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
lts = __stktable_lookup(t, ts, shard);
HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
lts = __stktable_lookup(t, ts, bucket);
if (lts)
HA_ATOMIC_INC(&lts->ref_cnt);
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
return lts;
}
@@ -672,14 +672,14 @@ static void stktable_release(struct stktable *t, struct stksess *ts)
* is set. <ts> is returned if properly inserted, otherwise the one already
* present if any.
*/
struct stksess *__stktable_store(struct stktable *t, struct stksess *ts, uint shard)
struct stksess *__stktable_store(struct stktable *t, struct stksess *ts, uint bucket)
{
struct ebmb_node *eb;
eb = ebmb_insert(&t->shards[shard].keys, &ts->key, t->key_size);
eb = ebmb_insert(&t->buckets[bucket].keys, &ts->key, t->key_size);
if (likely(eb == &ts->key)) {
ts->exp.key = ts->expire;
eb32_insert(&t->shards[shard].exps, &ts->exp);
eb32_insert(&t->buckets[bucket].exps, &ts->exp);
}
return ebmb_entry(eb, struct stksess, key); // most commonly this is <ts>
}
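__stktable_store() relies on the key tree being insert-unique: if the key already exists, the insert returns the entry that is already present and the expiration tree is left alone; only a genuinely new entry is also queued for expiration. A sketch of that contract with hypothetical insert_unique() and queue_expiry() helpers standing in for ebmb_insert() and eb32_insert():

struct entry { unsigned int expire; /* key, data and tree nodes omitted */ };
struct bucket_trees { void *keys; void *exps; };

/* Hypothetical stand-ins for ebmb_insert()/eb32_insert(): insert_unique()
 * returns the already-present entry when the key is a duplicate, otherwise
 * the new entry; queue_expiry() links the entry into the expiration tree. */
struct entry *insert_unique(void *keys_tree, struct entry *e);
void queue_expiry(void *exps_tree, struct entry *e);

/* Mirror of __stktable_store(): the caller holds the bucket's write lock. */
static struct entry *store_entry(struct bucket_trees *b, struct entry *e)
{
    struct entry *ret = insert_unique(b->keys, e);

    if (ret == e)                          /* really inserted: schedule its expiration */
        queue_expiry(b->exps, e);
    return ret;                            /* either <e> or the pre-existing duplicate */
}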
@@ -730,7 +730,7 @@ void stktable_requeue_exp(struct stktable *t, const struct stksess *ts)
struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *key)
{
struct stksess *ts, *ts2;
uint shard;
uint bucket;
size_t len;
if (!key)
@@ -741,13 +741,13 @@ struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *
else
len = table->key_size;
shard = stktable_calc_shard_num(table, key->key, len);
bucket = stktable_calc_bucket_num(table, key->key, len);
HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
ts = __stktable_lookup_key(table, key, shard);
HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
ts = __stktable_lookup_key(table, key, bucket);
if (ts)
HA_ATOMIC_INC(&ts->ref_cnt);
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
if (ts)
return ts;
@@ -767,12 +767,12 @@ struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *
* one we find.
*/
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
ts2 = __stktable_store(table, ts, shard);
ts2 = __stktable_store(table, ts, bucket);
HA_ATOMIC_INC(&ts2->ref_cnt);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
if (unlikely(ts2 != ts)) {
/* another entry was added in the mean time, let's
@@ -840,7 +840,7 @@ struct task *stktable_add_pend_updates(struct task *t, void *ctx, unsigned int s
struct stksess *stktable_set_entry(struct stktable *table, struct stksess *nts)
{
struct stksess *ts;
uint shard;
uint bucket;
size_t len;
if (table->type == SMP_T_STR)
@@ -848,13 +848,13 @@ struct stksess *stktable_set_entry(struct stktable *table, struct stksess *nts)
else
len = table->key_size;
shard = stktable_calc_shard_num(table, nts->key.key, len);
bucket = stktable_calc_bucket_num(table, nts->key.key, len);
HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
ts = __stktable_lookup(table, nts, shard);
HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
ts = __stktable_lookup(table, nts, bucket);
if (ts) {
HA_ATOMIC_INC(&ts->ref_cnt);
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
return ts;
}
ts = nts;
@@ -862,18 +862,18 @@ struct stksess *stktable_set_entry(struct stktable *table, struct stksess *nts)
/* let's increment it before switching to exclusive */
HA_ATOMIC_INC(&ts->ref_cnt);
if (HA_RWLOCK_TRYRDTOSK(STK_TABLE_LOCK, &table->shards[shard].sh_lock) != 0) {
if (HA_RWLOCK_TRYRDTOSK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock) != 0) {
/* upgrade to seek lock failed, let's drop and take */
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
}
else
HA_RWLOCK_SKTOWR(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
HA_RWLOCK_SKTOWR(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
/* now we're write-locked */
__stktable_store(table, ts, shard);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
__stktable_store(table, ts, bucket);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
stktable_requeue_exp(table, ts);
return ts;
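stktable_set_entry() looks the key up under the bucket's read lock and only switches to the write lock when it has to insert. HAProxy can usually upgrade in place through its seek lock (HA_RWLOCK_TRYRDTOSK then HA_RWLOCK_SKTOWR); pthread rwlocks cannot be upgraded, so the sketch below always drops and re-takes the lock, which matches the fallback path above (the types and helpers are hypothetical and reuse the conventions of the earlier sketches):

#include <pthread.h>
#include <stdatomic.h>

struct entry  { atomic_int ref_cnt; unsigned int expire; };
struct bucket { pthread_rwlock_t sh_lock; void *keys; void *exps; };

/* Hypothetical stand-ins for the tree operations (see the previous sketch). */
struct entry *find_entry(struct bucket *b, const struct entry *like);
struct entry *insert_unique(void *keys_tree, struct entry *e);
void queue_expiry(void *exps_tree, struct entry *e);

/* Return the table's pinned entry for <nts>'s key: the existing one when
 * present, otherwise <nts> after inserting it. */
static struct entry *set_entry(struct bucket *b, struct entry *nts)
{
    struct entry *e;

    pthread_rwlock_rdlock(&b->sh_lock);
    e = find_entry(b, nts);
    if (e) {
        atomic_fetch_add(&e->ref_cnt, 1);  /* found: pin and return it */
        pthread_rwlock_unlock(&b->sh_lock);
        return e;
    }
    atomic_fetch_add(&nts->ref_cnt, 1);    /* pin the new entry up front */

    /* no in-place upgrade with pthread: drop the read lock, take the write
     * lock, and let insert_unique() catch a writer that slipped in meanwhile */
    pthread_rwlock_unlock(&b->sh_lock);
    pthread_rwlock_wrlock(&b->sh_lock);
    e = insert_unique(b->keys, nts);
    if (e == nts) {
        queue_expiry(b->exps, nts);        /* genuinely inserted: schedule expiration */
    }
    else {
        atomic_fetch_sub(&nts->ref_cnt, 1);/* lost the race: adopt the winner instead */
        atomic_fetch_add(&e->ref_cnt, 1);
    }
    pthread_rwlock_unlock(&b->sh_lock);
    return e;
}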
@@ -893,18 +893,18 @@ struct task *process_table_expire(struct task *task, void *context, unsigned int
int looped;
int exp_next;
int task_exp;
int shard, init_shard;
int bucket, init_bucket;
int failed_once = 0;
int purged = 0;
task_exp = TICK_ETERNITY;
/* start from a random shard number to avoid starvation in the last ones */
shard = init_shard = statistical_prng_range(CONFIG_HAP_TBL_BUCKETS - 1);
/* start from a random bucket number to avoid starvation in the last ones */
bucket = init_bucket = statistical_prng_range(CONFIG_HAP_TBL_BUCKETS - 1);
do {
looped = 0;
if (HA_RWLOCK_TRYWRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock) != 0) {
if (HA_RWLOCK_TRYWRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock) != 0) {
if (purged || failed_once) {
/* already purged or second failed lock, yield and come back later */
to_visit = 0;
@@ -912,10 +912,10 @@ struct task *process_table_expire(struct task *task, void *context, unsigned int
}
/* make sure we succeed at least once */
failed_once = 1;
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
}
eb = eb32_lookup_ge(&t->shards[shard].exps, now_ms - TIMER_LOOK_BACK);
eb = eb32_lookup_ge(&t->buckets[bucket].exps, now_ms - TIMER_LOOK_BACK);
while (to_visit >= 0) {
if (unlikely(!eb)) {
@@ -927,7 +927,7 @@ struct task *process_table_expire(struct task *task, void *context, unsigned int
if (looped)
break;
looped = 1;
eb = eb32_first(&t->shards[shard].exps);
eb = eb32_first(&t->buckets[bucket].exps);
if (likely(!eb))
break;
}
@@ -965,7 +965,7 @@ struct task *process_table_expire(struct task *task, void *context, unsigned int
continue;
ts->exp.key = ts->expire;
eb32_insert(&t->shards[shard].exps, &ts->exp);
eb32_insert(&t->buckets[bucket].exps, &ts->exp);
/* the update might have jumped beyond the next element,
* possibly causing a wrapping. We need to check whether
@@ -976,7 +976,7 @@ struct task *process_table_expire(struct task *task, void *context, unsigned int
* use the current one.
*/
if (!eb)
eb = eb32_first(&t->shards[shard].exps);
eb = eb32_first(&t->buckets[bucket].exps);
if (!eb || tick_is_lt(ts->exp.key, eb->key))
eb = &ts->exp;
@@ -1008,12 +1008,12 @@ struct task *process_table_expire(struct task *task, void *context, unsigned int
out_unlock:
task_exp = tick_first(task_exp, exp_next);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
shard++;
if (shard >= CONFIG_HAP_TBL_BUCKETS)
shard = 0;
} while (to_visit > 0 && shard != init_shard);
bucket++;
if (bucket >= CONFIG_HAP_TBL_BUCKETS)
bucket = 0;
} while (to_visit > 0 && bucket != init_bucket);
if (to_visit <= 0) {
task_wakeup(task, TASK_WOKEN_OTHER);
@@ -1040,16 +1040,16 @@ int stktable_init(struct stktable *t, char **err_msg)
{
static int operating_thread = 0;
int peers_retval = 0;
int shard;
int bucket;
int i;
t->hash_seed = XXH64(t->id, t->idlen, 0);
if (t->size) {
for (shard = 0; shard < CONFIG_HAP_TBL_BUCKETS; shard++) {
t->shards[shard].keys = EB_ROOT_UNIQUE;
memset(&t->shards[shard].exps, 0, sizeof(t->shards[shard].exps));
HA_RWLOCK_INIT(&t->shards[shard].sh_lock);
for (bucket = 0; bucket < CONFIG_HAP_TBL_BUCKETS; bucket++) {
t->buckets[bucket].keys = EB_ROOT_UNIQUE;
memset(&t->buckets[bucket].exps, 0, sizeof(t->buckets[bucket].exps));
HA_RWLOCK_INIT(&t->buckets[bucket].sh_lock);
}
MT_LIST_INIT(&t->updates);
@@ -5625,7 +5625,7 @@ static int cli_io_handler_table(struct appctx *appctx)
struct ebmb_node *eb;
int skip_entry;
int show = ctx->action == STK_CLI_ACT_SHOW;
int shard = ctx->tree_head;
int bucket = ctx->tree_head;
/*
* We have 3 possible states in ctx->state :
@@ -5651,30 +5651,30 @@ static int cli_io_handler_table(struct appctx *appctx)
}
if (ctx->t->size) {
if (show && !shard && !table_dump_head_to_buffer(&trash, appctx, ctx->t, ctx->target))
if (show && !bucket && !table_dump_head_to_buffer(&trash, appctx, ctx->t, ctx->target))
return 0;
if (ctx->target &&
(strm_li(s)->bind_conf->level & ACCESS_LVL_MASK) >= ACCESS_LVL_OPER) {
/* dump entries only if table explicitly requested */
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
eb = ebmb_first(&ctx->t->shards[shard].keys);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
eb = ebmb_first(&ctx->t->buckets[bucket].keys);
if (eb) {
ctx->entry = ebmb_entry(eb, struct stksess, key);
HA_ATOMIC_INC(&ctx->entry->ref_cnt);
ctx->state = STATE_DUMP;
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
break;
}
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
/* we come here if we didn't find any entry in this shard */
shard = ++ctx->tree_head;
if (shard < CONFIG_HAP_TBL_BUCKETS)
break; // try again on new shard
/* we come here if we didn't find any entry in this bucket */
bucket = ++ctx->tree_head;
if (bucket < CONFIG_HAP_TBL_BUCKETS)
break; // try again on new bucket
/* fall through next table */
shard = ctx->tree_head = 0;
bucket = ctx->tree_head = 0;
}
}
ctx->t = ctx->t->next;
@@ -5760,7 +5760,7 @@ static int cli_io_handler_table(struct appctx *appctx)
HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ctx->entry->lock);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
HA_ATOMIC_DEC(&ctx->entry->ref_cnt);
eb = ebmb_next(&ctx->entry->key);
@@ -5772,7 +5772,7 @@ static int cli_io_handler_table(struct appctx *appctx)
else if (!skip_entry && !ctx->entry->ref_cnt)
__stksess_kill(ctx->t, old);
HA_ATOMIC_INC(&ctx->entry->ref_cnt);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
break;
}
@@ -5782,11 +5782,11 @@ static int cli_io_handler_table(struct appctx *appctx)
else if (!skip_entry && !HA_ATOMIC_LOAD(&ctx->entry->ref_cnt))
__stksess_kill(ctx->t, ctx->entry);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
shard = ++ctx->tree_head;
if (shard >= CONFIG_HAP_TBL_BUCKETS) {
shard = ctx->tree_head = 0;
bucket = ++ctx->tree_head;
if (bucket >= CONFIG_HAP_TBL_BUCKETS) {
bucket = ctx->tree_head = 0;
ctx->t = ctx->t->next;
}
ctx->state = STATE_NEXT;