MEDIUM: threads: Use the new _HA_ATOMIC_* macros.

Use the new _HA_ATOMIC_* macros and add barriers where needed.
Olivier Houchard 2019-03-08 18:51:17 +01:00 committed by Olivier Houchard
parent 9e7ae28a16
commit b23a61f78a
2 changed files with 11 additions and 11 deletions
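
For context: the plain HA_ATOMIC_* macros request sequentially consistent ordering, so every call pays for a full memory barrier, while the _HA_ATOMIC_* variants only guarantee the atomicity of the operation itself. A minimal sketch of the distinction, assuming the macros wrap the GCC/Clang __atomic builtins (the real definitions in HAProxy's hathreads.h are more elaborate and arch-specific):

    /* fully ordered variant: each call implies a full memory barrier */
    #define HA_ATOMIC_OR(val, flags) \
            __atomic_or_fetch((val), (flags), __ATOMIC_SEQ_CST)

    /* relaxed variant: only the atomicity of the read-modify-write is
     * guaranteed; any ordering it needs must be requested explicitly
     */
    #define _HA_ATOMIC_OR(val, flags) \
            __atomic_or_fetch((val), (flags), __ATOMIC_RELAXED)

    /* orders a preceding atomic store against later accesses; on x86 a
     * locked RMW is already fully ordered, so this can degrade to a
     * pure compiler barrier
     */
    #define __ha_barrier_atomic_store() \
            __atomic_thread_fence(__ATOMIC_RELEASE)

Since these masks are only ever touched through atomic operations, the relaxed forms are sufficient almost everywhere; the few places where ordering genuinely matters get an explicit barrier instead, as the hunks below show.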

--- a/src/haproxy.c
+++ b/src/haproxy.c

@@ -2739,11 +2739,11 @@ static void run_poll_loop()
                 else if (signal_queue_len && tid == 0)
                         activity[tid].wake_signal++;
                 else {
-                        HA_ATOMIC_OR(&sleeping_thread_mask, tid_bit);
-                        __ha_barrier_store();
+                        _HA_ATOMIC_OR(&sleeping_thread_mask, tid_bit);
+                        __ha_barrier_atomic_store();
                         if (active_tasks_mask & tid_bit) {
                                 activity[tid].wake_tasks++;
-                                HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
+                                _HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
                         } else
                                 exp = next;
                 }
@@ -2751,7 +2751,7 @@ static void run_poll_loop()
                 /* The poller will ensure it returns around <next> */
                 cur_poller.poll(&cur_poller, exp);
                 if (sleeping_thread_mask & tid_bit)
-                        HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
+                        _HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
                 fd_process_cached_events();
 
                 activity[tid].loops++;
@@ -2787,7 +2787,7 @@ static void *run_thread_poll_loop(void *data)
                 ptdf->fct();
 
 #ifdef USE_THREAD
-        HA_ATOMIC_AND(&all_threads_mask, ~tid_bit);
+        _HA_ATOMIC_AND(&all_threads_mask, ~tid_bit);
         if (tid > 0)
                 pthread_exit(NULL);
 #endif
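
The first hunk above is the delicate one: the thread first advertises itself in sleeping_thread_mask, then re-checks active_tasks_mask, and the barrier between those two steps is what keeps a concurrently queued task from being missed (a waker performs the mirror-image sequence: queue the task, then read sleeping_thread_mask). Below is a self-contained model of that handshake in portable C11 atomics; the mask names mirror the globals above, but may_sleep() itself is a hypothetical helper, and it uses a seq_cst fence where HAProxy can rely on the atomic RMW already being fully ordered on x86:

    #include <stdatomic.h>

    static _Atomic unsigned long sleeping_thread_mask;
    static _Atomic unsigned long active_tasks_mask;

    /* Returns 1 if the calling thread may sleep in the poller, 0 if a
     * task was queued for it while it was declaring itself asleep.
     */
    static int may_sleep(unsigned long tid_bit)
    {
            /* 1. advertise that we are about to sleep (relaxed RMW) */
            atomic_fetch_or_explicit(&sleeping_thread_mask, tid_bit,
                                     memory_order_relaxed);

            /* 2. the store above must be globally visible before the
             * load below; store-then-load needs a full (StoreLoad)
             * fence in the C11 model
             */
            atomic_thread_fence(memory_order_seq_cst);

            /* 3. re-check: a task queued between steps 1 and 2 would
             * otherwise be slept on until the poller timeout
             */
            if (atomic_load_explicit(&active_tasks_mask,
                                     memory_order_relaxed) & tid_bit) {
                    atomic_fetch_and_explicit(&sleeping_thread_mask,
                                              ~tid_bit,
                                              memory_order_relaxed);
                    return 0;
            }
            return 1;
    }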

--- a/src/hathreads.c
+++ b/src/hathreads.c

@@ -46,7 +46,7 @@ struct lock_stat lock_stats[LOCK_LABELS];
  */
 void thread_harmless_till_end()
 {
-        HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
+        _HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
         while (threads_want_rdv_mask & all_threads_mask) {
 #if _POSIX_PRIORITY_SCHEDULING
                 sched_yield();
@@ -65,16 +65,16 @@ void thread_isolate()
 {
         unsigned long old;
 
-        HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
-        __ha_barrier_store();
-        HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);
+        _HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
+        __ha_barrier_atomic_store();
+        _HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);
 
         /* wait for all threads to become harmless */
         old = threads_harmless_mask;
         while (1) {
                 if (unlikely((old & all_threads_mask) != all_threads_mask))
                         old = threads_harmless_mask;
-                else if (HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
+                else if (_HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
                         break;
 
 #if _POSIX_PRIORITY_SCHEDULING
@@ -95,7 +95,7 @@ void thread_isolate()
  */
 void thread_release()
 {
-        HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);
+        _HA_ATOMIC_AND(&threads_want_rdv_mask, ~tid_bit);
         thread_harmless_end();
 }