MEDIUM: various: Use __ha_barrier_atomic* when relevant.

When protecting data modified by atomic operations, use __ha_barrier_atomic*
instead of the plain __ha_barrier_* variants: on x86 the atomic read-modify-write
operations already act as full memory barriers, so the atomic variants avoid
emitting fence instructions that would be redundant there.
Olivier Houchard 2019-03-08 13:47:21 +01:00 committed by Olivier Houchard
parent d0c3b8894a
commit d2b5d16187
4 changed files with 7 additions and 7 deletions

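The reasoning behind the change, sketched for x86-64: the HA_ATOMIC_* macros compile down to lock-prefixed read-modify-write instructions there, and those already provide full ordering, so a barrier whose only job is to order accesses around such an operation can be reduced to a compiler barrier. The bodies below are an illustrative approximation of the two barrier flavours, not necessarily the exact upstream definitions.

/* Plain store barrier: a fence instruction is emitted so that earlier
 * ordinary stores become globally visible before later ones.
 * (Illustrative x86-64 sketch, not the verbatim HAProxy definition.)
 */
static inline void __ha_barrier_store(void)
{
	__asm__ __volatile__("sfence" ::: "memory");
}

/* Atomic-friendly store barrier: when the preceding write was done with
 * a lock-prefixed instruction (what HA_ATOMIC_XCHG/HA_ATOMIC_CAS become
 * on x86), the CPU already provides full ordering, so only the compiler
 * has to be told not to move memory accesses across this point.
 */
static inline void __ha_barrier_atomic_store(void)
{
	__asm__ __volatile__("" ::: "memory");
}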

@@ -192,7 +192,7 @@ struct cond_wordlist {
 	n = HA_ATOMIC_XCHG(&(lh)->n, LLIST_BUSY); \
 	if (n == LLIST_BUSY) \
 		continue; \
-	__ha_barrier_store(); \
+	__ha_barrier_atomic_store(); \
 	p = HA_ATOMIC_XCHG(&n->p, LLIST_BUSY); \
 	if (p == LLIST_BUSY) { \
 		(lh)->n = n; \


@@ -32,7 +32,7 @@ static inline struct xref *xref_get_peer_and_lock(struct xref *xref)
 		/* Get the local pointer to the peer. */
 		local = HA_ATOMIC_XCHG(&xref->peer, XREF_BUSY);
-		__ha_barrier_store();
+		__ha_barrier_atomic_store();
 		/* If the local pointer is NULL, the peer no longer exists. */
 		if (local == NULL) {

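For readers unfamiliar with the xref mechanism: xref_get_peer_and_lock() takes the lock by atomically exchanging the peer pointer with the XREF_BUSY sentinel. Below is a minimal, self-contained sketch of that locking pattern using C11 atomics in place of HA_ATOMIC_XCHG; the names and types are illustrative, not the real xref structures.

#include <stdatomic.h>
#include <stddef.h>

#define BUSY_SENTINEL ((void *)1)   /* stands in for XREF_BUSY */

/* Sketch of "lock by exchanging a pointer with a BUSY sentinel".
 * Returns the previous peer pointer (NULL if the peer is gone), with
 * the slot left holding BUSY_SENTINEL, i.e. locked.
 */
static void *sketch_lock_peer(_Atomic(void *) *peer)
{
	for (;;) {
		/* The exchange is a locked RMW, hence already a full
		 * barrier on x86; the real code only adds
		 * __ha_barrier_atomic_store() to constrain the compiler.
		 */
		void *local = atomic_exchange(peer, BUSY_SENTINEL);

		if (local != BUSY_SENTINEL)
			return local;
		/* Someone else holds the lock: spin and retry. */
	}
}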

@@ -203,7 +203,7 @@ void fd_add_to_fd_list(volatile struct fdlist *list, int fd, int off)
 		goto done;
 	if (!HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2))
 		goto redo_next;
-	__ha_barrier_store();
+	__ha_barrier_atomic_store();
 	new = fd;
 redo_last:
@@ -292,7 +292,7 @@ void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off)
 	if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))
 		goto lock_self_prev;
 #endif
-	__ha_barrier_store();
+	__ha_barrier_atomic_store();
 	/* Now, lock the entries of our neighbours */
 	if (likely(prev != -1)) {

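The fd cache updates follow the same logic: fd_add_to_fd_list() and fd_rm_from_fd_list() lock individual list cells by CAS'ing their link to -2 before touching their neighbours. A rough, self-contained sketch of that step with C11 atomics (illustrative names, not the real fdtab accessors):

#include <stdatomic.h>
#include <stdbool.h>

/* Sketch: lock a list cell by swinging its link field to the "locked"
 * value -2, as fd_add_to_fd_list()/fd_rm_from_fd_list() do through
 * HA_ATOMIC_CAS on _GET_NEXT()/_GET_PREV().
 */
static bool sketch_try_lock_link(_Atomic int *link, int expected)
{
	if (!atomic_compare_exchange_strong(link, &expected, -2))
		return false;   /* lost the race; the caller retries */

	/* A successful CAS is a lock-prefixed instruction on x86 and thus
	 * already a full barrier; only the compiler must be kept from
	 * reordering around it, which is what __ha_barrier_atomic_store()
	 * provides in the real code instead of an sfence.
	 */
	atomic_signal_fence(memory_order_seq_cst);
	return true;
}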

@@ -122,7 +122,7 @@ void __task_wakeup(struct task *t, struct eb_root *root)
 #ifdef USE_THREAD
 	if (root == &rqueue) {
 		HA_ATOMIC_OR(&global_tasks_mask, t->thread_mask);
-		__ha_barrier_store();
+		__ha_barrier_atomic_store();
 	}
 #endif
 	old_active_mask = active_tasks_mask;
@@ -401,7 +401,7 @@ void process_runnable_tasks()
 		}
 		if (!(global_tasks_mask & tid_bit) && task_per_thread[tid].rqueue_size == 0) {
 			HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
-			__ha_barrier_load();
+			__ha_barrier_atomic_load();
 			if (global_tasks_mask & tid_bit)
 				HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
 		}
@@ -413,7 +413,7 @@ void process_runnable_tasks()
 		t = (struct task *)LIST_ELEM(task_per_thread[tid].task_list.n, struct tasklet *, list);
 		state = HA_ATOMIC_XCHG(&t->state, TASK_RUNNING);
-		__ha_barrier_store();
+		__ha_barrier_atomic_store();
 		task_remove_from_task_list(t);
 		ctx = t->context;
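
The one load-barrier case in this commit is the idle check in process_runnable_tasks(): the thread clears its bit from active_tasks_mask, then re-reads global_tasks_mask so that a task queued concurrently is not lost. Since the bit-clearing is itself a locked RMW on x86, the re-read only needs a compiler barrier, hence __ha_barrier_atomic_load(). A minimal sketch of that pattern with C11 atomics (variable and function names are illustrative, not the real scheduler state):

#include <stdatomic.h>

/* Sketch of the "clear my bit, then re-check" race avoidance done in
 * process_runnable_tasks().
 */
static void sketch_park_thread(_Atomic unsigned long *active_mask,
                               _Atomic unsigned long *global_mask,
                               unsigned long tid_bit)
{
	/* Announce that this thread sees no more work. The RMW below is
	 * lock-prefixed on x86 and already a full barrier.
	 */
	atomic_fetch_and(active_mask, ~tid_bit);

	/* Only the compiler needs to be prevented from hoisting the
	 * re-read above the RMW: this is what __ha_barrier_atomic_load()
	 * does in the real code, instead of the heavier fence emitted by
	 * __ha_barrier_load().
	 */
	atomic_signal_fence(memory_order_seq_cst);

	/* Re-check: another thread may have queued a global task between
	 * our last scan and the bit clearing; if so, mark this thread
	 * active again so the wakeup is not lost.
	 */
	if (atomic_load(global_mask) & tid_bit)
		atomic_fetch_or(active_mask, tid_bit);
}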