From d2b5d16187e38a2db2d2916a071e921b4ce74b09 Mon Sep 17 00:00:00 2001
From: Olivier Houchard
Date: Fri, 8 Mar 2019 13:47:21 +0100
Subject: [PATCH] MEDIUM: various: Use __ha_barrier_atomic* when relevant.

When protecting data modified by atomic operations, use
__ha_barrier_atomic* to avoid unneeded barriers on x86.
---
 include/common/mini-clist.h | 2 +-
 include/common/xref.h       | 2 +-
 src/fd.c                    | 4 ++--
 src/task.c                  | 6 +++---
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/common/mini-clist.h b/include/common/mini-clist.h
index 62a62d78b..074176a20 100644
--- a/include/common/mini-clist.h
+++ b/include/common/mini-clist.h
@@ -192,7 +192,7 @@ struct cond_wordlist {
 		n = HA_ATOMIC_XCHG(&(lh)->n, LLIST_BUSY);  \
 		if (n == LLIST_BUSY)                       \
 			continue;                          \
-		__ha_barrier_store();                      \
+		__ha_barrier_atomic_store();               \
 		p = HA_ATOMIC_XCHG(&n->p, LLIST_BUSY);     \
 		if (p == LLIST_BUSY) {                     \
 			(lh)->n = n;                       \
diff --git a/include/common/xref.h b/include/common/xref.h
index a6291f52f..48bc07a92 100644
--- a/include/common/xref.h
+++ b/include/common/xref.h
@@ -32,7 +32,7 @@ static inline struct xref *xref_get_peer_and_lock(struct xref *xref)
 
 		/* Get the local pointer to the peer. */
 		local = HA_ATOMIC_XCHG(&xref->peer, XREF_BUSY);
-		__ha_barrier_store();
+		__ha_barrier_atomic_store();
 
 		/* If the local pointer is NULL, the peer no longer exists. */
 		if (local == NULL) {
diff --git a/src/fd.c b/src/fd.c
index 581c5aaae..cb9df1f6b 100644
--- a/src/fd.c
+++ b/src/fd.c
@@ -203,7 +203,7 @@ void fd_add_to_fd_list(volatile struct fdlist *list, int fd, int off)
 		goto done;
 	if (!HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2))
 		goto redo_next;
-	__ha_barrier_store();
+	__ha_barrier_atomic_store();
 
 	new = fd;
 redo_last:
@@ -292,7 +292,7 @@ void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off)
 	if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))
 		goto lock_self_prev;
 #endif
-	__ha_barrier_store();
+	__ha_barrier_atomic_store();
 
 	/* Now, lock the entries of our neighbours */
 	if (likely(prev != -1)) {
diff --git a/src/task.c b/src/task.c
index 826e2124b..d7c3e059a 100644
--- a/src/task.c
+++ b/src/task.c
@@ -122,7 +122,7 @@ void __task_wakeup(struct task *t, struct eb_root *root)
 #ifdef USE_THREAD
 	if (root == &rqueue) {
 		HA_ATOMIC_OR(&global_tasks_mask, t->thread_mask);
-		__ha_barrier_store();
+		__ha_barrier_atomic_store();
 	}
 #endif
 	old_active_mask = active_tasks_mask;
@@ -401,7 +401,7 @@ void process_runnable_tasks()
 	}
 	if (!(global_tasks_mask & tid_bit) && task_per_thread[tid].rqueue_size == 0) {
 		HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
-		__ha_barrier_load();
+		__ha_barrier_atomic_load();
 		if (global_tasks_mask & tid_bit)
 			HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
 	}
@@ -413,7 +413,7 @@ void process_runnable_tasks()
 
 		t = (struct task *)LIST_ELEM(task_per_thread[tid].task_list.n, struct tasklet *, list);
 		state = HA_ATOMIC_XCHG(&t->state, TASK_RUNNING);
-		__ha_barrier_store();
+		__ha_barrier_atomic_store();
 		task_remove_from_task_list(t);
 
 		ctx = t->context;
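
Note on the rationale above: on x86, the locked read-modify-write instructions behind HA_ATOMIC_XCHG()/HA_ATOMIC_CAS()/HA_ATOMIC_OR() already act as full memory barriers, so a barrier whose only job is to order accesses around such a preceding atomic operation can be reduced to a compiler-only barrier; that is the "unneeded barrier" being avoided. The sketch below illustrates the pattern with GCC builtins under that assumption. It is a minimal, standalone illustration: barrier_atomic_store(), SLOT_BUSY and take_slot() are made-up names, not HAProxy's hathreads.h definitions.

/* Illustrative sketch only -- the names below are not HAProxy APIs. */
#if defined(__x86_64__) || defined(__i386__)
/* A preceding locked RMW (XCHG, LOCK CMPXCHG, ...) already acts as a full
 * fence, so only the compiler needs to be kept from reordering. */
#define barrier_atomic_store()  __asm__ __volatile__("" ::: "memory")
#else
/* Conservative generic fallback: a real full fence. */
#define barrier_atomic_store()  __atomic_thread_fence(__ATOMIC_SEQ_CST)
#endif

#define SLOT_BUSY (-2)

/* Same shape as the call sites touched by this patch: take a slot with an
 * atomic exchange, then write the data that the slot protects. */
static int take_slot(int *slot, int *owner, int tid)
{
	int prev = __atomic_exchange_n(slot, SLOT_BUSY, __ATOMIC_RELAXED); /* XCHG on x86 */

	if (prev == SLOT_BUSY)
		return 0;               /* already held by another thread */
	barrier_atomic_store();         /* cheap on x86: the exchange above already fenced */
	*owner = tid;                   /* protected data, written after the "lock" */
	return 1;
}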