MEDIUM: fd: Use the new _HA_ATOMIC_* macros.

Use the new _HA_ATOMIC_* macros and add barriers where needed.
Author:       Olivier Houchard  2019-03-08 18:47:42 +01:00
Committed by: Olivier Houchard
Parent:       8beb27e9ce
Commit:       d360879fb5

2 changed files with 27 additions and 27 deletions
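
Note on the macro split: the underscore-prefixed _HA_ATOMIC_* variants are assumed here to perform the same atomic operation as their HA_ATOMIC_* counterparts, only without an implied memory barrier, which is why explicit __ha_barrier_* calls remain (or get added) wherever ordering still matters. The standalone sketch below models that split with GCC __atomic builtins; the expansions and the ha_barrier_atomic_store() stand-in are illustrative assumptions, not HAProxy's real hathreads.h definitions.

/* Sketch: assumed difference between HA_ATOMIC_ADD (fully ordered) and
 * _HA_ATOMIC_ADD (relaxed; the caller adds barriers where needed).
 */
#include <stdio.h>

#define HA_ATOMIC_ADD(val, i)   __atomic_add_fetch((val), (i), __ATOMIC_SEQ_CST)
#define _HA_ATOMIC_ADD(val, i)  __atomic_add_fetch((val), (i), __ATOMIC_RELAXED)
#define _HA_ATOMIC_OR(val, f)   __atomic_or_fetch((val), (f), __ATOMIC_RELAXED)

/* Hypothetical stand-in for __ha_barrier_atomic_store(): order the atomic
 * stores above against any later stores.
 */
#define ha_barrier_atomic_store() __atomic_thread_fence(__ATOMIC_RELEASE)

static unsigned int fd_nbupdt;
static unsigned long fd_cache_mask;

int main(void)
{
	/* Pure counter bump: no ordering requirement, relaxed is enough. */
	unsigned int oldupdt = _HA_ATOMIC_ADD(&fd_nbupdt, 1) - 1;

	/* Publishing a mask other threads act on: relaxed op + explicit barrier. */
	_HA_ATOMIC_OR(&fd_cache_mask, 0x1UL);
	ha_barrier_atomic_store();

	printf("oldupdt=%u mask=%#lx\n", oldupdt, fd_cache_mask);
	return 0;
}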


@@ -122,14 +122,14 @@ static inline void updt_fd_polling(const int fd)
if (HA_ATOMIC_BTS(&fdtab[fd].update_mask, tid))
return;
-oldupdt = HA_ATOMIC_ADD(&fd_nbupdt, 1) - 1;
+oldupdt = _HA_ATOMIC_ADD(&fd_nbupdt, 1) - 1;
fd_updt[oldupdt] = fd;
} else {
unsigned long update_mask = fdtab[fd].update_mask;
do {
if (update_mask == fdtab[fd].thread_mask)
return;
-} while (!HA_ATOMIC_CAS(&fdtab[fd].update_mask, &update_mask,
+} while (!_HA_ATOMIC_CAS(&fdtab[fd].update_mask, &update_mask,
fdtab[fd].thread_mask));
fd_add_to_fd_list(&update_list, fd, offsetof(struct fdtab, update));
}
@@ -144,7 +144,7 @@ static inline void done_update_polling(int fd)
{
unsigned long update_mask;
-update_mask = HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
+update_mask = _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
while ((update_mask & all_threads_mask)== 0) {
/* If we were the last one that had to update that entry, remove it from the list */
fd_rm_from_fd_list(&update_list, fd, offsetof(struct fdtab, update));
@@ -170,7 +170,7 @@ static inline void done_update_polling(int fd)
*/
static inline void fd_alloc_cache_entry(const int fd)
{
-HA_ATOMIC_OR(&fd_cache_mask, fdtab[fd].thread_mask);
+_HA_ATOMIC_OR(&fd_cache_mask, fdtab[fd].thread_mask);
if (!(fdtab[fd].thread_mask & (fdtab[fd].thread_mask - 1)))
fd_add_to_fd_list(&fd_cache_local[my_ffsl(fdtab[fd].thread_mask) - 1], fd, offsetof(struct fdtab, cache));
else
@@ -288,7 +288,7 @@ static inline void fd_stop_recv(int fd)
return;
new = old & ~FD_EV_ACTIVE_R;
new &= ~FD_EV_POLLED_R;
-} while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+} while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
@@ -313,7 +313,7 @@ static inline void fd_stop_send(int fd)
return;
new = old & ~FD_EV_ACTIVE_W;
new &= ~FD_EV_POLLED_W;
-} while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+} while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
@@ -338,7 +338,7 @@ static inline void fd_stop_both(int fd)
return;
new = old & ~FD_EV_ACTIVE_RW;
new &= ~FD_EV_POLLED_RW;
-} while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+} while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_RW)
updt_fd_polling(fd);
@@ -364,7 +364,7 @@ static inline void fd_cant_recv(const int fd)
new = old & ~FD_EV_READY_R;
if (new & FD_EV_ACTIVE_R)
new |= FD_EV_POLLED_R;
-} while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+} while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
@@ -383,7 +383,7 @@ static inline void fd_may_recv(const int fd)
unsigned long locked;
/* marking ready never changes polled status */
-HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_R);
+_HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_R);
locked = atleast2(fdtab[fd].thread_mask);
if (locked)
@@ -410,7 +410,7 @@ static inline void fd_done_recv(const int fd)
new = old & ~FD_EV_READY_R;
if (new & FD_EV_ACTIVE_R)
new |= FD_EV_POLLED_R;
-} while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+} while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
@@ -436,7 +436,7 @@ static inline void fd_cant_send(const int fd)
new = old & ~FD_EV_READY_W;
if (new & FD_EV_ACTIVE_W)
new |= FD_EV_POLLED_W;
-} while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+} while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
@@ -455,7 +455,7 @@ static inline void fd_may_send(const int fd)
unsigned long locked;
/* marking ready never changes polled status */
-HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_W);
+_HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_W);
locked = atleast2(fdtab[fd].thread_mask);
if (locked)
@@ -478,7 +478,7 @@ static inline void fd_want_recv(int fd)
new = old | FD_EV_ACTIVE_R;
if (!(new & FD_EV_READY_R))
new |= FD_EV_POLLED_R;
-} while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+} while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
@@ -504,7 +504,7 @@ static inline void fd_want_send(int fd)
new = old | FD_EV_ACTIVE_W;
if (!(new & FD_EV_READY_W))
new |= FD_EV_POLLED_W;
-} while (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+} while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
@@ -584,12 +584,12 @@ static inline int compute_poll_timeout(int next)
/* These are replacements for FD_SET, FD_CLR, FD_ISSET, working on uints */
static inline void hap_fd_set(int fd, unsigned int *evts)
{
-HA_ATOMIC_OR(&evts[fd / (8*sizeof(*evts))], 1U << (fd & (8*sizeof(*evts) - 1)));
+_HA_ATOMIC_OR(&evts[fd / (8*sizeof(*evts))], 1U << (fd & (8*sizeof(*evts) - 1)));
}
static inline void hap_fd_clr(int fd, unsigned int *evts)
{
-HA_ATOMIC_AND(&evts[fd / (8*sizeof(*evts))], ~(1U << (fd & (8*sizeof(*evts) - 1))));
+_HA_ATOMIC_AND(&evts[fd / (8*sizeof(*evts))], ~(1U << (fd & (8*sizeof(*evts) - 1))));
}
static inline unsigned int hap_fd_isset(int fd, unsigned int *evts)
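
The fd.h hunks above all share one shape: a relaxed compare-and-swap retry loop on fdtab[fd].state, where (old ^ new) tells the caller whether the polled status changed and polling must be updated. A reduced standalone model of that loop follows; the _HA_ATOMIC_CAS expansion and the flag values are assumptions for illustration, not the real FD_EV_* definitions.

#include <stdio.h>

/* Assumed expansion of _HA_ATOMIC_CAS: a relaxed strong CAS that writes the
 * current value back into 'old' on failure, which the retry loops rely on.
 */
#define _HA_ATOMIC_CAS(ptr, old, new) \
	__atomic_compare_exchange_n((ptr), (old), (new), 0, \
	                            __ATOMIC_RELAXED, __ATOMIC_RELAXED)

/* Made-up flag values, for illustration only. */
#define EV_ACTIVE_R 0x01
#define EV_POLLED_R 0x02

static unsigned char state = EV_ACTIVE_R | EV_POLLED_R;

/* Same shape as fd_stop_recv() above: atomically clear the ACTIVE and POLLED
 * bits, then report whether the POLLED bit actually flipped.
 */
static int stop_recv(void)
{
	unsigned char old, new;

	old = state;
	do {
		if (!(old & EV_ACTIVE_R))
			return 0;		/* nothing active, nothing to do */
		new = old & ~EV_ACTIVE_R;
		new &= ~EV_POLLED_R;
	} while (!_HA_ATOMIC_CAS(&state, &old, new));

	return (old ^ new) & EV_POLLED_R;	/* caller would then update polling */
}

int main(void)
{
	printf("polled bit changed: %d\n", stop_recv() != 0);
	return 0;
}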


@@ -201,7 +201,7 @@ void fd_add_to_fd_list(volatile struct fdlist *list, int fd, int off)
/* Check that we're not already in the cache, and if not, lock us. */
if (next >= -2)
goto done;
-if (!HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2))
+if (!_HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2))
goto redo_next;
__ha_barrier_atomic_store();
@@ -217,7 +217,7 @@ void fd_add_to_fd_list(volatile struct fdlist *list, int fd, int off)
if (unlikely(last == -1)) {
/* list is empty, try to add ourselves alone so that list->last=fd */
-if (unlikely(!HA_ATOMIC_CAS(&list->last, &old, new)))
+if (unlikely(!_HA_ATOMIC_CAS(&list->last, &old, new)))
goto redo_last;
/* list->first was necessary -1, we're guaranteed to be alone here */
@@ -227,7 +227,7 @@ void fd_add_to_fd_list(volatile struct fdlist *list, int fd, int off)
* The CAS will only succeed if its next is -1,
* which means it's in the cache, and the last element.
*/
-if (unlikely(!HA_ATOMIC_CAS(&_GET_NEXT(last, off), &old, new)))
+if (unlikely(!_HA_ATOMIC_CAS(&_GET_NEXT(last, off), &old, new)))
goto redo_last;
/* Then, update the last entry */
@@ -268,7 +268,7 @@ void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off)
goto lock_self;
} while (
#ifdef HA_CAS_IS_8B
-unlikely(!HA_ATOMIC_CAS(((void **)(void *)&_GET_NEXT(fd, off)), ((void **)(void *)&cur_list), (*(void **)(void *)&next_list))))
+unlikely(!_HA_ATOMIC_CAS(((void **)(void *)&_GET_NEXT(fd, off)), ((void **)(void *)&cur_list), (*(void **)(void *)&next_list))))
#else
unlikely(!__ha_cas_dw((void *)&_GET_NEXT(fd, off), (void *)&cur_list, (void *)&next_list)))
#endif
@@ -283,13 +283,13 @@ void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off)
goto lock_self_next;
if (next <= -3)
goto done;
-if (unlikely(!HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2)))
+if (unlikely(!_HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2)))
goto lock_self_next;
lock_self_prev:
prev = ({ volatile int *prev = &_GET_PREV(fd, off); *prev; });
if (prev == -2)
goto lock_self_prev;
-if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))
+if (unlikely(!_HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))
goto lock_self_prev;
#endif
__ha_barrier_atomic_store();
@@ -299,7 +299,7 @@ void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off)
redo_prev:
old = fd;
-if (unlikely(!HA_ATOMIC_CAS(&_GET_NEXT(prev, off), &old, new))) {
+if (unlikely(!_HA_ATOMIC_CAS(&_GET_NEXT(prev, off), &old, new))) {
if (unlikely(old == -2)) {
/* Neighbour already locked, give up and
* retry again once he's done
@@ -316,7 +316,7 @@ void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off)
if (likely(next != -1)) {
redo_next:
old = fd;
-if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(next, off), &old, new))) {
+if (unlikely(!_HA_ATOMIC_CAS(&_GET_PREV(next, off), &old, new))) {
if (unlikely(old == -2)) {
/* Neighbour already locked, give up and
* retry again once he's done
@@ -338,7 +338,7 @@ void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off)
list->first = next;
__ha_barrier_store();
last = list->last;
-while (unlikely(last == fd && (!HA_ATOMIC_CAS(&list->last, &last, prev))))
+while (unlikely(last == fd && (!_HA_ATOMIC_CAS(&list->last, &last, prev))))
__ha_compiler_barrier();
/* Make sure we let other threads know we're no longer in cache,
* before releasing our neighbours.
@@ -427,7 +427,7 @@ static inline void fdlist_process_cached_events(volatile struct fdlist *fdlist)
if (fdtab[fd].cache.next < -3)
continue;
-HA_ATOMIC_OR(&fd_cache_mask, tid_bit);
+_HA_ATOMIC_OR(&fd_cache_mask, tid_bit);
locked = atleast2(fdtab[fd].thread_mask);
if (locked && HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
activity[tid].fd_lock++;
@@ -463,7 +463,7 @@ static inline void fdlist_process_cached_events(volatile struct fdlist *fdlist)
*/
void fd_process_cached_events()
{
-HA_ATOMIC_AND(&fd_cache_mask, ~tid_bit);
+_HA_ATOMIC_AND(&fd_cache_mask, ~tid_bit);
fdlist_process_cached_events(&fd_cache_local[tid]);
fdlist_process_cached_events(&fd_cache);
}
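
The fd.c hunks all sit in the lock-free update/cache list code, where an entry is locked by CAS-ing its next (or prev) index to -2; since the relaxed _HA_ATOMIC_CAS implies no ordering, an explicit __ha_barrier_atomic_store() stands between taking that lock and rewriting the links. Below is a reduced standalone model of just the locking step, loosely based on the redo_next loops above; the macros are assumed stand-ins, not the real fd_add_to_fd_list()/fd_rm_from_fd_list() code.

#include <stdio.h>

/* Same assumed _HA_ATOMIC_CAS stand-in as in the earlier sketch. */
#define _HA_ATOMIC_CAS(ptr, old, new) \
	__atomic_compare_exchange_n((ptr), (old), (new), 0, \
	                            __ATOMIC_RELAXED, __ATOMIC_RELAXED)

/* Hypothetical stand-in for __ha_barrier_atomic_store(). */
#define barrier_atomic_store() __atomic_thread_fence(__ATOMIC_RELEASE)

static int next_link = 7;	/* index of the next entry; -2 means "locked" */

/* Lock this entry's forward link: snapshot it, retry while another thread
 * holds it, CAS it to -2, then issue the store barrier before any later
 * stores that rewrite the links.
 */
static int lock_next(void)
{
	int next;

redo_next:
	next = next_link;
	if (next == -2)
		goto redo_next;		/* someone else holds the lock, retry */
	if (!_HA_ATOMIC_CAS(&next_link, &next, -2))
		goto redo_next;
	barrier_atomic_store();
	return next;			/* previous value, to be restored on unlock */
}

int main(void)
{
	printf("locked, saved next=%d\n", lock_next());
	return 0;
}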