MINOR: clock: make global_now_ms a pointer

This is preparation work for shared counters between co-processes:
co-processes will need to share a common date, and global_now_ms will
be used for that, as it will point to the shm when sharing is enabled.

Thus, in this patch we turn global_now_ms into a pointer and adjust the
places where it is written to and read from. Fortunately, atomic
operations were already used for all of these accesses, so only the
address expression passed to the atomic macros changes and the
conversion remains trivial.
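
As a minimal sketch of what this means in practice (illustrative only,
not part of the patch; the two helper functions are hypothetical), an
access now passes the pointer itself to HAProxy's atomic macros instead
of the address of the former variable:

    #include <haproxy/atomic.h>

    volatile unsigned int _global_now_ms;            /* process-local storage */
    volatile unsigned int *global_now_ms = &_global_now_ms;

    static inline unsigned int get_global_now_ms(void)
    {
        /* before the patch: HA_ATOMIC_LOAD(&global_now_ms) */
        return HA_ATOMIC_LOAD(global_now_ms);
    }

    static inline void set_global_now_ms(unsigned int v)
    {
        /* before the patch: HA_ATOMIC_STORE(&global_now_ms, v) */
        HA_ATOMIC_STORE(global_now_ms, v);
    }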

For now, global_now_ms points to the process-local _global_now_ms,
which serves as a fallback when sharing through the shm is not enabled.
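
A purely illustrative sketch of how sharing could later be enabled (not
part of this patch; the function name and the anonymous shared mapping
are assumptions): seed a shared segment from the local value, then
redirect the pointer so every reader and writer uses it.

    #include <sys/mman.h>

    /* Hypothetical follow-up: map a segment shared with co-processes,
     * seed it with the current local date, then make global_now_ms
     * point into it. On failure the process-local fallback remains.
     */
    static int clock_enable_shared_now_ms(void)
    {
        volatile unsigned int *shared;

        shared = mmap(NULL, sizeof(*shared), PROT_READ | PROT_WRITE,
                      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (shared == MAP_FAILED)
            return 0;                 /* keep pointing to _global_now_ms */

        *shared = _global_now_ms;     /* seed with the current local date */
        global_now_ms = shared;       /* subsequent accesses go through the shm */
        return 1;
    }

Note that an anonymous MAP_SHARED mapping is only visible to processes
forked after it is created; unrelated processes would need a file- or
memfd-backed segment instead.
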
Aurelien DARRAGON, 2025-05-30 12:04:53 +02:00
parent 713ebd2750
commit 4a20b3835a
4 changed files with 17 additions and 12 deletions

@@ -64,7 +64,7 @@
/* currently updated and stored in time.c */
extern THREAD_LOCAL unsigned int now_ms; /* internal date in milliseconds (may wrap) */
-extern volatile unsigned int global_now_ms;
+extern volatile unsigned int *global_now_ms;
/* return 1 if tick is set, otherwise 0 */
static inline int tick_isset(int expire)

@@ -30,7 +30,8 @@ struct timeval start_date; /* the process's start date in
struct timeval ready_date; /* date when the process was considered ready */
ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
-volatile uint global_now_ms; /* common monotonic date in milliseconds (may wrap) */
+volatile uint _global_now_ms; /* locally stored common monotonic date in milliseconds (may wrap) */
+volatile uint *global_now_ms; /* common monotonic date in milliseconds (may wrap), may point to _global_now_ms or shared memory */
/* when CLOCK_MONOTONIC is supported, the offset is applied from th_ctx->prev_mono_time instead */
THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */
@@ -270,7 +271,7 @@ void clock_update_global_date()
* otherwise catch up.
*/
old_now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
-old_now_ms = _HA_ATOMIC_LOAD(&global_now_ms);
+old_now_ms = _HA_ATOMIC_LOAD(global_now_ms);
do {
if (now_ns < old_now_ns)
@@ -300,7 +301,7 @@ void clock_update_global_date()
* and ms forms) or loop again.
*/
} while ((!_HA_ATOMIC_CAS(&global_now_ns, &old_now_ns, now_ns) ||
-(now_ms != old_now_ms && !_HA_ATOMIC_CAS(&global_now_ms, &old_now_ms, now_ms))) &&
+(now_ms != old_now_ms && !_HA_ATOMIC_CAS(global_now_ms, &old_now_ms, now_ms))) &&
__ha_cpu_relax());
if (!th_ctx->curr_mono_time) {
@@ -326,7 +327,8 @@ void clock_init_process_date(void)
if (!global_now_ns) // CLOCK_MONOTONIC not supported
global_now_ns = tv_to_ns(&date);
now_ns = global_now_ns;
-global_now_ms = ns_to_ms(now_ns);
+_global_now_ms = ns_to_ms(now_ns);
/* force time to wrap 20s after boot: we first compute the time offset
* that once applied to the wall-clock date will make the local time
@@ -334,14 +336,17 @@ void clock_init_process_date(void)
* and will be used to recompute the local time, both of which will
* match and continue from this shifted date.
*/
-now_offset = sec_to_ns((uint)((uint)(-global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
+now_offset = sec_to_ns((uint)((uint)(-_global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
global_now_ns += now_offset;
now_ns = global_now_ns;
now_ms = ns_to_ms(now_ns);
/* correct for TICK_ETNERITY (0) */
if (now_ms == TICK_ETERNITY)
now_ms++;
-global_now_ms = now_ms;
+_global_now_ms = now_ms;
+/* for now global_now_ms points to the process-local _global_now_ms */
+global_now_ms = &_global_now_ms;
th_ctx->idle_pct = 100;
clock_update_date(0, 1);

@@ -33,7 +33,7 @@ uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc)
*/
for (;; __ha_cpu_relax()) {
curr_tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
-now_ms_tmp = HA_ATOMIC_LOAD(&global_now_ms);
+now_ms_tmp = HA_ATOMIC_LOAD(global_now_ms);
if (now_ms_tmp - curr_tick < period)
return HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
@@ -81,7 +81,7 @@ ullong _freq_ctr_total_from_values(uint period, int pend,
{
int remain;
-remain = tick + period - HA_ATOMIC_LOAD(&global_now_ms);
+remain = tick + period - HA_ATOMIC_LOAD(global_now_ms);
if (unlikely(remain < 0)) {
/* We're past the first period, check if we can still report a
* part of last period or if we're too far away.
@@ -239,7 +239,7 @@ int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq
return 0;
}
-elapsed = HA_ATOMIC_LOAD(&global_now_ms) - tick;
+elapsed = HA_ATOMIC_LOAD(global_now_ms) - tick;
if (unlikely(elapsed < 0 || elapsed > period)) {
/* The counter is in the future or the elapsed time is higher than the period, there is no overshoot */
return 0;

@@ -491,11 +491,11 @@ int is_inet6_reachable(void)
int fd;
if (tick_isset(last_check) &&
-!tick_is_expired(tick_add(last_check, INET6_CONNECTIVITY_CACHE_TIME), HA_ATOMIC_LOAD(&global_now_ms)))
+!tick_is_expired(tick_add(last_check, INET6_CONNECTIVITY_CACHE_TIME), HA_ATOMIC_LOAD(global_now_ms)))
return HA_ATOMIC_LOAD(&sock_inet6_seems_reachable);
/* update the test date to ensure nobody else does it in parallel */
-HA_ATOMIC_STORE(&last_inet6_check, HA_ATOMIC_LOAD(&global_now_ms));
+HA_ATOMIC_STORE(&last_inet6_check, HA_ATOMIC_LOAD(global_now_ms));
fd = socket(AF_INET6, SOCK_DGRAM, 0);
if (fd >= 0) {