MINOR: freq_ctr: provide non-blocking read functions
Some code called by the debug handlers in the context of a signal handler accesses some freq_ctr counters, and occasionally ends up on one that is locked by the very thread being dumped. Let's introduce a non-blocking version that at least allows the call to return even if the value is in the process of being updated; that's less problematic than hanging.
commit 29e246a84c
parent 84d4c948fc
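
As a usage illustration (not part of the patch): a minimal sketch of the intended call site, assuming a hypothetical sess_per_sec counter and a 1000ms period. The estimate variant introduced below replaces read_freq_ctr_period() in code reachable from signal handlers:

#include <stdio.h>
#include <haproxy/freq_ctr.h>

/* hypothetical per-process session-rate counter, for illustration only */
extern struct freq_ctr sess_per_sec;

/* reachable from a debug signal handler: must never block, so the
 * estimate variant is used; a momentarily inaccurate rate is a better
 * outcome here than spinning on a counter locked by the dumped thread.
 */
static void dump_sess_rate(void)
{
	uint rate = read_freq_ctr_period_estimate(&sess_per_sec, 1000);

	printf("sess_rate=%u/s\n", rate);
}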
--- a/include/haproxy/freq_ctr.h
+++ b/include/haproxy/freq_ctr.h
@@ -28,7 +28,9 @@
 #include <haproxy/ticks.h>
 
 /* exported functions from freq_ctr.c */
+ullong _freq_ctr_total_from_values(uint period, int pend, uint tick, ullong past, ullong curr);
 ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend);
+ullong freq_ctr_total_estimate(const struct freq_ctr *ctr, uint period, int pend);
 int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq);
 uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc);
 
@@ -92,6 +94,16 @@ static inline uint read_freq_ctr_period(const struct freq_ctr *ctr, uint period)
 	return div64_32(total, period);
 }
 
+/* same as read_freq_ctr_period() above except that it doesn't lock and may
+ * return incorrect values. This is only meant for use in signal handlers.
+ */
+static inline uint read_freq_ctr_period_estimate(const struct freq_ctr *ctr, uint period)
+{
+	ullong total = freq_ctr_total_estimate(ctr, period, -1);
+
+	return div64_32(total, period);
+}
+
 /* same as read_freq_ctr_period() above except that floats are used for the
  * output so that low rates can be more precise.
  */
--- a/src/freq_ctr.c
+++ b/src/freq_ctr.c
@@ -64,6 +64,44 @@ uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc)
 	return inc;
 }
 
+/* Returns the total number of events over the current + last period, including
+ * a number of already pending events <pend>. The average frequency will be
+ * obtained by dividing the output by <period>. This is essentially made to
+ * ease implementation of higher-level read functions. This function does not
+ * access the freq_ctr itself, it's supposed to be called with the stabilized
+ * values. See freq_ctr_total() and freq_ctr_total_estimate() instead.
+ *
+ * As a special case, if pend < 0, it's assumed there are no pending
+ * events and a flapping correction must be applied at the end. This is used by
+ * read_freq_ctr_period() to avoid reporting ups and downs on low-frequency
+ * events when the past value is <= 1.
+ */
+ullong _freq_ctr_total_from_values(uint period, int pend,
+                                   uint tick, ullong past, ullong curr)
+{
+	int remain;
+
+	remain = tick + period - HA_ATOMIC_LOAD(&global_now_ms);
+	if (unlikely(remain < 0)) {
+		/* We're past the first period, check if we can still report a
+		 * part of last period or if we're too far away.
+		 */
+		remain += period;
+		past = (remain >= 0) ? curr : 0;
+		curr = 0;
+	}
+
+	if (pend < 0) {
+		/* enable flapping correction at very low rates */
+		pend = 0;
+		if (!curr && past <= 1)
+			return past * period;
+	}
+
+	/* compute the total number of confirmed events over the period */
+	return past * remain + (curr + pend) * period;
+}
+
 /* Returns the total number of events over the current + last period, including
  * a number of already pending events <pend>. The average frequency will be
  * obtained by dividing the output by <period>. This is essentially made to
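
To make the sliding-window arithmetic of _freq_ctr_total_from_values() concrete, here is a small self-contained re-implementation (a sketch: now_ms is passed as a parameter instead of being read from global_now_ms, and the names are mine) with two worked cases, the second exercising the pend < 0 flapping correction:

#include <stdio.h>

typedef unsigned int uint;
typedef unsigned long long ullong;

/* standalone copy of the helper's logic, for experimentation only */
static ullong total_from_values(uint period, int pend, uint tick,
                                ullong past, ullong curr, uint now_ms)
{
	int remain = tick + period - now_ms;

	if (remain < 0) {
		/* past the current period: shift or drop the old window */
		remain += period;
		past = (remain >= 0) ? curr : 0;
		curr = 0;
	}

	if (pend < 0) {
		/* flapping correction at very low rates */
		pend = 0;
		if (!curr && past <= 1)
			return past * period;
	}
	return past * remain + (curr + pend) * period;
}

int main(void)
{
	/* 1000ms period started at tick=10000, read at now=10400, so
	 * remain=600; 30 events last period, 50 so far in this one:
	 * 30*600 + 50*1000 = 68000, i.e. 68 events/s after dividing by
	 * the period.
	 */
	printf("%llu\n", total_from_values(1000, 0, 10000, 30, 50, 10400));

	/* flapping correction (pend=-1): a single event last period and
	 * none yet in this one reports a flat 1*1000, i.e. a steady
	 * 1 event/s instead of a value decaying with <remain>.
	 */
	printf("%llu\n", total_from_values(1000, -1, 10000, 1, 0, 10400));
	return 0;
}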
@@ -78,7 +116,6 @@ ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend)
 {
 	ullong curr, past, old_curr, old_past;
 	uint tick, old_tick;
-	int remain;
 
 	tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
 	curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
@@ -124,26 +161,27 @@ ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend)
 redo3:
 		__ha_cpu_relax();
 	};
+	return _freq_ctr_total_from_values(period, pend, tick, past, curr);
+}
 
-	remain = tick + period - HA_ATOMIC_LOAD(&global_now_ms);
-	if (unlikely(remain < 0)) {
-		/* We're past the first period, check if we can still report a
-		 * part of last period or if we're too far away.
-		 */
-		remain += period;
-		past = (remain >= 0) ? curr : 0;
-		curr = 0;
-	}
+/* Like the function above but doesn't block if the entry is locked. In this
+ * case it will only return the most accurate estimate it can bring. Based on
+ * the update order in update_freq_ctr_period_slow() above, it may return a
+ * low value caused by the replacement of the curr value before the past one
+ * and/or the tick was updated. Otherwise the value will be correct most of
+ * the time. This is only meant to be used from debug handlers.
+ */
+ullong freq_ctr_total_estimate(const struct freq_ctr *ctr, uint period, int pend)
+{
+	ullong curr, past;
+	uint tick;
 
-	if (pend < 0) {
-		/* enable flapping correction at very low rates */
-		pend = 0;
-		if (!curr && past <= 1)
-			return past * period;
-	}
+	tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
+	curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
+	past = HA_ATOMIC_LOAD(&ctr->prev_ctr);
 
-	/* compute the total number of confirmed events over the period */
-	return past * remain + (curr + pend) * period;
+	tick &= ~1;
+	return _freq_ctr_total_from_values(period, pend, tick, past, curr);
 }
 
 /* Returns the excess of events (may be negative) over the current period for
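
A note on the tick &= ~1 above: it relies on the convention used by the freq_ctr update path, where bit 0 of curr_tick serves as a lock flag (the stored tick is kept even). The blocking reader spins while that bit is set; the estimate reader simply masks it off. A sketch of the two entry points, using a hypothetical req_rate counter:

#include <haproxy/freq_ctr.h>

/* hypothetical request-rate counter, for illustration only */
extern struct freq_ctr req_rate;

/* always consistent, but may spin (__ha_cpu_relax() loops) for as long
 * as an updater holds the lock bit in curr_tick.
 */
ullong req_total_blocking(void)
{
	return freq_ctr_total(&req_rate, 1000, 0);
}

/* never blocks: plain atomic loads with the lock bit masked off, so
 * curr/past may mix two consecutive rotations; acceptable in debug
 * handlers only.
 */
ullong req_total_lockless(void)
{
	return freq_ctr_total_estimate(&req_rate, 1000, 0);
}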