Mirror of https://git.haproxy.org/git/haproxy.git/ (synced 2025-08-07 07:37:02 +02:00)
MEDIUM: freq_ctr: reimplement freq_ctr_remain_period() from freq_ctr_total()
The function now becomes an inline one that contains only a divide and a max. The divide will automatically go away with constant periods.
This commit is contained in:
parent a7a31b2602
commit 607be24a85
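A brief illustration of the last point (a standalone sketch, not HAProxy code): when the period is a compile-time constant, the compiler strength-reduces the division into a multiply and a shift, so the inlined function emits no hardware divide.

#include <stdint.h>

/* Standalone illustration: dividing by a constant period. Compilers
 * replace the division below by a multiply-and-shift sequence, so no
 * divide instruction remains in the generated code.
 */
static inline uint32_t avg_per_period(uint64_t total_events)
{
	return (uint32_t)(total_events / 1000); /* constant 1000ms period */
}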
@@ -148,6 +148,20 @@ static inline uint read_freq_ctr_period(struct freq_ctr_period *ctr, uint period
  */
 unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned int pend);
 
+/* Returns the number of remaining events that can occur on this freq counter
+ * while respecting <freq> events per period, and taking into account that
+ * <pend> events are already known to be pending. Returns 0 if limit was reached.
+ */
+static inline uint freq_ctr_remain_period(struct freq_ctr_period *ctr, uint period, uint freq, uint pend)
+{
+	ullong total = freq_ctr_total(ctr, period, pend);
+	uint avg = div64_32(total, period);
+
+	if (avg > freq)
+		avg = freq;
+	return freq - avg;
+}
+
 /* return the expected wait time in ms before the next event may occur,
  * respecting frequency <freq>, and assuming there may already be some pending
  * events. It returns zero if we can proceed immediately, otherwise the wait
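For context, here is a hypothetical caller of the new inline. This is a usage sketch only: it assumes HAProxy's internal freq_ctr API, where update_freq_ctr_period() is the producer-side call that accounts for new events, and a constant window size lets the internal divide optimize away as described above.

#include <haproxy/freq_ctr.h>

#define WINDOW_MS 10000 /* constant period, so the divide goes away */

/* Hypothetical rate-limit check (not part of this commit): returns 1
 * and accounts for the event if fewer than <limit> events were seen
 * in the current window, otherwise returns 0.
 */
static int may_accept_event(struct freq_ctr_period *ctr, uint limit)
{
	if (!freq_ctr_remain_period(ctr, WINDOW_MS, limit, 0))
		return 0; /* limit reached for this window */
	update_freq_ctr_period(ctr, WINDOW_MS, 1);
	return 1;
}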
@@ -158,8 +172,6 @@ unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned
 
 /* process freq counters over configurable periods */
 unsigned int read_freq_ctr_period(struct freq_ctr_period *ctr, unsigned int period);
-unsigned int freq_ctr_remain_period(struct freq_ctr_period *ctr, unsigned int period,
-				    unsigned int freq, unsigned int pend);
 
 /* While the functions above report average event counts per period, we are
  * also interested in average values per event. For this we use a different
@@ -166,56 +166,6 @@ unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned
 	return MAX(wait, 1);
 }
 
-/* Returns the number of remaining events that can occur on this freq counter
- * while respecting <freq> events per period, and taking into account that
- * <pend> events are already known to be pending. Returns 0 if limit was reached.
- */
-unsigned int freq_ctr_remain_period(struct freq_ctr_period *ctr, unsigned int period,
-				    unsigned int freq, unsigned int pend)
-{
-	unsigned int _curr, _past, curr, past;
-	unsigned int remain, _curr_tick, curr_tick;
-
-	while (1) {
-		_curr = ctr->curr_ctr;
-		__ha_compiler_barrier();
-		_past = ctr->prev_ctr;
-		__ha_compiler_barrier();
-		_curr_tick = ctr->curr_tick;
-		__ha_compiler_barrier();
-		if (_curr_tick & 0x1)
-			continue;
-		curr = ctr->curr_ctr;
-		__ha_compiler_barrier();
-		past = ctr->prev_ctr;
-		__ha_compiler_barrier();
-		curr_tick = ctr->curr_tick;
-		__ha_compiler_barrier();
-		if (_curr == curr && _past == past && _curr_tick == curr_tick)
-			break;
-	};
-
-	remain = curr_tick + period - global_now_ms;
-	if (likely((int)remain < 0)) {
-		/* We're past the first period, check if we can still report a
-		 * part of last period or if we're too far away.
-		 */
-		past = curr;
-		curr = 0;
-		remain += period;
-		if ((int)remain < 0)
-			past = 0;
-	}
-	if (likely(past))
-		curr += div64_32((unsigned long long)past * remain, period);
-
-	curr += pend;
-	freq -= curr;
-	if ((int)freq < 0)
-		freq = 0;
-	return freq;
-}
-
 /* Returns the total number of events over the current + last period, including
  * a number of already pending events <pend>. The average frequency will be
  * obtained by dividing the output by <period>. This is essentially made to
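Note on the removed body: it open-coded a lockless consistent snapshot, with bit 0 of curr_tick acting as a "rotation in progress" flag and compiler barriers ordering a double read of the three fields; that logic is now centralized in freq_ctr_total(). Below is a generic standalone sketch of the same seqlock-style pattern, using C11 atomics in place of HAProxy's __ha_compiler_barrier(); names and types here are illustrative, not HAProxy's.

#include <stdatomic.h>

struct ctr_snapshot {
	unsigned curr, prev, tick;
};

/* Retry until two successive reads of all three fields agree and the
 * writer's "rotation in progress" bit (bit 0 of the tick) is clear.
 */
static struct ctr_snapshot read_consistent(_Atomic unsigned *curr,
                                           _Atomic unsigned *prev,
                                           _Atomic unsigned *tick)
{
	struct ctr_snapshot a, b;

	while (1) {
		a.curr = atomic_load(curr);
		a.prev = atomic_load(prev);
		a.tick = atomic_load(tick);
		if (a.tick & 0x1) /* a writer is rotating the period */
			continue;
		b.curr = atomic_load(curr);
		b.prev = atomic_load(prev);
		b.tick = atomic_load(tick);
		if (a.curr == b.curr && a.prev == b.prev && a.tick == b.tick)
			break;
	}
	return a;
}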