Mirror of https://git.haproxy.org/git/haproxy.git/ (synced 2025-09-22 22:31:28 +02:00)
It's easier to take the counter's age into account when consulting it than to rotate it first. This also saves some CPU cycles and avoids the multiply for outdated counters, saving further cycles when multiple operations need to read the same counter. The freq_ctr code has also shrunk by one third as a result of these optimizations.
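As a rough illustration of that approach (a sketch only: the structure and helper below are simplified stand-ins, not HAProxy's actual struct freq_ctr, mul32hi() or curr_sec_ms_scaled, and no atomicity is shown), the reader checks how old the stored second is and discounts or ignores the stored values instead of ever rotating buckets:

/* Sketch: a simplified event-rate counter read that accounts for the
 * counter's age at read time instead of rotating it beforehand.
 */
struct sketch_ctr {
        unsigned int sec;   /* second to which <curr> applies */
        unsigned int curr;  /* events counted during <sec> */
        unsigned int prev;  /* events counted during the second before <sec> */
};

static unsigned int sketch_read(const struct sketch_ctr *c,
                                unsigned int now_sec, unsigned int ms)
{
        unsigned int age = now_sec - c->sec;

        if (age > 1)
                return 0;       /* stale counter: no scaling needed at all */

        if (age == 1)           /* <curr> describes the previous second */
                return (unsigned int)((unsigned long long)c->curr * (1000 - ms) / 1000);

        /* fresh counter: current events plus the not-yet-elapsed share of
         * the previous second
         */
        return c->curr + (unsigned int)((unsigned long long)c->prev * (1000 - ms) / 1000);
}

For instance, with prev = 1000, curr = 10 and 250 ms elapsed in the current second, the estimate is 10 + 1000 * 750 / 1000 = 760 events/s; the real code below performs the equivalent scaling with mul32hi(past, ~curr_sec_ms_scaled) to avoid the division.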
120 lines · 3.1 KiB · C
/*
 * Event rate calculation functions.
 *
 * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <common/config.h>
#include <common/standard.h>
#include <common/time.h>
#include <common/tools.h>
#include <proto/freq_ctr.h>

/* Read a frequency counter taking history into account for missing time in
 * the current period. The current second is sub-divided into 1000 chunks of
 * one ms, and the missing ones are read proportionally from the previous
 * value. The return value has the same precision as one input data sample,
 * so low rates will be inaccurate but still appropriate for max checking.
 * One trick we use for low values is to specially handle the case where the
 * rate is between 0 and 1 in order to avoid flapping while waiting for the
 * next event.
 *
 * For immediate limit checking, it's recommended to use freq_ctr_remain()
 * and next_event_delay() instead, which do not have the flapping correction,
 * so that even frequencies as low as one event/period are properly handled.
 */
unsigned int read_freq_ctr(struct freq_ctr *ctr)
{
        unsigned int curr, past;
        unsigned int age;

        age = now.tv_sec - ctr->curr_sec;
        if (unlikely(age > 1))
                return 0;

        curr = 0;
        past = ctr->curr_ctr;
        if (likely(!age)) {
                curr = past;
                past = ctr->prev_ctr;
        }

        if (past <= 1 && !curr)
                return past; /* very low rate, avoid flapping */

        return curr + mul32hi(past, ~curr_sec_ms_scaled);
}

/* Returns the number of remaining events that can occur on this freq counter
 * while respecting <freq> and taking into account that <pend> events are
 * already known to be pending. Returns 0 if the limit was reached.
 */
unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
{
        unsigned int curr, past;
        unsigned int age;

        past = 0;
        curr = 0;
        age = now.tv_sec - ctr->curr_sec;

        if (likely(age <= 1)) {
                past = ctr->curr_ctr;
                if (likely(!age)) {
                        curr = past;
                        past = ctr->prev_ctr;
                }
                curr += mul32hi(past, ~curr_sec_ms_scaled);
        }
        curr += pend;

        if (curr >= freq)
                return 0;
        return freq - curr;
}

/* Returns the expected wait time in ms before the next event may occur,
 * respecting frequency <freq>, and assuming there may already be some pending
 * events. It returns zero if we can proceed immediately, otherwise the wait
 * time, which will be rounded down by 1 ms for better accuracy, with a
 * minimum of one ms.
 */
unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
{
        unsigned int curr, past;
        unsigned int wait, age;

        past = 0;
        curr = 0;
        age = now.tv_sec - ctr->curr_sec;

        if (likely(age <= 1)) {
                past = ctr->curr_ctr;
                if (likely(!age)) {
                        curr = past;
                        past = ctr->prev_ctr;
                }
                curr += mul32hi(past, ~curr_sec_ms_scaled);
        }
        curr += pend;

        if (curr < freq)
                return 0;

        wait = 999 / curr;
        return MAX(wait, 1);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */
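To close, a usage sketch (an editor's illustration, not part of the file above): rate-limiting an accept path with the two helpers shown here. It assumes update_freq_ctr() is the inline counterpart declared in proto/freq_ctr.h that adds events to the counter's current second; the function name rate_limited_accept() and the surrounding logic are made up for the example.

static unsigned int rate_limited_accept(struct freq_ctr *sess_rate,
                                        unsigned int limit)
{
        /* how many more sessions may be accepted during this second? */
        if (!freq_ctr_remain(sess_rate, limit, 0)) {
                /* limit reached: return the number of ms to wait before
                 * trying again (at least 1 ms, as next_event_delay()
                 * guarantees)
                 */
                return next_event_delay(sess_rate, limit, 0);
        }

        /* still below the limit: accept one session and account for it */
        update_freq_ctr(sess_rate, 1);
        return 0;       /* no wait needed */
}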