mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2026-05-04 20:46:11 +02:00
BUG/MEDIUM: cpu-topo: Distribute CPUs fairly across groups
Make sure CPUs are distributed fairly across groups. When the number
of groups to generate is not a divisor of the number of CPUs, we could
otherwise end up with a few groups that have no CPU bound to them.
This was introduced in 3.4-dev2 with commit 56fd0c1a5c ("MEDIUM: cpu-topo:
Add an optional directive for per-group affinity"). No backport is
needed unless that commit is backported.
This commit is contained in:
parent
ca5332a9c3
commit
84837b6e70
@ -1286,7 +1286,8 @@ cpu_policy_assign_threads(int cpu_count, int thr_count, struct hap_cpuset node_c
|
||||
int thr_per_grp;
|
||||
int thr;
|
||||
int same_core = 0;
|
||||
int cpu_per_group;
|
||||
int cpu_reminder;
|
||||
int grp_generated = 0;
|
||||
|
||||
ha_cpuset_zero(&thrset);
|
||||
ha_cpuset_assign(&saved_touse_ccx, &touse_ccx);
|
||||
@ -1297,7 +1298,8 @@ cpu_policy_assign_threads(int cpu_count, int thr_count, struct hap_cpuset node_c
|
||||
nb_grp = (thr_count + global.maxthrpertgroup - 1) / global.maxthrpertgroup;
|
||||
if (nb_grp > MAX_TGROUPS - global.nbtgroups)
|
||||
nb_grp = MAX_TGROUPS - global.nbtgroups;
|
||||
cpu_per_group = (cpu_count + nb_grp - 1) / nb_grp;
|
||||
cpu_reminder = cpu_count % nb_grp;
|
||||
|
||||
thr_per_grp = (thr_count + nb_grp - 1) / nb_grp;
|
||||
if (thr_per_grp > global.maxthrpertgroup)
|
||||
thr_per_grp = global.maxthrpertgroup;
|
||||
@ -1306,6 +1308,7 @@ cpu_policy_assign_threads(int cpu_count, int thr_count, struct hap_cpuset node_c
|
||||
struct hap_cpuset group_cpuset;
|
||||
struct hap_cpuset current_tsid;
|
||||
struct hap_cpuset current_ccx;
|
||||
int cpu_for_group;
|
||||
|
||||
ha_cpuset_zero(&group_cpuset);
|
||||
ha_cpuset_zero(¤t_tsid);
|
||||
@ -1318,6 +1321,11 @@ cpu_policy_assign_threads(int cpu_count, int thr_count, struct hap_cpuset node_c
|
||||
if (thr_per_grp + global.nbthread > MAX_THREADS)
|
||||
thr_per_grp = MAX_THREADS - global.nbthread;
|
||||
|
||||
cpu_for_group = cpu_count / nb_grp;
|
||||
if (grp_generated < cpu_reminder)
|
||||
cpu_for_group++;
|
||||
grp_generated++;
|
||||
|
||||
if ((cpu_policy_conf.affinity & (CPU_AFFINITY_PER_GROUP | CPU_AFFINITY_PER_GROUP_LOOSE)) == CPU_AFFINITY_PER_GROUP) {
|
||||
int i = 0;
|
||||
int next_ccx;
|
||||
@ -1327,7 +1335,7 @@ cpu_policy_assign_threads(int cpu_count, int thr_count, struct hap_cpuset node_c
|
||||
* Try to allocate them from the same CCX, and then
|
||||
* the same TSID
|
||||
*/
|
||||
while (i < cpu_per_group) {
|
||||
while (i < cpu_for_group) {
|
||||
int next_cpu = 0;
|
||||
int got_cpu;
|
||||
|
||||
@ -1336,7 +1344,7 @@ cpu_policy_assign_threads(int cpu_count, int thr_count, struct hap_cpuset node_c
|
||||
if (next_ccx == -1)
|
||||
break;
|
||||
|
||||
while (i < cpu_per_group && (got_cpu = find_next_cpu_ccx(next_cpu, next_ccx)) != -1) {
|
||||
while (i < cpu_for_group && (got_cpu = find_next_cpu_ccx(next_cpu, next_ccx)) != -1) {
|
||||
int tsid;
|
||||
int got_cpu_tsid;
|
||||
int next_cpu_tsid = 0;
|
||||
@ -1345,7 +1353,7 @@ cpu_policy_assign_threads(int cpu_count, int thr_count, struct hap_cpuset node_c
|
||||
continue;
|
||||
tsid = ha_cpu_topo[got_cpu].ts_id;
|
||||
|
||||
while (i < cpu_per_group && (got_cpu_tsid = find_next_cpu_tsid(next_cpu_tsid, tsid)) != -1) {
|
||||
while (i < cpu_for_group && (got_cpu_tsid = find_next_cpu_tsid(next_cpu_tsid, tsid)) != -1) {
|
||||
next_cpu_tsid = got_cpu_tsid + 1;
|
||||
if (!ha_cpuset_isset(&node_cpu_set, ha_cpu_topo[got_cpu_tsid].idx))
|
||||
continue;
|
||||
@ -1360,7 +1368,7 @@ cpu_policy_assign_threads(int cpu_count, int thr_count, struct hap_cpuset node_c
|
||||
* At this point there is nothing left
|
||||
* for us in that CCX, forget about it.
|
||||
*/
|
||||
if (i < cpu_per_group)
|
||||
if (i < cpu_for_group)
|
||||
ha_cpuset_clr(&saved_touse_ccx, next_ccx);
|
||||
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user