MEDIUM: thread: Turn the group mask in thread set into a group counter

If we want to be able to have more than 64 thread groups, we can no
longer store thread group masks in a single ulong.

One remaining place where this is still done is struct thread_set.
However, its mask is not really used as a mask anywhere; all that is
needed there is a thread group count, so turn that mask into a counter.
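
[Editor's note] To make the limitation concrete, here is a minimal sketch
(illustrative only, not code from this patch) of the two bookkeeping
styles: a ulong bit field can only name LONGBITS (usually 64) groups,
while a counter is indifferent to the group ID:

    /* Illustrative only: the mask caps the representable group IDs,
     * the counter does not.
     */
    typedef unsigned long ulong;
    #define LONGBITS ((int)(8 * sizeof(ulong)))

    static int note_group_in_mask(ulong *grps, int grp)
    {
            if (grp >= LONGBITS)
                    return -1;        /* group 65+ cannot be represented */
            *grps |= 1UL << grp;
            return 0;
    }

    static void note_group_in_counter(ulong *rel, ulong *nbgrps, int grp, ulong bit)
    {
            if (!rel[grp])            /* first thread seen in this group */
                    (*nbgrps)++;      /* works for any grp value */
            rel[grp] |= bit;
    }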
Olivier Houchard, 2026-01-15 05:10:03 +01:00
parent 6249698840, commit a209c35f30
4 changed files with 23 additions and 21 deletions


@@ -42,7 +42,7 @@ struct thread_set {
 		ulong abs[(MAX_THREADS + LONGBITS - 1) / LONGBITS];
 		ulong rel[MAX_TGROUPS];
 	};
-	ulong grps;   /* bit field of all non-empty groups, 0 for abs */
+	ulong nbgrps; /* Number of thread groups, 0 for abs */
 };
 
 /* tasklet classes */

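[Editor's note] The counter replaces derived information (a popcount of
the old mask), so it carries one invariant: nbgrps equals the number of
non-zero rel[] slots, and stays 0 when absolute numbering is in use. A
hypothetical debug helper, assuming the struct above and HAProxy's
BUG_ON(), could assert this:

    /* Hypothetical check, not part of the patch: recount the non-empty
     * groups and compare against the cached counter.
     */
    static inline void thread_set_check_nbgrps(const struct thread_set *ts)
    {
            ulong n = 0;
            int i;

            for (i = 0; i < MAX_TGROUPS; i++)
                    if (ts->rel[i])
                            n++;
            BUG_ON(ts->nbgrps != n);
    }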

@@ -77,7 +77,7 @@ static inline int thread_set_nth_group(const struct thread_set *ts, int n)
 {
 	int i;
 
-	if (ts->grps) {
+	if (ts->nbgrps) {
 		for (i = 0; i < MAX_TGROUPS; i++)
 			if (ts->rel[i] && !n--)
 				return i + 1;
@@ -95,7 +95,7 @@ static inline ulong thread_set_nth_tmask(const struct thread_set *ts, int n)
 {
 	int i;
 
-	if (ts->grps) {
+	if (ts->nbgrps) {
 		for (i = 0; i < MAX_TGROUPS; i++)
 			if (ts->rel[i] && !n--)
 				return ts->rel[i];
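
[Editor's note] Both helpers only used the old mask as a boolean "is
relative numbering in use?" test; the scan itself already keys on
rel[i] being non-zero, so swapping in the counter changes nothing
observable. As a hedged usage sketch (dump_thread_set() is hypothetical,
requires <stdio.h>), the counter also gives the natural loop bound when
enumerating a set:

    /* Sketch: walk the nth group/mask pairs of a set; the iteration
     * count now reads directly off the counter.
     */
    static void dump_thread_set(const struct thread_set *ts)
    {
            ulong n;

            for (n = 0; n < ts->nbgrps; n++)
                    printf("group %d mask %#lx\n",
                           thread_set_nth_group(ts, n),
                           thread_set_nth_tmask(ts, n));
    }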
@@ -111,7 +111,7 @@ static inline void thread_set_pin_grp1(struct thread_set *ts, ulong mask)
 {
 	int i;
 
-	ts->grps = 1;
+	ts->nbgrps = 1;
 	ts->rel[0] = mask;
 	for (i = 1; i < MAX_TGROUPS; i++)
 		ts->rel[i] = 0;

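[Editor's note] After thread_set_pin_grp1() exactly one rel[] slot is
non-zero, so assigning 1 to the counter is as trivially correct as the
old grps = 1 mask (group 1 is bit 0 either way). A hypothetical caller,
with an arbitrary example mask:

    struct thread_set ts = { 0 };

    thread_set_pin_grp1(&ts, 0x3);    /* pin to threads 1-2 of group 1 */
    /* now ts.nbgrps == 1, ts.rel[0] == 0x3, all other rel[] are 0 */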

@@ -1756,7 +1756,8 @@ int bind_complete_thread_setup(struct bind_conf *bind_conf, int *err_code)
 	struct listener *li, *new_li, *ref;
 	struct thread_set new_ts;
 	int shard, shards, todo, done, grp, dups;
-	ulong mask, gmask, bit;
+	ulong mask, bit;
+	int nbgrps;
 	int cfgerr = 0;
 	char *err;
@@ -1788,7 +1789,7 @@ int bind_complete_thread_setup(struct bind_conf *bind_conf, int *err_code)
 			}
 		}
 		else if (shards == -2)
-			shards = protocol_supports_flag(li->rx.proto, PROTO_F_REUSEPORT_SUPPORTED) ? my_popcountl(bind_conf->thread_set.grps) : 1;
+			shards = protocol_supports_flag(li->rx.proto, PROTO_F_REUSEPORT_SUPPORTED) ? bind_conf->thread_set.nbgrps : 1;
 
 		/* no more shards than total threads */
 		if (shards > todo)
@@ -1821,25 +1822,25 @@ int bind_complete_thread_setup(struct bind_conf *bind_conf, int *err_code)
 			/* take next unassigned bit */
 			bit = (bind_conf->thread_set.rel[grp] & ~mask) & -(bind_conf->thread_set.rel[grp] & ~mask);
+			if (!new_ts.rel[grp])
+				new_ts.nbgrps++;
 			new_ts.rel[grp] |= bit;
 			mask |= bit;
-			new_ts.grps |= 1UL << grp;
 
 			done += shards;
 		};
 
-		BUG_ON(!new_ts.grps); // no more bits left unassigned
+		BUG_ON(!new_ts.nbgrps); // no more group ?
 
 		/* Create all required listeners for all bound groups. If more than one group is
 		 * needed, the first receiver serves as a reference, and subsequent ones point to
 		 * it. We already have a listener available in new_li() so we only allocate a new
-		 * one if we're not on the last one. We count the remaining groups by copying their
-		 * mask into <gmask> and dropping the lowest bit at the end of the loop until there
-		 * is no more. Ah yes, it's not pretty :-/
+		 * one if we're not on the last one.
 		 *
 		 */
 		ref = new_li;
-		gmask = new_ts.grps;
-		for (dups = 0; gmask; dups++) {
+		nbgrps = new_ts.nbgrps;
+		for (dups = 0; nbgrps; dups++) {
 			/* assign the first (and only) thread and group */
 			new_li->rx.bind_thread = thread_set_nth_tmask(&new_ts, dups);
 			new_li->rx.bind_tgroup = thread_set_nth_group(&new_ts, dups);
@@ -1856,8 +1857,8 @@ int bind_complete_thread_setup(struct bind_conf *bind_conf, int *err_code)
 				new_li->rx.flags |= ref->rx.flags & RX_F_INHERITED_SOCK;
 			}
 
-			gmask &= gmask - 1; // drop lowest bit
-			if (gmask) {
+			nbgrps--;
+			if (nbgrps) {
 				/* yet another listener expected in this shard, let's
 				 * chain it.
 				 */
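
[Editor's note] The dropped idiom is Kernighan's clear-lowest-set-bit
trick: gmask &= gmask - 1 removes one group per iteration until the mask
is empty. A decrementing counter iterates exactly as often and, unlike
the mask, keeps working past 64 groups. The two idioms side by side
(illustrative only, listener setup elided):

    /* old: one group consumed per iteration by clearing the lowest
     * set bit; the loop ends when the mask is exhausted
     */
    for (dups = 0; gmask; dups++) {
            /* ... create/chain listener for the dups-th group ... */
            gmask &= gmask - 1;       /* drop lowest set bit */
    }

    /* new: identical iteration count, taken from the counter */
    for (dups = 0; nbgrps; dups++) {
            /* ... create/chain listener for the dups-th group ... */
            nbgrps--;                 /* one group done */
    }

In the real function the decrement happens mid-body, exactly where the
old code dropped the bit, so the remaining count can still gate the
chaining of the next listener, as the hunk above shows.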
@@ -2672,7 +2673,7 @@ static int bind_parse_thread(char **args, int cur_arg, struct proxy *px, struct
 	l = LIST_NEXT(&conf->listeners, struct listener *, by_bind);
 	if (l->rx.addr.ss_family == AF_CUST_RHTTP_SRV &&
-	    atleast2(conf->thread_set.grps)) {
+	    conf->thread_set.nbgrps >= 2) {
 		memprintf(err, "'%s' : reverse HTTP bind cannot span multiple thread groups.", args[cur_arg]);
 		return ERR_ALERT | ERR_FATAL;
 	}

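[Editor's note] The rejected-configuration test changes form but not
meaning. Assuming HAProxy's usual definition of atleast2() as
(x) & ((x) - 1), it is non-zero exactly when x has two or more bits set,
which on the group mask read as "spans several groups"; with a counter
the same condition becomes an ordinary comparison:

    /* assumed definition, shown for reference */
    #define atleast2(x) ((x) & ((x) - 1))

    /* old: non-zero iff at least two bits are set in the mask */
    if (atleast2(conf->thread_set.grps))
            goto reject;

    /* new: the counter states the same thing literally */
    if (conf->thread_set.nbgrps >= 2)
            goto reject;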

@@ -1524,7 +1524,7 @@ int thread_resolve_group_mask(struct thread_set *ts, int defgrp, char **err)
 	ulong mask, imask;
 	uint g;
 
-	if (!ts->grps) {
+	if (!ts->nbgrps) {
 		/* unspecified group, IDs are global */
 		if (thread_set_is_empty(ts)) {
 			/* all threads of all groups, unless defgrp is set and
@@ -1533,7 +1533,7 @@ int thread_resolve_group_mask(struct thread_set *ts, int defgrp, char **err)
 			for (g = defgrp ? defgrp-1 : 0; g < (defgrp ? defgrp : global.nbtgroups); g++) {
 				new_ts.rel[g] = ha_tgroup_info[g].threads_enabled;
 				if (new_ts.rel[g])
-					new_ts.grps |= 1UL << g;
+					new_ts.nbgrps++;
 			}
 		} else {
 			/* some absolute threads are set, we must remap them to
@@ -1558,7 +1558,7 @@ int thread_resolve_group_mask(struct thread_set *ts, int defgrp, char **err)
 				 */
 				new_ts.rel[g] |= mask;
 				if (new_ts.rel[g])
-					new_ts.grps |= 1UL << g;
+					new_ts.nbgrps++;
 			}
 		}
 	} else {
@@ -1596,7 +1596,7 @@ int thread_resolve_group_mask(struct thread_set *ts, int defgrp, char **err)
 			new_ts.rel[g] = imask & mask;
 			if (new_ts.rel[g])
-				new_ts.grps |= 1UL << g;
+				new_ts.nbgrps++;
 		}
 	}
@@ -1943,7 +1943,8 @@ int parse_thread_set(const char *arg, struct thread_set *ts, char **err)
 		if (ts) {
 			if (is_rel) {
 				/* group-relative thread numbers */
-				ts->grps |= 1UL << (tg - 1);
+				if (ts->rel[tg - 1] == 0)
+					ts->nbgrps++;
 
 				if (max >= min) {
 					for (v = min; v <= max; v++)
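
[Editor's note] The guard on ts->rel[tg - 1] matters because the same
group may be named several times within one thread specification; the
counter must only grow the first time a group becomes non-empty, whereas
the old code could harmlessly re-OR the same mask bit. A small sketch of
the counting rule, with hypothetical group/mask values:

    /* Sketch: two ranges naming the same group must count it once. */
    struct thread_set ts = { 0 };

    /* first range in group 2: group was empty -> counted */
    if (ts.rel[1] == 0)
            ts.nbgrps++;              /* 0 -> 1 */
    ts.rel[1] |= 0x0f;

    /* second range in group 2: already non-empty -> not recounted */
    if (ts.rel[1] == 0)
            ts.nbgrps++;              /* not taken, stays 1 */
    ts.rel[1] |= 0x80;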