[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251104075427.289432429@linutronix.de>
Date: Sun, 16 Nov 2025 21:48:53 +0100 (CET)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Peter Zijlstra <peterz@...radead.org>,
Gabriele Monaco <gmonaco@...hat.com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Michael Jeanson <mjeanson@...icios.com>,
Jens Axboe <axboe@...nel.dk>,
"Paul E. McKenney" <paulmck@...nel.org>,
"Gautham R. Shenoy" <gautham.shenoy@....com>,
Florian Weimer <fweimer@...hat.com>,
Tim Chen <tim.c.chen@...el.com>,
Yury Norov <yury.norov@...il.com>,
Shrikanth Hegde <sshegde@...ux.ibm.com>
Subject: [patch V4 10/20] sched/mmcid: Convert mm CID mask to a bitmap
This is truly a bitmap and just conveniently uses a cpumask because the
maximum size of the bitmap is nr_cpu_ids.
But that prevents searching for a zero bit in a limited range, which is
helpful for providing an efficient mechanism to consolidate the CID space
when the number of users decreases.
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Acked-by: Yury Norov (NVIDIA) <yury.norov@...il.com>
---
V4: Allocate bitmap_size(nr_possible_cpus()) - Mathieu
V4: Zero the bitmap with nrbits=nr_possible_cpus() - Brown paperbag
V3: Zero the bitmap with length of bitmap_size(nr_possible_cpus()) - Shrikanth
---
include/linux/mm_types.h | 9 +++++----
kernel/sched/core.c | 2 +-
kernel/sched/sched.h | 6 +++---
3 files changed, 9 insertions(+), 8 deletions(-)
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1342,13 +1342,13 @@ static inline cpumask_t *mm_cpus_allowed
}
/* Accessor for struct mm_struct's cidmask. */
-static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
+static inline unsigned long *mm_cidmask(struct mm_struct *mm)
{
unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm);
/* Skip mm_cpus_allowed */
cid_bitmap += cpumask_size();
- return (struct cpumask *)cid_bitmap;
+ return (unsigned long *)cid_bitmap;
}
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
@@ -1363,7 +1363,7 @@ static inline void mm_init_cid(struct mm
mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
raw_spin_lock_init(&mm->mm_cid.lock);
cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
- cpumask_clear(mm_cidmask(mm));
+ bitmap_zero(mm_cidmask(mm), num_possible_cpus());
}
static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
@@ -1384,7 +1384,8 @@ static inline void mm_destroy_cid(struct
static inline unsigned int mm_cid_size(void)
{
- return 2 * cpumask_size(); /* mm_cpus_allowed(), mm_cidmask(). */
+ /* mm_cpus_allowed(), mm_cidmask(). */
+ return cpumask_size() + bitmap_size(num_possible_cpus());
}
#else /* CONFIG_SCHED_MM_CID */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10402,7 +10402,7 @@ void sched_mm_cid_exit_signals(struct ta
guard(preempt)();
t->mm_cid.active = 0;
if (t->mm_cid.cid != MM_CID_UNSET) {
- cpumask_clear_cpu(t->mm_cid.cid, mm_cidmask(mm));
+ clear_bit(t->mm_cid.cid, mm_cidmask(mm));
t->mm_cid.cid = MM_CID_UNSET;
}
}
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3559,7 +3559,7 @@ static inline bool __mm_cid_get(struct t
if (cid >= max_cids)
return false;
- if (cpumask_test_and_set_cpu(cid, mm_cidmask(mm)))
+ if (test_and_set_bit(cid, mm_cidmask(mm)))
return false;
t->mm_cid.cid = t->mm_cid.last_cid = cid;
__this_cpu_write(mm->mm_cid.pcpu->cid, cid);
@@ -3582,7 +3582,7 @@ static inline bool mm_cid_get(struct tas
return true;
/* Try the first zero bit in the cidmask. */
- return __mm_cid_get(t, cpumask_first_zero(mm_cidmask(mm)), max_cids);
+ return __mm_cid_get(t, find_first_zero_bit(mm_cidmask(mm), num_possible_cpus()), max_cids);
}
static inline void mm_cid_select(struct task_struct *t)
@@ -3603,7 +3603,7 @@ static inline void switch_mm_cid(struct
{
if (prev->mm_cid.active) {
if (prev->mm_cid.cid != MM_CID_UNSET)
- cpumask_clear_cpu(prev->mm_cid.cid, mm_cidmask(prev->mm));
+ clear_bit(prev->mm_cid.cid, mm_cidmask(prev->mm));
prev->mm_cid.cid = MM_CID_UNSET;
}
Powered by blists - more mailing lists