[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251015172834.884261347@linutronix.de>
Date: Wed, 15 Oct 2025 19:29:40 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Peter Zijlstra <peterz@...radead.org>,
Gabriele Monaco <gmonaco@...hat.com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Michael Jeanson <mjeanson@...icios.com>,
Jens Axboe <axboe@...nel.dk>,
"Paul E. McKenney" <paulmck@...nel.org>,
"Gautham R. Shenoy" <gautham.shenoy@....com>,
Florian Weimer <fweimer@...hat.com>,
Tim Chen <tim.c.chen@...el.com>,
TCMalloc Team <tcmalloc-eng@...gle.com>
Subject: [patch 09/19] sched/mmcid: Convert mm CID mask to a bitmap
This is truly a bitmap and just conveniently uses a cpumask because the
maximum size of the bitmap is nr_cpu_ids.
But that prevents searching for a zero bit in a limited range, which
is helpful for providing an efficient mechanism to consolidate the CID
space when the number of users decreases.
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
include/linux/mm_types.h | 6 +++---
kernel/sched/core.c | 2 +-
kernel/sched/sched.h | 6 +++---
3 files changed, 7 insertions(+), 7 deletions(-)
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1296,13 +1296,13 @@ static inline cpumask_t *mm_cpus_allowed
}
/* Accessor for struct mm_struct's cidmask. */
-static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
+static inline unsigned long *mm_cidmask(struct mm_struct *mm)
{
unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm);
/* Skip mm_cpus_allowed */
cid_bitmap += cpumask_size();
- return (struct cpumask *)cid_bitmap;
+ return (unsigned long *)cid_bitmap;
}
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
@@ -1317,7 +1317,7 @@ static inline void mm_init_cid(struct mm
mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
raw_spin_lock_init(&mm->mm_cid.lock);
cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
- cpumask_clear(mm_cidmask(mm));
+ bitmap_zero(mm_cidmask(mm), nr_cpu_ids);
}
static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10437,7 +10437,7 @@ void sched_mm_cid_exit_signals(struct ta
guard(preempt)();
t->mm_cid.active = 0;
if (t->mm_cid.cid != MM_CID_UNSET) {
- cpumask_clear_cpu(t->mm_cid.cid, mm_cidmask(mm));
+ clear_bit(t->mm_cid.cid, mm_cidmask(mm));
t->mm_cid.cid = MM_CID_UNSET;
}
}
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3553,7 +3553,7 @@ static inline bool __mm_cid_get(struct t
if (cid >= max_cids)
return false;
- if (cpumask_test_and_set_cpu(cid, mm_cidmask(mm)))
+ if (test_and_set_bit(cid, mm_cidmask(mm)))
return false;
t->mm_cid.cid = t->mm_cid.last_cid = cid;
__this_cpu_write(mm->mm_cid.pcpu->cid, cid);
@@ -3576,7 +3576,7 @@ static inline bool mm_cid_get(struct tas
return true;
/* Try the first zero bit in the cidmask. */
- return __mm_cid_get(t, cpumask_first_zero(mm_cidmask(mm)), max_cids);
+ return __mm_cid_get(t, find_first_zero_bit(mm_cidmask(mm), nr_cpu_ids), max_cids);
}
static inline void mm_cid_select(struct task_struct *t)
@@ -3597,7 +3597,7 @@ static inline void switch_mm_cid(struct
{
if (prev->mm_cid.active) {
if (prev->mm_cid.cid != MM_CID_UNSET)
- cpumask_clear_cpu(prev->mm_cid.cid, mm_cidmask(prev->mm));
+ clear_bit(prev->mm_cid.cid, mm_cidmask(prev->mm));
prev->mm_cid.cid = MM_CID_UNSET;
}
Powered by blists - more mailing lists