[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <aPpb-lAkNACezbkh@yury>
Date: Thu, 23 Oct 2025 12:46:50 -0400
From: Yury Norov <yury.norov@...il.com>
To: Thomas Gleixner <tglx@...utronix.de>
Cc: LKML <linux-kernel@...r.kernel.org>,
Peter Zijlstra <peterz@...radead.org>,
Gabriele Monaco <gmonaco@...hat.com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Michael Jeanson <mjeanson@...icios.com>,
Jens Axboe <axboe@...nel.dk>,
"Paul E. McKenney" <paulmck@...nel.org>,
"Gautham R. Shenoy" <gautham.shenoy@....com>,
Florian Weimer <fweimer@...hat.com>,
Tim Chen <tim.c.chen@...el.com>
Subject: Re: [patch V2 10/20] sched/mmcid: Convert mm CID mask to a bitmap
On Wed, Oct 22, 2025 at 02:55:32PM +0200, Thomas Gleixner wrote:
> This is truly a bitmap and just conveniently uses a cpumask because the
> maximum size of the bitmap is nr_cpu_ids.
>
> But that prevents doing searches for a zero bit in a limited range, which
> is helpful to provide an efficient mechanism to consolidate the CID space
> when the number of users decreases.
>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Acked-by: Yury Norov (NVIDIA) <yury.norov@...il.com>
> ---
> include/linux/mm_types.h | 6 +++---
> kernel/sched/core.c | 2 +-
> kernel/sched/sched.h | 6 +++---
> 3 files changed, 7 insertions(+), 7 deletions(-)
>
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -1342,13 +1342,13 @@ static inline cpumask_t *mm_cpus_allowed
> }
>
> /* Accessor for struct mm_struct's cidmask. */
> -static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
> +static inline unsigned long *mm_cidmask(struct mm_struct *mm)
> {
> unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm);
>
> /* Skip mm_cpus_allowed */
> cid_bitmap += cpumask_size();
> - return (struct cpumask *)cid_bitmap;
> + return (unsigned long *)cid_bitmap;
> }
>
> static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
> @@ -1363,7 +1363,7 @@ static inline void mm_init_cid(struct mm
> mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
> raw_spin_lock_init(&mm->mm_cid.lock);
> cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
> - cpumask_clear(mm_cidmask(mm));
> + bitmap_zero(mm_cidmask(mm), cpumask_size());
> }
>
> static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -10399,7 +10399,7 @@ void sched_mm_cid_exit_signals(struct ta
> guard(preempt)();
> t->mm_cid.active = 0;
> if (t->mm_cid.cid != MM_CID_UNSET) {
> - cpumask_clear_cpu(t->mm_cid.cid, mm_cidmask(mm));
> + clear_bit(t->mm_cid.cid, mm_cidmask(mm));
> t->mm_cid.cid = MM_CID_UNSET;
> }
> }
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -3558,7 +3558,7 @@ static inline bool __mm_cid_get(struct t
>
> if (cid >= max_cids)
> return false;
> - if (cpumask_test_and_set_cpu(cid, mm_cidmask(mm)))
> + if (test_and_set_bit(cid, mm_cidmask(mm)))
> return false;
> t->mm_cid.cid = t->mm_cid.last_cid = cid;
> __this_cpu_write(mm->mm_cid.pcpu->cid, cid);
> @@ -3581,7 +3581,7 @@ static inline bool mm_cid_get(struct tas
> return true;
>
> /* Try the first zero bit in the cidmask. */
> - return __mm_cid_get(t, cpumask_first_zero(mm_cidmask(mm)), max_cids);
> + return __mm_cid_get(t, find_first_zero_bit(mm_cidmask(mm), num_possible_cpus()), max_cids);
> }
>
> static inline void mm_cid_select(struct task_struct *t)
> @@ -3602,7 +3602,7 @@ static inline void switch_mm_cid(struct
> {
> if (prev->mm_cid.active) {
> if (prev->mm_cid.cid != MM_CID_UNSET)
> - cpumask_clear_cpu(prev->mm_cid.cid, mm_cidmask(prev->mm));
> + clear_bit(prev->mm_cid.cid, mm_cidmask(prev->mm));
> prev->mm_cid.cid = MM_CID_UNSET;
> }
>
Powered by blists - more mailing lists