Message-ID: <CAFTs51VHC5nyAb7bf-jvuYDRwLS5kWP98T336Uu7h+RMFb0ZqA@mail.gmail.com>
Date: Fri, 7 Aug 2020 10:50:42 -0700
From: Peter Oskolkov <posk@...k.io>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Peter Oskolkov <posk@...gle.com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
"Paul E . McKenney" <paulmck@...nel.org>,
Boqun Feng <boqun.feng@...il.com>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
Paul Turner <pjt@...gle.com>,
Chris Kennelly <ckennelly@...gle.com>
Subject: Re: [PATCH 1/2 v2] rseq/membarrier: add MEMBARRIER_CMD_PRIVATE_RESTART_RSEQ_ON_CPU

On Fri, Aug 7, 2020 at 6:38 AM <peterz@...radead.org> wrote:
>
[...]
> I'm thinking even this is a problem, we can end up sending IPIs to CPUs
> outside out partition (they might be NOHZ_FULL) and that's a no-no too.
>
> Something like so perhaps... that really limits it to CPUs that match
> our mm.
Thanks for the suggestion - I'll prepare a v3 based on your and
Mathieu's feedback.
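
For anyone following the thread, the intended call from userspace would be
roughly the sketch below. The command name and the cpu-in-flags encoding
follow the patch as quoted underneath; the command value and the wrapper
name are illustrative only, since nothing has landed in the uapi headers
yet. (A sketch of the rseq registration this pairs with follows the quoted
patch.)

#include <sys/syscall.h>
#include <unistd.h>

/* Not in <linux/membarrier.h> yet; value is a placeholder. */
#ifndef MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ
#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ	(1 << 7)
#endif

/*
 * Hypothetical wrapper: in this v2 the target CPU rides in the
 * syscall's "flags" argument, which the kernel side hands to
 * membarrier_private_expedited() as cpu_id; a negative cpu_id
 * falls back to IPI-ing every CPU currently running a thread
 * of this mm.
 */
static inline int restart_rseq_on_cpu(int cpu)
{
	return syscall(__NR_membarrier,
		       MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, cpu);
}
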
>
> diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> index 6be66f52a2ad..bee5e98e6774 100644
> --- a/include/linux/sched/mm.h
> +++ b/include/linux/sched/mm.h
> @@ -356,6 +356,7 @@ enum {
>
> enum {
> 	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
> +	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
> };
>
> #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
> diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
> index 168479a7d61b..4d9b22c2f5e2 100644
> --- a/kernel/sched/membarrier.c
> +++ b/kernel/sched/membarrier.c
> @@ -27,6 +27,11 @@
>
> static void ipi_mb(void *info)
> {
> +	int *flags = info;
> +
> +	if (flags && (*flags & MEMBARRIER_FLAG_RSEQ))
> +		rseq_preempt(current);
> +
> 	smp_mb();	/* IPIs should be serializing but paranoid. */
> }
>
> @@ -129,11 +134,11 @@ static int membarrier_global_expedited(void)
> 	return 0;
> }
>
> -static int membarrier_private_expedited(int flags)
> +static int membarrier_private_expedited(int flags, int cpu_id)
> {
> -	int cpu;
> -	cpumask_var_t tmpmask;
> 	struct mm_struct *mm = current->mm;
> +	cpumask_var_t tmpmask;
> +	int cpu;
>
> 	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
> 		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
> @@ -174,6 +179,10 @@ static int membarrier_private_expedited(int flags)
> 		 */
> 		if (cpu == raw_smp_processor_id())
> 			continue;
> +
> +		if (cpu_id >= 0 && cpu != cpu_id)
> +			continue;
> +
> 		p = rcu_dereference(cpu_rq(cpu)->curr);
> 		if (p && p->mm == mm)
> 			__cpumask_set_cpu(cpu, tmpmask);
> @@ -181,7 +190,7 @@ static int membarrier_private_expedited(int flags)
> 	rcu_read_unlock();
>
> 	preempt_disable();
> -	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
> +	smp_call_function_many(tmpmask, ipi_mb, &flags, 1);
> 	preempt_enable();
>
> 	free_cpumask_var(tmpmask);
> @@ -362,11 +371,13 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
> 	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
> 		return membarrier_register_global_expedited();
> 	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
> -		return membarrier_private_expedited(0);
> +		return membarrier_private_expedited(0, -1);
> 	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
> 		return membarrier_register_private_expedited(0);
> 	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
> -		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
> +		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE, -1);
> +	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
> +		return membarrier_private_expedited(MEMBARRIER_FLAG_RSEQ, flags);
> 	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
> 		return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
> 	default:
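
For completeness, the userspace state that the rseq_preempt() in ipi_mb()
above acts on: each thread registers a struct rseq with the kernel, and an
rseq critical section in flight on the IPI'd CPU is then forced onto its
abort path, so per-CPU data can be revalidated. A bare registration sketch
follows; the critical-section descriptors and asm glue live in
tools/testing/selftests/rseq and are omitted here, and the signature value
below is the one the selftests use (it is caller-chosen, not defined by the
uapi header):

#include <linux/rseq.h>
#include <sys/syscall.h>
#include <unistd.h>

#define RSEQ_SIG	0x53053053	/* caller-chosen abort signature */

/* One rseq area per thread; must stay registered for the thread's life. */
static __thread struct rseq rseq_area __attribute__((aligned(32)));

static int rseq_register_current_thread(void)
{
	/* No glibc wrapper as of this writing, so use the raw syscall. */
	return syscall(__NR_rseq, &rseq_area, sizeof(rseq_area), 0, RSEQ_SIG);
}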