Message-ID: <9c798af6-aa2c-5f76-7a50-ef1983f9bdcd@redhat.com>
Date: Tue, 27 Sep 2022 11:28:13 -0400
From: Waiman Long <longman@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
Boqun Feng <boqun.feng@...il.com>,
Steven Rostedt <rostedt@...dmis.org>
Cc: linux-kernel@...r.kernel.org
Subject: Re: [Resend PATCH v2] tracing: Disable interrupt or preemption before
acquiring arch_spinlock_t
On 9/22/22 10:56, Waiman Long wrote:
> It was found that some tracing functions in kernel/trace/trace.c acquire
> an arch_spinlock_t with preemption and irqs enabled. An example is the
> tracing_saved_cmdlines_size_read() function which intermittently causes
> a "BUG: using smp_processor_id() in preemptible" warning when the LTP
> read_all_proc test is run.
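
A bit of background for reviewers: unlike spin_lock(), arch_spin_lock()
is the raw architecture-level primitive and does not disable preemption
itself, so the caller has to provide a safe context. A minimal sketch of
the problematic pattern (demo_lock and buggy_read() are made-up names
for illustration, not the actual trace.c code):

        static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

        static void buggy_read(void)
        {
                /* Preemption and irqs are still enabled here. */
                arch_spin_lock(&demo_lock);
                /*
                 * On contention, the arch slow path may use per-CPU
                 * data, which can fire "BUG: using smp_processor_id()
                 * in preemptible"; a preempted lock holder also leaves
                 * other CPUs spinning for a full timeslice.
                 */
                arch_spin_unlock(&demo_lock);
        }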
>
> That is problematic if the task is preempted after acquiring the
> lock. Add the necessary preemption- or interrupt-disabling code in the
> appropriate places before acquiring an arch_spinlock_t.
>
> The convention here is to disable preemption for trace_cmdline_lock and
> interrupts for max_lock.
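
Concretely, the convention boils down to the following pattern (a
condensed sketch of the hunks below, not new code):

        /* trace_cmdline_lock: preemption disabled around the lock */
        preempt_disable();
        arch_spin_lock(&trace_cmdline_lock);
        /* ... critical section ... */
        arch_spin_unlock(&trace_cmdline_lock);
        preempt_enable();

        /* tr->max_lock: interrupts disabled around the lock */
        local_irq_disable();
        arch_spin_lock(&tr->max_lock);
        /* ... critical section ... */
        arch_spin_unlock(&tr->max_lock);
        local_irq_enable();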
>
> Fixes: a35873a0993b ("tracing: Add conditional snapshot")
> Fixes: 939c7a4f04fc ("tracing: Introduce saved_cmdlines_size file")
> Suggested-by: Steven Rostedt <rostedt@...dmis.org>
> Signed-off-by: Waiman Long <longman@...hat.com>
> ---
> kernel/trace/trace.c | 23 +++++++++++++++++++++++
> 1 file changed, 23 insertions(+)
Ping!
Any comment on this patch?
Thanks,
Longman
>
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index d3005279165d..aed7ea6e6045 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -1193,12 +1193,14 @@ void *tracing_cond_snapshot_data(struct trace_array *tr)
> {
> void *cond_data = NULL;
>
> + local_irq_disable();
> arch_spin_lock(&tr->max_lock);
>
> if (tr->cond_snapshot)
> cond_data = tr->cond_snapshot->cond_data;
>
> arch_spin_unlock(&tr->max_lock);
> + local_irq_enable();
>
> return cond_data;
> }
> @@ -1334,9 +1336,11 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
> goto fail_unlock;
> }
>
> + local_irq_disable();
> arch_spin_lock(&tr->max_lock);
> tr->cond_snapshot = cond_snapshot;
> arch_spin_unlock(&tr->max_lock);
> + local_irq_enable();
>
> mutex_unlock(&trace_types_lock);
>
> @@ -1363,6 +1367,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
> {
> int ret = 0;
>
> + local_irq_disable();
> arch_spin_lock(&tr->max_lock);
>
> if (!tr->cond_snapshot)
> @@ -1373,6 +1378,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
> }
>
> arch_spin_unlock(&tr->max_lock);
> + local_irq_enable();
>
> return ret;
> }
> @@ -2200,6 +2206,11 @@ static size_t tgid_map_max;
>
> #define SAVED_CMDLINES_DEFAULT 128
> #define NO_CMDLINE_MAP UINT_MAX
> +/*
> + * Preemption must be disabled before acquiring trace_cmdline_lock.
> + * The various trace_arrays' max_lock must be acquired in a context
> + * where interrupts are disabled.
> + */
> static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
> struct saved_cmdlines_buffer {
> unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
> @@ -2412,7 +2423,11 @@ static int trace_save_cmdline(struct task_struct *tsk)
> * the lock, but we also don't want to spin
> * nor do we want to disable interrupts,
> * so if we miss here, then better luck next time.
> + *
> + * This is called within the scheduler and wakeup paths, so interrupts
> + * had better be disabled and the run queue lock held.
> */
> + lockdep_assert_preemption_disabled();
> if (!arch_spin_trylock(&trace_cmdline_lock))
> return 0;
>
> @@ -5890,9 +5905,11 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
> char buf[64];
> int r;
>
> + preempt_disable();
> arch_spin_lock(&trace_cmdline_lock);
> r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
> arch_spin_unlock(&trace_cmdline_lock);
> + preempt_enable();
>
> return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
> }
> @@ -5917,10 +5934,12 @@ static int tracing_resize_saved_cmdlines(unsigned int val)
> return -ENOMEM;
> }
>
> + preempt_disable();
> arch_spin_lock(&trace_cmdline_lock);
> savedcmd_temp = savedcmd;
> savedcmd = s;
> arch_spin_unlock(&trace_cmdline_lock);
> + preempt_enable();
> free_saved_cmdlines_buffer(savedcmd_temp);
>
> return 0;
> @@ -6373,10 +6392,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
>
> #ifdef CONFIG_TRACER_SNAPSHOT
> if (t->use_max_tr) {
> + local_irq_disable();
> arch_spin_lock(&tr->max_lock);
> if (tr->cond_snapshot)
> ret = -EBUSY;
> arch_spin_unlock(&tr->max_lock);
> + local_irq_enable();
> if (ret)
> goto out;
> }
> @@ -7436,10 +7457,12 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
> goto out;
> }
>
> + local_irq_disable();
> arch_spin_lock(&tr->max_lock);
> if (tr->cond_snapshot)
> ret = -EBUSY;
> arch_spin_unlock(&tr->max_lock);
> + local_irq_enable();
> if (ret)
> goto out;
>