Message-ID: <20190124170409.GC5531@lakrids.cambridge.arm.com>
Date: Thu, 24 Jan 2019 17:04:09 +0000
From: Mark Rutland <mark.rutland@....com>
To: Christophe Leroy <christophe.leroy@....fr>
Cc: Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Michael Ellerman <mpe@...erman.id.au>,
Nicholas Piggin <npiggin@...il.com>,
Mike Rapoport <rppt@...ux.ibm.com>,
linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org
Subject: Re: [PATCH v14 05/12] powerpc: prep stack walkers for
THREAD_INFO_IN_TASK
On Thu, Jan 24, 2019 at 04:19:39PM +0000, Christophe Leroy wrote:
> [text copied from commit 9bbd4c56b0b6
> ("arm64: prep stack walkers for THREAD_INFO_IN_TASK")]
>
> When CONFIG_THREAD_INFO_IN_TASK is selected, task stacks may be freed
> before a task is destroyed. To account for this, the stacks are
> refcounted, and when manipulating the stack of another task, it is
> necessary to get/put the stack to ensure it isn't freed and/or re-used
> while we do so.
>
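[ For reference, the pattern being introduced has roughly this shape. A
  minimal sketch only: the walker body is elided and the wrapper name is
  invented for illustration; the real call sites are in the diff below.

	static unsigned long example_stack_walker(struct task_struct *p)
	{
		unsigned long ret = 0;

		/* Pin the stack so it cannot be freed or reused under us. */
		if (!try_get_task_stack(p))
			return 0;	/* no stack, e.g. the task has exited */

		/* ... walk p's stack and compute ret here ... */

		/* Drop the reference taken above. */
		put_task_stack(p);

		return ret;
	}
]
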
> This patch reworks the powerpc stack walking code to account for this.
> When CONFIG_THREAD_INFO_IN_TASK is not selected, the get/put operations
> perform no refcounting, so this should be a purely structural change
> that does not affect behaviour.
>
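[ When CONFIG_THREAD_INFO_IN_TASK is not selected, the get/put helpers
  reduce to no-ops along these lines (a sketch only; the real definitions
  live in include/linux/sched/task_stack.h):

	static inline void *try_get_task_stack(struct task_struct *tsk)
	{
		/* The stack lives as long as the task; nothing to pin. */
		return task_stack_page(tsk);
	}

	static inline void put_task_stack(struct task_struct *tsk) { }
]
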
> Signed-off-by: Christophe Leroy <christophe.leroy@....fr>
I'm not familiar with the powerpc code, but AFAICT this is analogous to
the arm64 code, and I'm not aware of any special caveats. FWIW:
Acked-by: Mark Rutland <mark.rutland@....com>
Thanks,
Mark.
> ---
> arch/powerpc/kernel/process.c | 23 +++++++++++++++++++++--
> arch/powerpc/kernel/stacktrace.c | 29 ++++++++++++++++++++++++++---
> 2 files changed, 47 insertions(+), 5 deletions(-)
>
> diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
> index ce393df243aa..4ffbb677c9f5 100644
> --- a/arch/powerpc/kernel/process.c
> +++ b/arch/powerpc/kernel/process.c
> @@ -2027,7 +2027,7 @@ int validate_sp(unsigned long sp, struct task_struct *p,
>
> EXPORT_SYMBOL(validate_sp);
>
> -unsigned long get_wchan(struct task_struct *p)
> +static unsigned long __get_wchan(struct task_struct *p)
> {
> unsigned long ip, sp;
> int count = 0;
> @@ -2053,6 +2053,20 @@ unsigned long get_wchan(struct task_struct *p)
> return 0;
> }
>
> +unsigned long get_wchan(struct task_struct *p)
> +{
> + unsigned long ret;
> +
> + if (!try_get_task_stack(p))
> + return 0;
> +
> + ret = __get_wchan(p);
> +
> + put_task_stack(p);
> +
> + return ret;
> +}
> +
> static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
>
> void show_stack(struct task_struct *tsk, unsigned long *stack)
> @@ -2067,6 +2081,9 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
> int curr_frame = 0;
> #endif
>
> + if (!try_get_task_stack(tsk))
> + return;
> +
> sp = (unsigned long) stack;
> if (tsk == NULL)
> tsk = current;
> @@ -2081,7 +2098,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
> printk("Call Trace:\n");
> do {
> if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
> - return;
> + break;
>
> stack = (unsigned long *) sp;
> newsp = stack[0];
> @@ -2121,6 +2138,8 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
>
> sp = newsp;
> } while (count++ < kstack_depth_to_print);
> +
> + put_task_stack(tsk);
> }
>
> #ifdef CONFIG_PPC64
> diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
> index e2c50b55138f..a5745571e06e 100644
> --- a/arch/powerpc/kernel/stacktrace.c
> +++ b/arch/powerpc/kernel/stacktrace.c
> @@ -67,12 +67,17 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
> {
> unsigned long sp;
>
> + if (!try_get_task_stack(tsk))
> + return;
> +
> if (tsk == current)
> sp = current_stack_pointer();
> else
> sp = tsk->thread.ksp;
>
> save_context_stack(trace, sp, tsk, 0);
> +
> + put_task_stack(tsk);
> }
> EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
>
> @@ -84,9 +89,8 @@ save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
> EXPORT_SYMBOL_GPL(save_stack_trace_regs);
>
> #ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
> -int
> -save_stack_trace_tsk_reliable(struct task_struct *tsk,
> - struct stack_trace *trace)
> +static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
> + struct stack_trace *trace)
> {
> unsigned long sp;
> unsigned long stack_page = (unsigned long)task_stack_page(tsk);
> @@ -193,6 +197,25 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk,
> }
> return 0;
> }
> +
> +int save_stack_trace_tsk_reliable(struct task_struct *tsk,
> + struct stack_trace *trace)
> +{
> + int ret;
> +
> + /*
> + * If the task doesn't have a stack (e.g., a zombie), the stack is
> + * "reliably" empty.
> + */
> + if (!try_get_task_stack(tsk))
> + return 0;
> +
> +	ret = __save_stack_trace_tsk_reliable(tsk, trace);
> +
> + put_task_stack(tsk);
> +
> + return ret;
> +}
> EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
> #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
>
> --
> 2.13.3
>