Message-ID: <a96062bc-8bcc-4ca0-b24e-ea64f9ee6329@csgroup.eu>
Date: Tue, 26 Nov 2024 11:49:49 +0100
From: Christophe Leroy <christophe.leroy@...roup.eu>
To: Shrikanth Hegde <sshegde@...ux.ibm.com>, mpe@...erman.id.au,
linuxppc-dev@...ts.ozlabs.org
Cc: npiggin@...il.com, maddy@...ux.ibm.com, bigeasy@...utronix.de,
ankur.a.arora@...cle.com, linux-kernel@...r.kernel.org,
mark.rutland@....com, vschneid@...hat.com, peterz@...radead.org
Subject: Re: [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm
On 25/11/2024 at 05:22, Shrikanth Hegde wrote:
> PowerPC uses the asm-generic preempt definitions as of now.
> Copy them into arch/powerpc/include/asm so that arch-specific changes
> can be made. This will help the next patch enable dynamic preemption.
I can't see any valid use of this in the following patches. The only
modification you make to that file is in patch 2, and it is unused.
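For reference, the kind of arch-specific change that would justify the
copy is what arm64 carries in its preempt.h once
CONFIG_HAVE_PREEMPT_DYNAMIC_KEY is selected. A minimal sketch, using the
identifiers arm64 uses (a powerpc version would have to define its own
key and test it on its interrupt-exit path):

	/* Toggled by the scheduler core (sched_dynamic_update()) when the
	 * preemption mode changes at runtime. */
	#ifdef CONFIG_PREEMPT_DYNAMIC
	DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
	/* Checked on interrupt exit to decide whether to preempt. */
	#define need_irq_preemption() \
		(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
	#else
	#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
	#endif

Without something like that actually being used by the architecture, the
copy brings nothing over the asm-generic version.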
>
> No functional changes intended.
>
> Signed-off-by: Shrikanth Hegde <sshegde@...ux.ibm.com>
> ---
> arch/powerpc/include/asm/preempt.h | 100 +++++++++++++++++++++++++++++
> 1 file changed, 100 insertions(+)
> create mode 100644 arch/powerpc/include/asm/preempt.h
>
> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
> new file mode 100644
> index 000000000000..51f8f3881523
> --- /dev/null
> +++ b/arch/powerpc/include/asm/preempt.h
> @@ -0,0 +1,100 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef __ASM_PREEMPT_H
> +#define __ASM_PREEMPT_H
> +
> +#include <linux/thread_info.h>
> +
> +#define PREEMPT_ENABLED (0)
> +
> +static __always_inline int preempt_count(void)
> +{
> + return READ_ONCE(current_thread_info()->preempt_count);
> +}
> +
> +static __always_inline volatile int *preempt_count_ptr(void)
> +{
> + return &current_thread_info()->preempt_count;
> +}
> +
> +static __always_inline void preempt_count_set(int pc)
> +{
> + *preempt_count_ptr() = pc;
> +}
> +
> +/*
> + * must be macros to avoid header recursion hell
> + */
> +#define init_task_preempt_count(p) do { \
> + task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
> +} while (0)
> +
> +#define init_idle_preempt_count(p, cpu) do { \
> + task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
> +} while (0)
> +
> +static __always_inline void set_preempt_need_resched(void)
> +{
> +}
> +
> +static __always_inline void clear_preempt_need_resched(void)
> +{
> +}
> +
> +static __always_inline bool test_preempt_need_resched(void)
> +{
> + return false;
> +}
> +
> +/*
> + * The various preempt_count add/sub methods
> + */
> +
> +static __always_inline void __preempt_count_add(int val)
> +{
> + *preempt_count_ptr() += val;
> +}
> +
> +static __always_inline void __preempt_count_sub(int val)
> +{
> + *preempt_count_ptr() -= val;
> +}
> +
> +static __always_inline bool __preempt_count_dec_and_test(void)
> +{
> + /*
> + * Because load-store architectures cannot do per-cpu atomic
> + * operations, we cannot use PREEMPT_NEED_RESCHED because it might get
> + * lost.
> + */
> + return !--*preempt_count_ptr() && tif_need_resched();
> +}
> +
> +/*
> + * Returns true when we need to resched and can (barring IRQ state).
> + */
> +static __always_inline bool should_resched(int preempt_offset)
> +{
> + return unlikely(preempt_count() == preempt_offset &&
> + tif_need_resched());
> +}
> +
> +#ifdef CONFIG_PREEMPTION
> +extern asmlinkage void preempt_schedule(void);
> +extern asmlinkage void preempt_schedule_notrace(void);
> +
> +#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
> +
> +void dynamic_preempt_schedule(void);
> +void dynamic_preempt_schedule_notrace(void);
> +#define __preempt_schedule() dynamic_preempt_schedule()
> +#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
> +
> +#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */
> +
> +#define __preempt_schedule() preempt_schedule()
> +#define __preempt_schedule_notrace() preempt_schedule_notrace()
> +
> +#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */
> +#endif /* CONFIG_PREEMPTION */
> +
> +#endif /* __ASM_PREEMPT_H */
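As context for why __preempt_count_dec_and_test() folds in
tif_need_resched(): the generic layer consumes these primitives roughly
as below (from include/linux/preempt.h under CONFIG_PREEMPTION; without
CONFIG_DEBUG_PREEMPT, preempt_count_dec_and_test() maps straight to
__preempt_count_dec_and_test()). Quoting from memory, so the exact form
may differ between kernel versions:

	#define preempt_enable() \
	do { \
		barrier(); \
		if (unlikely(preempt_count_dec_and_test())) \
			__preempt_schedule(); \
	} while (0)

So whatever the arch-specific version ends up doing, the
decrement-and-test must keep reporting the need-resched state, or
preempt_enable() stops rescheduling.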