Message-ID: <026ad776-a889-4213-8e0e-1da9065dc5ef@linux.ibm.com>
Date: Mon, 2 Dec 2024 19:35:35 +0530
From: Shrikanth Hegde <sshegde@...ux.ibm.com>
To: Christophe Leroy <christophe.leroy@...roup.eu>,
        linuxppc-dev@...ts.ozlabs.org
Cc: npiggin@...il.com, maddy@...ux.ibm.com, bigeasy@...utronix.de,
        ankur.a.arora@...cle.com, linux-kernel@...r.kernel.org,
        mark.rutland@....com, vschneid@...hat.com, peterz@...radead.org,
        Michael Ellerman <mpe@...erman.id.au>
Subject: Re: [PATCH 1/3] powerpc: copy preempt.h into arch/include/asm



On 11/27/24 12:07, Christophe Leroy wrote:
> 
> 
>> On 25/11/2024 at 05:22, Shrikanth Hegde wrote:
>> PowerPC uses asm-generic preempt definitions as of now.
>> Copy that into arch/asm so that arch specific changes can be done.
>> This would help the next patch for enabling dynamic preemption.
> 

The reason I wanted the content instead was to allow future patches: I was 
thinking of making the preempt count per paca, at least for ppc64, while the 
generic code assumes it is per thread. If that change can be done at that 
point instead, that is fair too. I am okay with it.
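
Roughly, what I had in mind for the per-paca variant is something like the 
below (only a sketch, not part of this series; the preempt_count field in 
struct paca_struct does not exist today and is shown purely for illustration):

#include <asm/paca.h>	/* local_paca */

static __always_inline int preempt_count(void)
{
	/* Hypothetical paca_struct::preempt_count: per CPU rather than per thread. */
	return READ_ONCE(local_paca->preempt_count);
}

static __always_inline void preempt_count_set(int pc)
{
	WRITE_ONCE(local_paca->preempt_count, pc);
}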


> Instead of copying all the content of the asm-generic version, can you 
> just create a receptacle for your new macros that includes 
> asm-generic/preempt.h?
> 
> Look at arch/powerpc/include/asm/percpu.h for an example.
>

You mean something like the below, right?


#ifndef __ASM_POWERPC_PREEMPT_H
#define __ASM_POWERPC_PREEMPT_H

#include <linux/jump_label.h>
#include <asm-generic/preempt.h>

#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#endif

#endif /* __ASM_POWERPC_PREEMPT_H */
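
For what it's worth, the consumer side in a later patch could then test the 
key along these lines (the need_irq_preemption() name is borrowed from 
arm64's entry code and is only illustrative here). As far as I understand, 
the key itself is flipped by the PREEMPT_DYNAMIC code in kernel/sched/core.c 
when the preemption model is switched:

/* Somewhere in the interrupt exit path, e.g. arch/powerpc/kernel/interrupt.c */
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
#define need_irq_preemption() \
	static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched)
#else
#define need_irq_preemption()	IS_ENABLED(CONFIG_PREEMPTION)
#endif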



>>
>> No functional changes intended.
>>
>> Signed-off-by: Shrikanth Hegde <sshegde@...ux.ibm.com>
>> ---
>>   arch/powerpc/include/asm/preempt.h | 100 +++++++++++++++++++++++++++++
>>   1 file changed, 100 insertions(+)
>>   create mode 100644 arch/powerpc/include/asm/preempt.h
>>
>> diff --git a/arch/powerpc/include/asm/preempt.h b/arch/powerpc/include/asm/preempt.h
>> new file mode 100644
>> index 000000000000..51f8f3881523
>> --- /dev/null
>> +++ b/arch/powerpc/include/asm/preempt.h
>> @@ -0,0 +1,100 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +#ifndef __ASM_PREEMPT_H
>> +#define __ASM_PREEMPT_H
> 
> Should be __ASM_POWERPC_PREEMPT_H

Thanks for catching this.

> 
>> +
>> +#include <linux/thread_info.h>
>> +
>> +#define PREEMPT_ENABLED    (0)
>> +
>> +static __always_inline int preempt_count(void)
>> +{
>> +    return READ_ONCE(current_thread_info()->preempt_count);
>> +}
>> +
>> +static __always_inline volatile int *preempt_count_ptr(void)
>> +{
>> +    return &current_thread_info()->preempt_count;
>> +}
>> +
>> +static __always_inline void preempt_count_set(int pc)
>> +{
>> +    *preempt_count_ptr() = pc;
>> +}
>> +
>> +/*
>> + * must be macros to avoid header recursion hell
>> + */
>> +#define init_task_preempt_count(p) do { \
>> +    task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
>> +} while (0)
>> +
>> +#define init_idle_preempt_count(p, cpu) do { \
>> +    task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
>> +} while (0)
>> +
>> +static __always_inline void set_preempt_need_resched(void)
>> +{
>> +}
>> +
>> +static __always_inline void clear_preempt_need_resched(void)
>> +{
>> +}
>> +
>> +static __always_inline bool test_preempt_need_resched(void)
>> +{
>> +    return false;
>> +}
>> +
>> +/*
>> + * The various preempt_count add/sub methods
>> + */
>> +
>> +static __always_inline void __preempt_count_add(int val)
>> +{
>> +    *preempt_count_ptr() += val;
>> +}
>> +
>> +static __always_inline void __preempt_count_sub(int val)
>> +{
>> +    *preempt_count_ptr() -= val;
>> +}
>> +
>> +static __always_inline bool __preempt_count_dec_and_test(void)
>> +{
>> +    /*
>> +     * Because load-store architectures cannot do per-cpu atomic
>> +     * operations, we cannot use PREEMPT_NEED_RESCHED because it might get
>> +     * lost.
>> +     */
>> +    return !--*preempt_count_ptr() && tif_need_resched();
>> +}
>> +
>> +/*
>> + * Returns true when we need to resched and can (barring IRQ state).
>> + */
>> +static __always_inline bool should_resched(int preempt_offset)
>> +{
>> +    return unlikely(preempt_count() == preempt_offset &&
>> +            tif_need_resched());
>> +}
>> +
>> +#ifdef CONFIG_PREEMPTION
>> +extern asmlinkage void preempt_schedule(void);
>> +extern asmlinkage void preempt_schedule_notrace(void);
>> +
>> +#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
>> +
>> +void dynamic_preempt_schedule(void);
>> +void dynamic_preempt_schedule_notrace(void);
>> +#define __preempt_schedule()        dynamic_preempt_schedule()
>> +#define __preempt_schedule_notrace()    dynamic_preempt_schedule_notrace()
>> +
>> +#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
>> +
>> +#define __preempt_schedule() preempt_schedule()
>> +#define __preempt_schedule_notrace() preempt_schedule_notrace()
>> +
>> +#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
>> +#endif /* CONFIG_PREEMPTION */
>> +
>> +#endif /* __ASM_PREEMPT_H */

