Message-ID: <0fd099e9-f293-51f0-3a4e-76aead43dc39@oppo.com>
Date:   Tue, 21 Dec 2021 21:30:00 +0800
From:   xuhaifeng <xuhaifeng@...o.com>
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     mingo@...hat.com, juri.lelli@...hat.com, dietmar.eggemann@....com,
        rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
        bristot@...hat.com, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] sched: optimize __cond_resched_lock()

Thanks for your review and suggestion.
Your patch doesn't work if CONFIG_PREEMPTION=y and CONFIG_PREEMPT_DYNAMIC=y.
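
For context, __cond_resched() is compiled under roughly this guard in
kernel/sched/core.c (quoting from memory; the exact body may differ):

    #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
    int __sched __cond_resched(void)
    {
            if (should_resched(0)) {
                    preempt_schedule_common();
                    return 1;
            }
            /* ... */
            return 0;
    }
    EXPORT_SYMBOL(__cond_resched);
    #endif

With CONFIG_PREEMPT_DYNAMIC=y the real __cond_resched() is therefore still
built even when CONFIG_PREEMPTION=y, so the #else stub in your version is
never used in that configuration.
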
Can I change the patch like this?
---
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 83872f95a1ea..9b1e42f8ee60 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8202,6 +8202,15 @@ DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
 EXPORT_STATIC_CALL_TRAMP(might_resched);
 #endif

+static inline int cond_resched_preempt(void)
+{
+#ifdef CONFIG_PREEMPTION
+       return 0;
+#else
+       return __cond_resched();
+#endif
+}
+
 /*
  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
  * call schedule, and on return reacquire the lock.
@@ -8219,9 +8228,7 @@ int __cond_resched_lock(spinlock_t *lock)

        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
-               if (resched)
-                       preempt_schedule_common();
-               else
+               if (!cond_resched_preempt())
                        cpu_relax();
                ret = 1;
                spin_lock(lock);
@@ -8239,9 +8246,7 @@ int __cond_resched_rwlock_read(rwlock_t *lock)

        if (rwlock_needbreak(lock) || resched) {
                read_unlock(lock);
-               if (resched)
-                       preempt_schedule_common();
-               else
+               if (!cond_resched_preempt())
                        cpu_relax();
                ret = 1;
                read_lock(lock);
@@ -8259,9 +8264,7 @@ int __cond_resched_rwlock_write(rwlock_t *lock)

        if (rwlock_needbreak(lock) || resched) {
                write_unlock(lock);
-               if (resched)
-                       preempt_schedule_common();
-               else
+               if (!cond_resched_preempt())
                        cpu_relax();
                ret = 1;
                write_lock(lock);

On 12/21/2021 6:09 PM, Peter Zijlstra wrote:
> On Tue, Dec 21, 2021 at 09:52:28AM +0100, Peter Zijlstra wrote:
>> On Tue, Dec 21, 2021 at 03:23:16PM +0800, xuhaifeng wrote:
>>> If the kernel is preemptible (CONFIG_PREEMPTION=y), schedule() may be
>>> called twice: once via spin_unlock, once via preempt_schedule_common.
>>>
>>> We can add one more conditional that checks the TIF_NEED_RESCHED flag
>>> again to avoid this.
>>
>> You can also make it more similar to __cond_resched() instead of making
>> it more different.
>
> Bah, sorry, had to wake up first :/
>
> cond_resched_lock still needs to exist for PREEMPT because locks won't
> magically release themselves.
>
> Still don't much like the patch though, how's this work for you?
>
> That's arguably the right thing to do for PREEMPT_DYNAMIC too.
>
> ---
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 83872f95a1ea..79d3d5e15c4c 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -8192,6 +8192,11 @@ int __sched __cond_resched(void)
>       return 0;
>  }
>  EXPORT_SYMBOL(__cond_resched);
> +#else
> +static inline int __cond_resched(void)
> +{
> +     return 0;
> +}
>  #endif
>
>  #ifdef CONFIG_PREEMPT_DYNAMIC
> @@ -8219,9 +8224,7 @@ int __cond_resched_lock(spinlock_t *lock)
>
>       if (spin_needbreak(lock) || resched) {
>               spin_unlock(lock);
> -             if (resched)
> -                     preempt_schedule_common();
> -             else
> +             if (!__cond_resched())
>                       cpu_relax();
>               ret = 1;
>               spin_lock(lock);
> @@ -8239,9 +8242,7 @@ int __cond_resched_rwlock_read(rwlock_t *lock)
>
>       if (rwlock_needbreak(lock) || resched) {
>               read_unlock(lock);
> -             if (resched)
> -                     preempt_schedule_common();
> -             else
> +             if (!__cond_resched())
>                       cpu_relax();
>               ret = 1;
>               read_lock(lock);
> @@ -8259,9 +8260,7 @@ int __cond_resched_rwlock_write(rwlock_t *lock)
>
>       if (rwlock_needbreak(lock) || resched) {
>               write_unlock(lock);
> -             if (resched)
> -                     preempt_schedule_common();
> -             else
> +             if (!__cond_resched())
>                       cpu_relax();
>               ret = 1;
>               write_lock(lock);