Message-ID: <20190812053014.27743-1-satendrasingh.thakur@hcl.com>
Date:   Mon, 12 Aug 2019 05:31:43 +0000
From:   Satendra Singh Thakur <satendrasingh.thakur@....com>
To:     unlisted-recipients:; (no To-header on input)
CC:     Satendra Singh Thakur <satendrasingh.thakur@....com>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: [PATCH] [semaphore] Remove redundant code from semaphore's down
 family of functions

-The semaphore code has four functions:
 down,
 down_interruptible,
 down_killable,
 down_timeout
-These four functions have almost identical code; each of them
 calls a lower-level function __down_xyz.
-That lower-level function in turn calls the inline function
 __down_common with the appropriate arguments.
-This patch introduces a common macro for the above family of
 functions so that the duplicated code is eliminated.
-Also, __down_common has been made noinline so that, as before,
 the slow path is not inlined into the down*() fast paths and the
 generated code stays functionally similar to the previous one.
-For example, down_killable used to call __down_killable, which
 in turn called the inline function __down_common. Now,
 down_killable calls the noinline __down_common directly through
 the macro; see the expansion sketch below.
-The functions __down_interruptible, __down_killable, etc. have
 been removed, as they were merely wrappers around __down_common.
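
For illustration, here is a rough sketch (not part of the patch) of what
down_killable() reduces to once the down_common() macro from the diff is
expanded by the preprocessor; everything below is derived from the macro
body itself:

int down_killable(struct semaphore *sem)
{
	/* expansion of down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT) */
	return ({
		int ret = 0;
		unsigned long flags;

		raw_spin_lock_irqsave(&(sem)->lock, flags);
		if (likely((sem)->count > 0))
			(sem)->count--;		/* fast path: count available */
		else
			ret = __down_common(sem, TASK_KILLABLE,
					    MAX_SCHEDULE_TIMEOUT);	/* contended: sleep */
		raw_spin_unlock_irqrestore(&(sem)->lock, flags);
		ret;		/* value of the ({ ... }) statement expression */
	});
}

Because down_common() is a GNU C statement expression, its value can be
returned directly (as in down_interruptible/down_killable/down_timeout)
or simply ignored (as in down()).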

Signed-off-by: Satendra Singh Thakur <satendrasingh.thakur@....com>
---
 kernel/locking/semaphore.c | 107 +++++++++++++------------------------
 1 file changed, 38 insertions(+), 69 deletions(-)

diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c
index d9dd94defc0a..0468bc335908 100644
--- a/kernel/locking/semaphore.c
+++ b/kernel/locking/semaphore.c
@@ -33,11 +33,33 @@
 #include <linux/spinlock.h>
 #include <linux/ftrace.h>

-static noinline void __down(struct semaphore *sem);
-static noinline int __down_interruptible(struct semaphore *sem);
-static noinline int __down_killable(struct semaphore *sem);
-static noinline int __down_timeout(struct semaphore *sem, long timeout);
+static noinline int __sched __down_common(struct semaphore *sem, long state,
+								long timeout);
 static noinline void __up(struct semaphore *sem);
+/**
+ * down_common - acquire the semaphore
+ * @sem: the semaphore to be acquired
+ * @state: the state in which the task will sleep if it has to wait
+ * @timeout: either MAX_SCHEDULE_TIMEOUT or the actual timeout specified
+ *
+ * Acquires the semaphore.  If no more tasks are allowed to acquire the
+ * semaphore, calling this macro will put the task to sleep until the
+ * semaphore is released.
+ *
+ * The contended path calls __down_common() internally.
+ */
+#define down_common(sem, state, timeout) \
+({ \
+	int ret = 0; \
+	unsigned long flags; \
+	raw_spin_lock_irqsave(&(sem)->lock, flags); \
+	if (likely((sem)->count > 0)) \
+		(sem)->count--; \
+	else \
+		ret = __down_common(sem, state, timeout); \
+	raw_spin_unlock_irqrestore(&(sem)->lock, flags); \
+	ret; \
+})

 /**
  * down - acquire the semaphore
@@ -52,14 +74,7 @@ static noinline void __up(struct semaphore *sem);
  */
 void down(struct semaphore *sem)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
-		__down(sem);
-	raw_spin_unlock_irqrestore(&sem->lock, flags);
+	down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(down);

@@ -74,17 +89,7 @@ EXPORT_SYMBOL(down);
  */
 int down_interruptible(struct semaphore *sem)
 {
-	unsigned long flags;
-	int result = 0;
-
-	raw_spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
-		result = __down_interruptible(sem);
-	raw_spin_unlock_irqrestore(&sem->lock, flags);
-
-	return result;
+	return down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(down_interruptible);

@@ -100,17 +105,7 @@ EXPORT_SYMBOL(down_interruptible);
  */
 int down_killable(struct semaphore *sem)
 {
-	unsigned long flags;
-	int result = 0;
-
-	raw_spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
-		result = __down_killable(sem);
-	raw_spin_unlock_irqrestore(&sem->lock, flags);
-
-	return result;
+	return down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(down_killable);

@@ -154,17 +149,7 @@ EXPORT_SYMBOL(down_trylock);
  */
 int down_timeout(struct semaphore *sem, long timeout)
 {
-	unsigned long flags;
-	int result = 0;
-
-	raw_spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
-		result = __down_timeout(sem, timeout);
-	raw_spin_unlock_irqrestore(&sem->lock, flags);
-
-	return result;
+	return down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
 }
 EXPORT_SYMBOL(down_timeout);

@@ -196,12 +181,15 @@ struct semaphore_waiter {
 	bool up;
 };

-/*
- * Because this function is inlined, the 'state' parameter will be
- * constant, and thus optimised away by the compiler.  Likewise the
- * 'timeout' parameter for the cases without timeouts.
+/**
+ * __down_common - add the current task to the wait list and sleep
+ * @sem: the semaphore to be acquired
+ * @state: the state in which the calling task will sleep
+ * @timeout: either MAX_SCHEDULE_TIMEOUT or the actual timeout specified
+ *
+ * The task sleeps until it is woken by up(), receives a signal, or times out.
  */
-static inline int __sched __down_common(struct semaphore *sem, long state,
+static noinline int __sched __down_common(struct semaphore *sem, long state,
 								long timeout)
 {
 	struct semaphore_waiter waiter;
@@ -232,25 +220,6 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 	return -EINTR;
 }

-static noinline void __sched __down(struct semaphore *sem)
-{
-	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
-}
-
-static noinline int __sched __down_interruptible(struct semaphore *sem)
-{
-	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
-}
-
-static noinline int __sched __down_killable(struct semaphore *sem)
-{
-	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
-}
-
-static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
-{
-	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
-}

 static noinline void __sched __up(struct semaphore *sem)
 {
--
2.17.1

