Message-Id: <1650818276-129374-1-git-send-email-CruzZhao@linux.alibaba.com>
Date:   Mon, 25 Apr 2022 00:37:56 +0800
From:   Cruz Zhao <CruzZhao@...ux.alibaba.com>
To:     mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
        vincent.guittot@...aro.org, dietmar.eggemann@....com,
        rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
        akpm@...ux-foundation.org, tglx@...utronix.de, luto@...nel.org,
        legion@...nel.org, fenghua.yu@...el.com, david@...hat.com,
        bristot@...hat.com, bigeasy@...utronix.de, ebiederm@...ssion.com
Cc:     linux-kernel@...r.kernel.org
Subject: [PATCH] sched/core: Skip sched_core_fork/free() when core sched is disabled

As __put_task_struct() and copy_process() are hot-path functions, the
calls to sched_core_fork()/sched_core_free() add needless overhead when
core scheduling is disabled. Skip them by checking sched_core_disabled()
first.

Signed-off-by: Cruz Zhao <CruzZhao@...ux.alibaba.com>
---
 include/linux/sched.h | 10 ++++++++++
 kernel/fork.c         |  9 ++++++---
 kernel/sched/sched.h  | 10 ----------
 3 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index f64f8f2..a2266df 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2406,9 +2406,19 @@ static inline void rseq_syscall(struct pt_regs *regs)
 extern void sched_core_fork(struct task_struct *p);
 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
 				unsigned long uaddr);
+DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);
+static inline bool sched_core_disabled(void)
+{
+	return !static_branch_unlikely(&__sched_core_enabled);
+}
+
 #else
 static inline void sched_core_free(struct task_struct *tsk) { }
 static inline void sched_core_fork(struct task_struct *p) { }
+static inline bool sched_core_disabled(void)
+{
+	return true;
+}
 #endif
 
 #endif
diff --git a/kernel/fork.c b/kernel/fork.c
index 0d13baf..611f80b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -843,7 +843,8 @@ void __put_task_struct(struct task_struct *tsk)
 	exit_creds(tsk);
 	delayacct_tsk_free(tsk);
 	put_signal_struct(tsk->signal);
-	sched_core_free(tsk);
+	if (!sched_core_disabled())
+		sched_core_free(tsk);
 	free_task(tsk);
 }
 EXPORT_SYMBOL_GPL(__put_task_struct);
@@ -2381,7 +2382,8 @@ static __latent_entropy struct task_struct *copy_process(
 
 	klp_copy_process(p);
 
-	sched_core_fork(p);
+	if (!sched_core_disabled())
+		sched_core_fork(p);
 
 	spin_lock(&current->sighand->siglock);
 
@@ -2469,7 +2471,8 @@ static __latent_entropy struct task_struct *copy_process(
 	return p;
 
 bad_fork_cancel_cgroup:
-	sched_core_free(p);
+	if (!sched_core_disabled())
+		sched_core_free(p);
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
 	cgroup_cancel_fork(p, args);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5b21448..c6aeeda 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1157,11 +1157,6 @@ static inline bool sched_core_enabled(struct rq *rq)
 	return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
 }
 
-static inline bool sched_core_disabled(void)
-{
-	return !static_branch_unlikely(&__sched_core_enabled);
-}
-
 /*
  * Be careful with this function; not for general use. The return value isn't
  * stable unless you actually hold a relevant rq->__lock.
@@ -1257,11 +1252,6 @@ static inline bool sched_core_enabled(struct rq *rq)
 	return false;
 }
 
-static inline bool sched_core_disabled(void)
-{
-	return true;
-}
-
 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 {
 	return &rq->__lock;
-- 
1.8.3.1
