Message-Id: <20230624031333.96597-11-alexei.starovoitov@gmail.com>
Date: Fri, 23 Jun 2023 20:13:30 -0700
From: Alexei Starovoitov <alexei.starovoitov@...il.com>
To: daniel@...earbox.net,
    andrii@...nel.org,
    void@...ifault.com,
    houtao@...weicloud.com,
    paulmck@...nel.org
Cc: tj@...nel.org,
    rcu@...r.kernel.org,
    netdev@...r.kernel.org,
    bpf@...r.kernel.org,
    kernel-team@...com
Subject: [PATCH v2 bpf-next 10/13] rcu: Export rcu_request_urgent_qs_task()

From: "Paul E. McKenney" <paulmck@...nel.org>

If a CPU is executing a long series of non-sleeping system calls,
RCU grace periods can be delayed on the order of a couple hundred
milliseconds.  This is normally not a problem, but if each system call
does a call_rcu(), those callbacks can stack up.  RCU will eventually
notice this callback storm, but use of rcu_request_urgent_qs_task()
allows the code invoking call_rcu() to give RCU a heads-up.

This function is not for general use, not yet, anyway.
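
For context, here is a minimal sketch (not taken from this series) of
the kind of call_rcu() producer that can make use of this export.
Everything except call_rcu() and rcu_request_urgent_qs_task() is
invented for illustration, and the threshold in particular is
arbitrary:

	#include <linux/atomic.h>
	#include <linux/container_of.h>
	#include <linux/rcupdate.h>
	#include <linux/sched.h>
	#include <linux/slab.h>

	struct foo {
		struct rcu_head rcu;
	};

	/* Callbacks queued but not yet invoked (illustrative only). */
	static atomic_t nr_pending;
	#define FOO_URGENT_QS_THRESHOLD 1000	/* arbitrary */

	static void foo_free_rcu(struct rcu_head *rcu)
	{
		atomic_dec(&nr_pending);
		kfree(container_of(rcu, struct foo, rcu));
	}

	static void foo_release(struct foo *f)
	{
		/*
		 * Once enough callbacks have stacked up, ask RCU to
		 * push the current task toward a quiescent state so
		 * the backlog can start draining sooner.
		 */
		if (atomic_inc_return(&nr_pending) > FOO_URGENT_QS_THRESHOLD)
			rcu_request_urgent_qs_task(current);
		call_rcu(&f->rcu, foo_free_rcu);
	}

Note that the Tiny RCU stub added below makes the call a no-op on
!SMP kernels, so a caller like this needs no conditional compilation.
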
Reported-by: Alexei Starovoitov <ast@...nel.org>
Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
Signed-off-by: Alexei Starovoitov <ast@...nel.org>
---
 include/linux/rcutiny.h | 2 ++
 include/linux/rcutree.h | 1 +
 kernel/rcu/rcu.h        | 2 --
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7f17acf29dda..7b949292908a 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -138,6 +138,8 @@ static inline int rcu_needs_cpu(void)
 	return 0;
 }
 
+static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
+
 /*
  * Take advantage of the fact that there is only one CPU, which
  * allows us to ignore virtualization-based context switches.
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 56bccb5a8fde..126f6b418f6a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -21,6 +21,7 @@ void rcu_softirq_qs(void);
 void rcu_note_context_switch(bool preempt);
 int rcu_needs_cpu(void);
 void rcu_cpu_stall_reset(void);
+void rcu_request_urgent_qs_task(struct task_struct *t);
 
 /*
  * Note a virtualization-based context switch. This is simply a
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 4a1b9622598b..6f5fb3f7ebf3 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -493,7 +493,6 @@ static inline void rcu_expedite_gp(void) { }
 static inline void rcu_unexpedite_gp(void) { }
 static inline void rcu_async_hurry(void) { }
 static inline void rcu_async_relax(void) { }
-static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
 #else /* #ifdef CONFIG_TINY_RCU */
 bool rcu_gp_is_normal(void); /* Internal RCU use. */
 bool rcu_gp_is_expedited(void); /* Internal RCU use. */
@@ -508,7 +507,6 @@ void show_rcu_tasks_gp_kthreads(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
 static inline void show_rcu_tasks_gp_kthreads(void) {}
 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
-void rcu_request_urgent_qs_task(struct task_struct *t);
 #endif /* #else #ifdef CONFIG_TINY_RCU */
 
 #define RCU_SCHEDULER_INACTIVE 0
--
2.34.1