Message-Id: <20250325085807.171885-1-davydov-max@yandex-team.ru>
Date: Tue, 25 Mar 2025 11:58:07 +0300
From: Maksim Davydov <davydov-max@...dex-team.ru>
To: x86@...nel.org,
linux-kernel@...r.kernel.org
Cc: davydov-max@...dex-team.ru,
den-plotnikov@...dex-team.ru,
gpiccoli@...lia.com,
mingo@...hat.com,
bp@...en8.de,
dave.hansen@...ux.intel.com,
tglx@...utronix.de,
hpa@...or.com
Subject: [PATCH] x86/split_lock: simplify reenabling
When split_lock_mitigate is disabled, each CPU needs its own delayed_work
structure, which is used to reenable split lock detection after it has been
disabled. A delayed_work structure must be properly initialized after its
allocation, and the current implementation defers that initialization into
the split lock handler, which makes the handler code unclear. The code can
be simplified by moving the initialization to an appropriate initcall.
sld_setup() is called before setup_per_cpu_areas(), so it can't be used for
this purpose; instead, add an independent initcall that performs the
initialization.
Signed-off-by: Maksim Davydov <davydov-max@...dex-team.ru>
---
arch/x86/kernel/cpu/bus_lock.c | 36 +++++++++++++++++++++++-----------
1 file changed, 25 insertions(+), 11 deletions(-)
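
For context, a minimal sketch of the initcall pattern described in the
changelog above (not part of the patch; example_work, example_work_fn and
example_work_init are placeholder names): a per-CPU delayed_work can't use
a static initializer because each CPU's copy lives at a different address,
so every copy is initialized once from an early initcall, which runs after
setup_per_cpu_areas().

/* Sketch only; identifiers here are placeholders, not kernel symbols. */
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct delayed_work, example_work);

static void example_work_fn(struct work_struct *work)
{
	/* per-CPU re-enable action would go here */
}

/*
 * Initcalls run after setup_per_cpu_areas(), so per_cpu_ptr() already
 * returns valid per-CPU addresses at this point.
 */
static int __init example_work_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		INIT_DELAYED_WORK(per_cpu_ptr(&example_work, cpu),
				  example_work_fn);

	return 0;
}
pure_initcall(example_work_init);
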
diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c
index 97222efb4d2a..5149e3874c92 100644
--- a/arch/x86/kernel/cpu/bus_lock.c
+++ b/arch/x86/kernel/cpu/bus_lock.c
@@ -200,6 +200,26 @@ static void __split_lock_reenable(struct work_struct *work)
  */
 static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
 
+/*
+ * Per-CPU delayed_work can't be statically initialized properly because
+ * the struct address is unknown. Thus per-CPU delayed_work structures
+ * have to be initialized during kernel initialization and after calling
+ * setup_per_cpu_areas().
+ */
+static int __init setup_split_lock_delayed_work(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu);
+
+		INIT_DELAYED_WORK(work, __split_lock_reenable);
+	}
+
+	return 0;
+}
+pure_initcall(setup_split_lock_delayed_work);
+
 /*
  * If a CPU goes offline with pending delayed work to re-enable split lock
  * detection then the delayed work will be executed on some other CPU. That
@@ -219,15 +239,16 @@ static int splitlock_cpu_offline(unsigned int cpu)
 
 static void split_lock_warn(unsigned long ip)
 {
-	struct delayed_work *work = NULL;
+	struct delayed_work *work;
 	int cpu;
+	unsigned int saved_sld_mitigate = READ_ONCE(sysctl_sld_mitigate);
 
 	if (!current->reported_split_lock)
 		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
 				    current->comm, current->pid, ip);
 	current->reported_split_lock = 1;
 
-	if (sysctl_sld_mitigate) {
+	if (saved_sld_mitigate) {
 		/*
 		 * misery factor #1:
 		 * sleep 10ms before trying to execute split lock.
@@ -240,18 +261,11 @@ static void split_lock_warn(unsigned long ip)
 		 */
 		if (down_interruptible(&buslock_sem) == -EINTR)
 			return;
-		work = &sl_reenable_unlock;
 	}
 
 	cpu = get_cpu();
-
-	if (!work) {
-		work = this_cpu_ptr(&sl_reenable);
-		/* Deferred initialization of per-CPU struct */
-		if (!work->work.func)
-			INIT_DELAYED_WORK(work, __split_lock_reenable);
-	}
-
+	work = (saved_sld_mitigate ?
+		&sl_reenable_unlock : per_cpu_ptr(&sl_reenable, cpu));
 	schedule_delayed_work_on(cpu, work, 2);
 
 	/* Disable split lock detection on this CPU to make progress */
--
2.34.1