Message-Id: <20161103145021.28528-24-bigeasy@linutronix.de>
Date: Thu, 3 Nov 2016 15:50:19 +0100
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: linux-kernel@...r.kernel.org
Cc: rt@...utronix.de,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Tony Luck <tony.luck@...el.com>,
Borislav Petkov <bp@...en8.de>, linux-edac@...r.kernel.org,
x86@...nel.org, Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH 23/25] x86/mcheck: Make CPU_DOWN_PREPARE the counterpart of CPU_STARTING

The previous patch moved mcheck_cpu_init() out of identify_cpu() and made it
the first CPU hotplug callback, invoked on the target CPU during bring-up. It
enables MCE and starts the MCE timer. If a CPU goes down, those two things
have to be reverted, which currently happens in CPU_DOWN_PREPARE. That is not
symmetrical, because CPU_DOWN_PREPARE is the counterpart of CPU_ONLINE, not of
CPU_STARTING.

Usually CPU_DOWN_FAILED and CPU_ONLINE can do the same thing, but not in this
case: here CPU_DOWN_FAILED has to revert what was done in CPU_DOWN_PREPARE.

To make this simpler, turn the CPU_DOWN_PREPARE work into the counterpart of
mcheck_cpu_starting(): the teardown callback just disables MCE and stops the
timer. With this change the callbacks are symmetrical again, and the
CPU_DOWN_FAILED handling, including mce_reenable_cpu(), is no longer needed.
The smp_call_function_single() call can be dropped because the teardown
callback is already invoked on the affected CPU and interrupts are disabled
at that point.
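
For context: the hotplug state machine pairs a startup and a teardown callback
for the same state, and for a state in the *_STARTING section both callbacks
run on the affected CPU with interrupts disabled. Below is a minimal, purely
illustrative sketch of such a pairing; the CPUHP_AP_FOO_STARTING constant and
the foo_*() callbacks are made-up placeholders, not part of this patch:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

/* Runs on the incoming CPU early during bring-up, interrupts disabled. */
static int foo_starting(unsigned int cpu)
{
	/* e.g. enable the per-CPU feature, start a per-CPU timer, ... */
	return 0;
}

/* Exact revert of foo_starting(), runs on the outgoing CPU, IRQs disabled. */
static int foo_dying(unsigned int cpu)
{
	/* e.g. disable the per-CPU feature, stop the per-CPU timer, ... */
	return 0;
}

static int __init foo_init(void)
{
	/*
	 * Register both callbacks for one state; since both run on the
	 * affected CPU, no smp_call_function_single() is needed.
	 */
	return cpuhp_setup_state(CPUHP_AP_FOO_STARTING, "foo:starting",
				 foo_starting, foo_dying);
}

The actual registration in this patch passes mcheck_cpu_starting() and
mce_cpu_down_dying() for CPUHP_AP_X86_MCE_STARTING, see the last hunk below.
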
Cc: Tony Luck <tony.luck@...el.com>
Cc: Borislav Petkov <bp@...en8.de>
Cc: linux-edac@...r.kernel.org
Cc: x86@...nel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
arch/x86/kernel/cpu/mcheck/mce.c | 44 +++++++++++-----------------------------
1 file changed, 12 insertions(+), 32 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 72af9db8526d..596a7128a46b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2469,43 +2469,22 @@ static void mce_device_remove(unsigned int cpu)
}
/* Make sure there are no machine checks on offlined CPUs. */
-static void mce_disable_cpu(void *h)
+static void mce_disable_cpu(void)
{
- unsigned long action = *(unsigned long *)h;
-
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
- if (!(action & CPU_TASKS_FROZEN))
+ if (!cpuhp_tasks_frozen)
cmci_clear();
vendor_disable_error_reporting();
}
-static void mce_reenable_cpu(void *h)
-{
- unsigned long action = *(unsigned long *)h;
- int i;
-
- if (!mce_available(raw_cpu_ptr(&cpu_info)))
- return;
-
- if (!(action & CPU_TASKS_FROZEN))
- cmci_reenable();
- for (i = 0; i < mca_cfg.banks; i++) {
- struct mce_bank *b = &mce_banks[i];
-
- if (b->init)
- wrmsrl(msr_ops.ctl(i), b->ctl);
- }
-}
-
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct timer_list *t = &per_cpu(mce_timer, cpu);
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
@@ -2530,19 +2509,20 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
if (!(action & CPU_TASKS_FROZEN))
cmci_rediscover();
break;
- case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
- del_timer_sync(t);
- break;
- case CPU_DOWN_FAILED:
- smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
- mce_start_timer(cpu, t);
- break;
}
return NOTIFY_OK;
}
+static int mce_cpu_down_dying(unsigned int cpu)
+{
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
+
+ mce_disable_cpu();
+ del_timer_sync(t);
+ return 0;
+}
+
static struct notifier_block mce_cpu_notifier = {
.notifier_call = mce_cpu_callback,
};
@@ -2597,7 +2577,7 @@ static __init int mcheck_init_device(void)
goto err_init_pool;
err = cpuhp_setup_state(CPUHP_AP_X86_MCE_STARTING, "x86/mce:starting",
- mcheck_cpu_starting, NULL);
+ mcheck_cpu_starting, mce_cpu_down_dying);
if (err)
goto err_init_pool;
--
2.10.2