Message-ID: <7083f81f-cded-44f8-1586-46a1e44f0786@posteo.net>
Date: Thu, 26 May 2022 11:48:01 +0000
From: Derek Dolney <z23@...teo.net>
To: Vincent Donnefort <vdonnefort@...gle.com>, peterz@...radead.org,
tglx@...utronix.de
Cc: linux-kernel@...r.kernel.org, vschneid@...hat.com,
kernel-team@...roid.com
Subject: Re: [PATCH v2] cpu/hotplug: Do not bail-out in DYING/STARTING sections
I tested this patch both on the 5.12 commit that broke suspend and on the
latest 5.18 git branch, and it looks good: suspend and resume are working
again.
Derek
On 5/23/22 12:05 PM, Vincent Donnefort wrote:
> The DYING/STARTING callbacks are not expected to fail. However, as reported
> by Derek, drivers such as tboot are still free to return errors within
> those sections. In that case, there's nothing the hotplug machinery can do,
> so let's just proceed and log the failures.
>
> Fixes: 453e41085183 ("cpu/hotplug: Add cpuhp_invoke_callback_range()")
> Reported-by: Derek Dolney <z23@...teo.net>
> Signed-off-by: Vincent Donnefort <vdonnefort@...gle.com>
>
> ---
>
> v1 -> v2:
> - Commit message rewording.
> - More details in the warnings.
> - Some variable renaming.
>
> diff --git a/kernel/cpu.c b/kernel/cpu.c
> index bbad5e375d3b..c3617683459e 100644
> --- a/kernel/cpu.c
> +++ b/kernel/cpu.c
> @@ -663,21 +663,51 @@ static bool cpuhp_next_state(bool bringup,
> return true;
> }
>
> -static int cpuhp_invoke_callback_range(bool bringup,
> - unsigned int cpu,
> - struct cpuhp_cpu_state *st,
> - enum cpuhp_state target)
> +static int _cpuhp_invoke_callback_range(bool bringup,
> + unsigned int cpu,
> + struct cpuhp_cpu_state *st,
> + enum cpuhp_state target,
> + bool nofail)
> {
> enum cpuhp_state state;
> - int err = 0;
> + int ret = 0;
>
> while (cpuhp_next_state(bringup, &state, st, target)) {
> + int err;
> +
> err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
> - if (err)
> + if (!err)
> + continue;
> +
> + if (nofail) {
> + pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
> + cpu, bringup ? "UP" : "DOWN",
> + cpuhp_get_step(st->state)->name,
> + st->state, err);
> + ret = -1;
> + } else {
> + ret = err;
> break;
> + }
> }
>
> - return err;
> + return ret;
> +}
> +
> +static inline int cpuhp_invoke_callback_range(bool bringup,
> + unsigned int cpu,
> + struct cpuhp_cpu_state *st,
> + enum cpuhp_state target)
> +{
> + return _cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
> +}
> +
> +static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
> + unsigned int cpu,
> + struct cpuhp_cpu_state *st,
> + enum cpuhp_state target)
> +{
> + WARN_ON_ONCE(_cpuhp_invoke_callback_range(bringup, cpu, st, target, true));
> }
>
> static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
> @@ -999,7 +1029,6 @@ static int take_cpu_down(void *_param)
> struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
> enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
> int err, cpu = smp_processor_id();
> - int ret;
>
> /* Ensure this CPU doesn't handle any more interrupts. */
> err = __cpu_disable();
> @@ -1012,13 +1041,11 @@ static int take_cpu_down(void *_param)
> */
> WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
>
> - /* Invoke the former CPU_DYING callbacks */
> - ret = cpuhp_invoke_callback_range(false, cpu, st, target);
> -
> /*
> + * Invoke the former CPU_DYING callbacks
> * DYING must not fail!
> */
> - WARN_ON_ONCE(ret);
> + cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
>
> /* Give up timekeeping duties */
> tick_handover_do_timer();
> @@ -1296,16 +1323,14 @@ void notify_cpu_starting(unsigned int cpu)
> {
> struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
> enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
> - int ret;
>
> rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
> cpumask_set_cpu(cpu, &cpus_booted_once_mask);
> - ret = cpuhp_invoke_callback_range(true, cpu, st, target);
>
> /*
> * STARTING must not fail!
> */
> - WARN_ON_ONCE(ret);
> + cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
> }
>
> /*
>
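For readers who want to see the behaviour in isolation, here is a small
standalone sketch (plain userspace C, hypothetical names, not the kernel
helper itself) of the "nofail" callback-range pattern the patch introduces:
in nofail mode every failure is logged and the walk continues, otherwise the
first error stops the walk so the caller can roll back.

/*
 * Standalone illustration (not kernel code) of the nofail callback-range
 * pattern: in nofail mode log each failure and keep going; otherwise stop
 * at the first error. All names here are made up for the example.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_STATES 4

/* Per-state callback table; a NULL entry means "nothing to do". */
static int (*callbacks[NR_STATES])(unsigned int cpu);

static int invoke_callback_range(bool nofail, unsigned int cpu,
				 int first, int last)
{
	int state, ret = 0;

	for (state = first; state <= last; state++) {
		int err;

		if (!callbacks[state])
			continue;

		err = callbacks[state](cpu);
		if (!err)
			continue;

		if (nofail) {
			/* Log and keep going: these states must not bail out. */
			fprintf(stderr,
				"CPU %u state %d failed (%d), continuing\n",
				cpu, state, err);
			ret = -1;
		} else {
			/* Report the error so the caller can roll back. */
			ret = err;
			break;
		}
	}

	return ret;
}

/* Example "DYING-like" callback that fails, similar to what tboot hit. */
static int flaky_teardown(unsigned int cpu)
{
	(void)cpu;
	return -16; /* -EBUSY */
}

int main(void)
{
	callbacks[1] = flaky_teardown;

	/* nofail walk: the failure is logged, the remaining states still run. */
	if (invoke_callback_range(true, 0, 0, NR_STATES - 1))
		fprintf(stderr, "at least one callback failed\n");
	return 0;
}

The point mirrored from the patch is that the nofail walk never
short-circuits: a failing DYING/STARTING-style callback is reported, but the
remaining states still get their turn.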