[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <f69c5409-1b5e-734a-da09-e96010334329@huawei.com>
Date: Fri, 19 Feb 2021 21:08:47 +0800
From: Yunfeng Ye <yeyunfeng@...wei.com>
To: Frederic Weisbecker <frederic@...nel.org>
CC: <fweisbec@...il.com>, <tglx@...utronix.de>, <mingo@...nel.org>,
<linux-kernel@...r.kernel.org>, Shiyuan Hu <hushiyuan@...wei.com>,
Hewenliang <hewenliang4@...wei.com>
Subject: Re: nohz: Update tick instead of restarting tick in
tick_nohz_idle_exit()
On 2021/2/19 20:16, Frederic Weisbecker wrote:
> On Mon, Nov 23, 2020 at 09:22:08PM +0800, Yunfeng Ye wrote:
>> In realtime scenarios, the "nohz_full" parameter is configured. Tick
>> interference is not expected when there is only one realtime thread.
>> But when the idle thread is switched to the realtime thread, the tick
>> timer is restarted always.
>>
>> So on the nohz full mode, it is unnecessary to restart the tick timer
>> when there is only one realtime thread. Adding can_stop_full_tick()
>> before restarting the tick, if it return true, keep tick stopped.
>>
>> Signed-off-by: Yunfeng Ye <yeyunfeng@...wei.com>
>
> Hi,
>
> After reworking the codebase a bit, I've edited your patch and
> changelog and then queued it. I'll post it in a series after the
> merge window. See the result:
>
Ok, thanks. My first version of the patch, which was never sent, was somewhat like this:
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cc7cba20382e..e793c8f675e6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -908,21 +908,24 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
tick_nohz_restart(ts, now);
}
-static void tick_nohz_full_update_tick(struct tick_sched *ts)
+static int tick_nohz_full_update_tick(struct tick_sched *ts, ktime_t now)
{
#ifdef CONFIG_NO_HZ_FULL
int cpu = smp_processor_id();
if (!tick_nohz_full_cpu(cpu))
- return;
+ return -EINVAL;
if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
- return;
+ return -EINVAL;
if (can_stop_full_tick(cpu, ts))
tick_nohz_stop_sched_tick(ts, cpu);
else if (ts->tick_stopped)
- tick_nohz_restart_sched_tick(ts, ktime_get());
+ tick_nohz_restart_sched_tick(ts, now);
+ return 0;
+#else
+ return -EINVAL;
#endif
}
@@ -1080,7 +1083,7 @@ void tick_nohz_irq_exit(void)
if (ts->inidle)
tick_nohz_start_idle(ts);
else
- tick_nohz_full_update_tick(ts);
+ tick_nohz_full_update_tick(ts, ktime_get());
}
/**
@@ -1208,6 +1211,13 @@ void tick_nohz_idle_restart_tick(void)
__tick_nohz_idle_restart_tick(ts, ktime_get());
}
+static void tick_nohz_update_tick(struct tick_sched *ts, ktime_t now)
+{
+	if (tick_nohz_full_update_tick(ts, now) < 0)
+ tick_nohz_restart_sched_tick(ts, now);
+ tick_nohz_account_idle_ticks(ts);
+}
+
/**
* tick_nohz_idle_exit - restart the idle tick from the idle task
*
@@ -1237,7 +1247,7 @@ void tick_nohz_idle_exit(void)
tick_nohz_stop_idle(ts, now);
if (tick_stopped)
- __tick_nohz_idle_restart_tick(ts, now);
+ tick_nohz_update_tick(ts, now);
local_irq_enable();
}
> ---
> From: Yunfeng Ye <yeyunfeng@...wei.com>
> Date: Tue, 9 Feb 2021 23:59:19 +0100
> Subject: [PATCH] tick/nohz: Conditionally restart tick on idle exit
>
> In nohz_full mode, switching from idle to a task will unconditionally
> issue a tick restart. If the task is alone in the runqueue or is the
> highest priority, the tick will fire once then eventually stop. But that
> alone is still undesired noise.
>
> Therefore, only restart the tick on idle exit when it's strictly
> necessary.
>
> Signed-off-by: Yunfeng Ye <yeyunfeng@...wei.com>
> Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
> ---
> kernel/time/tick-sched.c | 44 ++++++++++++++++++++++++----------------
> 1 file changed, 27 insertions(+), 17 deletions(-)
>
> diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
> index 3e272490fe2e..79796286a4ba 100644
> --- a/kernel/time/tick-sched.c
> +++ b/kernel/time/tick-sched.c
> @@ -923,24 +923,28 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
> tick_nohz_restart(ts, now);
> }
>
> -static void tick_nohz_full_update_tick(struct tick_sched *ts)
> +static void __tick_nohz_full_update_tick(struct tick_sched *ts,
> + ktime_t now)
> {
> #ifdef CONFIG_NO_HZ_FULL
> - int cpu;
> + int cpu = smp_processor_id();
>
> + if (can_stop_full_tick(cpu, ts))
> + tick_nohz_stop_sched_tick(ts, cpu);
> + else if (ts->tick_stopped)
> + tick_nohz_restart_sched_tick(ts, now);
> +#endif
> +}
> +
> +static void tick_nohz_full_update_tick(struct tick_sched *ts)
> +{
> if (!tick_nohz_full_this_cpu())
> return;
>
> if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
> return;
>
> - cpu = smp_processor_id();
> -
> - if (can_stop_full_tick(cpu, ts))
> - tick_nohz_stop_sched_tick(ts, cpu);
> - else if (ts->tick_stopped)
> - tick_nohz_restart_sched_tick(ts, ktime_get());
> -#endif
> + __tick_nohz_full_update_tick(ts, ktime_get());
> }
>
> static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
> @@ -1210,18 +1214,24 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
> #endif
> }
>
> -static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now)
> +void tick_nohz_idle_restart_tick(void)
> {
> - tick_nohz_restart_sched_tick(ts, now);
> - tick_nohz_account_idle_ticks(ts);
> + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
> +
> + if (ts->tick_stopped) {
> + tick_nohz_restart_sched_tick(ts, ktime_get());
> + tick_nohz_account_idle_ticks(ts);
> + }
> }
>
> -void tick_nohz_idle_restart_tick(void)
> +static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
> {
> - struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
> + if (tick_nohz_full_this_cpu())
> + __tick_nohz_full_update_tick(ts, now);
> + else
> + tick_nohz_restart_sched_tick(ts, now);
>
> - if (ts->tick_stopped)
> - __tick_nohz_idle_restart_tick(ts, ktime_get());
> + tick_nohz_account_idle_ticks(ts);
> }
>
> /**
> @@ -1253,7 +1263,7 @@ void tick_nohz_idle_exit(void)
> tick_nohz_stop_idle(ts, now);
>
> if (tick_stopped)
> - __tick_nohz_idle_restart_tick(ts, now);
> + tick_nohz_idle_update_tick(ts, now);
>
> local_irq_enable();
> }
>
Powered by blists - more mailing lists