Message-ID: <fcdfd388-60f3-2c71-646e-5638ee0b5dde@oracle.com>
Date: Thu, 22 Jul 2021 08:07:59 -0700
From: Dongli Zhang <dongli.zhang@...cle.com>
To: Li RongQing <lirongqing@...du.com>, pbonzini@...hat.com,
mingo@...hat.com, peterz@...radead.org, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] KVM: Consider SMT idle status when halt polling

Hi RongQing,

Would you mind sharing any performance data that demonstrates how much
performance can be improved?

Thank you very much!

Dongli Zhang

On 7/21/21 8:58 PM, Li RongQing wrote:
> SMT siblings share caches and other hardware, so halt polling on one
> sibling will degrade the other sibling's performance if that sibling
> is busy.
>
> Signed-off-by: Li RongQing <lirongqing@...du.com>
> ---
>  include/linux/kvm_host.h |  5 ++++-
>  include/linux/sched.h    | 17 +++++++++++++++++
>  kernel/sched/fair.c      | 17 -----------------
>  3 files changed, 21 insertions(+), 18 deletions(-)
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index ae7735b..15b3ef4 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -269,7 +269,10 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
>
> static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
> {
> -	return single_task_running() && !need_resched() && ktime_before(cur, stop);
> +	return single_task_running() &&
> +		!need_resched() &&
> +		ktime_before(cur, stop) &&
> +		is_core_idle(raw_smp_processor_id());
> }
>
> /*
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index ec8d07d..c333218 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -34,6 +34,7 @@
> #include <linux/rseq.h>
> #include <linux/seqlock.h>
> #include <linux/kcsan.h>
> +#include <linux/topology.h>
> #include <asm/kmap_size.h>
>
> /* task_struct member predeclarations (sorted alphabetically): */
> @@ -2191,6 +2192,22 @@ int sched_trace_rq_nr_running(struct rq *rq);
>
> const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
>
> +static inline bool is_core_idle(int cpu)
> +{
> +#ifdef CONFIG_SCHED_SMT
> +	int sibling;
> +
> +	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
> +		if (cpu == sibling)
> +			continue;
> +
> +		if (!idle_cpu(sibling))
> +			return false;
> +	}
> +#endif
> +	return true;
> +}
> +
> #ifdef CONFIG_SCHED_CORE
> extern void sched_core_free(struct task_struct *tsk);
> extern void sched_core_fork(struct task_struct *p);
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 44c4520..5b0259c 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -1477,23 +1477,6 @@ struct numa_stats {
> int idle_cpu;
> };
>
> -static inline bool is_core_idle(int cpu)
> -{
> -#ifdef CONFIG_SCHED_SMT
> -	int sibling;
> -
> -	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
> -		if (cpu == sibling)
> -			continue;
> -
> -		if (!idle_cpu(sibling))
> -			return false;
> -	}
> -#endif
> -
> -	return true;
> -}
> -
> struct task_numa_env {
> struct task_struct *p;
>
>
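
For illustration, below is a minimal user-space sketch of the sibling-idle
check that the patch folds into kvm_vcpu_can_poll(). The smt_sibling[] table
and cpu_is_idle[] array are hypothetical stand-ins for the kernel's
cpu_smt_mask() and idle_cpu() helpers; the point is only that halt polling
continues while every SMT sibling of the polling CPU is idle.

/* sketch.c - build with: gcc -Wall sketch.c */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Pretend CPUs 0/1 and 2/3 are SMT sibling pairs. */
static const int smt_sibling[NR_CPUS] = { 1, 0, 3, 2 };

/* Pretend per-CPU idle state: CPU 1 is busy, the rest are idle. */
static const bool cpu_is_idle[NR_CPUS] = { true, false, true, true };

/* The core is idle only if every sibling other than @cpu is idle. */
static bool is_core_idle(int cpu)
{
	int sibling = smt_sibling[cpu];

	if (sibling != cpu && !cpu_is_idle[sibling])
		return false;

	return true;
}

int main(void)
{
	/* CPU 0's sibling (CPU 1) is busy: stop polling and yield the core. */
	printf("core of cpu0 idle: %d\n", is_core_idle(0));	/* prints 0 */

	/* CPU 2's sibling (CPU 3) is idle: polling may continue. */
	printf("core of cpu2 idle: %d\n", is_core_idle(2));	/* prints 1 */

	return 0;
}

With the patch, the first case is the one where the new is_core_idle() check
makes kvm_vcpu_can_poll() return false, so the busy sibling gets the shared
core resources back instead of competing with a polling vCPU.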