Message-ID: <4a30de3c6bc3a304ff45f671832480c548c4d8f0.camel@perches.com>
Date: Thu, 24 Oct 2019 09:47:07 -0700
From: Joe Perches <joe@...ches.com>
To: Vitaly Kuznetsov <vkuznets@...hat.com>,
linux-hyperv@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, x86@...nel.org,
"K. Y. Srinivasan" <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Stephen Hemminger <sthemmin@...rosoft.com>,
Sasha Levin <sashal@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
"H. Peter Anvin" <hpa@...or.com>,
Roman Kagan <rkagan@...tuozzo.com>,
Michael Kelley <mikelley@...rosoft.com>
Subject: Re: [PATCH] x86/hyper-v: micro-optimize send_ipi_one case
On Thu, 2019-10-24 at 17:21 +0200, Vitaly Kuznetsov wrote:
> When sending an IPI to a single CPU there is no need to deal with cpumasks.
> With a 2 CPU guest on WS2019 I'm seeing a minor (like 3%, 8043 -> 7761 CPU
> cycles) improvement with a smp_call_function_single() loop benchmark. The
> optimization, however, is tiny and straightforward. Also, send_ipi_one() is
> important for the PV spinlock kick.
>
> I was also wondering if it would make sense to switch to using a regular
> APIC IPI send for the CPU > 64 case but no, it is twice as expensive (12650
> CPU cycles for an __send_ipi_mask_ex() call, 26000 for orig_apic.send_IPI(cpu,
> vector)).
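For reference, a minimal sketch of the kind of smp_call_function_single()
loop benchmark described above (not the actual benchmark used in this
thread; the module name, iteration count and target CPU are illustrative
assumptions):

/*
 * ipi_bench: time repeated single-target IPIs to a fixed remote CPU via
 * smp_call_function_single() and report the average TSC cycles per call.
 */
#include <linux/module.h>
#include <linux/smp.h>
#include <asm/msr.h>

static void ipi_noop(void *info)
{
	/* empty handler executed on the target CPU */
}

static int __init ipi_bench_init(void)
{
	const int iters = 100000;	/* arbitrary iteration count */
	const int target = 1;		/* assumes CPU 1 is online */
	u64 start, end;
	int i;

	if (!cpu_online(target))
		return -ENODEV;

	start = rdtsc();
	for (i = 0; i < iters; i++)
		smp_call_function_single(target, ipi_noop, NULL, 1);
	end = rdtsc();

	pr_info("ipi_bench: ~%llu cycles per call\n", (end - start) / iters);
	return 0;
}

static void __exit ipi_bench_exit(void)
{
}

module_init(ipi_bench_init);
module_exit(ipi_bench_exit);
MODULE_LICENSE("GPL");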
style trivia:
> diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
[]
> @@ -194,10 +194,26 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
>
> static bool __send_ipi_one(int cpu, int vector)
> {
> - struct cpumask mask = CPU_MASK_NONE;
> + int ret;
>
> - cpumask_set_cpu(cpu, &mask);
> - return __send_ipi_mask(&mask, vector);
> + trace_hyperv_send_ipi_one(cpu, vector);
> +
> + if (unlikely(!hv_hypercall_pg))
> + return false;
> +
> + if (unlikely((vector < HV_IPI_LOW_VECTOR) ||
> + (vector > HV_IPI_HIGH_VECTOR)))
> + return false;
> +
> + if (cpu >= 64)
> + goto do_ex_hypercall;
Pretty odd to have a separate single-use goto
to a single return statement. Might be better
to use a direct return.
> +
> + ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector,
> + BIT_ULL(hv_cpu_number_to_vp_number(cpu)));
> + return ((ret == 0) ? true : false);
> +
> +do_ex_hypercall:
> + return __send_ipi_mask_ex(cpumask_of(cpu), vector);
> }
And the use of an automatic declaration of ret probably
isn't useful either.
Perhaps:
---
arch/x86/hyperv/hv_apic.c | 16 +++++++++++++---
arch/x86/include/asm/trace/hyperv.h | 15 +++++++++++++++
2 files changed, 28 insertions(+), 3 deletions(-)
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index e01078e9..16c65cd 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -194,10 +194,20 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
static bool __send_ipi_one(int cpu, int vector)
{
- struct cpumask mask = CPU_MASK_NONE;
+ trace_hyperv_send_ipi_one(cpu, vector);
- cpumask_set_cpu(cpu, &mask);
- return __send_ipi_mask(&mask, vector);
+ if (unlikely(!hv_hypercall_pg))
+ return false;
+
+ if (unlikely((vector < HV_IPI_LOW_VECTOR) ||
+ (vector > HV_IPI_HIGH_VECTOR)))
+ return false;
+
+ if (cpu >= 64)
+ return __send_ipi_mask_ex(cpumask_of(cpu), vector);
+
+ return !hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector,
+ BIT_ULL(hv_cpu_number_to_vp_number(cpu)));
}
static void hv_send_ipi(int cpu, int vector)
diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h
index ace464f..4d705cb 100644
--- a/arch/x86/include/asm/trace/hyperv.h
+++ b/arch/x86/include/asm/trace/hyperv.h
@@ -71,6 +71,21 @@ TRACE_EVENT(hyperv_send_ipi_mask,
__entry->ncpus, __entry->vector)
);
+TRACE_EVENT(hyperv_send_ipi_one,
+ TP_PROTO(int cpu,
+ int vector),
+ TP_ARGS(cpu, vector),
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(int, vector)
+ ),
+ TP_fast_assign(__entry->cpu = cpu;
+ __entry->vector = vector;
+ ),
+ TP_printk("cpu %d vector %x",
+ __entry->cpu, __entry->vector)
+ );
+
#endif /* CONFIG_HYPERV */
#undef TRACE_INCLUDE_PATH
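As a usage note, assuming a standard tracefs mount at
/sys/kernel/debug/tracing and that this header keeps TRACE_SYSTEM hyperv,
the added event could then be enabled and observed like any other
tracepoint in the hyperv group:

echo 1 > /sys/kernel/debug/tracing/events/hyperv/hyperv_send_ipi_one/enable
cat /sys/kernel/debug/tracing/trace_pipe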