Message-ID: <20220915000448.1674802-9-vannapurve@google.com>
Date: Thu, 15 Sep 2022 00:04:48 +0000
From: Vishal Annapurve <vannapurve@...gle.com>
To: x86@...nel.org, kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org
Cc: pbonzini@...hat.com, shuah@...nel.org, bgardon@...gle.com,
seanjc@...gle.com, oupton@...gle.com, peterx@...hat.com,
vkuznets@...hat.com, dmatlack@...gle.com,
Vishal Annapurve <vannapurve@...gle.com>
Subject: [V2 PATCH 8/8] KVM: selftests: x86: xen: Execute cpu specific vmcall instruction

Update the Xen specific hypercall invocations to execute the CPU
specific vmcall/vmmcall instruction, i.e. use VMMCALL when running on
AMD CPUs and VMCALL otherwise, instead of hardcoding VMCALL.
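
For reference, below is a minimal standalone sketch (not part of this
patch) of the vendor-check-plus-dispatch pattern that the new
vmcall_helper() in the diff follows. The cpu_is_amd()/hypercall() names
are illustrative only, and the hypercall itself can of course only be
issued from guest context, so main() merely reports the choice:

  /*
   * Illustrative sketch: pick VMMCALL on AMD (SVM) and VMCALL on
   * Intel (VMX), based on the CPUID vendor string.
   */
  #include <stdio.h>
  #include <string.h>
  #include <cpuid.h>

  int cpu_is_amd(void)
  {
  	unsigned int eax, ebx, ecx, edx;
  	char vendor[13] = { 0 };

  	/* CPUID leaf 0 returns the vendor string in EBX:EDX:ECX. */
  	__get_cpuid(0, &eax, &ebx, &ecx, &edx);
  	memcpy(vendor, &ebx, 4);
  	memcpy(vendor + 4, &edx, 4);
  	memcpy(vendor + 8, &ecx, 4);
  	return !strcmp(vendor, "AuthenticAMD");
  }

  unsigned long hypercall(unsigned long nr, unsigned long a0,
  			unsigned long a1)
  {
  	unsigned long ret;

  	/* SVM intercepts VMMCALL, VMX intercepts VMCALL. */
  	if (cpu_is_amd())
  		__asm__ __volatile__("vmmcall"
  				     : "=a" (ret)
  				     : "a" (nr), "D" (a0), "S" (a1));
  	else
  		__asm__ __volatile__("vmcall"
  				     : "=a" (ret)
  				     : "a" (nr), "D" (a0), "S" (a1));
  	return ret;
  }

  int main(void)
  {
  	/* Issuing the hypercall outside a guest would fault. */
  	printf("would use %s\n", cpu_is_amd() ? "vmmcall" : "vmcall");
  	return 0;
  }
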
Suggested-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Vishal Annapurve <vannapurve@...gle.com>
---
.../selftests/kvm/x86_64/xen_shinfo_test.c | 64 +++++++------------
.../selftests/kvm/x86_64/xen_vmcall_test.c | 14 ++--
2 files changed, 34 insertions(+), 44 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 8a5cb800f50e..92ed07f1c772 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -145,6 +145,23 @@ static void guest_wait_for_irq(void)
guest_saw_irq = false;
}
+static unsigned long vmcall_helper(unsigned long reg_a, unsigned long reg_di,
+ unsigned long reg_si)
+{
+ unsigned long ret;
+
+ if (is_amd_cpu())
+ __asm__ __volatile__ ("vmmcall" :
+ "=a" (ret) :
+ "a" (reg_a), "D" (reg_di), "S" (reg_si));
+ else
+ __asm__ __volatile__ ("vmcall" :
+ "=a" (ret) :
+ "a" (reg_a), "D" (reg_di), "S" (reg_si));
+
+ return ret;
+}
+
static void guest_code(void)
{
struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
@@ -217,12 +234,7 @@ static void guest_code(void)
* EVTCHNOP_send hypercall. */
unsigned long rax;
struct evtchn_send s = { .port = 127 };
- __asm__ __volatile__ ("vmcall" :
- "=a" (rax) :
- "a" (__HYPERVISOR_event_channel_op),
- "D" (EVTCHNOP_send),
- "S" (&s));
-
+ rax = vmcall_helper(__HYPERVISOR_event_channel_op, EVTCHNOP_send, (unsigned long)&s);
GUEST_ASSERT(rax == 0);
guest_wait_for_irq();
@@ -232,12 +244,7 @@ static void guest_code(void)
/* Deliver "outbound" event channel to an eventfd which
* happens to be one of our own irqfds. */
s.port = 197;
- __asm__ __volatile__ ("vmcall" :
- "=a" (rax) :
- "a" (__HYPERVISOR_event_channel_op),
- "D" (EVTCHNOP_send),
- "S" (&s));
-
+ rax = vmcall_helper(__HYPERVISOR_event_channel_op, EVTCHNOP_send, (unsigned long)&s);
GUEST_ASSERT(rax == 0);
guest_wait_for_irq();
@@ -245,10 +252,7 @@ static void guest_code(void)
GUEST_SYNC(13);
/* Set a timer 100ms in the future. */
- __asm__ __volatile__ ("vmcall" :
- "=a" (rax) :
- "a" (__HYPERVISOR_set_timer_op),
- "D" (rs->state_entry_time + 100000000));
+ rax = vmcall_helper(__HYPERVISOR_set_timer_op, (rs->state_entry_time + 100000000), 0);
GUEST_ASSERT(rax == 0);
GUEST_SYNC(14);
@@ -271,36 +275,21 @@ static void guest_code(void)
.timeout = 0,
};
- __asm__ __volatile__ ("vmcall" :
- "=a" (rax) :
- "a" (__HYPERVISOR_sched_op),
- "D" (SCHEDOP_poll),
- "S" (&p));
-
+ rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
GUEST_ASSERT(rax == 0);
GUEST_SYNC(17);
/* Poll for an unset port and wait for the timeout. */
p.timeout = 100000000;
- __asm__ __volatile__ ("vmcall" :
- "=a" (rax) :
- "a" (__HYPERVISOR_sched_op),
- "D" (SCHEDOP_poll),
- "S" (&p));
-
+ rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
GUEST_ASSERT(rax == 0);
GUEST_SYNC(18);
/* A timer will wake the masked port we're waiting on, while we poll */
p.timeout = 0;
- __asm__ __volatile__ ("vmcall" :
- "=a" (rax) :
- "a" (__HYPERVISOR_sched_op),
- "D" (SCHEDOP_poll),
- "S" (&p));
-
+ rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
GUEST_ASSERT(rax == 0);
GUEST_SYNC(19);
@@ -309,12 +298,7 @@ static void guest_code(void)
* actual interrupt, while we're polling on a different port. */
ports[0]++;
p.timeout = 0;
- __asm__ __volatile__ ("vmcall" :
- "=a" (rax) :
- "a" (__HYPERVISOR_sched_op),
- "D" (SCHEDOP_poll),
- "S" (&p));
-
+ rax = vmcall_helper(__HYPERVISOR_sched_op, SCHEDOP_poll, (unsigned long)&p);
GUEST_ASSERT(rax == 0);
guest_wait_for_irq();
diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
index 88914d48c65e..e78f1b5d3af8 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -37,10 +37,16 @@ static void guest_code(void)
register unsigned long r9 __asm__("r9") = ARGVALUE(6);
/* First a direct invocation of 'vmcall' */
- __asm__ __volatile__("vmcall" :
- "=a"(rax) :
- "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
- "r"(r10), "r"(r8), "r"(r9));
+ if (is_amd_cpu())
+ __asm__ __volatile__("vmmcall" :
+ "=a"(rax) :
+ "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
+ "r"(r10), "r"(r8), "r"(r9));
+ else
+ __asm__ __volatile__("vmcall" :
+ "=a"(rax) :
+ "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
+ "r"(r10), "r"(r8), "r"(r9));
GUEST_ASSERT(rax == RETVALUE);
/* Fill in the Xen hypercall page */
--
2.37.2.789.g6183377224-goog