[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230918144111.641369-11-paul@xen.org>
Date: Mon, 18 Sep 2023 14:41:08 +0000
From: Paul Durrant <paul@....org>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: Paul Durrant <pdurrant@...zon.com>,
David Woodhouse <dwmw@...zon.co.uk>,
Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
David Woodhouse <dwmw2@...radead.org>
Subject: [PATCH v3 10/13] KVM: selftests / xen: set KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID
From: Paul Durrant <pdurrant@...zon.com>
If the capability (KVM_XEN_HVM_CONFIG_EVTCHN_SEND) is present then set
the guest's vCPU id to match the chosen vcpu_info offset.
Also make some cosmetic fixes to the code for clarity.
Signed-off-by: Paul Durrant <pdurrant@...zon.com>
Reviewed-by: David Woodhouse <dwmw@...zon.co.uk>
---
Cc: Sean Christopherson <seanjc@...gle.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>
Cc: David Woodhouse <dwmw2@...radead.org>
v3:
- Rename VCPU_ID to XEN_VCPU_ID.
- Set vcpu_id before the shared_info page is set.
v2:
- New in this version.
---
.../selftests/kvm/x86_64/xen_shinfo_test.c | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 05898ad9f4d9..b0c3a00ea6a6 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -38,6 +38,8 @@
#define VCPU_INFO_VADDR (SHINFO_REGION_GVA + 0x40)
#define RUNSTATE_VADDR (SHINFO_REGION_GVA + PAGE_SIZE + PAGE_SIZE - 15)
+#define XEN_VCPU_ID 1 /* Must correspond to offset of VCPU_INFO_[V]ADDR */
+
#define EVTCHN_VECTOR 0x10
#define EVTCHN_TEST1 15
@@ -410,7 +412,7 @@ static void *juggle_shinfo_state(void *arg)
struct kvm_xen_hvm_attr cache_activate = {
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
- .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE
+ .u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE
};
struct kvm_xen_hvm_attr cache_deactivate = {
@@ -446,6 +448,7 @@ int main(int argc, char *argv[])
bool do_runstate_flag = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG);
bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
bool do_evtchn_tests = do_eventfd_tests && !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
+ bool has_vcpu_id = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
clock_gettime(CLOCK_REALTIME, &min_ts);
@@ -492,9 +495,17 @@ int main(int argc, char *argv[])
"Failed to read back RUNSTATE_UPDATE_FLAG attr");
}
+ if (has_vcpu_id) {
+ struct kvm_xen_vcpu_attr vid = {
+ .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID,
+ .u.vcpu_id = XEN_VCPU_ID,
+ };
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vid);
+ }
+
struct kvm_xen_hvm_attr ha = {
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
- .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE,
+ .u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE,
};
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha);
@@ -983,8 +994,8 @@ int main(int argc, char *argv[])
struct pvclock_wall_clock *wc;
struct pvclock_vcpu_time_info *ti, *ti2;
- wc = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0xc00);
- ti = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0x40 + 0x20);
+ wc = addr_gpa2hva(vm, SHINFO_ADDR + 0xc00);
+ ti = addr_gpa2hva(vm, VCPU_INFO_ADDR + 0x20);
ti2 = addr_gpa2hva(vm, PVTIME_ADDR);
if (verbose) {
--
2.39.2
Powered by blists - more mailing lists