From f06ebf20cd0115be33c38ce887ef6d28ad562183 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky Date: Sun, 3 Apr 2022 10:46:43 +0300 Subject: [PATCH 6/7] svm: move svm entry macros to svm_lib.h --- lib/x86/svm_lib.h | 68 +++++++++++++++++++++++++++++++++++++++++++++ x86/svm.c | 22 ++++++--------- x86/svm.h | 71 ++--------------------------------------------- x86/svm_tests.c | 9 +++--- 4 files changed, 85 insertions(+), 85 deletions(-) diff --git a/lib/x86/svm_lib.h b/lib/x86/svm_lib.h index 6d9a86aa..f682c679 100644 --- a/lib/x86/svm_lib.h +++ b/lib/x86/svm_lib.h @@ -71,4 +71,72 @@ u8* svm_get_io_bitmap(void); #define MSR_BITMAP_SIZE 8192 +struct x86_gpr_regs +{ + u64 rax; + u64 rbx; + u64 rcx; + u64 rdx; + u64 cr2; + u64 rbp; + u64 rsi; + u64 rdi; + + u64 r8; + u64 r9; + u64 r10; + u64 r11; + u64 r12; + u64 r13; + u64 r14; + u64 r15; + u64 rflags; +}; + +#define SAVE_GPR_C(regs) \ + "xchg %%rbx, %p[" #regs "]+0x8\n\t" \ + "xchg %%rcx, %p[" #regs "]+0x10\n\t" \ + "xchg %%rdx, %p[" #regs "]+0x18\n\t" \ + "xchg %%rbp, %p[" #regs "]+0x28\n\t" \ + "xchg %%rsi, %p[" #regs "]+0x30\n\t" \ + "xchg %%rdi, %p[" #regs "]+0x38\n\t" \ + "xchg %%r8, %p[" #regs "]+0x40\n\t" \ + "xchg %%r9, %p[" #regs "]+0x48\n\t" \ + "xchg %%r10, %p[" #regs "]+0x50\n\t" \ + "xchg %%r11, %p[" #regs "]+0x58\n\t" \ + "xchg %%r12, %p[" #regs "]+0x60\n\t" \ + "xchg %%r13, %p[" #regs "]+0x68\n\t" \ + "xchg %%r14, %p[" #regs "]+0x70\n\t" \ + "xchg %%r15, %p[" #regs "]+0x78\n\t" \ + +#define LOAD_GPR_C(regs) SAVE_GPR_C(regs) + +#define ASM_PRE_VMRUN_CMD(regs) \ + "vmload %%rax\n\t" \ + "mov %p[" #regs "]+0x80, %%r15\n\t" \ + "mov %%r15, 0x170(%%rax)\n\t" \ + "mov %p[" #regs "], %%r15\n\t" \ + "mov %%r15, 0x1f8(%%rax)\n\t" \ + LOAD_GPR_C(regs) \ + +#define ASM_POST_VMRUN_CMD(regs) \ + SAVE_GPR_C(regs) \ + "mov 0x170(%%rax), %%r15\n\t" \ + "mov %%r15, %p[" #regs "]+0x80\n\t" \ + "mov 0x1f8(%%rax), %%r15\n\t" \ + "mov %%r15, %p[" #regs "]\n\t" \ + "vmsave %%rax\n\t" \ + + +#define SVM_BARE_VMRUN(vmcb, regs) \ + asm 
volatile ( \ + ASM_PRE_VMRUN_CMD(regs) \ + "vmrun %%rax\n\t" \ + ASM_POST_VMRUN_CMD(regs) \ + : \ + : "a" (virt_to_phys(vmcb)), \ + [regs] "i" (&regs) \ + : "memory", "r15") + + #endif /* SRC_LIB_X86_SVM_LIB_H_ */ diff --git a/x86/svm.c b/x86/svm.c index 74c3931b..b2dbef75 100644 --- a/x86/svm.c +++ b/x86/svm.c @@ -77,9 +77,9 @@ static void test_thunk(struct svm_test *test) vmmcall(); } -struct regs regs; +struct x86_gpr_regs regs; -struct regs get_regs(void) +struct x86_gpr_regs get_regs(void) { return regs; } @@ -98,13 +98,7 @@ int __svm_vmrun(u64 rip) vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack)); regs.rdi = (ulong)v2_test; - asm volatile ( - ASM_PRE_VMRUN_CMD - "vmrun %%rax\n\t" \ - ASM_POST_VMRUN_CMD - : - : "a" (virt_to_phys(vmcb)) - : "memory", "r15"); + SVM_BARE_VMRUN(vmcb, regs); return (vmcb->control.exit_code); } @@ -118,6 +112,7 @@ extern u8 vmrun_rip; static noinline void test_run(struct svm_test *test) { + u64 vmcb_phys = virt_to_phys(vmcb); irq_disable(); @@ -136,18 +131,19 @@ static noinline void test_run(struct svm_test *test) "sti \n\t" "call *%c[PREPARE_GIF_CLEAR](%[test]) \n \t" "mov %[vmcb_phys], %%rax \n\t" - ASM_PRE_VMRUN_CMD + ASM_PRE_VMRUN_CMD(regs) ".global vmrun_rip\n\t" \ "vmrun_rip: vmrun %%rax\n\t" \ - ASM_POST_VMRUN_CMD + ASM_POST_VMRUN_CMD(regs) "cli \n\t" "stgi" : // inputs clobbered by the guest: "=D" (the_test), // first argument register "=b" (the_vmcb) // callee save register! 
: [test] "0" (the_test), - [vmcb_phys] "1"(the_vmcb), - [PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear)) + [vmcb_phys] "1"(the_vmcb), + [PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear)), + [regs] "i"(&regs) : "rax", "rcx", "rdx", "rsi", "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15", "memory"); diff --git a/x86/svm.h b/x86/svm.h index 4c609795..7cc3b690 100644 --- a/x86/svm.h +++ b/x86/svm.h @@ -23,28 +23,10 @@ struct svm_test { bool on_vcpu_done; }; -struct regs { - u64 rax; - u64 rbx; - u64 rcx; - u64 rdx; - u64 cr2; - u64 rbp; - u64 rsi; - u64 rdi; - u64 r8; - u64 r9; - u64 r10; - u64 r11; - u64 r12; - u64 r13; - u64 r14; - u64 r15; - u64 rflags; -}; - typedef void (*test_guest_func)(struct svm_test *); +extern struct x86_gpr_regs regs; + bool smp_supported(void); bool default_supported(void); void default_prepare(struct svm_test *test); @@ -53,7 +35,7 @@ bool default_finished(struct svm_test *test); int get_test_stage(struct svm_test *test); void set_test_stage(struct svm_test *test, int s); void inc_test_stage(struct svm_test *test); -struct regs get_regs(void); +struct x86_gpr_regs get_regs(void); int __svm_vmrun(u64 rip); void __svm_bare_vmrun(void); int svm_vmrun(void); @@ -61,51 +43,4 @@ void test_set_guest(test_guest_func func); extern struct vmcb *vmcb; extern struct svm_test svm_tests[]; - - -#define SAVE_GPR_C \ - "xchg %%rbx, regs+0x8\n\t" \ - "xchg %%rcx, regs+0x10\n\t" \ - "xchg %%rdx, regs+0x18\n\t" \ - "xchg %%rbp, regs+0x28\n\t" \ - "xchg %%rsi, regs+0x30\n\t" \ - "xchg %%rdi, regs+0x38\n\t" \ - "xchg %%r8, regs+0x40\n\t" \ - "xchg %%r9, regs+0x48\n\t" \ - "xchg %%r10, regs+0x50\n\t" \ - "xchg %%r11, regs+0x58\n\t" \ - "xchg %%r12, regs+0x60\n\t" \ - "xchg %%r13, regs+0x68\n\t" \ - "xchg %%r14, regs+0x70\n\t" \ - "xchg %%r15, regs+0x78\n\t" - -#define LOAD_GPR_C SAVE_GPR_C - -#define ASM_PRE_VMRUN_CMD \ - "vmload %%rax\n\t" \ - "mov regs+0x80, %%r15\n\t" \ - "mov %%r15, 0x170(%%rax)\n\t" \ - "mov 
regs, %%r15\n\t" \ - "mov %%r15, 0x1f8(%%rax)\n\t" \ - LOAD_GPR_C \ - -#define ASM_POST_VMRUN_CMD \ - SAVE_GPR_C \ - "mov 0x170(%%rax), %%r15\n\t" \ - "mov %%r15, regs+0x80\n\t" \ - "mov 0x1f8(%%rax), %%r15\n\t" \ - "mov %%r15, regs\n\t" \ - "vmsave %%rax\n\t" \ - - - -#define SVM_BARE_VMRUN \ - asm volatile ( \ - ASM_PRE_VMRUN_CMD \ - "vmrun %%rax\n\t" \ - ASM_POST_VMRUN_CMD \ - : \ - : "a" (virt_to_phys(vmcb)) \ - : "memory", "r15") \ - #endif diff --git a/x86/svm_tests.c b/x86/svm_tests.c index 07ac01ff..cb47fb02 100644 --- a/x86/svm_tests.c +++ b/x86/svm_tests.c @@ -3147,6 +3147,7 @@ into: static void svm_into_test(void) { handle_exception(OF_VECTOR, guest_test_of_handler); + test_set_guest(svm_of_test_guest); report(svm_vmrun() == SVM_EXIT_VMMCALL && of_test_counter == 1, "#OF is generated in L2 exception handler0"); @@ -3351,7 +3352,7 @@ static void svm_lbrv_test1(void) wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); DO_BRANCH(host_branch1); - SVM_BARE_VMRUN; + SVM_BARE_VMRUN(vmcb,regs); dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); if (vmcb->control.exit_code != SVM_EXIT_VMMCALL) { @@ -3374,7 +3375,7 @@ static void svm_lbrv_test2(void) wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); DO_BRANCH(host_branch2); wrmsr(MSR_IA32_DEBUGCTLMSR, 0); - SVM_BARE_VMRUN; + SVM_BARE_VMRUN(vmcb,regs); dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); wrmsr(MSR_IA32_DEBUGCTLMSR, 0); @@ -3402,7 +3403,7 @@ static void svm_lbrv_nested_test1(void) wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); DO_BRANCH(host_branch3); - SVM_BARE_VMRUN; + SVM_BARE_VMRUN(vmcb,regs); dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); wrmsr(MSR_IA32_DEBUGCTLMSR, 0); @@ -3437,7 +3438,7 @@ static void svm_lbrv_nested_test2(void) wrmsr(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR); DO_BRANCH(host_branch4); - SVM_BARE_VMRUN; + SVM_BARE_VMRUN(vmcb,regs); dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); wrmsr(MSR_IA32_DEBUGCTLMSR, 0); -- 2.26.3