lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Date:   Tue, 25 Feb 2020 14:04:53 +0100
From:   Vitaly Kuznetsov <vkuznets@...hat.com>
To:     linmiaohe <linmiaohe@...wei.com>
Cc:     kvm@...r.kernel.org, linux-kernel@...r.kernel.org, x86@...nel.org,
        pbonzini@...hat.com, rkrcmar@...hat.com,
        sean.j.christopherson@...el.com, wanpengli@...cent.com,
        jmattson@...gle.com, joro@...tes.org, tglx@...utronix.de,
        mingo@...hat.com, bp@...en8.de, hpa@...or.com
Subject: Re: [PATCH] KVM: Fix some obsolete comments

linmiaohe <linmiaohe@...wei.com> writes:

> From: Miaohe Lin <linmiaohe@...wei.com>
>
> Remove some obsolete comments, fix wrong function name and description.
>
> Signed-off-by: Miaohe Lin <linmiaohe@...wei.com>
> ---
>  arch/x86/kvm/svm.c        | 3 ---
>  arch/x86/kvm/vmx/nested.c | 4 ++--
>  arch/x86/kvm/vmx/vmx.c    | 2 +-
>  3 files changed, 3 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index fd3fc9fbefff..ee114a9913eb 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -3228,9 +3228,6 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
>  	return NESTED_EXIT_CONTINUE;
>  }
>  
> -/*
> - * If this function returns true, this #vmexit was already handled
> - */
>  static int nested_svm_intercept(struct vcpu_svm *svm)
>  {

Thank you for the cleanup. I looked at nested_svm_intercept() and I see
room for improvement, e.g. (completely untested!):

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 76c24b3491f6..fcb26d64d3c7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3280,42 +3280,36 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
        case SVM_EXIT_IOIO:
                vmexit = nested_svm_intercept_ioio(svm);
                break;
-       case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
-               u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
-               if (svm->nested.intercept_cr & bit)
+       case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8:
+               if (svm->nested.intercept_cr &
+                   BIT(exit_code - SVM_EXIT_READ_CR0))
                        vmexit = NESTED_EXIT_DONE;
                break;
-       }
-       case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
-               u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
-               if (svm->nested.intercept_dr & bit)
+       case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7:
+               if (svm->nested.intercept_dr &
+                   BIT(exit_code - SVM_EXIT_READ_DR0))
                        vmexit = NESTED_EXIT_DONE;
                break;
-       }
-       case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
-               u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
-               if (svm->nested.intercept_exceptions & excp_bits) {
+       case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f:
+               if (svm->nested.intercept_exceptions &
+                   BIT(exit_code - SVM_EXIT_EXCP_BASE)) {
                        if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
                                vmexit = nested_svm_intercept_db(svm);
                        else
                                vmexit = NESTED_EXIT_DONE;
-               }
-               /* async page fault always cause vmexit */
-               else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
-                        svm->vcpu.arch.exception.nested_apf != 0)
+               } else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
+                          svm->vcpu.arch.exception.nested_apf != 0) {
+                       /* async page fault always cause vmexit */
                        vmexit = NESTED_EXIT_DONE;
+               }
                break;
-       }
-       case SVM_EXIT_ERR: {
+       case SVM_EXIT_ERR:
                vmexit = NESTED_EXIT_DONE;
                break;
-       }
-       default: {
-               u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
-               if (svm->nested.intercept & exit_bits)
+       default:
+               if (svm->nested.intercept & BIT_ULL(exit_code - SVM_EXIT_INTR))
                        vmexit = NESTED_EXIT_DONE;
        }
-       }
 
        return vmexit;
 }

Feel free to pick the stuff you like and split your changes to this function
into a separate patch.

>  	u32 exit_code = svm->vmcb->control.exit_code;
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 0946122a8d3b..46c5f63136a8 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -2960,7 +2960,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
>  	/*
>  	 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
>  	 * which is reserved to '1' by hardware.  GUEST_RFLAGS is guaranteed to
> -	 * be written (by preparve_vmcs02()) before the "real" VMEnter, i.e.
> +	 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
>  	 * there is no need to preserve other bits or save/restore the field.
>  	 */
>  	vmcs_writel(GUEST_RFLAGS, 0);
> @@ -4382,7 +4382,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
>   * Decode the memory-address operand of a vmx instruction, as recorded on an
>   * exit caused by such an instruction (run by a guest hypervisor).
>   * On success, returns 0. When the operand is invalid, returns 1 and throws
> - * #UD or #GP.
> + * #UD, #GP or #SS.

Oxford comma, anyone? :-)))

>   */
>  int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
>  			u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 69948aa1b127..8d91fa9acbb2 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -808,7 +808,7 @@ void update_exception_bitmap(struct kvm_vcpu *vcpu)
>  	if (to_vmx(vcpu)->rmode.vm86_active)
>  		eb = ~0;
>  	if (enable_ept)
> -		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
> +		eb &= ~(1u << PF_VECTOR);
>  
>  	/* When we are running a nested L2 guest and L1 specified for it a
>  	 * certain exception bitmap, we must trap the same exceptions and pass

All your changes look correct, so

Reviewed-by: Vitaly Kuznetsov <vkuznets@...hat.com>

-- 
Vitaly

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ