[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CALMp9eTxs5nb9Ay0ELVa71cmA9VPzaMSuGgW_iM2tmAVvXs4Pg@mail.gmail.com>
Date: Tue, 16 Jun 2020 16:17:09 -0700
From: Jim Mattson <jmattson@...gle.com>
To: Babu Moger <babu.moger@....com>
Cc: Wanpeng Li <wanpengli@...cent.com>, Joerg Roedel <joro@...tes.org>,
"the arch/x86 maintainers" <x86@...nel.org>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
"H . Peter Anvin" <hpa@...or.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
LKML <linux-kernel@...r.kernel.org>,
kvm list <kvm@...r.kernel.org>
Subject: Re: [PATCH v2 2/3] KVM:SVM: Add extended intercept support
On Tue, Jun 16, 2020 at 3:03 PM Babu Moger <babu.moger@....com> wrote:
>
> The new intercept bits have been added in vmcb control
> area to support the interception of INVPCID instruction.
>
> The following bit is added to the VMCB layout control area
> to control intercept of INVPCID:
>
> Byte Offset Bit(s) Function
> 14h 2 intercept INVPCID
>
> Add the interfaces to support these extended interceptions.
> Also update the tracing for extended intercepts.
>
> AMD documentation for INVPCID feature is available at "AMD64
> Architecture Programmer’s Manual Volume 2: System Programming,
> Pub. 24593 Rev. 3.34(or later)"
>
> The documentation can be obtained at the links below:
> Link: https://www.amd.com/system/files/TechDocs/24593.pdf
> Link: https://bugzilla.kernel.org/show_bug.cgi?id=206537
Not your change, but this documentation is terrible. There is no
INVLPCID instruction, nor is there a PCID instruction.
> Signed-off-by: Babu Moger <babu.moger@....com>
> ---
> arch/x86/include/asm/svm.h | 3 ++-
> arch/x86/kvm/svm/nested.c | 6 +++++-
> arch/x86/kvm/svm/svm.c | 1 +
> arch/x86/kvm/svm/svm.h | 18 ++++++++++++++++++
> arch/x86/kvm/trace.h | 12 ++++++++----
> 5 files changed, 34 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index 8a1f5382a4ea..62649fba8908 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -61,7 +61,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
> u32 intercept_dr;
> u32 intercept_exceptions;
> u64 intercept;
> - u8 reserved_1[40];
> + u32 intercept_extended;
> + u8 reserved_1[36];
It seems like a more straightforward implementation would simply
change 'u64 intercept' to 'u32 intercept[3].'
> u16 pause_filter_thresh;
> u16 pause_filter_count;
> u64 iopm_base_pa;
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 8a6db11dcb43..7f6d0f2533e2 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -121,6 +121,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
> c->intercept_dr = h->intercept_dr;
> c->intercept_exceptions = h->intercept_exceptions;
> c->intercept = h->intercept;
> + c->intercept_extended = h->intercept_extended;
>
> if (g->int_ctl & V_INTR_MASKING_MASK) {
> /* We only want the cr8 intercept bits of L1 */
> @@ -142,6 +143,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
> c->intercept_dr |= g->intercept_dr;
> c->intercept_exceptions |= g->intercept_exceptions;
> c->intercept |= g->intercept;
> + c->intercept_extended |= g->intercept_extended;
> }
>
> static void copy_vmcb_control_area(struct vmcb_control_area *dst,
> @@ -151,6 +153,7 @@ static void copy_vmcb_control_area(struct vmcb_control_area *dst,
> dst->intercept_dr = from->intercept_dr;
> dst->intercept_exceptions = from->intercept_exceptions;
> dst->intercept = from->intercept;
> + dst->intercept_extended = from->intercept_extended;
> dst->iopm_base_pa = from->iopm_base_pa;
> dst->msrpm_base_pa = from->msrpm_base_pa;
> dst->tsc_offset = from->tsc_offset;
> @@ -433,7 +436,8 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
> trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
> nested_vmcb->control.intercept_cr >> 16,
> nested_vmcb->control.intercept_exceptions,
> - nested_vmcb->control.intercept);
> + nested_vmcb->control.intercept,
> + nested_vmcb->control.intercept_extended);
>
> /* Clear internal status */
> kvm_clear_exception_queue(&svm->vcpu);
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 9e333b91ff78..285e5e1ff518 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -2801,6 +2801,7 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
> pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
> pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
> pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
> + pr_err("%-20s%08x\n", "intercepts (extended):", control->intercept_extended);
> pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
> pr_err("%-20s%d\n", "pause filter threshold:",
> control->pause_filter_thresh);
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 6ac4c00a5d82..935d08fac03d 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -311,6 +311,24 @@ static inline void clr_intercept(struct vcpu_svm *svm, int bit)
> recalc_intercepts(svm);
> }
>
> +static inline void set_extended_intercept(struct vcpu_svm *svm, int bit)
> +{
> + struct vmcb *vmcb = get_host_vmcb(svm);
> +
> + vmcb->control.intercept_extended |= (1U << bit);
> +
> + recalc_intercepts(svm);
> +}
> +
> +static inline void clr_extended_intercept(struct vcpu_svm *svm, int bit)
> +{
> + struct vmcb *vmcb = get_host_vmcb(svm);
> +
> + vmcb->control.intercept_extended &= ~(1U << bit);
> +
> + recalc_intercepts(svm);
> +}
You wouldn't need these new functions if you defined 'u32
intercept[3],' as I suggested above. Just change set_intercept and
clr_intercept to use __set_bit and __clear_bit.
> static inline bool is_intercept(struct vcpu_svm *svm, int bit)
> {
> return (svm->vmcb->control.intercept & (1ULL << bit)) != 0;
> diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
> index b66432b015d2..5c841c42b33d 100644
> --- a/arch/x86/kvm/trace.h
> +++ b/arch/x86/kvm/trace.h
> @@ -544,14 +544,16 @@ TRACE_EVENT(kvm_nested_vmrun,
> );
>
> TRACE_EVENT(kvm_nested_intercepts,
> - TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
> - TP_ARGS(cr_read, cr_write, exceptions, intercept),
> + TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept,
> + __u32 extended),
> + TP_ARGS(cr_read, cr_write, exceptions, intercept, extended),
>
> TP_STRUCT__entry(
> __field( __u16, cr_read )
> __field( __u16, cr_write )
> __field( __u32, exceptions )
> __field( __u64, intercept )
> + __field( __u32, extended )
> ),
>
> TP_fast_assign(
> @@ -559,11 +561,13 @@ TRACE_EVENT(kvm_nested_intercepts,
> __entry->cr_write = cr_write;
> __entry->exceptions = exceptions;
> __entry->intercept = intercept;
> + __entry->extended = extended;
> ),
>
> - TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
> + TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx"
> + "intercept (extended): %08x",
> __entry->cr_read, __entry->cr_write, __entry->exceptions,
> - __entry->intercept)
> + __entry->intercept, __entry->extended)
> );
> /*
> * Tracepoint for #VMEXIT while nested
>
Powered by blists - more mailing lists