Message-ID: <ZGRJjaW1fREfBD2M@yzhao56-desk.sh.intel.com>
Date:   Wed, 17 May 2023 11:27:09 +0800
From:   Yan Zhao <yan.y.zhao@...el.com>
To:     Sean Christopherson <seanjc@...gle.com>
CC:     Paolo Bonzini <pbonzini@...hat.com>,
        Zhenyu Wang <zhenyuw@...ux.intel.com>,
        Zhi Wang <zhi.a.wang@...el.com>, <kvm@...r.kernel.org>,
        <intel-gvt-dev@...ts.freedesktop.org>,
        <intel-gfx@...ts.freedesktop.org>, <linux-kernel@...r.kernel.org>,
        Ben Gardon <bgardon@...gle.com>
Subject: Re: [PATCH v3 21/28] KVM: x86/mmu: Use page-track notifiers iff
 there are external users

Reviewed-by: Yan Zhao <yan.y.zhao@...el.com>

On Fri, May 12, 2023 at 05:35:53PM -0700, Sean Christopherson wrote:
> Disable the page-track notifier code at compile time if there are no
> external users, i.e. if CONFIG_KVM_EXTERNAL_WRITE_TRACKING=n.  KVM itself
> now hooks emulated writes directly instead of relying on the page-track
> mechanism.
> 
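
If I'm reading the end state correctly, the emulated-write path now boils
down to the wrapper below (this just restates the page_track.h hunk further
down, with comments added while reviewing):

	/* Always built; called from the emulator's write paths. */
	static inline void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
						const u8 *new, int bytes)
	{
		/*
		 * Notify external users (e.g. KVMGT).  This becomes an empty
		 * stub when CONFIG_KVM_EXTERNAL_WRITE_TRACKING=n.
		 */
		__kvm_page_track_write(vcpu->kvm, gpa, new, bytes);

		/* KVM's own shadow-MMU hook, now invoked directly. */
		kvm_mmu_track_write(vcpu, gpa, new, bytes);
	}
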
> Provide a stub for "struct kvm_page_track_notifier_node" so that including
> headers directly from the command line, e.g. for testing include guards,
> doesn't fail due to a struct having an incomplete type.
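
To make the "incomplete type" point concrete: an external user embeds the
node by value, so the type must be complete even in configs where the
tracking code itself is compiled out.  A minimal sketch of such a user is
below; the demo_* names are made up for illustration, only the
kvm_page_track_* API and the track_write signature are taken from the header:

	#include <linux/kvm_host.h>
	#include <asm/kvm_page_track.h>

	struct demo_gpu_vm {
		struct kvm *kvm;
		/* Embedded by value: requires a complete type, hence the stub. */
		struct kvm_page_track_notifier_node track_node;
	};

	static void demo_track_write(gpa_t gpa, const u8 *new, int bytes,
				     struct kvm_page_track_notifier_node *node)
	{
		struct demo_gpu_vm *vm = container_of(node, struct demo_gpu_vm,
						      track_node);

		/* Invalidate any shadow state covering [gpa, gpa + bytes). */
		pr_debug("demo: %d byte guest write at gpa 0x%llx (vm %p)\n",
			 bytes, gpa, vm);
	}

	static void demo_attach(struct demo_gpu_vm *vm)
	{
		vm->track_node.track_write = demo_track_write;
		kvm_page_track_register_notifier(vm->kvm, &vm->track_node);
	}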
> 
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
>  arch/x86/include/asm/kvm_host.h       |  2 ++
>  arch/x86/include/asm/kvm_page_track.h | 22 +++++++++++++-------
>  arch/x86/kvm/mmu/page_track.c         | 10 ++++-----
>  arch/x86/kvm/mmu/page_track.h         | 29 +++++++++++++++++++++++----
>  4 files changed, 47 insertions(+), 16 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 113598d3e886..5ce06a75d3de 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1247,7 +1247,9 @@ struct kvm_arch {
>  	 * create an NX huge page (without hanging the guest).
>  	 */
>  	struct list_head possible_nx_huge_pages;
> +#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
>  	struct kvm_page_track_notifier_head track_notifier_head;
> +#endif
>  	/*
>  	 * Protects marking pages unsync during page faults, as TDP MMU page
>  	 * faults only take mmu_lock for read.  For simplicity, the unsync
> diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
> index 76c0070dfe2a..61adb07b5927 100644
> --- a/arch/x86/include/asm/kvm_page_track.h
> +++ b/arch/x86/include/asm/kvm_page_track.h
> @@ -9,6 +9,14 @@ enum kvm_page_track_mode {
>  	KVM_PAGE_TRACK_MAX,
>  };
>  
> +void kvm_slot_page_track_add_page(struct kvm *kvm,
> +				  struct kvm_memory_slot *slot, gfn_t gfn,
> +				  enum kvm_page_track_mode mode);
> +void kvm_slot_page_track_remove_page(struct kvm *kvm,
> +				     struct kvm_memory_slot *slot, gfn_t gfn,
> +				     enum kvm_page_track_mode mode);
> +
> +#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
>  /*
>   * The notifier represented by @kvm_page_track_notifier_node is linked into
>   * the head which will be notified when guest is triggering the track event.
> @@ -48,18 +56,18 @@ struct kvm_page_track_notifier_node {
>  				    struct kvm_page_track_notifier_node *node);
>  };
>  
> -void kvm_slot_page_track_add_page(struct kvm *kvm,
> -				  struct kvm_memory_slot *slot, gfn_t gfn,
> -				  enum kvm_page_track_mode mode);
> -void kvm_slot_page_track_remove_page(struct kvm *kvm,
> -				     struct kvm_memory_slot *slot, gfn_t gfn,
> -				     enum kvm_page_track_mode mode);
> -
>  void
>  kvm_page_track_register_notifier(struct kvm *kvm,
>  				 struct kvm_page_track_notifier_node *n);
>  void
>  kvm_page_track_unregister_notifier(struct kvm *kvm,
>  				   struct kvm_page_track_notifier_node *n);
> +#else
> +/*
> + * Allow defining a node in a structure even if page tracking is disabled, e.g.
> + * to play nice with testing headers via direct inclusion from the command line.
> + */
> +struct kvm_page_track_notifier_node {};
> +#endif /* CONFIG_KVM_EXTERNAL_WRITE_TRACKING */
>  
>  #endif
> diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
> index e15329d48f95..b20aad7ac3fe 100644
> --- a/arch/x86/kvm/mmu/page_track.c
> +++ b/arch/x86/kvm/mmu/page_track.c
> @@ -194,6 +194,7 @@ bool kvm_slot_page_track_is_active(struct kvm *kvm,
>  	return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
>  }
>  
> +#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
>  void kvm_page_track_cleanup(struct kvm *kvm)
>  {
>  	struct kvm_page_track_notifier_head *head;
> @@ -255,14 +256,13 @@ EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
>   * The node should figure out if the written page is the one that node is
>   * interested in by itself.
>   */
> -void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
> -			  int bytes)
> +void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes)
>  {
>  	struct kvm_page_track_notifier_head *head;
>  	struct kvm_page_track_notifier_node *n;
>  	int idx;
>  
> -	head = &vcpu->kvm->arch.track_notifier_head;
> +	head = &kvm->arch.track_notifier_head;
>  
>  	if (hlist_empty(&head->track_notifier_list))
>  		return;
> @@ -273,8 +273,6 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
>  		if (n->track_write)
>  			n->track_write(gpa, new, bytes, n);
>  	srcu_read_unlock(&head->track_srcu, idx);
> -
> -	kvm_mmu_track_write(vcpu, gpa, new, bytes);
>  }
>  
>  /*
> @@ -299,3 +297,5 @@ void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
>  			n->track_remove_region(slot->base_gfn, slot->npages, n);
>  	srcu_read_unlock(&head->track_srcu, idx);
>  }
> +
> +#endif
> diff --git a/arch/x86/kvm/mmu/page_track.h b/arch/x86/kvm/mmu/page_track.h
> index 89712f123ad3..931b26b8fc8f 100644
> --- a/arch/x86/kvm/mmu/page_track.h
> +++ b/arch/x86/kvm/mmu/page_track.h
> @@ -6,8 +6,6 @@
>  
>  #include <asm/kvm_page_track.h>
>  
> -int kvm_page_track_init(struct kvm *kvm);
> -void kvm_page_track_cleanup(struct kvm *kvm);
>  
>  bool kvm_page_track_write_tracking_enabled(struct kvm *kvm);
>  int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot);
> @@ -21,13 +19,36 @@ bool kvm_slot_page_track_is_active(struct kvm *kvm,
>  				   const struct kvm_memory_slot *slot,
>  				   gfn_t gfn, enum kvm_page_track_mode mode);
>  
> -void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
> -			  int bytes);
> +#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
> +int kvm_page_track_init(struct kvm *kvm);
> +void kvm_page_track_cleanup(struct kvm *kvm);
> +
> +void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes);
>  void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot);
>  
>  static inline bool kvm_page_track_has_external_user(struct kvm *kvm)
>  {
>  	return hlist_empty(&kvm->arch.track_notifier_head.track_notifier_list);
>  }
> +#else
> +static inline int kvm_page_track_init(struct kvm *kvm) { return 0; }
> +static inline void kvm_page_track_cleanup(struct kvm *kvm) { }
> +
> +static inline void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa,
> +					  const u8 *new, int bytes) { }
> +static inline void kvm_page_track_delete_slot(struct kvm *kvm,
> +					      struct kvm_memory_slot *slot) { }
> +
> +static inline bool kvm_page_track_has_external_user(struct kvm *kvm) { return false; }
> +
> +#endif /* CONFIG_KVM_EXTERNAL_WRITE_TRACKING */
> +
> +static inline void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
> +					const u8 *new, int bytes)
> +{
> +	__kvm_page_track_write(vcpu->kvm, gpa, new, bytes);
> +
> +	kvm_mmu_track_write(vcpu, gpa, new, bytes);
> +}
>  
>  #endif /* __KVM_X86_PAGE_TRACK_H */
> -- 
> 2.40.1.606.ga4b1b128d6-goog
> 
