Message-ID: <Yf1zd4urw8Jro5mi@google.com>
Date: Fri, 4 Feb 2022 18:41:59 +0000
From: David Matlack <dmatlack@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
seanjc@...gle.com, vkuznets@...hat.com
Subject: Re: [PATCH 04/23] KVM: MMU: constify uses of struct kvm_mmu_role_regs
On Fri, Feb 04, 2022 at 06:56:59AM -0500, Paolo Bonzini wrote:
> struct kvm_mmu_role_regs is computed just once and then accessed. Use
> const to enforce this.
>
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
Reviewed-by: David Matlack <dmatlack@...gle.com>
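
As an aside (not part of the patch, and the struct/field names below are
stand-ins rather than the real KVM definitions): a minimal sketch of what
the const-qualification buys. Once the regs pointer is const, any stray
write to the snapshot is rejected at compile time instead of becoming a
silent bug:

	#include <stdbool.h>
	#include <stdint.h>

	/* Stand-in for struct kvm_mmu_role_regs: a read-once snapshot. */
	struct mmu_role_regs {
		uint64_t cr0;
		uint64_t cr4;
		uint64_t efer;
	};

	#define CR0_PG	(1ULL << 31)

	/* Read-only accessor: fine with a const-qualified pointer. */
	static inline bool is_cr0_pg(const struct mmu_role_regs *regs)
	{
		return !!(regs->cr0 & CR0_PG);
	}

	static int root_level(const struct mmu_role_regs *regs)
	{
		/*
		 * regs->cr0 = 0;
		 * ^ would now fail to build: assignment of member 'cr0'
		 *   in read-only object.
		 */
		return is_cr0_pg(regs) ? 4 : 0;
	}

	int main(void)
	{
		struct mmu_role_regs regs = { .cr0 = CR0_PG };

		return root_level(&regs) == 4 ? 0 : 1;
	}
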
> ---
> arch/x86/kvm/mmu/mmu.c | 19 +++++++++++--------
> 1 file changed, 11 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 0039b2f21286..3add9d8b0630 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -208,7 +208,7 @@ struct kvm_mmu_role_regs {
> * the single source of truth for the MMU's state.
> */
> #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \
> -static inline bool __maybe_unused ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
> +static inline bool __maybe_unused ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs)\
> { \
> return !!(regs->reg & flag); \
> }
> @@ -255,7 +255,7 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
> return regs;
> }
>
> -static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
> +static int role_regs_to_root_level(const struct kvm_mmu_role_regs *regs)
> {
> if (!____is_cr0_pg(regs))
> return 0;
> @@ -4666,7 +4666,7 @@ static void paging32_init_context(struct kvm_mmu *context)
> }
>
> static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
> - struct kvm_mmu_role_regs *regs)
> + const struct kvm_mmu_role_regs *regs)
> {
> union kvm_mmu_extended_role ext = {0};
>
> @@ -4687,7 +4687,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
> }
>
> static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
> - struct kvm_mmu_role_regs *regs,
> + const struct kvm_mmu_role_regs *regs,
> bool base_only)
> {
> union kvm_mmu_role role = {0};
> @@ -4723,7 +4723,8 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
>
> static union kvm_mmu_role
> kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
> - struct kvm_mmu_role_regs *regs, bool base_only)
> + const struct kvm_mmu_role_regs *regs,
> + bool base_only)
> {
> union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
>
> @@ -4769,7 +4770,8 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
>
> static union kvm_mmu_role
> kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
> - struct kvm_mmu_role_regs *regs, bool base_only)
> + const struct kvm_mmu_role_regs *regs,
> + bool base_only)
> {
> union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
>
> @@ -4782,7 +4784,8 @@ kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
>
> static union kvm_mmu_role
> kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
> - struct kvm_mmu_role_regs *regs, bool base_only)
> + const struct kvm_mmu_role_regs *regs,
> + bool base_only)
> {
> union kvm_mmu_role role =
> kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
> @@ -4940,7 +4943,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
> }
>
> static union kvm_mmu_role
> -kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
> +kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
> {
> union kvm_mmu_role role;
>
> --
> 2.31.1
>
>