Message-ID: <752cacbf-5268-6ea0-8c5d-36fb297789ee@intel.com>
Date:   Tue, 27 Dec 2022 08:59:13 +0800
From:   "Yang, Weijiang" <weijiang.yang@...el.com>
To:     Like Xu <like.xu.linux@...il.com>,
        Paolo Bonzini <pbonzini@...hat.com>
CC:     "Christopherson,, Sean" <seanjc@...gle.com>,
        "kvm@...r.kernel.org" <kvm@...r.kernel.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        Aaron Lewis <aaronlewis@...gle.com>
Subject: Re: [PATCH 1/3] KVM: x86: Omit PMU MSRs from KVM_GET_MSR_INDEX_LIST
 if !enable_pmu


On 12/26/2022 7:17 PM, Like Xu wrote:
> From: Like Xu <likexu@...cent.com>
>
> When the PMU is disabled, don't bother sharing the PMU MSRs with
> userspace through KVM_GET_MSR_INDEX_LIST.  Instead, filter them out
> so userspace doesn't have to keep track of them.
>
> Note that 'enable_pmu' is read-only, so userspace has no control over
> whether the PMU MSRs are included in the list or not.
>
> Suggested-by: Sean Christopherson <seanjc@...gle.com>
> Co-developed-by: Aaron Lewis <aaronlewis@...gle.com>
> Signed-off-by: Aaron Lewis <aaronlewis@...gle.com>
> Signed-off-by: Like Xu <likexu@...cent.com>
> ---
>   arch/x86/include/asm/kvm_host.h |  1 +
>   arch/x86/kvm/x86.c              | 22 ++++++++++++++++++++--
>   2 files changed, 21 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index f35f1ff4427b..2ed710b393eb 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -514,6 +514,7 @@ struct kvm_pmc {
>   #define MSR_ARCH_PERFMON_PERFCTR_MAX	(MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
>   #define MSR_ARCH_PERFMON_EVENTSEL_MAX	(MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
>   #define KVM_PMC_MAX_FIXED	3
> +#define MSR_ARCH_PERFMON_FIXED_CTR_MAX	(MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
>   #define KVM_AMD_PMC_MAX_GENERIC	6
>   struct kvm_pmu {
>   	unsigned nr_arch_gp_counters;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 5c3ce39cdccb..f570367463c8 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -7054,15 +7054,32 @@ static void kvm_init_msr_list(void)
>   				continue;
>   			break;
>   		case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
> -			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
> +			if (!enable_pmu || msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
>   			    min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
>   				continue;
>   			break;
>   		case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
> -			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
> +			if (!enable_pmu || msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
>   			    min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
>   				continue;
>   			break;
> +		case MSR_ARCH_PERFMON_FIXED_CTR0 ... MSR_ARCH_PERFMON_FIXED_CTR_MAX:
> +			if (!enable_pmu || msrs_to_save_all[i] - MSR_ARCH_PERFMON_FIXED_CTR0 >=
> +			    min(KVM_PMC_MAX_FIXED, kvm_pmu_cap.num_counters_fixed))
> +				continue;
> +			break;
> +		case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
> +		case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
> +		case MSR_CORE_PERF_FIXED_CTR_CTRL:
> +		case MSR_CORE_PERF_GLOBAL_STATUS:
> +		case MSR_CORE_PERF_GLOBAL_CTRL:
> +		case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +		case MSR_IA32_DS_AREA:
> +		case MSR_IA32_PEBS_ENABLE:
> +		case MSR_PEBS_DATA_CFG:
> +			if (!enable_pmu)
> +				continue;
> +			break;


I'd prefer to use a helper to wrap this hunk of PMU MSR checks and move the
helper into the "default" branch of the switch; it makes the code look nicer:

	default:
		if (!enable_pmu && !kvm_pmu_valid_msrlist(msr))
			continue;
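
For illustration, a rough sketch of what such a helper could look like,
assuming it folds in all of the checks this patch adds so the caller only
needs a single test (kvm_pmu_valid_msrlist is only the name suggested above,
not existing code):

	static bool kvm_pmu_valid_msrlist(u32 msr)
	{
		/* Sketch: keep non-PMU MSRs, drop PMU MSRs that aren't usable. */
		switch (msr) {
		case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
			return enable_pmu && msr - MSR_ARCH_PERFMON_PERFCTR0 <
			       min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp);
		case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
			return enable_pmu && msr - MSR_ARCH_PERFMON_EVENTSEL0 <
			       min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp);
		case MSR_ARCH_PERFMON_FIXED_CTR0 ... MSR_ARCH_PERFMON_FIXED_CTR_MAX:
			return enable_pmu && msr - MSR_ARCH_PERFMON_FIXED_CTR0 <
			       min(KVM_PMC_MAX_FIXED, kvm_pmu_cap.num_counters_fixed);
		case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
		case MSR_CORE_PERF_FIXED_CTR_CTRL:
		case MSR_CORE_PERF_GLOBAL_STATUS:
		case MSR_CORE_PERF_GLOBAL_CTRL:
		case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		case MSR_IA32_DS_AREA:
		case MSR_IA32_PEBS_ENABLE:
		case MSR_PEBS_DATA_CFG:
			return enable_pmu;
		default:
			return true;	/* not a PMU MSR, always keep it */
		}
	}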


>   		case MSR_IA32_XFD:
>   		case MSR_IA32_XFD_ERR:
>   			if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
> @@ -13468,3 +13485,4 @@ static void __exit kvm_x86_exit(void)
>   	 */
>   }
>   module_exit(kvm_x86_exit);
> +


Nit: this adds an extra newline at the end of the file.


