Message-ID: <ZC90Ni/DaoObtE7o@google.com>
Date:   Thu, 6 Apr 2023 18:39:02 -0700
From:   Sean Christopherson <seanjc@...gle.com>
To:     Like Xu <like.xu.linux@...il.com>
Cc:     Paolo Bonzini <pbonzini@...hat.com>, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 06/12] KVM: x86/pmu: Make part of the Intel v2 PMU MSRs
 handling x86 generic

On Tue, Feb 14, 2023, Like Xu wrote:
> @@ -574,11 +569,61 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
>  
>  int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  {
> +	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> +	u32 msr = msr_info->index;
> +
> +	switch (msr) {
> +	case MSR_CORE_PERF_GLOBAL_STATUS:
> +		msr_info->data = pmu->global_status;
> +		return 0;
> +	case MSR_CORE_PERF_GLOBAL_CTRL:
> +		msr_info->data = pmu->global_ctrl;
> +		return 0;
> +	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +		msr_info->data = 0;
> +		return 0;
> +	default:
> +		break;
> +	}
> +
>  	return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
>  }
>  
>  int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  {
> +	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
> +	u32 msr = msr_info->index;
> +	u64 data = msr_info->data;
> +	u64 diff;
> +
> +	switch (msr) {
> +	case MSR_CORE_PERF_GLOBAL_STATUS:
> +		if (!msr_info->host_initiated || (data & pmu->global_ovf_ctrl_mask))
> +			return 1; /* RO MSR */
> +
> +		pmu->global_status = data;
> +		return 0;
> +	case MSR_CORE_PERF_GLOBAL_CTRL:
> +		if (!kvm_valid_perf_global_ctrl(pmu, data))
> +			return 1;
> +
> +		if (pmu->global_ctrl != data) {
> +			diff = pmu->global_ctrl ^ data;
> +			pmu->global_ctrl = data;
> +			reprogram_counters(pmu, diff);
> +		}
> +		return 0;
> +	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +		if (data & pmu->global_ovf_ctrl_mask)
> +			return 1;
> +
> +		if (!msr_info->host_initiated)
> +			pmu->global_status &= ~data;
> +		return 0;
> +	default:
> +		break;
> +	}
> +
>  	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
>  	return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
>  }

Please tweak these to follow the patterns for other MSR helpers (see below).  I
don't actually mind the style, but people get used to the pattern and make mistakes
when there are unexpected deviations.

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = 0;
		break;
	default:
		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
	}

	return 0;
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
	u64 diff;

	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (!msr_info->host_initiated)
			return 1; /* RO MSR */

		pmu->global_status = data;
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (!kvm_valid_perf_global_ctrl(pmu, data))
			return 1;

		if (pmu->global_ctrl != data) {
			diff = pmu->global_ctrl ^ data;
			pmu->global_ctrl = data;
			reprogram_counters(pmu, diff);
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (data & pmu->global_ovf_ctrl_mask)
			return 1;

		if (!msr_info->host_initiated)
			pmu->global_status &= ~data;
		break;
	default:
		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
	}

	return 0;
}
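
For context, both versions lean on the same two helpers. A minimal sketch of
what they do, based on the existing pmu.h code rather than on this series
(the field names global_ctrl_mask and reprogram_pmi and the use of
KVM_REQ_PMU are assumptions here, not necessarily what the final patch ends
up with):

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, u64 data)
{
	/* Reject writes that set bits for counters the guest doesn't have. */
	return !(pmu->global_ctrl_mask & data);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	/*
	 * Flag every counter whose enable bit flipped, then let the pending
	 * PMU request reprogram them before the next VM-Enter.
	 */
	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);

	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}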
