Message-ID: <561ac6df385e977cc51d51a8ab28ee49@www.loen.fr>
Date:   Thu, 05 Dec 2019 09:43:09 +0000
From:   Marc Zyngier <maz@...nel.org>
To:     Eric Auger <eric.auger@...hat.com>
Cc:     <eric.auger.pro@...il.com>, <linux-kernel@...r.kernel.org>,
        <kvmarm@...ts.cs.columbia.edu>, <james.morse@....com>,
        <andrew.murray@....com>, <suzuki.poulose@....com>,
        <drjones@...hat.com>
Subject: Re: [RFC 2/3] KVM: arm64: pmu: Fix chained SW_INCR counters

Hi Eric,

On 2019-12-04 20:44, Eric Auger wrote:
> At the moment a SW_INCR counter always overflows on a 32-bit
> boundary, regardless of whether the (n+1)th counter is
> programmed as CHAIN.
>
> Check whether the SW_INCR counter is a 64-bit counter and,
> if so, implement the 64-bit logic.
>
> Fixes: 80f393a23be6 ("KVM: arm/arm64: Support chained PMU counters")
> Signed-off-by: Eric Auger <eric.auger@...hat.com>
> ---
>  virt/kvm/arm/pmu.c | 16 +++++++++++++++-
>  1 file changed, 15 insertions(+), 1 deletion(-)
>
> diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
> index c3f8b059881e..7ab477db2f75 100644
> --- a/virt/kvm/arm/pmu.c
> +++ b/virt/kvm/arm/pmu.c
> @@ -491,6 +491,8 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
>
>  	enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
>  	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
> +		bool chained = test_bit(i >> 1, vcpu->arch.pmu.chained);
> +

I'd rather you used kvm_pmu_pmc_is_chained() than open-coded
this. But see below:
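
Something like this, for instance (untested; assuming the
&vcpu->arch.pmu.pmc[i] indexing already used elsewhere in this
file):

		bool chained = kvm_pmu_pmc_is_chained(&vcpu->arch.pmu.pmc[i]);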

>  		if (!(val & BIT(i)))
>  			continue;
>  		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
> @@ -500,8 +502,20 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
>  			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
>  			reg = lower_32_bits(reg);
>  			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
> -			if (!reg)
> +			if (reg) /* no overflow */
> +				continue;
> +			if (chained) {
> +				reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
> +				reg = lower_32_bits(reg);
> +				__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
> +				if (reg)
> +					continue;
> +				/* mark an overflow on high counter */
> +				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
> +			} else {
> +				/* mark an overflow */
>  				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
> +			}
>  		}
>  	}
>  }

I think the whole function is a bit of a mess, and could be better
structured to treat 64bit counters as first-class citizens.

I'm suggesting something along those lines, which tries to
streamline things a bit and keep the flow uniform between the
two word sizes. IMHO, it helps reasoning about it and leaves
room for the ARMv8.5 full 64bit counters... It is of course
completely untested.

Thoughts?

         M.

diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 8731dfeced8b..cf371f643ade 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -480,26 +480,46 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
   */
  void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
  {
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
  	int i;
-	u64 type, enable, reg;

-	if (val == 0)
-		return;
+	/* Weed out disabled counters */
+	val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

-	enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
  	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
+		u64 type, reg;
+		int ovs = i;
+
  		if (!(val & BIT(i)))
  			continue;
-		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
-		       & ARMV8_PMU_EVTYPE_EVENT;
-		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
-		    && (enable & BIT(i))) {
-			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
-			reg = lower_32_bits(reg);
-			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
-			if (!reg)
-				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+
+		/* PMSWINC only applies to ... SW_INC! */
+		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
+		type &= ARMV8_PMU_EVTYPE_EVENT;
+		if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
+			continue;
+
+		/* Potential 64bit value */
+		reg = kvm_pmu_get_counter_value(vcpu, i) + 1;
+
+		/* Start by writing back the low 32bits */
+		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = lower_32_bits(reg);
+
+		/*
+		 * 64bit counter? Write back the upper bits and target
+		 * the overflow bit at the next counter
+		 */
+		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
+			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = upper_32_bits(reg);
+			ovs++;
+		} else {
+			/* Only the low 32bits can wrap on a !chained counter */
+			reg = lower_32_bits(reg);
 		}
+
+		/* reg wrapping to 0 is what signals the overflow */
+		if (!reg)
+			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(ovs);
  	}
  }
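
To illustrate the overflow condition: only the (possibly 64bit)
value wrapping to zero counts as an overflow; the upper 32 bits
being zero is not enough on its own. A standalone userspace
sketch (not kernel code; lower_32()/upper_32() are local
stand-ins for the kernel's lower_32_bits()/upper_32_bits()):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
	static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

	int main(void)
	{
		/* Chained pair one increment away from the 64bit boundary */
		uint64_t reg = UINT64_MAX;

		reg++;	/* wraps to 0: the only chained overflow case */
		printf("low=%#x high=%#x overflow=%d\n",
		       lower_32(reg), upper_32(reg), reg == 0);

		/* A small value also has upper_32() == 0, but no overflow */
		reg = 41;
		reg++;
		printf("low=%#x high=%#x overflow=%d\n",
		       lower_32(reg), upper_32(reg), reg == 0);

		return 0;
	}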


-- 
Jazz is not dead. It just smells funny...
