Message-Id: <20230814030645.11632-1-shijie@os.amperecomputing.com>
Date:   Mon, 14 Aug 2023 11:06:45 +0800
From:   Huang Shijie <shijie@...amperecomputing.com>
To:     maz@...nel.org
Cc:     oliver.upton@...ux.dev, james.morse@....com,
        suzuki.poulose@....com, yuzenghui@...wei.com,
        catalin.marinas@....com, will@...nel.org, pbonzini@...hat.com,
        peterz@...radead.org, ingo@...hat.com, acme@...nel.org,
        mark.rutland@....com, alexander.shishkin@...ux.intel.com,
        jolsa@...nel.org, namhyung@...nel.org, irogers@...gle.com,
        linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
        linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
        linux-perf-users@...r.kernel.org, patches@...erecomputing.com,
        zwang@...erecomputing.com,
        Huang Shijie <shijie@...amperecomputing.com>
Subject: [PATCH v3] KVM: arm64: reconfigure the event filters for guest context

1.) Background.
   1.1) On arm64, start a guest with QEMU (running as the VMM for KVM),
        bind the guest to core 33, and run program "a" in the guest.
        The code of "a" is shown below:
   	----------------------------------------------------------
		#include <stdio.h>

		int main()
		{
			unsigned long i = 0;

			/* Spin forever so the guest burns CPU cycles. */
			for (;;) {
				i++;
			}

			/* Never reached; %lu matches unsigned long. */
			printf("i:%lu\n", i);
			return 0;
		}
   	----------------------------------------------------------
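
	One way to build and run it (assuming gcc is available in the
	guest; the flags are an assumption, any ordinary build works):

		# gcc -o a a.c
		# ./a &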

   1.2) Run the following perf command on the host:
      #perf stat -e cycles:G,cycles:H -C 33 -I 1000 sleep 1
          #           time             counts unit events
               1.000817400      3,299,471,572      cycles:G
               1.000817400          3,240,586      cycles:H

       This result is correct: the CPU runs at 3.3 GHz, so roughly
       3.3 billion guest cycles per second is expected.

   1.3) Run the following perf command on the host:
      #perf stat -e cycles:G,cycles:H -C 33 -d -d  -I 1000 sleep 1
            time             counts unit events
     1.000831480        153,634,097      cycles:G                                                                (70.03%)
     1.000831480      3,147,940,599      cycles:H                                                                (70.03%)
     1.000831480      1,143,598,527      L1-dcache-loads                                                         (70.03%)
     1.000831480              9,986      L1-dcache-load-misses            #    0.00% of all L1-dcache accesses   (70.03%)
     1.000831480    <not supported>      LLC-loads
     1.000831480    <not supported>      LLC-load-misses
     1.000831480        580,887,696      L1-icache-loads                                                         (70.03%)
     1.000831480             77,855      L1-icache-load-misses            #    0.01% of all L1-icache accesses   (70.03%)
     1.000831480      6,112,224,612      dTLB-loads                                                              (70.03%)
     1.000831480             16,222      dTLB-load-misses                 #    0.00% of all dTLB cache accesses  (69.94%)
     1.000831480        590,015,996      iTLB-loads                                                              (59.95%)
     1.000831480                505      iTLB-load-misses                 #    0.00% of all iTLB cache accesses  (59.95%)

       This result is wrong. The "cycles:G" count should be nearly
       3.3 billion, matching the 3.3 GHz clock as in 1.2.
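
       (Aside, not part of the patch: the :G and :H modifiers select
        the exclude_host/exclude_guest filter bits in struct
        perf_event_attr. A minimal standalone sketch of cycles:G --
        the CPU number and error handling are illustrative:)
   	----------------------------------------------------------
		#define _GNU_SOURCE
		#include <stdio.h>
		#include <string.h>
		#include <unistd.h>
		#include <sys/syscall.h>
		#include <linux/perf_event.h>

		static int perf_event_open(struct perf_event_attr *attr,
					   pid_t pid, int cpu, int group_fd,
					   unsigned long flags)
		{
			return syscall(__NR_perf_event_open, attr, pid,
				       cpu, group_fd, flags);
		}

		int main(void)
		{
			struct perf_event_attr attr;
			unsigned long long count;
			int fd;

			memset(&attr, 0, sizeof(attr));
			attr.size = sizeof(attr);
			attr.type = PERF_TYPE_HARDWARE;
			attr.config = PERF_COUNT_HW_CPU_CYCLES;
			/* cycles:G = count guest mode only;
			 * exclude_guest = 1 would give cycles:H instead.
			 */
			attr.exclude_host = 1;

			/* any task on CPU 33, like "perf stat -C 33";
			 * needs root or CAP_PERFMON.
			 */
			fd = perf_event_open(&attr, -1, 33, -1, 0);
			if (fd < 0) {
				perror("perf_event_open");
				return 1;
			}
			sleep(1);
			if (read(fd, &count, sizeof(count)) ==
			    (ssize_t)sizeof(count))
				printf("guest cycles: %llu\n", count);
			return 0;
		}
   	----------------------------------------------------------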

2.) Root cause.
	There are only 7 counters on my arm64 platform:
	  (one cycle counter) + (6 normal counters)

	The perf command in 1.3 above needs 10 event counters.
	Since only 7 counters are available, the perf core triggers
	multiplexing from an hrtimer:
	     perf_mux_hrtimer_restart() --> perf_rotate_context()

       If the hrtimer fires while the host is running, everything is
       fine. But if it fires while the guest is running,
       perf_rotate_context() programs the PMU with the event filters
       for the host context, and KVM gets no chance to restore the
       PMU registers with kvm_vcpu_pmu_restore_guest(). The PMU then
       counts with the wrong filters, which produces the wrong result
       above.
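
       (Aside: the "(70.03%)" column in 1.3 is the fraction of time
        each multiplexed event was actually scheduled; perf stat
        scales the raw count by time_enabled/time_running. A sketch
        of that scaling, with illustrative numbers:)
   	----------------------------------------------------------
		#include <stdio.h>

		/* perf stat's scaling rule for a multiplexed event */
		static unsigned long long scale(unsigned long long raw,
						unsigned long long enabled,
						unsigned long long running)
		{
			if (!running)
				return 0;
			return (unsigned long long)
				((double)raw * enabled / running);
		}

		int main(void)
		{
			/* event scheduled ~70% of a 1-second interval */
			unsigned long long raw     = 2309630100ULL;
			unsigned long long enabled = 1000000000ULL; /* ns */
			unsigned long long running =  700000000ULL; /* ns */

			/* prints ~3.3 billion, like the count in 1.2 */
			printf("scaled: %llu (%.2f%%)\n",
			       scale(raw, enabled, running),
			       100.0 * running / enabled);
			return 0;
		}
   	----------------------------------------------------------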

3.) About this patch.
	3.1) Add a weak arch_perf_mux() hook, called from
	     perf_mux_hrtimer_handler().
        3.2) On arm64, implement arch_perf_mux():
		make a KVM_REQ_PMU_RESTORE_GUEST request if the
		perf multiplexing occurs in the guest context.
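
	(For reference: __weak gives a link-time default that an
	 architecture can override with a strong definition. A generic
	 two-file sketch of the pattern -- names are hypothetical:)
   	----------------------------------------------------------
		/* core.c: weak default, the kernel spells this "__weak" */
		#include <stdbool.h>

		void __attribute__((weak)) arch_example_hook(bool rotated)
		{
			/* no-op for architectures that don't care */
		}

		/* arch.c: a strong definition wins at link time */
		#include <stdbool.h>
		#include <stdio.h>

		void arch_example_hook(bool rotated)
		{
			if (rotated)
				printf("arch-specific handling\n");
		}
   	----------------------------------------------------------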

4.) Test result of this patch:
      #perf stat -e cycles:G,cycles:H -C 33 -d -d  -I 1000 sleep 1
            time             counts unit events
     1.001006400      3,298,348,656      cycles:G                                                                (70.03%)
     1.001006400          3,144,532      cycles:H                                                                (70.03%)
     1.001006400            941,149      L1-dcache-loads                                                         (70.03%)
     1.001006400             17,937      L1-dcache-load-misses            #    1.91% of all L1-dcache accesses   (70.03%)
     1.001006400    <not supported>      LLC-loads
     1.001006400    <not supported>      LLC-load-misses
     1.001006400          1,101,889      L1-icache-loads                                                         (70.03%)
     1.001006400            121,638      L1-icache-load-misses            #   11.04% of all L1-icache accesses   (70.03%)
     1.001006400          1,031,228      dTLB-loads                                                              (70.03%)
     1.001006400             26,952      dTLB-load-misses                 #    2.61% of all dTLB cache accesses  (69.93%)
     1.001006400          1,030,678      iTLB-loads                                                              (59.94%)
     1.001006400                338      iTLB-load-misses                 #    0.03% of all iTLB cache accesses  (59.94%)

    The result is correct. The "cycles:G" count is nearly 3.3 billion
    now, as expected for a 3.3 GHz CPU.

Signed-off-by: Huang Shijie <shijie@...amperecomputing.com>
---

v1 --> v2:
	Do not change the perf/core code; only change the arm64 KVM code.
	v1: https://lkml.org/lkml/2023/8/8/1465

v2 --> v3:
	Disturb the perf/core again: add a weak arch_perf_mux() hook.
	v2: http://lists.infradead.org/pipermail/linux-arm-kernel/2023-August/858427.html

---
 arch/arm64/include/asm/kvm_host.h |  1 +
 arch/arm64/kvm/arm.c              | 18 ++++++++++++++++++
 include/linux/perf_event.h        |  2 ++
 kernel/events/core.c              |  5 +++++
 4 files changed, 26 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8b6096753740..151810445d79 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -49,6 +49,7 @@
 #define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
 #define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
 #define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
+#define KVM_REQ_PMU_RESTORE_GUEST	KVM_ARCH_REQ(7)
 
 #define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
 				     KVM_DIRTY_LOG_INITIALLY_SET)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c2c14059f6a8..53d6555912d1 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -792,6 +792,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
 			preempt_enable();
 		}
 
+		if (kvm_check_request(KVM_REQ_PMU_RESTORE_GUEST, vcpu))
+			kvm_vcpu_pmu_restore_guest(vcpu);
+
 		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
 			kvm_pmu_handle_pmcr(vcpu,
 					    __vcpu_sys_reg(vcpu, PMCR_EL0));
@@ -878,6 +881,21 @@ static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
+void arch_perf_mux(bool rotations)
+{
+	struct kvm_vcpu *vcpu;
+
+	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+		return;
+
+	vcpu = kvm_get_running_vcpu();
+	if (!vcpu)
+		return;
+
+	if (rotations)
+		kvm_make_request(KVM_REQ_PMU_RESTORE_GUEST, vcpu);
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:	The VCPU pointer
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2166a69e3bf2..07ef1aa00226 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1860,6 +1860,8 @@ extern void arch_perf_update_userpage(struct perf_event *event,
 				      struct perf_event_mmap_page *userpg,
 				      u64 now);
 
+extern void arch_perf_mux(bool rotations);
+
 #ifdef CONFIG_MMU
 extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
 #endif
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6fd9272eec6e..eefb28198dd1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1059,6 +1059,10 @@ static void perf_cgroup_switch(struct task_struct *task)
 }
 #endif
 
+void __weak arch_perf_mux(bool rotations)
+{
+}
+
 /*
  * set default to be dependent on timer tick just
  * like original code
@@ -1076,6 +1080,7 @@ static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
 
 	cpc = container_of(hr, struct perf_cpu_pmu_context, hrtimer);
 	rotations = perf_rotate_context(cpc);
+	arch_perf_mux(rotations);
 
 	raw_spin_lock(&cpc->hrtimer_lock);
 	if (rotations)
-- 
2.39.2
