Date: Fri, 31 May 2024 07:04:21 +0000
From: "Linda Chai(BJ-RD)" <LindaChai@...oxin.com>
To: "ricardo.neri-calderon@...ux.intel.com"
	<ricardo.neri-calderon@...ux.intel.com>
CC: "Cobe Chen(BJ-RD)" <CobeChen@...oxin.com>, LeoLiu-oc
	<LeoLiu-oc@...oxin.com>, "Linda Chai(BJ-RD)" <LindaChai@...oxin.com>, "Tim
 Guo(BJ-RD)" <TimGuo@...oxin.com>, Tony W Wang-oc <TonyWWang-oc@...oxin.com>,
	"acpica-devel@...ts.linux.dev" <acpica-devel@...ts.linux.dev>, "bp@...en8.de"
	<bp@...en8.de>, "dave.hansen@...ux.intel.com" <dave.hansen@...ux.intel.com>,
	"hpa@...or.com" <hpa@...or.com>, "j.granados@...sung.com"
	<j.granados@...sung.com>, "lenb@...nel.org" <lenb@...nel.org>,
	"linux-acpi@...r.kernel.org" <linux-acpi@...r.kernel.org>,
	"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
	"linux-pm@...r.kernel.org" <linux-pm@...r.kernel.org>, "mcgrof@...nel.org"
	<mcgrof@...nel.org>, "mingo@...hat.com" <mingo@...hat.com>,
	"peterz@...radead.org" <peterz@...radead.org>, "rafael@...nel.org"
	<rafael@...nel.org>, "robert.moore@...el.com" <robert.moore@...el.com>,
	"tglx@...utronix.de" <tglx@...utronix.de>, "viresh.kumar@...aro.org"
	<viresh.kumar@...aro.org>, "x86@...nel.org" <x86@...nel.org>
Subject: Re: [PATCH 3/3] ACPI: cpufreq: Add ITMT support when CPPC enabled for
 Zhaoxin CPUs

Hi Ricardo,

Regarding the duplicated ITMT code in the intel_pstate and acpi-cpufreq drivers: how about we move the common code into arch/x86/kernel/itmt.c?

Each of intel_pstate and acpi-cpufreq would then only read the highest frequency through the CPPC interface and call sched_set_itmt_core_prio() provided by itmt.c.
sched_set_itmt_core_prio() would do the following:
1) set the core priority according to the highest frequency;
2) check whether the cores' highest frequencies differ, and if so, mark ITMT as capable and enable it;
3) check whether all online cores have updated their core priority, which guarantees that rebuild_sched_domains() will see the correct priority for each online core.

Following this approach, the patch would look like this:
diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
index 9a7c03d47861..eb24df2826bc 100644
--- a/arch/x86/kernel/itmt.c
+++ b/arch/x86/kernel/itmt.c
@@ -22,6 +22,7 @@
#include <linux/mutex.h>
#include <linux/sysctl.h>
#include <linux/nodemask.h>
+#include <linux/itmt.h>

 static DEFINE_MUTEX(itmt_update_mutex);
DEFINE_PER_CPU_READ_MOSTLY(int, sched_core_priority);
@@ -162,6 +163,13 @@ int arch_asym_cpu_priority(int cpu)
     return per_cpu(sched_core_priority, cpu);
}

+/* The work item is needed to avoid CPU hotplug locking issues */
+static void sched_itmt_work_fn(struct work_struct *work)
+{
+     sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, sched_itmt_work_fn);
+
/**
  * sched_set_itmt_core_prio() - Set CPU priority based on ITMT
  * @prio:     Priority of @cpu
@@ -176,7 +184,36 @@ int arch_asym_cpu_priority(int cpu)
  * the CPU priorities. The sched domains have no
  * dependency on CPU priorities.
  */
+
+static u64 max_highest_prio = 0, min_highest_prio = U64_MAX;
+static bool core_priority_diff = false;
+static struct cpumask core_prio_cpumask;
void sched_set_itmt_core_prio(int prio, int cpu)
{
     per_cpu(sched_core_priority, cpu) = prio;
+	cpumask_set_cpu(cpu, &core_prio_cpumask);
+
+	if (max_highest_prio <= min_highest_prio) {
+		if (prio > max_highest_prio)
+			max_highest_prio = prio;
+
+		if (prio < min_highest_prio)
+			min_highest_prio = prio;
+
+		if (max_highest_prio > min_highest_prio)
+			core_priority_diff = true;
+	}
+
+	if (core_priority_diff && cpumask_equal(&core_prio_cpumask, cpu_online_mask)) {
+		/*
+		 * This code can run during CPU online under the CPU hotplug
+		 * locks, so sched_set_itmt_support() cannot be called from
+		 * here. Queue a work item to invoke it.
+		 */
+		pr_debug("queue a work to set itmt support and enable\n");
+		schedule_work(&sched_itmt_work);
+	}
}
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 37f1cdf46d29..3e5e0f66b2ed 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -30,6 +30,7 @@
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

+#include <linux/itmt.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
@@ -663,8 +664,17 @@ static u64 get_max_boost_ratio(unsigned int cpu)

      return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
+
+static void core_set_itmt_prio(int cpu)
+{
+	u64 highest_perf;
+
+	if (cppc_get_highest_perf(cpu, &highest_perf))
+		return;
+
+	sched_set_itmt_core_prio(highest_perf, cpu);
+}
+
#else
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+static inline void core_set_itmt_prio(int cpu) {}
#endif

 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -677,7 +687,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
     unsigned int valid_states = 0;
     unsigned int result = 0;
     u64 max_boost_ratio;
-     unsigned int i;
+	unsigned int i, j;
#ifdef CONFIG_SMP
     static int blacklisted;
#endif
@@ -741,6 +751,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
            pr_info_once("overriding BIOS provided _PSD data\n");
     }
#endif
+	for_each_cpu(j, policy->cpus)
+		core_set_itmt_prio(j);

      /* capability check */
     if (perf->state_count <= 1) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dbbf299f4219..4b04e6db9d5b 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -27,6 +27,7 @@
#include <linux/pm_qos.h>
#include <linux/bitfield.h>
#include <trace/events/power.h>
+#include <linux/itmt.h>

 #include <asm/cpu.h>
#include <asm/div64.h>
@@ -340,23 +341,14 @@ static bool intel_pstate_get_ppc_enable_status(void)

 #ifdef CONFIG_ACPI_CPPC_LIB

-/* The work item is needed to avoid CPU hotplug locking issues */
-static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
-{
-     sched_set_itmt_support();
-}
-
-static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
-
#define CPPC_MAX_PERF  U8_MAX

 static void intel_pstate_set_itmt_prio(int cpu)
{
-     struct cppc_perf_caps cppc_perf;
-     static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
+     u64 highest_perf;
     int ret;

-     ret = cppc_get_perf_caps(cpu, &cppc_perf);
+	ret = cppc_get_highest_perf(cpu, &highest_perf);
     if (ret)
            return;

@@ -365,33 +357,15 @@ static void intel_pstate_set_itmt_prio(int cpu)
      * In this case we can't use CPPC.highest_perf to enable ITMT.
      * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
      */
-     if (cppc_perf.highest_perf == CPPC_MAX_PERF)
-            cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+     if (highest_perf == CPPC_MAX_PERF)
+            highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));

      /*
      * The priorities can be set regardless of whether or not
      * sched_set_itmt_support(true) has been called and it is valid to
      * update them at any time after it has been called.
      */
-     sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
-
-     if (max_highest_perf <= min_highest_perf) {
-            if (cppc_perf.highest_perf > max_highest_perf)
-                   max_highest_perf = cppc_perf.highest_perf;
-
-            if (cppc_perf.highest_perf < min_highest_perf)
-                   min_highest_perf = cppc_perf.highest_perf;
-
-            if (max_highest_perf > min_highest_perf) {
-                   /*
-                   * This code can be run during CPU online under the
-                   * CPU hotplug locks, so sched_set_itmt_support()
-                   * cannot be called from here.  Queue up a work item
-                   * to invoke it.
-                   */
-                   schedule_work(&sched_itmt_work);
-            }
-     }
+     sched_set_itmt_core_prio(highest_perf, cpu);
}

 static int intel_pstate_get_cppc_guaranteed(int cpu)
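
(Note: the new <linux/itmt.h> included by the hunks above is not shown here. As a rough sketch only, assuming it simply carries the existing ITMT prototypes that currently live in arch/x86/include/asm/topology.h, it might look like this:)

/* include/linux/itmt.h - hypothetical sketch, not part of the posted diff */
#ifndef _LINUX_ITMT_H
#define _LINUX_ITMT_H

#ifdef CONFIG_SCHED_MC_PRIO
/* Set the ITMT priority of a CPU from its highest performance level */
void sched_set_itmt_core_prio(int prio, int core_cpu);
/* Tell the scheduler that the system does / no longer does support ITMT */
int sched_set_itmt_support(void);
void sched_clear_itmt_support(void);
#else
static inline void sched_set_itmt_core_prio(int prio, int core_cpu) { }
static inline int sched_set_itmt_support(void) { return 0; }
static inline void sched_clear_itmt_support(void) { }
#endif

#endif /* _LINUX_ITMT_H */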
Thanks
Linda



CONFIDENTIAL NOTE:
This email contains confidential or legally privileged information and is for the sole use of its intended recipient. Any unauthorized review, use, copying or forwarding of this email or the content of this email is strictly prohibited.
