Message-Id: <20220627215031.2158825-1-srinivas.pandruvada@linux.intel.com>
Date: Mon, 27 Jun 2022 14:50:31 -0700
From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
To: hdegoede@redhat.com, markgross@kernel.org
Cc: platform-driver-x86@vger.kernel.org, linux-kernel@vger.kernel.org,
	Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Subject: [PATCH] platform/x86: ISST: PUNIT device mapping with Sub-NUMA clustering

On a multi-package system using Sub-NUMA clustering, mapping a Linux
CPU number to a PUNIT PCI device fails when the manufacturer reuses
PCI bus numbers across packages. Bus numbers can be reused as long as
the devices are in different PCI domains (segments). In this case some
CPUs will fail to find a PCI device to issue SST requests to.

When bus numbers are reused across CPU packages, we use proximity
information instead, matching a CPU's NUMA node id to the PUNIT PCI
device's NUMA node id. But while there is only one PUNIT PCI device
per package, a package can have multiple NUMA nodes (one for each
sub-cluster). So the NUMA node id of the PUNIT PCI device can only
match the NUMA node id of the CPUs in one of the sub-clusters in the
package.

Since there is only one PUNIT PCI device per package, a match against
the NUMA node id of any sub-cluster in a package is valid for every
CPU in that package. So, store the match in a per-package data
structure and return the stored mapping when the direct NUMA node id
match fails.

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
---
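Note for reviewers: below is a minimal standalone sketch of the lookup
flow this patch creates, not the driver's actual code. Everything here
except topology_physical_package_id() and struct pci_dev is a
simplified stand-in: match_by_numa_node() is hypothetical, while the
real driver scans PCI domains with pci_get_domain_bus_and_slot()
inside _isst_if_get_pci_dev().

	struct pkg_punit_cache {
		struct pci_dev *pci_dev[2];	/* one slot per PUNIT bus number */
	};

	/* kcalloc'ed with one entry per package at init time */
	static struct pkg_punit_cache *pkg_cache;

	static struct pci_dev *lookup_punit_dev(int cpu, int bus_no)
	{
		int pkg_id = topology_physical_package_id(cpu);
		struct pci_dev *dev;

		/* Stand-in for the existing NUMA node id scan */
		dev = match_by_numa_node(cpu, bus_no);
		if (dev) {
			/* Remember the match for every CPU in this package */
			pkg_cache[pkg_id].pci_dev[bus_no] = dev;
			return dev;
		}

		/*
		 * This CPU sits in a sub-NUMA cluster whose node id did not
		 * match the PUNIT device; reuse the match cached by a CPU in
		 * a sibling cluster of the same package.
		 */
		return pkg_cache[pkg_id].pci_dev[bus_no];
	}

The cache is keyed by (package id, bus number) because each package
exposes at most one PUNIT PCI device per bus number, so a match found
via any sub-cluster is valid for the whole package.
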
.../intel/speed_select_if/isst_if_common.c | 22 +++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index e8424e70d81d..f3cd1be3283a 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -286,11 +286,18 @@ struct isst_if_cpu_info {
 	int numa_node;
 };
 
+struct isst_if_pkg_info {
+	struct pci_dev *pci_dev[2];
+};
+
 static struct isst_if_cpu_info *isst_cpu_info;
+static struct isst_if_pkg_info *isst_pkg_info;
+
 #define ISST_MAX_PCI_DOMAINS	8
 
 static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
 {
+	int pkg_id = topology_physical_package_id(cpu);
 	struct pci_dev *matched_pci_dev = NULL;
 	struct pci_dev *pci_dev = NULL;
 	int no_matches = 0;
@@ -324,6 +331,8 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
 		}
 
 		if (node == isst_cpu_info[cpu].numa_node) {
+			isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;
+
 			pci_dev = _pci_dev;
 			break;
 		}
@@ -342,6 +351,9 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
 	if (!pci_dev && no_matches == 1)
 		pci_dev = matched_pci_dev;
 
+	if (!pci_dev)
+		pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];
+
 	return pci_dev;
 }
 
@@ -417,10 +429,19 @@ static int isst_if_cpu_info_init(void)
 	if (!isst_cpu_info)
 		return -ENOMEM;
 
+	isst_pkg_info = kcalloc(topology_max_packages(),
+				sizeof(*isst_pkg_info),
+				GFP_KERNEL);
+	if (!isst_pkg_info) {
+		kfree(isst_cpu_info);
+		return -ENOMEM;
+	}
+
 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 				"platform/x86/isst-if:online",
 				isst_if_cpu_online, NULL);
 	if (ret < 0) {
+		kfree(isst_pkg_info);
 		kfree(isst_cpu_info);
 		return ret;
 	}
@@ -433,6 +454,7 @@ static int isst_if_cpu_info_init(void)
 
 static void isst_if_cpu_info_exit(void)
 {
 	cpuhp_remove_state(isst_if_online_id);
+	kfree(isst_pkg_info);
 	kfree(isst_cpu_info);
 };
--
2.31.1