Message-Id: <20220704101605.1318280-7-sudeep.holla@arm.com>
Date: Mon, 4 Jul 2022 11:15:50 +0100
From: Sudeep Holla <sudeep.holla@....com>
To: linux-kernel@...r.kernel.org,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Cc: Sudeep Holla <sudeep.holla@....com>, conor.dooley@...rochip.com,
valentina.fernandezalanis@...rochip.com,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Qing Wang <wangqing@...o.com>,
Rob Herring <robh+dt@...nel.org>,
"Rafael J . Wysocki" <rafael@...nel.org>,
Ionela Voinescu <ionela.voinescu@....com>,
Pierre Gondois <pierre.gondois@....com>,
linux-arm-kernel@...ts.infradead.org,
linux-riscv@...ts.infradead.org, Gavin Shan <gshan@...hat.com>
Subject: [PATCH v6 06/21] cacheinfo: Allow early detection and population of cache attributes
Some architectures/platforms may need to set up cache properties very
early during boot, along with the other CPU topology information, so
that all of this data can be used to build the sched_domains used by
the scheduler.

Allow detect_cache_attributes() to be called quite early during boot.
Tested-by: Ionela Voinescu <ionela.voinescu@....com>
Reviewed-by: Gavin Shan <gshan@...hat.com>
Signed-off-by: Sudeep Holla <sudeep.holla@....com>
---
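For illustration only (not part of this patch): a minimal sketch of how an
architecture's early boot/topology code might use the now-exported
detect_cache_attributes(). The early_detect_cacheinfo() hook name and its
placement are assumptions made for the example, not existing kernel code.

#include <linux/cacheinfo.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/printk.h>

/*
 * Hypothetical early-boot hook (sketch only): try to populate the
 * cacheinfo for every possible CPU before the secondary CPUs are
 * brought online, so that the cache attributes are available when
 * the sched_domains are built.
 */
static void __init early_detect_cacheinfo(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		/*
		 * Failure is not fatal this early; the cacheinfo_cpu_online
		 * hotplug callback will retry once the CPU comes online.
		 */
		if (detect_cache_attributes(cpu))
			pr_debug("early cacheinfo detection failed for CPU%u\n",
				 cpu);
	}
}

With this change, cacheinfo allocated that early is simply reused later:
when detect_cache_attributes() is called again from the CPU hotplug path,
it skips the allocation and only updates the shared CPU maps.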
drivers/base/cacheinfo.c | 55 ++++++++++++++++++++++++++-------------
include/linux/cacheinfo.h | 1 +
2 files changed, 38 insertions(+), 18 deletions(-)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index fdc1baa342f1..4d21a1022fa9 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -193,14 +193,8 @@ static int cache_setup_of_node(unsigned int cpu)
{
struct device_node *np;
struct cacheinfo *this_leaf;
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
unsigned int index = 0;
- /* skip if fw_token is already populated */
- if (this_cpu_ci->info_list->fw_token) {
- return 0;
- }
-
np = of_cpu_device_node_get(cpu);
if (!np) {
pr_err("Failed to find cpu%d device node\n", cpu);
@@ -236,6 +230,18 @@ int __weak cache_setup_acpi(unsigned int cpu)
unsigned int coherency_max_size;
+static int cache_setup_properties(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (of_have_populated_dt())
+ ret = cache_setup_of_node(cpu);
+ else if (!acpi_disabled)
+ ret = cache_setup_acpi(cpu);
+
+ return ret;
+}
+
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -246,21 +252,21 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (this_cpu_ci->cpu_map_populated)
return 0;
- if (of_have_populated_dt())
- ret = cache_setup_of_node(cpu);
- else if (!acpi_disabled)
- ret = cache_setup_acpi(cpu);
-
- if (ret)
- return ret;
+ /*
+ * Skip setting up cache properties if LLC is valid; we only need
+ * to update the shared cpu_map if the cache attributes were
+ * populated early, before all the CPUs are brought online.
+ */
+ if (!last_level_cache_is_valid(cpu)) {
+ ret = cache_setup_properties(cpu);
+ if (ret)
+ return ret;
+ }
for (index = 0; index < cache_leaves(cpu); index++) {
unsigned int i;
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
- /* skip if shared_cpu_map is already populated */
- if (!cpumask_empty(&this_leaf->shared_cpu_map))
- continue;
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
for_each_online_cpu(i) {
@@ -330,17 +336,28 @@ int __weak populate_cache_leaves(unsigned int cpu)
return -ENOENT;
}
-static int detect_cache_attributes(unsigned int cpu)
+int detect_cache_attributes(unsigned int cpu)
{
int ret;
+ /* Since early detection of the cacheinfo is allowed via this
+ * function, and it also gets called as a CPU hotplug callback via
+ * cacheinfo_cpu_online, the initialisation can be skipped and only
+ * the CPU maps need to be updated, as the CPU online status will
+ * already be updated if called via the cacheinfo_cpu_online path.
+ */
+ if (per_cpu_cacheinfo(cpu))
+ goto update_cpu_map;
+
if (init_cache_level(cpu) || !cache_leaves(cpu))
return -ENOENT;
per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
sizeof(struct cacheinfo), GFP_KERNEL);
- if (per_cpu_cacheinfo(cpu) == NULL)
+ if (per_cpu_cacheinfo(cpu) == NULL) {
+ cache_leaves(cpu) = 0;
return -ENOMEM;
+ }
/*
* populate_cache_leaves() may completely setup the cache leaves and
@@ -349,6 +366,8 @@ static int detect_cache_attributes(unsigned int cpu)
ret = populate_cache_leaves(cpu);
if (ret)
goto free_ci;
+
+update_cpu_map:
/*
* For systems using DT for cache hierarchy, fw_token
* and shared_cpu_map will be set up here only if they are
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 7e429bc5c1a4..00b7a6ae8617 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -84,6 +84,7 @@ int populate_cache_leaves(unsigned int cpu);
int cache_setup_acpi(unsigned int cpu);
bool last_level_cache_is_valid(unsigned int cpu);
bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
+int detect_cache_attributes(unsigned int cpu);
#ifndef CONFIG_ACPI_PPTT
/*
* acpi_find_last_cache_level is only called on ACPI enabled
--
2.37.0