Message-ID: <20251124185412.24155-10-tony.luck@intel.com>
Date: Mon, 24 Nov 2025 10:53:46 -0800
From: Tony Luck <tony.luck@...el.com>
To: Fenghua Yu <fenghuay@...dia.com>,
Reinette Chatre <reinette.chatre@...el.com>,
Maciej Wieczor-Retman <maciej.wieczor-retman@...el.com>,
Peter Newman <peternewman@...gle.com>,
James Morse <james.morse@....com>,
Babu Moger <babu.moger@....com>,
Drew Fustini <dfustini@...libre.com>,
Dave Martin <Dave.Martin@....com>,
Chen Yu <yu.c.chen@...el.com>
Cc: x86@...nel.org,
linux-kernel@...r.kernel.org,
patches@...ts.linux.dev,
Tony Luck <tony.luck@...el.com>
Subject: [PATCH v14 09/32] x86,fs/resctrl: Rename some L3 specific functions
With the arrival of monitor events tied to new domains associated with a
different resource, it would be clearer if the L3 resource specific
functions were more accurately named.
Rename three groups of functions:

Functions that allocate/free architecture per-RMID MBM state information:
	arch_domain_mbm_alloc() -> l3_mon_domain_mbm_alloc()
	mon_domain_free() -> l3_mon_domain_free()

Functions that allocate/free filesystem per-RMID MBM state information:
	domain_setup_mon_state() -> domain_setup_l3_mon_state()
	domain_destroy_mon_state() -> domain_destroy_l3_mon_state()

Initialization/exit:
	rdt_get_mon_l3_config() -> rdt_get_l3_mon_config()
	resctrl_mon_resource_init() -> resctrl_l3_mon_resource_init()
	resctrl_mon_resource_exit() -> resctrl_l3_mon_resource_exit()
Ensure kernel-doc descriptions of these functions' return values are present
and correctly formatted.
Signed-off-by: Tony Luck <tony.luck@...el.com>
Reviewed-by: Reinette Chatre <reinette.chatre@...el.com>
---
arch/x86/kernel/cpu/resctrl/internal.h | 2 +-
fs/resctrl/internal.h | 6 +++---
arch/x86/kernel/cpu/resctrl/core.c | 20 +++++++++++---------
arch/x86/kernel/cpu/resctrl/monitor.c | 2 +-
fs/resctrl/monitor.c | 8 ++++----
fs/resctrl/rdtgroup.c | 24 ++++++++++++------------
6 files changed, 32 insertions(+), 30 deletions(-)
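
Note for reviewers: the kernel-doc updates below document the return value
in a dedicated "Return:" section rather than a free-form "Returns ..."
sentence. A minimal sketch of that layout (hypothetical function and names,
not part of this patch; assumes <linux/slab.h> for kcalloc()/kfree()):

/**
 * example_mbm_alloc() - Allocate a hypothetical per-RMID counter array.
 * @num_rmid: Number of entries to allocate.
 *
 * Return: 0 for success, or -ENOMEM.
 */
static int example_mbm_alloc(u32 num_rmid)
{
	u64 *counters = kcalloc(num_rmid, sizeof(*counters), GFP_KERNEL);

	if (!counters)
		return -ENOMEM;

	/* Illustration only: a real caller would keep the allocation. */
	kfree(counters);
	return 0;
}
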
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index d6da21d4684b..ae182b5f9a3c 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -213,7 +213,7 @@ union l3_qos_abmc_cfg {
void rdt_ctrl_update(void *arg);
-int rdt_get_mon_l3_config(struct rdt_resource *r);
+int rdt_get_l3_mon_config(struct rdt_resource *r);
bool rdt_cpu_has(int flag);
diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h
index af47b6ddef62..9768341aa21c 100644
--- a/fs/resctrl/internal.h
+++ b/fs/resctrl/internal.h
@@ -357,7 +357,9 @@ int alloc_rmid(u32 closid);
void free_rmid(u32 closid, u32 rmid);
-void resctrl_mon_resource_exit(void);
+int resctrl_l3_mon_resource_init(void);
+
+void resctrl_l3_mon_resource_exit(void);
void mon_event_count(void *info);
@@ -367,8 +369,6 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
struct rdt_domain_hdr *hdr, struct rdtgroup *rdtgrp,
cpumask_t *cpumask, int evtid, int first);
-int resctrl_mon_resource_init(void);
-
void mbm_setup_overflow_handler(struct rdt_l3_mon_domain *dom,
unsigned long delay_ms,
int exclude_cpu);
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index cc1b846f9645..b3a2dc56155d 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -368,7 +368,7 @@ static void ctrl_domain_free(struct rdt_hw_ctrl_domain *hw_dom)
kfree(hw_dom);
}
-static void mon_domain_free(struct rdt_hw_l3_mon_domain *hw_dom)
+static void l3_mon_domain_free(struct rdt_hw_l3_mon_domain *hw_dom)
{
int idx;
@@ -401,11 +401,13 @@ static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_ctrl_domain *
}
/**
- * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
+ * l3_mon_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
* @num_rmid: The size of the MBM counter array
* @hw_dom: The domain that owns the allocated arrays
+ *
+ * Return: 0 for success, or -ENOMEM.
*/
-static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_l3_mon_domain *hw_dom)
+static int l3_mon_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_l3_mon_domain *hw_dom)
{
size_t tsize = sizeof(*hw_dom->arch_mbm_states[0]);
enum resctrl_event_id eventid;
@@ -519,7 +521,7 @@ static void l3_mon_domain_setup(int cpu, int id, struct rdt_resource *r, struct
ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);
if (!ci) {
pr_warn_once("Can't find L3 cache for CPU:%d resource %s\n", cpu, r->name);
- mon_domain_free(hw_dom);
+ l3_mon_domain_free(hw_dom);
return;
}
d->ci_id = ci->id;
@@ -527,8 +529,8 @@ static void l3_mon_domain_setup(int cpu, int id, struct rdt_resource *r, struct
arch_mon_domain_online(r, d);
- if (arch_domain_mbm_alloc(r->mon.num_rmid, hw_dom)) {
- mon_domain_free(hw_dom);
+ if (l3_mon_domain_mbm_alloc(r->mon.num_rmid, hw_dom)) {
+ l3_mon_domain_free(hw_dom);
return;
}
@@ -538,7 +540,7 @@ static void l3_mon_domain_setup(int cpu, int id, struct rdt_resource *r, struct
if (err) {
list_del_rcu(&d->hdr.list);
synchronize_rcu();
- mon_domain_free(hw_dom);
+ l3_mon_domain_free(hw_dom);
}
}
@@ -664,7 +666,7 @@ static void domain_remove_cpu_mon(int cpu, struct rdt_resource *r)
resctrl_offline_mon_domain(r, hdr);
list_del_rcu(&hdr->list);
synchronize_rcu();
- mon_domain_free(hw_dom);
+ l3_mon_domain_free(hw_dom);
break;
}
default:
@@ -917,7 +919,7 @@ static __init bool get_rdt_mon_resources(void)
if (!ret)
return false;
- return !rdt_get_mon_l3_config(r);
+ return !rdt_get_l3_mon_config(r);
}
static __init void __check_quirks_intel(void)
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 04b8f1e1f314..20605212656c 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -424,7 +424,7 @@ static __init int snc_get_config(void)
return ret;
}
-int __init rdt_get_mon_l3_config(struct rdt_resource *r)
+int __init rdt_get_l3_mon_config(struct rdt_resource *r)
{
unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
index f90609212c86..cbd9dd5656af 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -1774,7 +1774,7 @@ ssize_t mbm_L3_assignments_write(struct kernfs_open_file *of, char *buf,
}
/**
- * resctrl_mon_resource_init() - Initialise global monitoring structures.
+ * resctrl_l3_mon_resource_init() - Initialise global monitoring structures.
*
* Allocate and initialise global monitor resources that do not belong to a
* specific domain. i.e. the rmid_ptrs[] used for the limbo and free lists.
@@ -1783,9 +1783,9 @@ ssize_t mbm_L3_assignments_write(struct kernfs_open_file *of, char *buf,
* Resctrl's cpuhp callbacks may be called before this point to bring a domain
* online.
*
- * Returns 0 for success, or -ENOMEM.
+ * Return: 0 for success, or -ENOMEM.
*/
-int resctrl_mon_resource_init(void)
+int resctrl_l3_mon_resource_init(void)
{
struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
int ret;
@@ -1835,7 +1835,7 @@ int resctrl_mon_resource_init(void)
return 0;
}
-void resctrl_mon_resource_exit(void)
+void resctrl_l3_mon_resource_exit(void)
{
struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
index 2ed435db1923..b57e1e78bbc2 100644
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -4246,7 +4246,7 @@ static void rdtgroup_setup_default(void)
mutex_unlock(&rdtgroup_mutex);
}
-static void domain_destroy_mon_state(struct rdt_l3_mon_domain *d)
+static void domain_destroy_l3_mon_state(struct rdt_l3_mon_domain *d)
{
int idx;
@@ -4301,13 +4301,13 @@ void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *h
cancel_delayed_work(&d->cqm_limbo);
}
- domain_destroy_mon_state(d);
+ domain_destroy_l3_mon_state(d);
out_unlock:
mutex_unlock(&rdtgroup_mutex);
}
/**
- * domain_setup_mon_state() - Initialise domain monitoring structures.
+ * domain_setup_l3_mon_state() - Initialise domain monitoring structures.
* @r: The resource for the newly online domain.
* @d: The newly online domain.
*
@@ -4315,11 +4315,11 @@ void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *h
* Called when the first CPU of a domain comes online, regardless of whether
* the filesystem is mounted.
* During boot this may be called before global allocations have been made by
- * resctrl_mon_resource_init().
+ * resctrl_l3_mon_resource_init().
*
- * Returns 0 for success, or -ENOMEM.
+ * Return: 0 for success, or -ENOMEM.
*/
-static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_l3_mon_domain *d)
+static int domain_setup_l3_mon_state(struct rdt_resource *r, struct rdt_l3_mon_domain *d)
{
u32 idx_limit = resctrl_arch_system_num_rmid_idx();
size_t tsize = sizeof(*d->mbm_states[0]);
@@ -4386,7 +4386,7 @@ int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_domain_hdr *hdr
goto out_unlock;
d = container_of(hdr, struct rdt_l3_mon_domain, hdr);
- err = domain_setup_mon_state(r, d);
+ err = domain_setup_l3_mon_state(r, d);
if (err)
goto out_unlock;
@@ -4503,13 +4503,13 @@ int resctrl_init(void)
io_alloc_init();
- ret = resctrl_mon_resource_init();
+ ret = resctrl_l3_mon_resource_init();
if (ret)
return ret;
ret = sysfs_create_mount_point(fs_kobj, "resctrl");
if (ret) {
- resctrl_mon_resource_exit();
+ resctrl_l3_mon_resource_exit();
return ret;
}
@@ -4544,7 +4544,7 @@ int resctrl_init(void)
cleanup_mountpoint:
sysfs_remove_mount_point(fs_kobj, "resctrl");
- resctrl_mon_resource_exit();
+ resctrl_l3_mon_resource_exit();
return ret;
}
@@ -4580,7 +4580,7 @@ static bool resctrl_online_domains_exist(void)
* When called by the architecture code, all CPUs and resctrl domains must be
* offline. This ensures the limbo and overflow handlers are not scheduled to
* run, meaning the data structures they access can be freed by
- * resctrl_mon_resource_exit().
+ * resctrl_l3_mon_resource_exit().
*
* After resctrl_exit() returns, the architecture code should return an
* error from all resctrl_arch_ functions that can do this.
@@ -4607,5 +4607,5 @@ void resctrl_exit(void)
* it can be used to umount resctrl.
*/
- resctrl_mon_resource_exit();
+ resctrl_l3_mon_resource_exit();
}
--
2.51.1