Message-ID: <8f73c9ec4c9999c262d9297d46a03209a8affe3f.1716552602.git.babu.moger@amd.com>
Date: Fri, 24 May 2024 07:23:22 -0500
From: Babu Moger <babu.moger@....com>
To: <corbet@....net>, <fenghua.yu@...el.com>, <reinette.chatre@...el.com>,
<tglx@...utronix.de>, <mingo@...hat.com>, <bp@...en8.de>,
<dave.hansen@...ux.intel.com>
CC: <x86@...nel.org>, <hpa@...or.com>, <paulmck@...nel.org>,
<rdunlap@...radead.org>, <tj@...nel.org>, <peterz@...radead.org>,
<yanjiewtw@...il.com>, <babu.moger@....com>, <kim.phillips@....com>,
<lukas.bulwahn@...il.com>, <seanjc@...gle.com>, <jmattson@...gle.com>,
<leitao@...ian.org>, <jpoimboe@...nel.org>, <rick.p.edgecombe@...el.com>,
<kirill.shutemov@...ux.intel.com>, <jithu.joseph@...el.com>,
<kai.huang@...el.com>, <kan.liang@...ux.intel.com>,
<daniel.sneddon@...ux.intel.com>, <pbonzini@...hat.com>,
<sandipan.das@....com>, <ilpo.jarvinen@...ux.intel.com>,
<peternewman@...gle.com>, <maciej.wieczor-retman@...el.com>,
<linux-doc@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<eranian@...gle.com>, <james.morse@....com>
Subject: [PATCH v4 03/19] x86/resctrl: Consolidate monitoring related data from rdt_resource
Consolidate all the monitoring related data from rdt_resource into a
separate structure, struct resctrl_mon, for better readability. An
example of the resulting access pattern is shown below.
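For example, callers that previously read r->num_rmid now go through
the new sub-structure (taken from the hunks below):

	/* before */
	threshold = resctrl_rmid_realloc_limit / r->num_rmid;

	/* after */
	threshold = resctrl_rmid_realloc_limit / r->mon.num_rmid;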
Suggested-by: Reinette Chatre <reinette.chatre@...el.com>
Signed-off-by: Babu Moger <babu.moger@....com>
---
v4: New patch.
---
arch/x86/kernel/cpu/resctrl/core.c | 2 +-
arch/x86/kernel/cpu/resctrl/monitor.c | 16 ++++++++--------
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 8 ++++----
include/linux/resctrl.h | 16 ++++++++++++----
4 files changed, 25 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index df1b43fd9f0e..6cf3c887c693 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -535,7 +535,7 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
return;
}
- if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
+ if (r->mon_capable && arch_domain_mbm_alloc(r->mon.num_rmid, hw_dom)) {
domain_free(hw_dom);
return;
}
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 2345e6836593..b35d04fc761b 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -254,11 +254,11 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d)
if (is_mbm_total_enabled())
memset(hw_dom->arch_mbm_total, 0,
- sizeof(*hw_dom->arch_mbm_total) * r->num_rmid);
+ sizeof(*hw_dom->arch_mbm_total) * r->mon.num_rmid);
if (is_mbm_local_enabled())
memset(hw_dom->arch_mbm_local, 0,
- sizeof(*hw_dom->arch_mbm_local) * r->num_rmid);
+ sizeof(*hw_dom->arch_mbm_local) * r->mon.num_rmid);
}
static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
@@ -1004,14 +1004,14 @@ static struct mon_evt mbm_local_event = {
*/
static void l3_mon_evt_init(struct rdt_resource *r)
{
- INIT_LIST_HEAD(&r->evt_list);
+ INIT_LIST_HEAD(&r->mon.evt_list);
if (is_llc_occupancy_enabled())
- list_add_tail(&llc_occupancy_event.list, &r->evt_list);
+ list_add_tail(&llc_occupancy_event.list, &r->mon.evt_list);
if (is_mbm_total_enabled())
- list_add_tail(&mbm_total_event.list, &r->evt_list);
+ list_add_tail(&mbm_total_event.list, &r->mon.evt_list);
if (is_mbm_local_enabled())
- list_add_tail(&mbm_local_event.list, &r->evt_list);
+ list_add_tail(&mbm_local_event.list, &r->mon.evt_list);
}
int __init rdt_get_mon_l3_config(struct rdt_resource *r)
@@ -1023,7 +1023,7 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024;
hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
- r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
+ r->mon.num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;
if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
@@ -1038,7 +1038,7 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
*
* For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
*/
- threshold = resctrl_rmid_realloc_limit / r->num_rmid;
+ threshold = resctrl_rmid_realloc_limit / r->mon.num_rmid;
/*
* Because num_rmid may not be a power of two, round the value
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 02f213f1c51c..7114d58ef1e3 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1098,7 +1098,7 @@ static int rdt_num_rmids_show(struct kernfs_open_file *of,
{
struct rdt_resource *r = of->kn->parent->priv;
- seq_printf(seq, "%d\n", r->num_rmid);
+ seq_printf(seq, "%d\n", r->mon.num_rmid);
return 0;
}
@@ -1109,7 +1109,7 @@ static int rdt_mon_features_show(struct kernfs_open_file *of,
struct rdt_resource *r = of->kn->parent->priv;
struct mon_evt *mevt;
- list_for_each_entry(mevt, &r->evt_list, list) {
+ list_for_each_entry(mevt, &r->mon.evt_list, list) {
seq_printf(seq, "%s\n", mevt->name);
if (mevt->configurable)
seq_printf(seq, "%s_config\n", mevt->name);
@@ -3042,14 +3042,14 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
if (ret)
goto out_destroy;
- if (WARN_ON(list_empty(&r->evt_list))) {
+ if (WARN_ON(list_empty(&r->mon.evt_list))) {
ret = -EPERM;
goto out_destroy;
}
priv.u.rid = r->rid;
priv.u.domid = d->id;
- list_for_each_entry(mevt, &r->evt_list, list) {
+ list_for_each_entry(mevt, &r->mon.evt_list, list) {
priv.u.evtid = mevt->evtid;
ret = mon_addfile(kn, mevt->name, priv.priv);
if (ret)
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index a365f67131ec..bf99eb9c6ce4 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -150,22 +150,31 @@ struct resctrl_membw {
struct rdt_parse_data;
struct resctrl_schema;
+/**
+ * struct resctrl_mon - Monitoring related data
+ * @num_rmid: Number of RMIDs available
+ * @evt_list: List of monitoring events
+ */
+struct resctrl_mon {
+ int num_rmid;
+ struct list_head evt_list;
+};
+
/**
* struct rdt_resource - attributes of a resctrl resource
* @rid: The index of the resource
* @alloc_capable: Is allocation available on this machine
* @mon_capable: Is monitor feature available on this machine
- * @num_rmid: Number of RMIDs available
* @cache_level: Which cache level defines scope of this resource
* @cache: Cache allocation related data
* @membw: If the component has bandwidth controls, their properties.
+ * @mon: Monitoring related data.
* @domains: RCU list of all domains for this resource
* @name: Name to use in "schemata" file.
* @data_width: Character width of data when displaying
* @default_ctrl: Specifies default cache cbm or memory B/W percent.
* @format_str: Per resource format string to show domain value
* @parse_ctrlval: Per resource function pointer to parse control values
- * @evt_list: List of monitoring events
* @fflags: flags to choose base and info files
* @cdp_capable: Is the CDP feature available on this resource
*/
@@ -173,10 +182,10 @@ struct rdt_resource {
int rid;
bool alloc_capable;
bool mon_capable;
- int num_rmid;
int cache_level;
struct resctrl_cache cache;
struct resctrl_membw membw;
+ struct resctrl_mon mon;
struct list_head domains;
char *name;
int data_width;
@@ -185,7 +194,6 @@ struct rdt_resource {
int (*parse_ctrlval)(struct rdt_parse_data *data,
struct resctrl_schema *s,
struct rdt_domain *d);
- struct list_head evt_list;
unsigned long fflags;
bool cdp_capable;
};
--
2.34.1