[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <84e5cffb2b3f8088f77781babe89b0cab513ed79.1749848715.git.babu.moger@amd.com>
Date: Fri, 13 Jun 2025 16:05:05 -0500
From: Babu Moger <babu.moger@....com>
To: <babu.moger@....com>, <corbet@....net>, <tony.luck@...el.com>,
<reinette.chatre@...el.com>, <Dave.Martin@....com>, <james.morse@....com>,
<tglx@...utronix.de>, <mingo@...hat.com>, <bp@...en8.de>,
<dave.hansen@...ux.intel.com>
CC: <x86@...nel.org>, <hpa@...or.com>, <akpm@...ux-foundation.org>,
<rostedt@...dmis.org>, <paulmck@...nel.org>, <thuth@...hat.com>,
<ardb@...nel.org>, <gregkh@...uxfoundation.org>, <seanjc@...gle.com>,
<thomas.lendacky@....com>, <pawan.kumar.gupta@...ux.intel.com>,
<manali.shukla@....com>, <perry.yuan@....com>, <kai.huang@...el.com>,
<peterz@...radead.org>, <xiaoyao.li@...el.com>, <kan.liang@...ux.intel.com>,
<mario.limonciello@....com>, <xin3.li@...el.com>, <gautham.shenoy@....com>,
<xin@...or.com>, <chang.seok.bae@...el.com>, <fenghuay@...dia.com>,
<peternewman@...gle.com>, <maciej.wieczor-retman@...el.com>,
<eranian@...gle.com>, <linux-doc@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
Subject: [PATCH v14 21/32] fs/resctrl: Pass entire struct rdtgroup rather than passing individual members
Reading the monitoring data requires RMID, CLOSID, and event ID, among
other parameters. These are passed individually, resulting in
architecture-specific function calls.
Passing the pointer to the full rdtgroup structure simplifies access to
these parameters.
Additionally, when "mbm_event" mode is enabled, a counter ID is required
to read the event. The counter ID is obtained through mbm_cntr_get(),
which expects a struct rdtgroup pointer.
Refactor the code to pass a pointer to struct rdtgroup instead of
individual members in preparation for this requirement.
Suggested-by: Reinette Chatre <reinette.chatre@...el.com>
Signed-off-by: Babu Moger <babu.moger@....com>
---
v14: A few text updates to the commit log.
v13: New patch to pass the entire struct rdtgroup to __mon_event_count(),
mbm_update(), and related functions.
---
fs/resctrl/monitor.c | 29 ++++++++++++++++-------------
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
index cf7f6a22ea51..31e08d891db2 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -356,9 +356,11 @@ static struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid,
return state ? &state[idx] : NULL;
}
-static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
+static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
{
int cpu = smp_processor_id();
+ u32 closid = rdtgrp->closid;
+ u32 rmid = rdtgrp->mon.rmid;
struct rdt_mon_domain *d;
struct cacheinfo *ci;
struct mbm_state *m;
@@ -429,9 +431,11 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
* __mon_event_count() is compared with the chunks value from the previous
* invocation. This must be called once per second to maintain values in MBps.
*/
-static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr)
+static void mbm_bw_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
{
u64 cur_bw, bytes, cur_bytes;
+ u32 closid = rdtgrp->closid;
+ u32 rmid = rdtgrp->mon.rmid;
struct mbm_state *m;
m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
@@ -460,7 +464,7 @@ void mon_event_count(void *info)
rdtgrp = rr->rgrp;
- ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr);
+ ret = __mon_event_count(rdtgrp, rr);
/*
* For Ctrl groups read data from child monitor groups and
@@ -471,8 +475,7 @@ void mon_event_count(void *info)
if (rdtgrp->type == RDTCTRL_GROUP) {
list_for_each_entry(entry, head, mon.crdtgrp_list) {
- if (__mon_event_count(entry->closid, entry->mon.rmid,
- rr) == 0)
+ if (__mon_event_count(entry, rr) == 0)
ret = 0;
}
}
@@ -603,7 +606,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_mon_domain *dom_mbm)
}
static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *d,
- u32 closid, u32 rmid, enum resctrl_event_id evtid)
+ struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
{
struct rmid_read rr = {0};
@@ -617,30 +620,30 @@ static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *
return;
}
- __mon_event_count(closid, rmid, &rr);
+ __mon_event_count(rdtgrp, &rr);
/*
* If the software controller is enabled, compute the
* bandwidth for this event id.
*/
if (is_mba_sc(NULL))
- mbm_bw_count(closid, rmid, &rr);
+ mbm_bw_count(rdtgrp, &rr);
resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
}
static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d,
- u32 closid, u32 rmid)
+ struct rdtgroup *rdtgrp)
{
/*
* This is protected from concurrent reads from user as both
* the user and overflow handler hold the global mutex.
*/
if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
- mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_TOTAL_EVENT_ID);
+ mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID);
if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
- mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_LOCAL_EVENT_ID);
+ mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID);
}
/*
@@ -713,11 +716,11 @@ void mbm_handle_overflow(struct work_struct *work)
d = container_of(work, struct rdt_mon_domain, mbm_over.work);
list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
- mbm_update(r, d, prgrp->closid, prgrp->mon.rmid);
+ mbm_update(r, d, prgrp);
head = &prgrp->mon.crdtgrp_list;
list_for_each_entry(crgrp, head, mon.crdtgrp_list)
- mbm_update(r, d, crgrp->closid, crgrp->mon.rmid);
+ mbm_update(r, d, crgrp);
if (is_mba_sc(NULL))
update_mba_bw(prgrp, d);
--
2.34.1
Powered by blists - more mailing lists