Message-ID: <619c9cbb6b1525a2d4a46a042384e6771800d61c.1747349530.git.babu.moger@amd.com>
Date: Thu, 15 May 2025 17:52:01 -0500
From: Babu Moger <babu.moger@....com>
To: <corbet@....net>, <tony.luck@...el.com>, <reinette.chatre@...el.com>,
<tglx@...utronix.de>, <mingo@...hat.com>, <bp@...en8.de>,
<dave.hansen@...ux.intel.com>
CC: <james.morse@....com>, <dave.martin@....com>, <fenghuay@...dia.com>,
<x86@...nel.org>, <hpa@...or.com>, <paulmck@...nel.org>,
<akpm@...ux-foundation.org>, <thuth@...hat.com>, <rostedt@...dmis.org>,
<ardb@...nel.org>, <gregkh@...uxfoundation.org>,
<daniel.sneddon@...ux.intel.com>, <jpoimboe@...nel.org>,
<alexandre.chartre@...cle.com>, <pawan.kumar.gupta@...ux.intel.com>,
<thomas.lendacky@....com>, <perry.yuan@....com>, <seanjc@...gle.com>,
<kai.huang@...el.com>, <xiaoyao.li@...el.com>, <babu.moger@....com>,
<kan.liang@...ux.intel.com>, <xin3.li@...el.com>, <ebiggers@...gle.com>,
<xin@...or.com>, <sohil.mehta@...el.com>, <andrew.cooper3@...rix.com>,
<mario.limonciello@....com>, <linux-doc@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <peternewman@...gle.com>,
<maciej.wieczor-retman@...el.com>, <eranian@...gle.com>,
<Xiaojian.Du@....com>, <gautham.shenoy@....com>
Subject: [PATCH v13 16/27] x86/resctrl: Pass entire struct rdtgroup rather than passing individual members

The mbm_cntr_assign mode requires a cntr_id to read event data. The
cntr_id is retrieved via mbm_cntr_get(), which takes a struct rdtgroup
as a parameter.

Passing the full rdtgroup also provides access to closid and rmid, both
of which are necessary to read monitoring events.

Refactor the code to pass the entire struct rdtgroup instead of
individual members in preparation for this requirement.
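
For illustration only (not part of this patch): a minimal, self-contained
sketch of the pattern. The simplified struct rdtgroup/mongroup layout and
the count_event_old()/count_event_new() helpers below are stand-ins made
up for this example, not the kernel definitions.

	#include <stdio.h>

	struct mongroup {
		unsigned int rmid;
	};

	struct rdtgroup {
		unsigned int closid;
		struct mongroup mon;
	};

	/* Before: callers pass the individual members. */
	static int count_event_old(unsigned int closid, unsigned int rmid)
	{
		printf("read event for closid=%u rmid=%u\n", closid, rmid);
		return 0;
	}

	/*
	 * After: callers pass the group; closid/rmid are derived inside,
	 * and the whole group stays available for a later counter lookup.
	 */
	static int count_event_new(struct rdtgroup *rdtgrp)
	{
		unsigned int closid = rdtgrp->closid;
		unsigned int rmid = rdtgrp->mon.rmid;

		printf("read event for closid=%u rmid=%u\n", closid, rmid);
		return 0;
	}

	int main(void)
	{
		struct rdtgroup grp = { .closid = 3, .mon = { .rmid = 7 } };

		count_event_old(grp.closid, grp.mon.rmid);
		count_event_new(&grp);
		return 0;
	}

Passing the group keeps the callers' signatures stable when a later
change needs more per-group state, such as the counter id assigned in
mbm_cntr_assign mode.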
Suggested-by: Reinette Chatre <reinette.chatre@...el.com>
Signed-off-by: Babu Moger <babu.moger@....com>
---
v13: New patch to pass the entire struct rdtgroup to __mon_event_count(),
mbm_update(), and related functions.
---
fs/resctrl/monitor.c | 29 ++++++++++++++++-------------
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
index c98a61bde179..a477be9cdb66 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -357,9 +357,11 @@ static struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid,
}
}
-static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
+static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
{
int cpu = smp_processor_id();
+ u32 closid = rdtgrp->closid;
+ u32 rmid = rdtgrp->mon.rmid;
struct rdt_mon_domain *d;
struct mbm_state *m;
int err, ret;
@@ -428,9 +430,11 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
* __mon_event_count() is compared with the chunks value from the previous
* invocation. This must be called once per second to maintain values in MBps.
*/
-static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr)
+static void mbm_bw_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
{
u64 cur_bw, bytes, cur_bytes;
+ u32 closid = rdtgrp->closid;
+ u32 rmid = rdtgrp->mon.rmid;
struct mbm_state *m;
m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
@@ -459,7 +463,7 @@ void mon_event_count(void *info)
rdtgrp = rr->rgrp;
- ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr);
+ ret = __mon_event_count(rdtgrp, rr);
/*
* For Ctrl groups read data from child monitor groups and
@@ -470,8 +474,7 @@ void mon_event_count(void *info)
if (rdtgrp->type == RDTCTRL_GROUP) {
list_for_each_entry(entry, head, mon.crdtgrp_list) {
- if (__mon_event_count(entry->closid, entry->mon.rmid,
- rr) == 0)
+ if (__mon_event_count(entry, rr) == 0)
ret = 0;
}
}
@@ -602,7 +605,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_mon_domain *dom_mbm)
}
static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *d,
- u32 closid, u32 rmid, enum resctrl_event_id evtid)
+ struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
{
struct rmid_read rr = {0};
@@ -616,30 +619,30 @@ static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *
return;
}
- __mon_event_count(closid, rmid, &rr);
+ __mon_event_count(rdtgrp, &rr);
/*
* If the software controller is enabled, compute the
* bandwidth for this event id.
*/
if (is_mba_sc(NULL))
- mbm_bw_count(closid, rmid, &rr);
+ mbm_bw_count(rdtgrp, &rr);
resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
}
static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d,
- u32 closid, u32 rmid)
+ struct rdtgroup *rdtgrp)
{
/*
* This is protected from concurrent reads from user as both
* the user and overflow handler hold the global mutex.
*/
if (resctrl_arch_is_mbm_total_enabled())
- mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_TOTAL_EVENT_ID);
+ mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID);
if (resctrl_arch_is_mbm_local_enabled())
- mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_LOCAL_EVENT_ID);
+ mbm_update_one_event(r, d, rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID);
}
/*
@@ -712,11 +715,11 @@ void mbm_handle_overflow(struct work_struct *work)
d = container_of(work, struct rdt_mon_domain, mbm_over.work);
list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
- mbm_update(r, d, prgrp->closid, prgrp->mon.rmid);
+ mbm_update(r, d, prgrp);
head = &prgrp->mon.crdtgrp_list;
list_for_each_entry(crgrp, head, mon.crdtgrp_list)
- mbm_update(r, d, crgrp->closid, crgrp->mon.rmid);
+ mbm_update(r, d, crgrp);
if (is_mba_sc(NULL))
update_mba_bw(prgrp, d);
--
2.34.1