Message-ID: <20251219181147.3404071-35-ben.horgan@arm.com>
Date: Fri, 19 Dec 2025 18:11:36 +0000
From: Ben Horgan <ben.horgan@....com>
To: ben.horgan@....com
Cc: amitsinght@...vell.com,
baisheng.gao@...soc.com,
baolin.wang@...ux.alibaba.com,
carl@...amperecomputing.com,
dave.martin@....com,
david@...nel.org,
dfustini@...libre.com,
fenghuay@...dia.com,
gshan@...hat.com,
james.morse@....com,
jonathan.cameron@...wei.com,
kobak@...dia.com,
lcherian@...vell.com,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org,
peternewman@...gle.com,
punit.agrawal@....qualcomm.com,
quic_jiles@...cinc.com,
reinette.chatre@...el.com,
rohit.mathew@....com,
scott@...amperecomputing.com,
sdonthineni@...dia.com,
tan.shaopeng@...itsu.com,
xhao@...ux.alibaba.com,
catalin.marinas@....com,
will@...nel.org,
corbet@....net,
maz@...nel.org,
oupton@...nel.org,
joey.gouly@....com,
suzuki.poulose@....com,
kvmarm@...ts.linux.dev
Subject: [PATCH v2 34/45] arm_mpam: resctrl: Add resctrl_arch_rmid_read() and resctrl_arch_reset_rmid()
From: James Morse <james.morse@....com>
resctrl uses resctrl_arch_rmid_read() to read counters. With CDP
emulation a counter may need reading in any of three ways: against the
code PARTID, against the data PARTID, or with the PARTID unchanged when
CDP is disabled. The same applies to resetting counters.
The helpers behind the resctrl_arch_ functions will be re-used for the ABMC
equivalent functions.
Add the rounding helper for checking monitor values while we're here.
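For illustration only (not part of this patch), a minimal sketch of how a
resctrl filesystem caller is expected to drive these interfaces. The wrapper
name and locals are hypothetical, and it assumes resctrl_arch_mon_ctx_alloc()
reports failure via ERR_PTR(), as the resctrl core expects:

  static int example_read_llc_occupancy(struct rdt_resource *r,
                                        struct rdt_mon_domain *d,
                                        u32 closid, u32 rmid, u64 *bytes)
  {
          void *ctx;
          int err;

          /* Reserve a hardware monitor for this read. */
          ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
          if (IS_ERR(ctx))
                  return PTR_ERR(ctx);

          /* The CDP-enabled path adds code and data counts into *bytes. */
          *bytes = 0;
          err = resctrl_arch_rmid_read(r, d, closid, rmid,
                                       QOS_L3_OCCUP_EVENT_ID, bytes, ctx);

          resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, ctx);
          return err;
  }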
Signed-off-by: James Morse <james.morse@....com>
Signed-off-by: Ben Horgan <ben.horgan@....com>
---
Changes since rfc:
cfg initialisation style
code flow at end of read_mon_cdp_safe()
---
drivers/resctrl/mpam_resctrl.c | 159 +++++++++++++++++++++++++++++++++
include/linux/arm_mpam.h | 5 ++
2 files changed, 164 insertions(+)
diff --git a/drivers/resctrl/mpam_resctrl.c b/drivers/resctrl/mpam_resctrl.c
index 638c84cdcfb6..8a1263986145 100644
--- a/drivers/resctrl/mpam_resctrl.c
+++ b/drivers/resctrl/mpam_resctrl.c
@@ -354,6 +354,165 @@ void resctrl_arch_mon_ctx_free(struct rdt_resource *r,
resctrl_arch_mon_ctx_free_no_wait(evtid, mon_idx);
}
+static int
+__read_mon(struct mpam_resctrl_mon *mon, struct mpam_component *mon_comp,
+ enum mpam_device_features mon_type,
+ int mon_idx,
+ enum resctrl_conf_type cdp_type, u32 closid, u32 rmid, u64 *val)
+{
+ struct mon_cfg cfg;
+
+ if (!mpam_is_enabled())
+ return -EINVAL;
+
+ /* Shift closid to account for CDP */
+ closid = resctrl_get_config_index(closid, cdp_type);
+
+ if (mon_idx == USE_PRE_ALLOCATED) {
+ int mbwu_idx = resctrl_arch_rmid_idx_encode(closid, rmid);
+
+ mon_idx = mon->mbwu_idx_to_mon[mbwu_idx];
+ if (mon_idx == -1) {
+ if (mpam_resctrl_abmc_enabled()) {
+ /* Report Unassigned */
+ return -ENOENT;
+ }
+ /* Report Unavailable */
+ return -EINVAL;
+ }
+ }
+
+ if (irqs_disabled()) {
+ /* Reading may need an IPI; that can't happen with IRQs disabled */
+ return -EIO;
+ }
+
+ cfg = (struct mon_cfg) {
+ .mon = mon_idx,
+ .match_pmg = true,
+ .partid = closid,
+ .pmg = rmid,
+ };
+
+ return mpam_msmon_read(mon_comp, &cfg, mon_type, val);
+}
+
+static int read_mon_cdp_safe(struct mpam_resctrl_mon *mon, struct mpam_component *mon_comp,
+ enum mpam_device_features mon_type,
+ int mon_idx, u32 closid, u32 rmid, u64 *val)
+{
+ if (cdp_enabled) {
+ u64 code_val = 0, data_val = 0;
+ int err;
+
+ err = __read_mon(mon, mon_comp, mon_type, mon_idx,
+ CDP_CODE, closid, rmid, &code_val);
+ if (err)
+ return err;
+
+ err = __read_mon(mon, mon_comp, mon_type, mon_idx,
+ CDP_DATA, closid, rmid, &data_val);
+ if (err)
+ return err;
+
+ *val += code_val + data_val;
+ return 0;
+ }
+
+ return __read_mon(mon, mon_comp, mon_type, mon_idx,
+ CDP_NONE, closid, rmid, val);
+}
+
+/* MBWU when not in ABMC mode, and CSU counters. */
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid, enum resctrl_event_id eventid,
+ u64 *val, void *arch_mon_ctx)
+{
+ struct mpam_resctrl_dom *l3_dom;
+ struct mpam_component *mon_comp;
+ u32 mon_idx = *(u32 *)arch_mon_ctx;
+ enum mpam_device_features mon_type;
+ struct mpam_resctrl_mon *mon = &mpam_resctrl_counters[eventid];
+
+ resctrl_arch_rmid_read_context_check();
+
+ if (eventid >= QOS_NUM_EVENTS || !mon->class)
+ return -EINVAL;
+
+ l3_dom = container_of(d, struct mpam_resctrl_dom, resctrl_mon_dom);
+ mon_comp = l3_dom->mon_comp[eventid];
+
+ switch (eventid) {
+ case QOS_L3_OCCUP_EVENT_ID:
+ mon_type = mpam_feat_msmon_csu;
+ break;
+ case QOS_L3_MBM_LOCAL_EVENT_ID:
+ case QOS_L3_MBM_TOTAL_EVENT_ID:
+ mon_type = mpam_feat_msmon_mbwu;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return read_mon_cdp_safe(mon, mon_comp, mon_type, mon_idx,
+ closid, rmid, val);
+}
+
+static void __reset_mon(struct mpam_resctrl_mon *mon, struct mpam_component *mon_comp,
+ int mon_idx,
+ enum resctrl_conf_type cdp_type, u32 closid, u32 rmid)
+{
+ struct mon_cfg cfg = { };
+
+ if (!mpam_is_enabled())
+ return;
+
+ /* Shift closid to account for CDP */
+ closid = resctrl_get_config_index(closid, cdp_type);
+
+ if (mon_idx == USE_PRE_ALLOCATED) {
+ int mbwu_idx = resctrl_arch_rmid_idx_encode(closid, rmid);
+ mon_idx = mon->mbwu_idx_to_mon[mbwu_idx];
+ }
+
+ if (mon_idx == -1)
+ return;
+ cfg.mon = mon_idx;
+ mpam_msmon_reset_mbwu(mon_comp, &cfg);
+}
+
+static void reset_mon_cdp_safe(struct mpam_resctrl_mon *mon, struct mpam_component *mon_comp,
+ int mon_idx, u32 closid, u32 rmid)
+{
+ if (cdp_enabled) {
+ __reset_mon(mon, mon_comp, mon_idx, CDP_CODE, closid, rmid);
+ __reset_mon(mon, mon_comp, mon_idx, CDP_DATA, closid, rmid);
+ } else {
+ __reset_mon(mon, mon_comp, mon_idx, CDP_NONE, closid, rmid);
+ }
+}
+
+/* Called via IPI. Call with cpus_read_lock() held. */
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid, enum resctrl_event_id eventid)
+{
+ struct mpam_resctrl_dom *l3_dom;
+ struct mpam_component *mon_comp;
+ struct mpam_resctrl_mon *mon = &mpam_resctrl_counters[eventid];
+
+ if (!mpam_is_enabled())
+ return;
+
+ /* Only MBWU counters can be reset, and only for supported event types. */
+ if (eventid == QOS_L3_OCCUP_EVENT_ID || !mon->class)
+ return;
+
+ l3_dom = container_of(d, struct mpam_resctrl_dom, resctrl_mon_dom);
+ mon_comp = l3_dom->mon_comp[eventid];
+
+ reset_mon_cdp_safe(mon, mon_comp, USE_PRE_ALLOCATED, closid, rmid);
+}
+
static bool cache_has_usable_cpor(struct mpam_class *class)
{
struct mpam_props *cprops = &class->props;
diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h
index e1461e32af75..86d5e326d2bd 100644
--- a/include/linux/arm_mpam.h
+++ b/include/linux/arm_mpam.h
@@ -67,6 +67,11 @@ struct rdt_resource;
void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, enum resctrl_event_id evtid);
void resctrl_arch_mon_ctx_free(struct rdt_resource *r, enum resctrl_event_id evtid, void *ctx);
+static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
+{
+ return val;
+}
+
/**
* mpam_register_requestor() - Register a requestor with the MPAM driver
* @partid_max: The maximum PARTID value the requestor can generate.
--
2.43.0
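Again for illustration only: resctrl_arch_reset_rmid() above expects to run on
a CPU in the domain (via IPI) with cpus_read_lock() held. A minimal sketch of
a hypothetical caller follows; the example_* names are made up, and it assumes
the domain's CPUs are reachable through d->hdr.cpu_mask as in the current
resctrl headers:

  struct example_reset_args {
          struct rdt_resource *r;
          struct rdt_mon_domain *d;
          u32 closid;
          u32 rmid;
          enum resctrl_event_id evtid;
  };

  static void example_reset_on_cpu(void *info)
  {
          struct example_reset_args *a = info;

          resctrl_arch_reset_rmid(a->r, a->d, a->closid, a->rmid, a->evtid);
  }

  static void example_reset_mbm_local(struct rdt_resource *r,
                                      struct rdt_mon_domain *d,
                                      u32 closid, u32 rmid)
  {
          struct example_reset_args args = {
                  .r = r, .d = d, .closid = closid, .rmid = rmid,
                  .evtid = QOS_L3_MBM_LOCAL_EVENT_ID,
          };

          cpus_read_lock();
          /* Run the reset on one CPU that can reach this monitoring domain. */
          smp_call_function_any(&d->hdr.cpu_mask, example_reset_on_cpu,
                                &args, 1);
          cpus_read_unlock();
  }

smp_call_function_any() runs the callback on one online CPU from the mask,
which matches the "called via IPI" expectation noted in the patch.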