Message-Id: <20251205215901.17772-20-james.morse@arm.com>
Date: Fri, 5 Dec 2025 21:58:42 +0000
From: James Morse <james.morse@....com>
To: linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org
Cc: James Morse <james.morse@....com>,
D Scott Phillips OS <scott@...amperecomputing.com>,
carl@...amperecomputing.com,
lcherian@...vell.com,
bobo.shaobowang@...wei.com,
tan.shaopeng@...itsu.com,
baolin.wang@...ux.alibaba.com,
Jamie Iles <quic_jiles@...cinc.com>,
Xin Hao <xhao@...ux.alibaba.com>,
peternewman@...gle.com,
dfustini@...libre.com,
amitsinght@...vell.com,
David Hildenbrand <david@...nel.org>,
Dave Martin <dave.martin@....com>,
Koba Ko <kobak@...dia.com>,
Shanker Donthineni <sdonthineni@...dia.com>,
fenghuay@...dia.com,
baisheng.gao@...soc.com,
Jonathan Cameron <jonathan.cameron@...wei.com>,
Gavin Shan <gshan@...hat.com>,
Ben Horgan <ben.horgan@....com>,
rohit.mathew@....com,
reinette.chatre@...el.com,
Punit Agrawal <punit.agrawal@....qualcomm.com>
Subject: [RFC PATCH 19/38] arm_mpam: resctrl: pick classes for use as MBM counters

resctrl has two types of counters: NUMA-local and global. MPAM only has
bandwidth counters, but the position of the MSC in the system may mean it
counts NUMA-local or global traffic. That topology information is not
available to the driver.

Apply a heuristic: if the L2 or L3 supports bandwidth monitors, these are
probably NUMA-local. If the memory controller supports bandwidth monitors,
those are probably global.
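
Sketched as a hypothetical helper (pick_mbm_event() does not exist; the
patch open-codes this in mpam_resctrl_pick_counters()), the mapping is
roughly:

  static enum resctrl_event_id pick_mbm_event(struct mpam_class *class)
  {
          switch (class->type) {
          case MPAM_CLASS_CACHE:          /* L2/L3 counters: NUMA-local */
                  return QOS_L3_MBM_LOCAL_EVENT_ID;
          case MPAM_CLASS_MEMORY:         /* memory-side counters: global */
                  return QOS_L3_MBM_TOTAL_EVENT_ID;
          default:                        /* not usable as an MBM counter */
                  return QOS_NUM_EVENTS;
          }
  }
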
This also allows us to assert that we don't have the same class
backing two different resctrl events.

Because the class or component backing the event may not be 'the L3',
mpam_resctrl_get_domain_from_cpu() needs to search the monitor domains
too. This matters most on 'monitor only' systems, where 'the L3' control
domain list may be empty and the ctrl_comp pointer NULL.
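
The resulting lookup order, sketched with hypothetical helper names (the
real code adds mpam_resctrl_get_mon_domain_from_cpu() below):

  dom = ctrl_domain_for_cpu(res, cpu);
  if (dom)
          return dom;
  /* Monitor-only systems: 'the L3' may have no control domains at all */
  if (res->resctrl_res.rid == RDT_RESOURCE_L3)
          return mon_domain_for_cpu(cpu); /* keyed by the CPU's L3 cache-id */
  return NULL;
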
resctrl expects there to be enough monitors for every possible control
and monitor group to have one. Such a system gets called 'free running'
as the monitors can be programmed once and left running.
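
As a sketch (the helper below is hypothetical; the check added by this
patch returns the usable index count rather than a bool):

  /*
   * Free running: at least one MBWU monitor per PARTID * PMG index,
   * so every monitor can be programmed once and left counting.
   */
  static bool mbwu_free_running(u16 num_mbwu_mon)
  {
          return num_mbwu_mon >= resctrl_arch_system_num_rmid_idx();
  }
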
Any other platform will need to emulate ABMC.

Signed-off-by: James Morse <james.morse@....com>
---
drivers/resctrl/mpam_internal.h | 8 ++
drivers/resctrl/mpam_resctrl.c | 141 ++++++++++++++++++++++++++++++--
2 files changed, 144 insertions(+), 5 deletions(-)
diff --git a/drivers/resctrl/mpam_internal.h b/drivers/resctrl/mpam_internal.h
index f9d2a1004c32..0984ac32f303 100644
--- a/drivers/resctrl/mpam_internal.h
+++ b/drivers/resctrl/mpam_internal.h
@@ -339,6 +339,14 @@ struct mpam_msc_ris {
struct mpam_resctrl_dom {
struct mpam_component *ctrl_comp;
+
+ /*
+ * There is no single mon_comp because different events may be backed
+ * by different class/components. mon_comp is indexed by the event
+ * number.
+ */
+ struct mpam_component *mon_comp[QOS_NUM_EVENTS];
+
struct rdt_ctrl_domain resctrl_ctrl_dom;
struct rdt_mon_domain resctrl_mon_dom;
};
diff --git a/drivers/resctrl/mpam_resctrl.c b/drivers/resctrl/mpam_resctrl.c
index fc1f054f187e..9978eb48c1f4 100644
--- a/drivers/resctrl/mpam_resctrl.c
+++ b/drivers/resctrl/mpam_resctrl.c
@@ -50,6 +50,14 @@ static bool exposed_mon_capable;
*/
static bool cdp_enabled;
+/* Number of usable indexes if num_mbwu_mon allows a free-running system, else 0 */
+static int __mpam_monitors_free_running(u16 num_mbwu_mon)
+{
+ if (num_mbwu_mon >= resctrl_arch_system_num_rmid_idx())
+ return resctrl_arch_system_num_rmid_idx();
+ return 0;
+}
+
bool resctrl_arch_alloc_capable(void)
{
return exposed_alloc_capable;
@@ -290,6 +298,26 @@ static bool cache_has_usable_csu(struct mpam_class *class)
return (mpam_partid_max > 1) || (mpam_pmg_max != 0);
}
+static bool class_has_usable_mbwu(struct mpam_class *class)
+{
+ struct mpam_props *cprops = &class->props;
+
+ if (!mpam_has_feature(mpam_feat_msmon_mbwu, cprops))
+ return false;
+
+ /*
+ * resctrl expects the bandwidth counters to be free running,
+ * which means we need as many monitors as resctrl has
+ * control/monitor groups.
+ */
+ if (__mpam_monitors_free_running(cprops->num_mbwu_mon)) {
+ pr_debug("monitors usable in free-running mode\n");
+ return true;
+ }
+
+ return false;
+}
+
/*
* Calculate the worst-case percentage change from each implemented step
* in the control.
@@ -554,7 +582,7 @@ static void counter_update_class(enum resctrl_event_id evt_id,
static void mpam_resctrl_pick_counters(void)
{
struct mpam_class *class;
- bool has_csu;
+ bool has_csu, has_mbwu;
lockdep_assert_cpus_held();
@@ -586,7 +614,37 @@ static void mpam_resctrl_pick_counters(void)
return;
}
}
+
+ has_mbwu = class_has_usable_mbwu(class);
+ if (has_mbwu && topology_matches_l3(class)) {
+ pr_debug("class %u has usable MBWU, and matches L3 topology",
+ class->level);
+
+ /*
+ * MBWU counters may be 'local' or 'total' depending on
+ * where they are in the topology. Counters on caches
+ * are assumed to be local. If it's on the memory
+ * controller, its assumed to be global.
+ */
+ switch (class->type) {
+ case MPAM_CLASS_CACHE:
+ counter_update_class(QOS_L3_MBM_LOCAL_EVENT_ID,
+ class);
+ break;
+ case MPAM_CLASS_MEMORY:
+ counter_update_class(QOS_L3_MBM_TOTAL_EVENT_ID,
+ class);
+ break;
+ default:
+ break;
+ }
+ }
}
+
+ /* Allocation of MBWU monitors assumes that the class is unique... */
+ if (mpam_resctrl_counters[QOS_L3_MBM_LOCAL_EVENT_ID].class)
+ WARN_ON_ONCE(mpam_resctrl_counters[QOS_L3_MBM_LOCAL_EVENT_ID].class ==
+ mpam_resctrl_counters[QOS_L3_MBM_TOTAL_EVENT_ID].class);
}
static int mpam_resctrl_control_init(struct mpam_resctrl_res *res,
@@ -910,6 +968,20 @@ static bool mpam_resctrl_offline_domain_hdr(unsigned int cpu,
return false;
}
+static struct mpam_component *find_component(struct mpam_class *victim, int cpu)
+{
+ struct mpam_component *victim_comp;
+
+ guard(srcu)(&mpam_srcu);
+ list_for_each_entry_srcu(victim_comp, &victim->components, class_list,
+ srcu_read_lock_held(&mpam_srcu)) {
+ if (cpumask_test_cpu(cpu, &victim_comp->affinity))
+ return victim_comp;
+ }
+
+ return NULL;
+}
+
static struct mpam_resctrl_dom *
mpam_resctrl_alloc_domain(unsigned int cpu, struct mpam_resctrl_res *res)
{
@@ -959,8 +1031,32 @@ mpam_resctrl_alloc_domain(unsigned int cpu, struct mpam_resctrl_res *res)
}
if (exposed_mon_capable) {
+ int i;
+ struct mpam_component *mon_comp, *any_mon_comp = NULL;
+
+ /*
+ * Even if the monitor domain is backed by a different
+ * component, the L3 component IDs need to be used... only
+ * there may be no ctrl_comp for the L3.
+ * Search each event's class list for a component with
+ * overlapping CPUs and set up the dom->mon_comp array.
+ */
+ for (i = 0; i < QOS_NUM_EVENTS; i++) {
+ struct mpam_resctrl_mon *mon;
+
+ mon = &mpam_resctrl_counters[i];
+ if (!mon->class)
+ continue; /* dummy resource */
+
+ mon_comp = find_component(mon->class, cpu);
+ dom->mon_comp[i] = mon_comp;
+ if (mon_comp)
+ any_mon_comp = mon_comp;
+ }
+ WARN_ON_ONCE(!any_mon_comp);
+
mon_d = &dom->resctrl_mon_dom;
- mpam_resctrl_domain_hdr_init(cpu, ctrl_comp, &mon_d->hdr);
+ mpam_resctrl_domain_hdr_init(cpu, any_mon_comp, &mon_d->hdr);
mon_d->hdr.type = RESCTRL_MON_DOMAIN;
/* TODO: this list should be sorted */
list_add_tail_rcu(&mon_d->hdr.list, &r->mon_domains);
@@ -982,16 +1078,47 @@ mpam_resctrl_alloc_domain(unsigned int cpu, struct mpam_resctrl_res *res)
return dom;
}
+/*
+ * We know all the monitors are associated with the L3, even if there are no
+ * controls and therefore no control component. Find the cache-id for the CPU
+ * and use that to search for existing resctrl domains.
+ * This relies on mpam_resctrl_pick_domain_id() using the L3 cache-id
+ * for anything that is not a cache.
+ */
+static struct mpam_resctrl_dom *mpam_resctrl_get_mon_domain_from_cpu(int cpu)
+{
+ u32 cache_id;
+ struct rdt_mon_domain *mon_d;
+ struct mpam_resctrl_dom *dom;
+ struct mpam_resctrl_res *l3 = &mpam_resctrl_controls[RDT_RESOURCE_L3];
+
+ if (!l3->class)
+ return NULL;
+ /* TODO: how does this order with cacheinfo updates under cpuhp? */
+ cache_id = get_cpu_cacheinfo_id(cpu, 3);
+ if (cache_id == ~0)
+ return NULL;
+
+ list_for_each_entry(mon_d, &l3->resctrl_res.mon_domains, hdr.list) {
+ dom = container_of(mon_d, struct mpam_resctrl_dom, resctrl_mon_dom);
+
+ if (mon_d->hdr.id == cache_id)
+ return dom;
+ }
+
+ return NULL;
+}
+
static struct mpam_resctrl_dom *
mpam_resctrl_get_domain_from_cpu(int cpu, struct mpam_resctrl_res *res)
{
struct mpam_resctrl_dom *dom;
struct rdt_ctrl_domain *ctrl_d;
+ struct rdt_resource *r = &res->resctrl_res;
lockdep_assert_cpus_held();
- list_for_each_entry_rcu(ctrl_d, &res->resctrl_res.ctrl_domains,
- hdr.list) {
+ list_for_each_entry_rcu(ctrl_d, &r->ctrl_domains, hdr.list) {
dom = container_of(ctrl_d, struct mpam_resctrl_dom,
resctrl_ctrl_dom);
@@ -999,7 +1126,11 @@ mpam_resctrl_get_domain_from_cpu(int cpu, struct mpam_resctrl_res *res)
return dom;
}
- return NULL;
+ if (r->rid != RDT_RESOURCE_L3)
+ return NULL;
+
+ /* Search the mon domain list too - needed on monitor only platforms. */
+ return mpam_resctrl_get_mon_domain_from_cpu(cpu);
}
int mpam_resctrl_online_cpu(unsigned int cpu)
--
2.39.5