Message-ID: <175793565675.709179.10332281384410397406.tip-bot2@tip-bot2>
Date: Mon, 15 Sep 2025 11:27:36 -0000
From: "tip-bot2 for Babu Moger" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Babu Moger <babu.moger@....com>, "Borislav Petkov (AMD)" <bp@...en8.de>,
Reinette Chatre <reinette.chatre@...el.com>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: x86/cache] fs/resctrl: Add the functionality to unassign MBM events
The following commit has been merged into the x86/cache branch of tip:
Commit-ID: aab2c5088cdb26e80d51ffbe72d24ab23fa1533e
Gitweb: https://git.kernel.org/tip/aab2c5088cdb26e80d51ffbe72d24ab23fa1533e
Author: Babu Moger <babu.moger@....com>
AuthorDate: Fri, 05 Sep 2025 16:34:17 -05:00
Committer: Borislav Petkov (AMD) <bp@...en8.de>
CommitterDate: Mon, 15 Sep 2025 12:22:24 +02:00
fs/resctrl: Add the functionality to unassign MBM events
The "mbm_event" counter assignment mode offers "num_mbm_cntrs" number of
counters that can be assigned to RMID, event pairs and monitor bandwidth usage
as long as it is assigned. If all the counters are in use, the kernel logs the
error message "Failed to allocate counter for <event> in domain <id>" in
/sys/fs/resctrl/info/last_cmd_status when a new assignment is requested.
To make space for a new assignment, users must unassign an already assigned
counter and retry the assignment again.
Add the functionality to unassign and free the counters in the domain. Also,
add the helper rdtgroup_unassign_cntrs() to unassign counters in the group.
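The counter lifecycle described above can be illustrated with a minimal,
self-contained userspace sketch. It is not the kernel code: the
NUM_MBM_CNTRS value, struct layout and helper names below are illustrative
stand-ins that only mirror the semantics of mbm_cntr_alloc() (fail when the
pool is exhausted) and mbm_cntr_free() (clear the slot's configuration):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define NUM_MBM_CNTRS 4		/* illustrative; the real value is "num_mbm_cntrs" */

/* One slot per hardware counter: which (RMID, event) pair it tracks. */
struct cntr_cfg {
	int rmid;
	int evtid;
	int busy;
};

static struct cntr_cfg cntr_cfg[NUM_MBM_CNTRS];

/* Find a free slot; like mbm_cntr_alloc(), fail with -ENOSPC when full. */
static int cntr_alloc(int rmid, int evtid)
{
	int i;

	for (i = 0; i < NUM_MBM_CNTRS; i++) {
		if (!cntr_cfg[i].busy) {
			cntr_cfg[i].rmid = rmid;
			cntr_cfg[i].evtid = evtid;
			cntr_cfg[i].busy = 1;
			return i;
		}
	}
	return -ENOSPC;
}

/* Clear the slot, like mbm_cntr_free()'s memset() of the configuration. */
static void cntr_free(int cntr_id)
{
	memset(&cntr_cfg[cntr_id], 0, sizeof(cntr_cfg[cntr_id]));
}

int main(void)
{
	int i, id;

	/* Exhaust the pool: the last request fails, as a full domain does. */
	for (i = 0; i <= NUM_MBM_CNTRS; i++) {
		id = cntr_alloc(i, 0);
		printf("assign rmid %d -> %s\n", i,
		       id < 0 ? "no counter available" : "ok");
	}

	/* Unassigning one counter makes room for a new assignment. */
	cntr_free(0);
	id = cntr_alloc(42, 0);
	printf("after freeing one counter, assign rmid 42 -> %s\n",
	       id < 0 ? "failed" : "ok");
	return 0;
}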
Signed-off-by: Babu Moger <babu.moger@....com>
Signed-off-by: Borislav Petkov (AMD) <bp@...en8.de>
Reviewed-by: Reinette Chatre <reinette.chatre@...el.com>
Link: https://lore.kernel.org/cover.1757108044.git.babu.moger@amd.com
---
fs/resctrl/internal.h | 2 ++
fs/resctrl/monitor.c | 66 ++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 68 insertions(+)
diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h
index 762705d..c6b66d4 100644
--- a/fs/resctrl/internal.h
+++ b/fs/resctrl/internal.h
@@ -398,6 +398,8 @@ int resctrl_available_mbm_cntrs_show(struct kernfs_open_file *of, struct seq_fil
void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp);
+void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp);
+
#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
index 106e9bd..2ed29ae 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -405,6 +405,14 @@ static int mbm_cntr_alloc(struct rdt_resource *r, struct rdt_mon_domain *d,
return -ENOSPC;
}
+/*
+ * mbm_cntr_free() - Clear the counter ID configuration details in the domain @d.
+ */
+static void mbm_cntr_free(struct rdt_mon_domain *d, int cntr_id)
+{
+ memset(&d->cntr_cfg[cntr_id], 0, sizeof(*d->cntr_cfg));
+}
+
static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
{
int cpu = smp_processor_id();
@@ -1043,6 +1051,64 @@ void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
&mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
}
+/*
+ * rdtgroup_free_unassign_cntr() - Unassign and reset the counter ID configuration
+ * for the event pointed to by @mevt within the domain @d and resctrl group @rdtgrp.
+ */
+static void rdtgroup_free_unassign_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ struct rdtgroup *rdtgrp, struct mon_evt *mevt)
+{
+ int cntr_id;
+
+ cntr_id = mbm_cntr_get(r, d, rdtgrp, mevt->evtid);
+
+ /* If there is no cntr_id assigned, nothing to do */
+ if (cntr_id < 0)
+ return;
+
+ rdtgroup_assign_cntr(r, d, mevt->evtid, rdtgrp->mon.rmid, rdtgrp->closid, cntr_id, false);
+
+ mbm_cntr_free(d, cntr_id);
+}
+
+/*
+ * rdtgroup_unassign_cntr_event() - Unassign a hardware counter associated with
+ * the event structure @mevt from the domain @d and the group @rdtgrp. Unassign
+ * the counters from all the domains if @d is NULL else unassign from @d.
+ */
+static void rdtgroup_unassign_cntr_event(struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
+ struct mon_evt *mevt)
+{
+ struct rdt_resource *r = resctrl_arch_get_resource(mevt->rid);
+
+ if (!d) {
+ list_for_each_entry(d, &r->mon_domains, hdr.list)
+ rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
+ } else {
+ rdtgroup_free_unassign_cntr(r, d, rdtgrp, mevt);
+ }
+}
+
+/*
+ * rdtgroup_unassign_cntrs() - Unassign the counters associated with MBM events.
+ * Called when a group is deleted.
+ */
+void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
+{
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+
+ if (!r->mon_capable || !resctrl_arch_mbm_cntr_assign_enabled(r))
+ return;
+
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
+ rdtgroup_unassign_cntr_event(NULL, rdtgrp,
+ &mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID]);
+
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
+ rdtgroup_unassign_cntr_event(NULL, rdtgrp,
+ &mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID]);
+}
+
int resctrl_mbm_assign_mode_show(struct kernfs_open_file *of,
struct seq_file *s, void *v)
{
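A second minimal userspace sketch of the lookup-and-free walk performed by
rdtgroup_unassign_cntr_event(): when no specific domain is given, every
domain is scanned, the counter assigned to the group's (RMID, event) pair
is found, and its configuration is cleared. The array-of-domains layout and
names below are assumptions for illustration, not the kernel's data
structures:

#include <stdio.h>
#include <string.h>

#define NUM_DOMAINS   2		/* illustrative */
#define NUM_MBM_CNTRS 4		/* illustrative */

struct cntr_cfg {
	int rmid;
	int evtid;
	int busy;
};

/* One counter table per monitoring domain. */
static struct cntr_cfg domain_cntrs[NUM_DOMAINS][NUM_MBM_CNTRS];

/* Like mbm_cntr_get(): find the counter tracking (rmid, evtid), or -1. */
static int cntr_get(int dom, int rmid, int evtid)
{
	int i;

	for (i = 0; i < NUM_MBM_CNTRS; i++) {
		if (domain_cntrs[dom][i].busy &&
		    domain_cntrs[dom][i].rmid == rmid &&
		    domain_cntrs[dom][i].evtid == evtid)
			return i;
	}
	return -1;
}

/* The unassign walk: clear the counter in one domain, or in all of them. */
static void unassign(int dom, int rmid, int evtid)
{
	int d, id;

	for (d = 0; d < NUM_DOMAINS; d++) {
		if (dom >= 0 && d != dom)
			continue;	/* a specific domain was requested */
		id = cntr_get(d, rmid, evtid);
		if (id < 0)
			continue;	/* nothing assigned here, nothing to do */
		memset(&domain_cntrs[d][id], 0, sizeof(domain_cntrs[d][id]));
		printf("domain %d: freed counter %d for rmid %d evtid %d\n",
		       d, id, rmid, evtid);
	}
}

int main(void)
{
	int d;

	/* Pretend counter 1 in both domains tracks (rmid 7, evtid 3). */
	for (d = 0; d < NUM_DOMAINS; d++) {
		domain_cntrs[d][1].rmid = 7;
		domain_cntrs[d][1].evtid = 3;
		domain_cntrs[d][1].busy = 1;
	}

	unassign(-1, 7, 3);	/* dom < 0: unassign from every domain */
	return 0;
}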