Message-ID: <099ecbbe678dd44387a8962d0cb81e61500cd2fa.1722981659.git.babu.moger@amd.com>
Date: Tue, 6 Aug 2024 17:00:52 -0500
From: Babu Moger <babu.moger@....com>
To: <corbet@....net>, <fenghua.yu@...el.com>, <reinette.chatre@...el.com>,
<tglx@...utronix.de>, <mingo@...hat.com>, <bp@...en8.de>,
<dave.hansen@...ux.intel.com>
CC: <x86@...nel.org>, <hpa@...or.com>, <paulmck@...nel.org>,
<rdunlap@...radead.org>, <tj@...nel.org>, <peterz@...radead.org>,
<yanjiewtw@...il.com>, <babu.moger@....com>, <kim.phillips@....com>,
<lukas.bulwahn@...il.com>, <seanjc@...gle.com>, <jmattson@...gle.com>,
<leitao@...ian.org>, <jpoimboe@...nel.org>, <rick.p.edgecombe@...el.com>,
<kirill.shutemov@...ux.intel.com>, <jithu.joseph@...el.com>,
<kai.huang@...el.com>, <kan.liang@...ux.intel.com>,
<daniel.sneddon@...ux.intel.com>, <pbonzini@...hat.com>,
<sandipan.das@....com>, <ilpo.jarvinen@...ux.intel.com>,
<peternewman@...gle.com>, <maciej.wieczor-retman@...el.com>,
<linux-doc@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<eranian@...gle.com>, <james.morse@....com>
Subject: [PATCH v6 15/22] x86/resctrl: Add the interface to assign a hardware counter
The ABMC feature allows the user to assign a hardware counter to an RMID
and monitor the bandwidth for as long as the counter is assigned. The
assigned RMID will be tracked by the hardware until the user unassigns it
manually.
Counters are configured by writing to the L3_QOS_ABMC_CFG MSR,
specifying the counter id, bandwidth source, and bandwidth types.
Provide the interface to assign a counter id to an RMID.
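For illustration, a hypothetical caller could use the new interface to
assign one counter per enabled MBM event of a monitor group roughly as
in the sketch below. This is only a sketch: the actual hook-up to the
resctrl filesystem is done in later patches of this series, and
example_assign_group_counters() is not part of this patch.

  /*
   * Hypothetical sketch, not part of this patch: assign one hardware
   * counter per enabled MBM event of a monitor group using the
   * interface added here.
   */
  static int example_assign_group_counters(struct rdtgroup *rdtgrp)
  {
          int ret = 0;

          if (is_mbm_total_enabled()) {
                  ret = rdtgroup_assign_cntr(rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID);
                  if (ret)
                          return ret;
          }

          if (is_mbm_local_enabled())
                  ret = rdtgroup_assign_cntr(rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID);

          return ret;
  }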
The feature details are documented in the APM listed below [1].
[1] AMD64 Architecture Programmer's Manual Volume 2: System Programming
Publication # 24593 Revision 3.41 section 19.3.3.3 Assignable Bandwidth
Monitoring (ABMC).
Link: https://bugzilla.kernel.org/show_bug.cgi?id=206537
Signed-off-by: Babu Moger <babu.moger@....com>
---
v6: Removed mbm_cntr_alloc() from this patch to keep fs and arch code
separate.
Added code to update the counter assignment at domain level.
v5: A few name changes to match cntr_id.
Changed the function names to
rdtgroup_assign_cntr
resctrl_arch_assign_cntr
More comments in the commit log.
Added function summary.
v4: Commit message update.
Use bitmap APIs where applicable.
Changed the interfaces considering MPAM (Arm).
Added domain specific assignment.
v3: Removed static from the prototype of rdtgroup_assign_abmc().
The function is not called directly from the user interface anymore. These
changes are related to the global assignment interface.
v2: Minor text changes in commit message.
---
arch/x86/kernel/cpu/resctrl/internal.h | 4 ++
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 97 ++++++++++++++++++++++++++
2 files changed, 101 insertions(+)
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index d93082b65d69..4e8109dee174 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -685,6 +685,10 @@ int mbm_cntr_alloc(struct rdt_resource *r);
void mbm_cntr_free(u32 cntr_id);
void resctrl_mbm_evt_config_init(struct rdt_hw_mon_domain *hw_dom);
unsigned int mon_event_config_index_get(u32 evtid);
+int resctrl_arch_assign_cntr(struct rdt_mon_domain *d, enum resctrl_event_id evtid,
+ u32 rmid, u32 cntr_id, u32 closid, bool assign);
+int rdtgroup_assign_cntr(struct rdtgroup *rdtgrp, enum resctrl_event_id evtid);
+int rdtgroup_alloc_cntr(struct rdtgroup *rdtgrp, int index);
void rdt_staged_configs_clear(void);
bool closid_allocated(unsigned int closid);
int resctrl_find_cleanest_closid(void);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 60696b248b56..1ee91a7293a8 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1864,6 +1864,103 @@ static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
return ret ?: nbytes;
}
+static void rdtgroup_abmc_cfg(void *info)
+{
+ u64 *msrval = info;
+
+ wrmsrl(MSR_IA32_L3_QOS_ABMC_CFG, *msrval);
+}
+
+/*
+ * Send an IPI to a CPU in the domain to assign the counter id to the RMID.
+ */
+int resctrl_arch_assign_cntr(struct rdt_mon_domain *d, enum resctrl_event_id evtid,
+ u32 rmid, u32 cntr_id, u32 closid, bool assign)
+{
+ struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
+ union l3_qos_abmc_cfg abmc_cfg = { 0 };
+ struct arch_mbm_state *arch_mbm;
+
+ abmc_cfg.split.cfg_en = 1;
+ abmc_cfg.split.cntr_en = assign ? 1 : 0;
+ abmc_cfg.split.cntr_id = cntr_id;
+ abmc_cfg.split.bw_src = rmid;
+
+ /* Update the event configuration from the domain */
+ if (evtid == QOS_L3_MBM_TOTAL_EVENT_ID) {
+ abmc_cfg.split.bw_type = hw_dom->mbm_total_cfg;
+ arch_mbm = &hw_dom->arch_mbm_total[rmid];
+ } else {
+ abmc_cfg.split.bw_type = hw_dom->mbm_local_cfg;
+ arch_mbm = &hw_dom->arch_mbm_local[rmid];
+ }
+
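+	/* Any CPU in the domain can program the domain-scoped MSR. */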
+ smp_call_function_any(&d->hdr.cpu_mask, rdtgroup_abmc_cfg, &abmc_cfg, 1);
+
+ /*
+	 * Reset the architectural state so that reading of the hardware
+	 * counter is not considered as an overflow in the next update.
+ */
+ if (arch_mbm)
+ memset(arch_mbm, 0, sizeof(struct arch_mbm_state));
+
+ return 0;
+}
+
+/* Allocate a new counter id if the event is unassigned */
+int rdtgroup_alloc_cntr(struct rdtgroup *rdtgrp, int index)
+{
+ struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ int cntr_id;
+
+	/* Nothing to do if the event has already been assigned a counter */
+ if (rdtgrp->mon.cntr_id[index] != MON_CNTR_UNSET) {
+ rdt_last_cmd_puts("ABMC counter is assigned already\n");
+ return 0;
+ }
+
+ /*
+ * Allocate a new counter id and update domains
+ */
+ cntr_id = mbm_cntr_alloc(r);
+ if (cntr_id < 0) {
+ rdt_last_cmd_puts("Out of ABMC counters\n");
+ return -ENOSPC;
+ }
+
+ rdtgrp->mon.cntr_id[index] = cntr_id;
+
+ return 0;
+}
+
+/*
+ * Assign a hardware counter to the group's event and program the counter
+ * in all the monitoring domains of the resource. A new counter is
+ * allocated, if available, when the event does not have one already.
+ */
+int rdtgroup_assign_cntr(struct rdtgroup *rdtgrp, enum resctrl_event_id evtid)
+{
+ struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ struct rdt_mon_domain *d;
+ int index;
+
+ index = mon_event_config_index_get(evtid);
+ if (index == INVALID_CONFIG_INDEX)
+ return -EINVAL;
+
+ if (rdtgroup_alloc_cntr(rdtgrp, index))
+ return -EINVAL;
+
+ list_for_each_entry(d, &r->mon_domains, hdr.list) {
+ resctrl_arch_assign_cntr(d, evtid, rdtgrp->mon.rmid,
+ rdtgrp->mon.cntr_id[index],
+ rdtgrp->closid, true);
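+		/* Track the assignment in the domain's counter bitmap. */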
+ set_bit(rdtgrp->mon.cntr_id[index], d->mbm_cntr_map);
+ }
+
+ return 0;
+}
+
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
{
--
2.34.1