Message-ID: <1f6b7f11ace54add1d907b539a473ba9c274364f.1705688539.git.babu.moger@amd.com>
Date: Fri, 19 Jan 2024 12:22:17 -0600
From: Babu Moger <babu.moger@....com>
To: <corbet@....net>, <fenghua.yu@...el.com>, <reinette.chatre@...el.com>,
<tglx@...utronix.de>, <mingo@...hat.com>, <bp@...en8.de>,
<dave.hansen@...ux.intel.com>
CC: <x86@...nel.org>, <hpa@...or.com>, <paulmck@...nel.org>,
<rdunlap@...radead.org>, <tj@...nel.org>, <peterz@...radead.org>,
<yanjiewtw@...il.com>, <babu.moger@....com>, <kim.phillips@....com>,
<lukas.bulwahn@...il.com>, <seanjc@...gle.com>, <jmattson@...gle.com>,
<leitao@...ian.org>, <jpoimboe@...nel.org>, <rick.p.edgecombe@...el.com>,
<kirill.shutemov@...ux.intel.com>, <jithu.joseph@...el.com>,
<kai.huang@...el.com>, <kan.liang@...ux.intel.com>,
<daniel.sneddon@...ux.intel.com>, <pbonzini@...hat.com>,
<sandipan.das@....com>, <ilpo.jarvinen@...ux.intel.com>,
<peternewman@...gle.com>, <maciej.wieczor-retman@...el.com>,
<linux-doc@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<eranian@...gle.com>
Subject: [PATCH v2 15/17] x86/resctrl: Add the interface to assign the RMID

With support for the ABMC (Assignable Bandwidth Monitoring Counters)
feature, the user has the option to assign or unassign an RMID to a
hardware counter and monitor the bandwidth for a longer duration.
Provide the interface to assign the counter to the group.
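
For example, once ABMC mode is active, assignment is requested by writing
to the new "monitor_state" file (the usage below mirrors the resctrl
documentation update in this patch):

  # echo "total=assign" > /sys/fs/resctrl/monitor_state
  # echo "total=assign;local=assign" > /sys/fs/resctrl/monitor_state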

The ABMC feature implements a pair of MSRs, L3_QOS_ABMC_CFG (MSR
C000_03FDh) and L3_QOS_ABMC_DSC (MSR C000_03FEh). Each logical processor
implements a separate copy of these registers. Attempts to read or write
these MSRs when ABMC is not enabled result in a #GP(0) exception.

Individual assignable bandwidth counters are configured by writing to the
L3_QOS_ABMC_CFG MSR, specifying the Counter ID, Bandwidth Source, and
Bandwidth Type. Reading L3_QOS_ABMC_DSC returns the configuration of the
counter specified by L3_QOS_ABMC_CFG[CtrID].
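
As a rough sketch of the per-domain programming sequence (the union and
MSR names match what this patch uses; the helper name and the bitfield
widths below are illustrative assumptions only, the authoritative layout
is in the APM referenced below and in the definition added earlier in
this series):

  /* Layout sketch only; widths are illustrative, not authoritative. */
  union l3_qos_abmc_cfg {
          u64 full;
          struct {
                  u64 cfg_en:1,           /* commit this configuration */
                      ctr_en:1,           /* 1 = assign, 0 = unassign */
                      reserved1:30,
                      ctr_id:5,           /* assignable counter to program */
                      reserved2:3,
                      bw_src:12,          /* RMID whose traffic is counted */
                      bw_type:7,          /* MBM bandwidth event configuration */
                      reserved3:5;
          } split;
  };

  /* Hypothetical helper: assign @rmid to counter @ctr_id on this CPU. */
  static void abmc_assign_counter(u32 ctr_id, u32 rmid, u32 bw_type)
  {
          union l3_qos_abmc_cfg cfg = { 0 };

          cfg.split.cfg_en  = 1;          /* apply the new configuration */
          cfg.split.ctr_en  = 1;          /* enable (assign) the counter */
          cfg.split.ctr_id  = ctr_id;
          cfg.split.bw_src  = rmid;
          cfg.split.bw_type = bw_type;

          wrmsrl(MSR_IA32_L3_QOS_ABMC_CFG, cfg.full);
  }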

The feature details are documented in the APM listed below [1].

[1] AMD64 Architecture Programmer's Manual Volume 2: System Programming,
    Publication #24593, Revision 3.41, Section 19.3.3.3 "Assignable
    Bandwidth Monitoring (ABMC)".

Signed-off-by: Babu Moger <babu.moger@....com>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=206537
---
v2: Minor text changes in commit message.
---
Documentation/arch/x86/resctrl.rst | 7 ++
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 160 ++++++++++++++++++++++++-
2 files changed, 166 insertions(+), 1 deletion(-)
diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst
index 4f89d5d1b61f..2729c6fe6127 100644
--- a/Documentation/arch/x86/resctrl.rst
+++ b/Documentation/arch/x86/resctrl.rst
@@ -420,6 +420,13 @@ When monitoring is enabled all MON groups will also contain:
# cat /sys/fs/resctrl/monitor_state
total=assign;local=assign
+ The user needs to pin (or assign) the RMID to read MBM events in
+ ABMC mode. Each event can be assigned or unassigned separately.
+ Example::
+
+ # echo "total=assign" > /sys/fs/resctrl/monitor_state
+ # echo "total=assign;local=assign" > /sys/fs/resctrl/monitor_state
+
"mon_hw_id":
Available only with debug option. The identifier used by hardware
for the monitor group. On x86 this is the RMID.
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index df8d2390fc69..3447fc4ff2e9 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -180,6 +180,18 @@ static void assignable_counters_init(void)
assignable_counter_free_map_len = hw_res->mbm_assignable_counters;
}
+static int assignable_counters_alloc(void)
+{
+ u32 counterid = ffs(assignable_counter_free_map);
+
+ if (counterid == 0)
+ return -ENOSPC;
+ counterid--;
+ assignable_counter_free_map &= ~(1 << counterid);
+
+ return counterid;
+}
+
/**
* rdtgroup_mode_by_closid - Return mode of resource group with closid
* @closid: closid if the resource group
@@ -1635,6 +1647,151 @@ static inline unsigned int mon_event_config_index_get(u32 evtid)
}
}
+static void rdtgroup_abmc_msrwrite(void *info)
+{
+ u64 *msrval = info;
+
+ wrmsrl(MSR_IA32_L3_QOS_ABMC_CFG, *msrval);
+}
+
+static void rdtgroup_abmc_domain(struct rdt_domain *d,
+ struct rdtgroup *rdtgrp,
+ u32 evtid, int index, bool assign)
+{
+ struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+ union l3_qos_abmc_cfg abmc_cfg = { 0 };
+ struct arch_mbm_state *arch_mbm;
+
+ abmc_cfg.split.cfg_en = 1;
+ abmc_cfg.split.ctr_en = assign ? 1 : 0;
+ abmc_cfg.split.ctr_id = rdtgrp->mon.abmc_ctr_id[index];
+ abmc_cfg.split.bw_src = rdtgrp->mon.rmid;
+
+ /*
+ * Read the event configuration from the domain and pass it as
+ * bw_type.
+ */
+ if (evtid == QOS_L3_MBM_TOTAL_EVENT_ID) {
+ abmc_cfg.split.bw_type = hw_dom->mbm_total_cfg;
+ arch_mbm = &hw_dom->arch_mbm_total[rdtgrp->mon.rmid];
+ } else {
+ abmc_cfg.split.bw_type = hw_dom->mbm_local_cfg;
+ arch_mbm = &hw_dom->arch_mbm_local[rdtgrp->mon.rmid];
+ }
+
+ smp_call_function_any(&d->cpu_mask, rdtgroup_abmc_msrwrite, &abmc_cfg, 1);
+
+ /* Reset the internal counters */
+ if (arch_mbm)
+ memset(arch_mbm, 0, sizeof(struct arch_mbm_state));
+}
+
+static ssize_t rdtgroup_assign_abmc(struct rdtgroup *rdtgrp,
+ struct rdt_resource *r,
+ u32 evtid, int mon_state)
+{
+ int counterid = 0, index;
+ struct rdt_domain *d;
+
+ if (rdtgrp->mon.monitor_state & mon_state) {
+ rdt_last_cmd_puts("ABMC counter is assigned already\n");
+ return 0;
+ }
+
+ index = mon_event_config_index_get(evtid);
+ if (index == INVALID_CONFIG_INDEX) {
+ pr_warn_once("Invalid event id %d\n", evtid);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a new counter and update domains
+ */
+ counterid = assignable_counters_alloc();
+ if (counterid < 0) {
+ rdt_last_cmd_puts("Out of ABMC counters\n");
+ return -ENOSPC;
+ }
+
+ rdtgrp->mon.abmc_ctr_id[index] = counterid;
+
+ list_for_each_entry(d, &r->domains, list)
+ rdtgroup_abmc_domain(d, rdtgrp, evtid, index, 1);
+
+ rdtgrp->mon.monitor_state |= mon_state;
+
+ return 0;
+}
+
+/**
+ * rdtgroup_monitor_state_write - Interface to assign/unassign an RMID.
+ *
+ * Return: @nbytes on success or a negative error code on failure.
+ */
+static ssize_t rdtgroup_monitor_state_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+ char *abmc_str, *event_str;
+ struct rdtgroup *rdtgrp;
+ int ret = 0, mon_state;
+ u32 evtid;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -EINVAL;
+ }
+
+ if (!hw_res->abmc_enabled) {
+ rdt_last_cmd_puts("ABMC is not enabled\n");
+ rdtgroup_kn_unlock(of->kn);
+ return -EINVAL;
+ }
+
+ rdt_last_cmd_clear();
+
+ while (buf && buf[0] != '\0') {
+ /* Start processing the strings for each domain */
+ abmc_str = strim(strsep(&buf, ";"));
+ event_str = strsep(&abmc_str, "=");
+
+ if (event_str && abmc_str) {
+ if (!strcmp(event_str, "total")) {
+ mon_state = TOTAL_ASSIGN;
+ evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
+ } else if (!strcmp(event_str, "local")) {
+ mon_state = LOCAL_ASSIGN;
+ evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
+ } else {
+ rdt_last_cmd_puts("Invalid ABMC event\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!strcmp(abmc_str, "assign")) {
+ ret = rdtgroup_assign_abmc(rdtgrp, r, evtid, mon_state);
+ if (ret) {
+ rdt_last_cmd_puts("ABMC assign failed\n");
+ break;
+ }
+ } else {
+ rdt_last_cmd_puts("Invalid ABMC event\n");
+ ret = -EINVAL;
+ break;
+ }
+ } else {
+ rdt_last_cmd_puts("Invalid ABMC input\n");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ rdtgroup_kn_unlock(of->kn);
+ return ret ?: nbytes;
+}
+
static void mon_event_config_read(void *info)
{
struct mon_config_info *mon_info = info;
@@ -2003,9 +2160,10 @@ static struct rftype res_common_files[] = {
},
{
.name = "monitor_state",
- .mode = 0444,
+ .mode = 0644,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdtgroup_monitor_state_show,
+ .write = rdtgroup_monitor_state_write,
},
{
.name = "tasks",
--
2.34.1