Message-ID: <176382651593.498.17898036111535286825.tip-bot2@tip-bot2>
Date: Sat, 22 Nov 2025 15:48:35 -0000
From: "tip-bot2 for Babu Moger" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Babu Moger <babu.moger@....com>, "Borislav Petkov (AMD)" <bp@...en8.de>,
Reinette Chatre <reinette.chatre@...el.com>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: x86/cache] fs/resctrl: Introduce interface to display io_alloc CBMs
The following commit has been merged into the x86/cache branch of tip:
Commit-ID: 77b662326200135fe72cedc47fb1b0e5679d604d
Gitweb: https://git.kernel.org/tip/77b662326200135fe72cedc47fb1b0e5679d604d
Author: Babu Moger <babu.moger@....com>
AuthorDate: Wed, 12 Nov 2025 18:57:33 -06:00
Committer: Borislav Petkov (AMD) <bp@...en8.de>
CommitterDate: Sat, 22 Nov 2025 11:37:21 +01:00
fs/resctrl: Introduce interface to display io_alloc CBMs
Introduce the "io_alloc_cbm" resctrl file to display the capacity bitmasks
(CBMs) that represent the portions of each cache instance allocated
for I/O traffic on a cache resource that supports the "io_alloc" feature.
io_alloc_cbm resides in the info directory of a cache resource, for example,
/sys/fs/resctrl/info/L3/. Since the resource name is part of the path, it
is not necessary to display the resource name as done in the schemata file.
When CDP is enabled, io_alloc routes traffic using the highest CLOSID
associated with the CDP_CODE resource, and that CLOSID becomes unusable for
the CDP_DATA resource. The highest CLOSIDs of the CDP_CODE and CDP_DATA
resources will be kept in sync to present a consistent user interface. In
preparation for this, access the CBMs for I/O traffic through the highest
CLOSID of either the CDP_CODE or CDP_DATA resource.
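As a rough illustration of the "highest CLOSID" rule above: the
resctrl_io_alloc_closid() helper used further down in the diff comes from an
earlier patch in this series and is not shown here, so the body below is only
an assumed sketch for illustration, not the merged code.

/*
 * Hypothetical sketch only: with CDP enabled the CODE and DATA schemata
 * expose the same number of CLOSIDs, so the highest CLOSID is identical
 * regardless of which of the two schemata is queried.
 */
static u32 io_alloc_closid_sketch(struct resctrl_schema *s)
{
	/* CLOSIDs are numbered from 0, so the highest one is num_closid - 1. */
	return s->num_closid - 1;
}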
Signed-off-by: Babu Moger <babu.moger@....com>
Signed-off-by: Borislav Petkov (AMD) <bp@...en8.de>
Reviewed-by: Reinette Chatre <reinette.chatre@...el.com>
Link: https://patch.msgid.link/55a3ff66a70e7ce8239f022e62b334e9d64af604.1762995456.git.babu.moger@amd.com
---
Documentation/filesystems/resctrl.rst | 19 +++++++++++++++++++
fs/resctrl/ctrlmondata.c | 45 ++++++++++++++++++++++++--
fs/resctrl/internal.h | 2 ++
fs/resctrl/rdtgroup.c | 11 +++++-
4 files changed, 73 insertions(+), 4 deletions(-)
diff --git a/Documentation/filesystems/resctrl.rst b/Documentation/filesystems/resctrl.rst
index 91c71e2..e799453 100644
--- a/Documentation/filesystems/resctrl.rst
+++ b/Documentation/filesystems/resctrl.rst
@@ -182,6 +182,25 @@ related to allocation:
available for general (CPU) cache allocation for both the CDP_CODE
and CDP_DATA resources.
+"io_alloc_cbm":
+ Capacity bitmasks that describe the portions of cache instances to
+ which I/O traffic from supported I/O devices is routed when "io_alloc"
+ is enabled.
+
+ CBMs are displayed in the following format:
+
+ <cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+ Example::
+
+ # cat /sys/fs/resctrl/info/L3/io_alloc_cbm
+ 0=ffff;1=ffff
+
+ When CDP is enabled, "io_alloc_cbm" associated with the CDP_DATA and CDP_CODE
+ resources may reflect the same values. For example, values read from and
+ written to /sys/fs/resctrl/info/L3DATA/io_alloc_cbm may be reflected by
+ /sys/fs/resctrl/info/L3CODE/io_alloc_cbm and vice versa.
+
Memory bandwidth(MB) subdirectory contains the following files
with respect to allocation:
diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c
index 454fdf3..1ac89b1 100644
--- a/fs/resctrl/ctrlmondata.c
+++ b/fs/resctrl/ctrlmondata.c
@@ -381,7 +381,8 @@ out:
	return ret ?: nbytes;
}

-static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
+static void show_doms(struct seq_file *s, struct resctrl_schema *schema,
+		      char *resource_name, int closid)
{
	struct rdt_resource *r = schema->res;
	struct rdt_ctrl_domain *dom;
@@ -391,7 +392,8 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int clo
	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

-	seq_printf(s, "%*s:", max_name_width, schema->name);
+	if (resource_name)
+		seq_printf(s, "%*s:", max_name_width, resource_name);
	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
		if (sep)
			seq_puts(s, ";");
@@ -437,7 +439,7 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
			closid = rdtgrp->closid;
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				if (closid < schema->num_closid)
-					show_doms(s, schema, closid);
+					show_doms(s, schema, schema->name, closid);
			}
		}
	} else {
@@ -823,3 +825,40 @@ out_unlock:
	return ret ?: nbytes;
}
+
+int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of, struct seq_file *seq, void *v)
+{
+	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
+	struct rdt_resource *r = s->res;
+	int ret = 0;
+
+	cpus_read_lock();
+	mutex_lock(&rdtgroup_mutex);
+
+	rdt_last_cmd_clear();
+
+	if (!r->cache.io_alloc_capable) {
+		rdt_last_cmd_printf("io_alloc is not supported on %s\n", s->name);
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	if (!resctrl_arch_get_io_alloc_enabled(r)) {
+		rdt_last_cmd_printf("io_alloc is not enabled on %s\n", s->name);
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	/*
+	 * When CDP is enabled, the CBMs of the highest CLOSID of CDP_CODE and
+	 * CDP_DATA are kept in sync. As a result, the io_alloc CBMs shown for
+	 * either CDP resource are identical and accurately represent the CBMs
+	 * used for I/O.
+	 */
+	show_doms(seq, s, NULL, resctrl_io_alloc_closid(r));
+
+out_unlock:
+	mutex_unlock(&rdtgroup_mutex);
+	cpus_read_unlock();
+	return ret;
+}
diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h
index 145e22f..779a575 100644
--- a/fs/resctrl/internal.h
+++ b/fs/resctrl/internal.h
@@ -438,6 +438,8 @@ ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
			       size_t nbytes, loff_t off);
const char *rdtgroup_name_by_closid(u32 closid);
+int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of, struct seq_file *seq,
+			      void *v);
#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
index 9f9bd31..ac326db 100644
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -1972,6 +1972,12 @@ static struct rftype res_common_files[] = {
		.write = resctrl_io_alloc_write,
	},
	{
+		.name = "io_alloc_cbm",
+		.mode = 0444,
+		.kf_ops = &rdtgroup_kf_single_ops,
+		.seq_show = resctrl_io_alloc_cbm_show,
+	},
+	{
		.name = "max_threshold_occupancy",
		.mode = 0644,
		.kf_ops = &rdtgroup_kf_single_ops,
@@ -2171,9 +2177,12 @@ static void io_alloc_init(void)
{
	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);

-	if (r->cache.io_alloc_capable)
+	if (r->cache.io_alloc_capable) {
		resctrl_file_fflags_init("io_alloc", RFTYPE_CTRL_INFO |
					 RFTYPE_RES_CACHE);
+		resctrl_file_fflags_init("io_alloc_cbm",
+					 RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE);
+	}
}
void resctrl_file_fflags_init(const char *config, unsigned long fflags)
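For completeness, a minimal userspace sketch of consuming the new file,
assuming the "<cache_id>=<cbm>;..." format documented in the resctrl.rst hunk
above; the mount path, file location and parsing details are illustrative
assumptions only, not part of this patch.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Assumes resctrl is mounted at /sys/fs/resctrl and L3 supports io_alloc. */
	FILE *f = fopen("/sys/fs/resctrl/info/L3/io_alloc_cbm", "r");
	char line[256];

	if (!f) {
		perror("io_alloc_cbm");
		return 1;
	}
	if (fgets(line, sizeof(line), f)) {
		char *saveptr = NULL;

		/* Tokens look like "0=ffff"; split on ';' and parse cache id/CBM pairs. */
		for (char *tok = strtok_r(line, ";\n", &saveptr); tok;
		     tok = strtok_r(NULL, ";\n", &saveptr)) {
			unsigned long id, cbm;

			if (sscanf(tok, "%lu=%lx", &id, &cbm) == 2)
				printf("cache %lu: io_alloc CBM 0x%lx\n", id, cbm);
		}
	}
	fclose(f);
	return 0;
}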