Message-ID: <20251107012401.224515-2-atomlin@atomlin.com>
Date: Thu,  6 Nov 2025 20:24:00 -0500
From: Aaron Tomlin <atomlin@...mlin.com>
To: tony.luck@...el.com,
	reinette.chatre@...el.com,
	Dave.Martin@....com,
	james.morse@....com,
	babu.moger@....com,
	tglx@...utronix.de,
	mingo@...hat.com,
	bp@...en8.de,
	dave.hansen@...ux.intel.com
Cc: dave.martin@....com,
	linux-kernel@...r.kernel.org
Subject: [PATCH 1/2] x86/resctrl: Add io_alloc_min_cbm_all interface for CBM reset

Introduce the new resctrl interface file "io_alloc_min_cbm_all" to
provide users with a clean mechanism for resetting all I/O allocation
CBMs (Cache Bit Masks) to their minimum configuration.

Writing '0' to this file sets each corresponding CBM to the minimum
number of consecutive bits (effectively clearing it to 0 or to the
smallest supported mask). This simplifies clearing or resetting the
I/O allocation state: no manual CBM string calculations are needed,
and multiple writes to "io_alloc_cbm" are avoided.
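
For example, with io_alloc enabled on the L3 resource (file names match
the documentation hunk below; the CBM values shown are illustrative):

  # cat /sys/fs/resctrl/info/L3/io_alloc_cbm
  0=ffff;1=00ff
  # echo 0 > /sys/fs/resctrl/info/L3/io_alloc_min_cbm_all
  # cat /sys/fs/resctrl/info/L3/io_alloc_cbm
  0=0;1=0

Writing a boolean 'true' value (for example '1') is rejected with
-EINVAL.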

Signed-off-by: Aaron Tomlin <atomlin@...mlin.com>
---
 Documentation/filesystems/resctrl.rst     |  13 +++
 arch/x86/kernel/cpu/resctrl/core.c        |   2 +-
 arch/x86/kernel/cpu/resctrl/ctrlmondata.c |  23 +++--
 fs/resctrl/ctrlmondata.c                  | 117 ++++++++++++++++++----
 fs/resctrl/internal.h                     |   3 +
 fs/resctrl/rdtgroup.c                     |  10 +-
 include/linux/resctrl.h                   |  30 +++++-
 7 files changed, 165 insertions(+), 33 deletions(-)

diff --git a/Documentation/filesystems/resctrl.rst b/Documentation/filesystems/resctrl.rst
index ccc425b65b27..9899bc716459 100644
--- a/Documentation/filesystems/resctrl.rst
+++ b/Documentation/filesystems/resctrl.rst
@@ -213,6 +213,19 @@ related to allocation:
 		written to /sys/fs/resctrl/info/L3DATA/io_alloc_cbm may be reflected by
 		/sys/fs/resctrl/info/L3CODE/io_alloc_cbm and vice versa.
 
+"io_alloc_min_cbm_all":
+		Set each CBM to its minimum number of consecutive bits.
+
+		Example::
+
+			# cat /sys/fs/resctrl/info/L3/io_alloc_cbm
+			0=ffff;1=00ff
+
+			# echo 0 > /sys/fs/resctrl/info/L3/io_alloc_min_cbm_all
+
+			# cat /sys/fs/resctrl/info/L3/io_alloc_cbm
+			0=0;1=0
+
 Memory bandwidth(MB) subdirectory contains the following files
 with respect to allocation:
 
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 3792ab4819dc..44aea6b534e0 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -276,7 +276,7 @@ static void rdt_get_cdp_config(int level)
 
 static void rdt_set_io_alloc_capable(struct rdt_resource *r)
 {
-	r->cache.io_alloc_capable = true;
+	r->cache.io_alloc.io_alloc_capable = true;
 }
 
 static void rdt_get_cdp_l3_config(void)
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index b20e705606b8..0f051d848422 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -57,14 +57,19 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
 		hw_dom = resctrl_to_arch_ctrl_dom(d);
 		msr_param.res = NULL;
 		for (t = 0; t < CDP_NUM_TYPES; t++) {
-			cfg = &hw_dom->d_resctrl.staged_config[t];
-			if (!cfg->have_new_ctrl)
-				continue;
-
-			idx = resctrl_get_config_index(closid, t);
-			if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
-				continue;
-			hw_dom->ctrl_val[idx] = cfg->new_ctrl;
+			if (resctrl_should_io_alloc_min_cbm(r)) {
+				idx = resctrl_get_config_index(closid, t);
+				hw_dom->ctrl_val[idx] = apply_io_alloc_min_cbm(r);
+			} else {
+				cfg = &hw_dom->d_resctrl.staged_config[t];
+				if (!cfg->have_new_ctrl)
+					continue;
+
+				idx = resctrl_get_config_index(closid, t);
+				if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
+					continue;
+				hw_dom->ctrl_val[idx] = cfg->new_ctrl;
+			}
 
 			if (!msr_param.res) {
 				msr_param.low = idx;
@@ -123,7 +128,7 @@ int resctrl_arch_io_alloc_enable(struct rdt_resource *r, bool enable)
 {
 	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 
-	if (hw_res->r_resctrl.cache.io_alloc_capable &&
+	if (hw_res->r_resctrl.cache.io_alloc.io_alloc_capable &&
 	    hw_res->sdciae_enabled != enable) {
 		_resctrl_sdciae_enable(r, enable);
 		hw_res->sdciae_enabled = enable;
diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c
index b2d178d3556e..6cbf4cfaf974 100644
--- a/fs/resctrl/ctrlmondata.c
+++ b/fs/resctrl/ctrlmondata.c
@@ -688,7 +688,7 @@ int resctrl_io_alloc_show(struct kernfs_open_file *of, struct seq_file *seq, voi
 
 	mutex_lock(&rdtgroup_mutex);
 
-	if (r->cache.io_alloc_capable) {
+	if (r->cache.io_alloc.io_alloc_capable) {
 		if (resctrl_arch_get_io_alloc_enabled(r))
 			seq_puts(seq, "enabled\n");
 		else
@@ -758,6 +758,50 @@ u32 resctrl_io_alloc_closid(struct rdt_resource *r)
 		return resctrl_arch_get_num_closid(r) - 1;
 }
 
+/*
+ * check_io_alloc_support() - Establish if io_alloc is supported
+ *
+ * @s: resctrl resource schema.
+ *
+ * This function must be called under the cpu hotplug lock
+ * and rdtgroup mutex.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int check_io_alloc_support(struct resctrl_schema *s)
+{
+	struct rdt_resource *r = s->res;
+
+	if (!r->cache.io_alloc.io_alloc_capable) {
+		rdt_last_cmd_printf("io_alloc is not supported on %s\n", s->name);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * check_io_alloc_enabled() - Establish if io_alloc is enabled
+ *
+ * @s: resctrl resource schema.
+ *
+ * This function must be called under the cpu hotplug lock
+ * and rdtgroup mutex.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int check_io_alloc_enabled(struct resctrl_schema *s)
+{
+	struct rdt_resource *r = s->res;
+
+	if (!resctrl_arch_get_io_alloc_enabled(r)) {
+		rdt_last_cmd_printf("io_alloc is not enabled on %s\n", s->name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
 			       size_t nbytes, loff_t off)
 {
@@ -777,11 +821,9 @@ ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
 
 	rdt_last_cmd_clear();
 
-	if (!r->cache.io_alloc_capable) {
-		rdt_last_cmd_printf("io_alloc is not supported on %s\n", s->name);
-		ret = -ENODEV;
+	ret = check_io_alloc_support(s);
+	if (ret)
 		goto out_unlock;
-	}
 
 	/* If the feature is already up to date, no action is needed. */
 	if (resctrl_arch_get_io_alloc_enabled(r) == enable)
@@ -828,6 +870,47 @@ ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
 	return ret ?: nbytes;
 }
 
+ssize_t resctrl_io_alloc_cbm_min_write(struct kernfs_open_file *of, char *buf,
+				       size_t nbytes, loff_t off)
+{
+	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
+	struct rdt_resource *r = s->res;
+	bool reset;
+	u32 io_alloc_closid;
+	int ret;
+
+	ret = kstrtobool(buf, &reset);
+	if (ret)
+		return ret;
+	if (reset)
+		return -EINVAL;
+
+	cpus_read_lock();
+	mutex_lock(&rdtgroup_mutex);
+
+	rdt_last_cmd_clear();
+
+	ret = check_io_alloc_support(s);
+	if (ret)
+		goto out_unlock;
+
+	ret = check_io_alloc_enabled(s);
+	if (ret)
+		goto out_unlock;
+
+	r->cache.io_alloc.io_alloc_min_cbm = true;
+
+	io_alloc_closid = resctrl_io_alloc_closid(r);
+	ret = resctrl_arch_update_domains(r, io_alloc_closid);
+
+	r->cache.io_alloc.io_alloc_min_cbm = false;
+out_unlock:
+	mutex_unlock(&rdtgroup_mutex);
+	cpus_read_unlock();
+
+	return ret ?: nbytes;
+}
+
 int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of, struct seq_file *seq, void *v)
 {
 	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
@@ -839,17 +922,13 @@ int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of, struct seq_file *seq,
 
 	rdt_last_cmd_clear();
 
-	if (!r->cache.io_alloc_capable) {
-		rdt_last_cmd_printf("io_alloc is not supported on %s\n", s->name);
-		ret = -ENODEV;
+	ret = check_io_alloc_support(s);
+	if (ret)
 		goto out_unlock;
-	}
 
-	if (!resctrl_arch_get_io_alloc_enabled(r)) {
-		rdt_last_cmd_printf("io_alloc is not enabled on %s\n", s->name);
-		ret = -EINVAL;
+	ret = check_io_alloc_enabled(s);
+	if (ret)
 		goto out_unlock;
-	}
 
 	/*
 	 * When CDP is enabled, the CBMs of the highest CLOSID of CDP_CODE and
@@ -928,17 +1007,13 @@ ssize_t resctrl_io_alloc_cbm_write(struct kernfs_open_file *of, char *buf,
 	mutex_lock(&rdtgroup_mutex);
 	rdt_last_cmd_clear();
 
-	if (!r->cache.io_alloc_capable) {
-		rdt_last_cmd_printf("io_alloc is not supported on %s\n", s->name);
-		ret = -ENODEV;
+	ret = check_io_alloc_support(s);
+	if (ret)
 		goto out_unlock;
-	}
 
-	if (!resctrl_arch_get_io_alloc_enabled(r)) {
-		rdt_last_cmd_printf("io_alloc is not enabled on %s\n", s->name);
-		ret = -EINVAL;
+	ret = check_io_alloc_enabled(s);
+	if (ret)
 		goto out_unlock;
-	}
 
 	io_alloc_closid = resctrl_io_alloc_closid(r);
 
diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h
index bff4a54ae333..f50f1ab562b0 100644
--- a/fs/resctrl/internal.h
+++ b/fs/resctrl/internal.h
@@ -442,6 +442,9 @@ int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of, struct seq_file *seq,
 			      void *v);
 ssize_t resctrl_io_alloc_cbm_write(struct kernfs_open_file *of, char *buf,
 				   size_t nbytes, loff_t off);
+ssize_t resctrl_io_alloc_cbm_min_write(struct kernfs_open_file *of, char *buf,
+				       size_t nbytes, loff_t off);
+
 u32 resctrl_io_alloc_closid(struct rdt_resource *r);
 
 #ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
index ea320dcf8aba..bd41ab5a8eb4 100644
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -1995,6 +1995,12 @@ static struct rftype res_common_files[] = {
 		.seq_show	= resctrl_io_alloc_cbm_show,
 		.write		= resctrl_io_alloc_cbm_write,
 	},
+	{
+		.name		= "io_alloc_min_cbm_all",
+		.mode		= 0644,
+		.kf_ops		= &rdtgroup_kf_single_ops,
+		.write		= resctrl_io_alloc_cbm_min_write,
+	},
 	{
 		.name		= "max_threshold_occupancy",
 		.mode		= 0644,
@@ -2195,11 +2201,13 @@ static void io_alloc_init(void)
 {
 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
 
-	if (r->cache.io_alloc_capable) {
+	if (r->cache.io_alloc.io_alloc_capable) {
 		resctrl_file_fflags_init("io_alloc", RFTYPE_CTRL_INFO |
 					 RFTYPE_RES_CACHE);
 		resctrl_file_fflags_init("io_alloc_cbm",
 					 RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE);
+		resctrl_file_fflags_init("io_alloc_min_cbm_all",
+					 RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE);
 	}
 }
 
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 54701668b3df..7987d458ebf8 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -215,7 +215,10 @@ struct resctrl_cache {
 	unsigned int	shareable_bits;
 	bool		arch_has_sparse_bitmasks;
 	bool		arch_has_per_cpu_cfg;
-	bool		io_alloc_capable;
+	struct {
+		bool	io_alloc_capable;
+		bool	io_alloc_min_cbm;
+	} io_alloc;
 };
 
 /**
@@ -415,6 +418,31 @@ static inline bool resctrl_is_mbm_event(enum resctrl_event_id eventid)
 		eventid <= QOS_L3_MBM_LOCAL_EVENT_ID);
 }
 
+/*
+ * apply_io_alloc_min_cbm() - Apply minimum io_alloc CBM
+ *
+ * @r: resctrl resource
+ *
+ * Return: Minimum number of consecutive io_alloc CBM bits to be set.
+ */
+static inline u32 apply_io_alloc_min_cbm(struct rdt_resource *r)
+{
+	return r->cache.min_cbm_bits;
+}
+
+/*
+ * resctrl_should_io_alloc_min_cbm() - Should the minimum io_alloc
+ *				       CBM be applied
+ * @r: resctrl resource
+ *
+ * Return: True if the minimum number of consecutive
+ * bits to be set in the io_alloc CBM should be applied.
+ */
+static inline bool resctrl_should_io_alloc_min_cbm(struct rdt_resource *r)
+{
+	return r->cache.io_alloc.io_alloc_min_cbm;
+}
+
 u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id eventid);
 
 /* Iterate over all memory bandwidth events */
-- 
2.51.0

