Message-ID: <6870a52c-4594-4ec0-aa56-1bf9e1fadeb0@intel.com>
Date: Tue, 17 Jun 2025 21:01:46 -0700
From: Reinette Chatre <reinette.chatre@...el.com>
To: Babu Moger <babu.moger@....com>, <corbet@....net>, <tony.luck@...el.com>,
	<Dave.Martin@....com>, <james.morse@....com>, <tglx@...utronix.de>,
	<mingo@...hat.com>, <bp@...en8.de>, <dave.hansen@...ux.intel.com>
CC: <x86@...nel.org>, <hpa@...or.com>, <akpm@...ux-foundation.org>,
	<paulmck@...nel.org>, <rostedt@...dmis.org>, <thuth@...hat.com>,
	<ardb@...nel.org>, <gregkh@...uxfoundation.org>, <seanjc@...gle.com>,
	<thomas.lendacky@....com>, <pawan.kumar.gupta@...ux.intel.com>,
	<perry.yuan@....com>, <yosry.ahmed@...ux.dev>, <kai.huang@...el.com>,
	<xiaoyao.li@...el.com>, <peterz@...radead.org>, <kan.liang@...ux.intel.com>,
	<mario.limonciello@....com>, <xin3.li@...el.com>, <sohil.mehta@...el.com>,
	<chang.seok.bae@...el.com>, <andrew.cooper3@...rix.com>,
	<ebiggers@...gle.com>, <ak@...ux.intel.com>, <xin@...or.com>,
	<linux-doc@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v6 6/8] fs/resctrl: Introduce interface to display
 io_alloc CBMs

Hi Babu,

On 6/11/25 2:23 PM, Babu Moger wrote:
> The io_alloc feature in resctrl enables system software to configure
> the portion of the L3 cache allocated for I/O traffic.

Drop L3?

> 
> Add the interface to display CBMs (Capacity Bit Mask) of io_alloc
> feature.

After the fs/arch split it is not always obvious what is meant by
"interface" ... it could be a new API between fs and arch or it could be
a new resctrl file.
This can be made specific:
	Add "io_alloc_cbm" resctrl file to display ...

> 
> The CBM interface file io_alloc_cbm will reside in the info directory
> (e.g., /sys/fs/resctrl/info/L3/). Displaying the resource name is not
> necessary. Pass the resource name to show_doms() and print it only if
> the name is valid. For io_alloc, pass NULL to suppress printing the
> resource name.
> 
> When CDP is enabled, io_alloc routes traffic using the highest CLOSID
> associated with an L3CODE resource. However, CBMs can be accessed via
> either L3CODE or L3DATA resources.
> 
> Signed-off-by: Babu Moger <babu.moger@....com>
> ---

...

> ---
>  Documentation/filesystems/resctrl.rst | 13 +++++++
>  fs/resctrl/ctrlmondata.c              |  8 +++--
>  fs/resctrl/internal.h                 |  2 ++
>  fs/resctrl/rdtgroup.c                 | 51 ++++++++++++++++++++++++++-
>  4 files changed, 70 insertions(+), 4 deletions(-)
> 
> diff --git a/Documentation/filesystems/resctrl.rst b/Documentation/filesystems/resctrl.rst
> index 03c829b2c276..b31748ec8c61 100644
> --- a/Documentation/filesystems/resctrl.rst
> +++ b/Documentation/filesystems/resctrl.rst
> @@ -169,6 +169,19 @@ related to allocation:
>  		When CDP is enabled, io_alloc routes I/O traffic using the highest
>  		CLOSID allocated for the instruction cache (L3CODE).
>  
> +"io_alloc_cbm":
> +		Capacity Bit Masks (CBMs) available to supported IO devices which
> +		can directly insert cache lines in L3 which can help to reduce the

"CBMs that describe the portions of cache instances to which I/O traffic               
from supported IO devices are routed."

Please check ...  there seems to be some inconsistency in "IO" vs "I/O" use.

Also consider something like,
"When CDP is enabled "io_alloc_cbm" associated with the DATA 
 and CODE resources may reflect the same values. For example, values read from
 and written to /sys/fs/resctrl/info/L3DATA/io_alloc_cbm may be reflected by
 /sys/fs/resctrl/info/L3CODE/io_alloc_cbm and vice versa."
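E.g. (hypothetical output on a system with CDP enabled, just to
illustrate the point):

	# cat /sys/fs/resctrl/info/L3CODE/io_alloc_cbm
	0=ffff;1=ffff
	# cat /sys/fs/resctrl/info/L3DATA/io_alloc_cbm
	0=ffff;1=ffff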
What do you think?

> +		latency. CBMs are displayed in the following format:
> +
> +			<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
> +
> +		Example::
> +
> +			# cat /sys/fs/resctrl/info/L3/io_alloc_cbm
> +			0=ffff;1=ffff
> +
> +
>  Memory bandwidth(MB) subdirectory contains the following files
>  with respect to allocation:
>  
> diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c
> index 6ed2dfd4dbbd..ea039852569a 100644
> --- a/fs/resctrl/ctrlmondata.c
> +++ b/fs/resctrl/ctrlmondata.c
> @@ -381,7 +381,8 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
>  	return ret ?: nbytes;
>  }
>  
> -static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
> +void show_doms(struct seq_file *s, struct resctrl_schema *schema, char *resource_name,
> +	       int closid)
>  {
>  	struct rdt_resource *r = schema->res;
>  	struct rdt_ctrl_domain *dom;
> @@ -391,7 +392,8 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int clo
>  	/* Walking r->domains, ensure it can't race with cpuhp */
>  	lockdep_assert_cpus_held();
>  
> -	seq_printf(s, "%*s:", max_name_width, schema->name);
> +	if (resource_name)
> +		seq_printf(s, "%*s:", max_name_width, resource_name);
>  	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
>  		if (sep)
>  			seq_puts(s, ";");
> @@ -437,7 +439,7 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
>  			closid = rdtgrp->closid;
>  			list_for_each_entry(schema, &resctrl_schema_all, list) {
>  				if (closid < schema->num_closid)
> -					show_doms(s, schema, closid);
> +					show_doms(s, schema, schema->name, closid);
>  			}
>  		}
>  	} else {
> diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h
> index 9a8cf6f11151..14f3697c1187 100644
> --- a/fs/resctrl/internal.h
> +++ b/fs/resctrl/internal.h
> @@ -374,6 +374,8 @@ void rdt_staged_configs_clear(void);
>  bool closid_allocated(unsigned int closid);
>  
>  int resctrl_find_cleanest_closid(void);
> +void show_doms(struct seq_file *s, struct resctrl_schema *schema,
> +	       char *name, int closid);
>  
>  #ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
>  int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
> diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
> index bbc032b4d0e9..0c2d2cf4baa1 100644
> --- a/fs/resctrl/rdtgroup.c
> +++ b/fs/resctrl/rdtgroup.c
> @@ -1997,6 +1997,46 @@ static ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
>  	return ret ?: nbytes;
>  }
>  
> +static int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of,
> +				     struct seq_file *seq, void *v)
> +{
> +	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
> +	struct rdt_resource *r = s->res;
> +	u32 io_alloc_closid;
> +	int ret = 0;
> +
> +	cpus_read_lock();
> +	mutex_lock(&rdtgroup_mutex);
> +
> +	rdt_last_cmd_clear();
> +
> +	if (!r->cache.io_alloc_capable) {
> +		rdt_last_cmd_puts("io_alloc feature is not supported on the resource\n");
> +		ret = -ENODEV;
> +		goto cbm_show_out;

out_unlock

> +	}
> +
> +	if (!resctrl_arch_get_io_alloc_enabled(r)) {
> +		rdt_last_cmd_puts("io_alloc feature is not enabled\n");
> +		ret = -EINVAL;
> +		goto cbm_show_out;
> +	}
> +
> +	io_alloc_closid = resctrl_io_alloc_closid_get(r);
> +	if (io_alloc_closid < 0) {

Another example where io_alloc_closid must be valid thanks to the earlier
resctrl_arch_get_io_alloc_enabled(r) check. Note also that io_alloc_closid
is declared u32, so this "< 0" comparison can never be true.

> +		rdt_last_cmd_puts("Max CLOSID to support io_alloc is not available\n");
> +		ret = -EINVAL;
> +		goto cbm_show_out;
> +	}
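A minimal sketch of the simplification (assuming, per the above, that
resctrl_io_alloc_closid_get() cannot fail once io_alloc is enabled):

	/* io_alloc is enabled, so the highest CLOSID is known to be valid. */
	io_alloc_closid = resctrl_io_alloc_closid_get(r);
	show_doms(seq, resctrl_schema_io_alloc(s), NULL, io_alloc_closid);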
> +
> +	show_doms(seq, resctrl_schema_io_alloc(s), NULL, io_alloc_closid);
> +
> +cbm_show_out:

out_unlock ... to match rest of resctrl

> +	mutex_unlock(&rdtgroup_mutex);
> +	cpus_read_unlock();
> +	return ret;
> +}
> +
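With the label renamed to match the rest of resctrl, the exit path could
look something like (sketch only):

	if (!r->cache.io_alloc_capable) {
		rdt_last_cmd_puts("io_alloc feature is not supported on the resource\n");
		ret = -ENODEV;
		goto out_unlock;
	}

	...

out_unlock:
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;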
>  /* rdtgroup information files for one cache resource. */
>  static struct rftype res_common_files[] = {
>  	{

Reinette
