[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <65124831-2c00-4ab7-91db-f8e348686d7d@intel.com>
Date: Fri, 21 Mar 2025 15:58:39 -0700
From: Reinette Chatre <reinette.chatre@...el.com>
To: Babu Moger <babu.moger@....com>, <tglx@...utronix.de>, <mingo@...hat.com>,
<bp@...en8.de>, <dave.hansen@...ux.intel.com>
CC: <x86@...nel.org>, <hpa@...or.com>, <akpm@...ux-foundation.org>,
<paulmck@...nel.org>, <thuth@...hat.com>, <rostedt@...dmis.org>,
<xiongwei.song@...driver.com>, <pawan.kumar.gupta@...ux.intel.com>,
<jpoimboe@...nel.org>, <daniel.sneddon@...ux.intel.com>,
<thomas.lendacky@....com>, <perry.yuan@....com>, <sandipan.das@....com>,
<kai.huang@...el.com>, <seanjc@...gle.com>, <xin3.li@...el.com>,
<ebiggers@...gle.com>, <andrew.cooper3@...rix.com>,
<mario.limonciello@....com>, <tan.shaopeng@...itsu.com>,
<james.morse@....com>, <tony.luck@...el.com>, <peternewman@...gle.com>,
<linux-doc@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<eranian@...gle.com>, <corbet@....net>
Subject: Re: [PATCH v3 6/7] x86/resctrl: Introduce interface to display
io_alloc CBMs
Hi Babu,
On 1/30/25 1:20 PM, Babu Moger wrote:
> The io_alloc feature in resctrl enables system software to configure
> the portion of the L3 cache allocated for I/O traffic.
>
> Add the interface to display CBMs (Capacity Bit Mask) of io_alloc
> feature.
>
> When CDP is enabled, io_alloc routes traffic using the highest CLOSID
> which corresponds to CDP_CODE. Add a check for the CDP resource type.
It is not obvious to me what is meant with "highest CLOSID
which corresponds to CDP_CODE" ... how about "highest CLOSID used by
a L3CODE resource"?
>
> Signed-off-by: Babu Moger <babu.moger@....com>
> ---
> v3: Minor changes due to changes in resctrl_arch_get_io_alloc_enabled()
> and resctrl_io_alloc_closid_get().
> Added the check to verify CDP resource type.
> Updated the commit log.
>
> v2: Fixed to display only on L3 resources.
> Added the locks while processing.
> Rename the display to io_alloc_cbm (from sdciae_cmd).
> ---
> arch/x86/kernel/cpu/resctrl/core.c | 2 ++
> arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 2 +-
> arch/x86/kernel/cpu/resctrl/internal.h | 1 +
> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 38 +++++++++++++++++++++++
> 4 files changed, 42 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
> index 88bc95c14ea8..030f738dea8d 100644
> --- a/arch/x86/kernel/cpu/resctrl/core.c
> +++ b/arch/x86/kernel/cpu/resctrl/core.c
> @@ -311,6 +311,8 @@ static void rdt_set_io_alloc_capable(struct rdt_resource *r)
> r->cache.io_alloc_capable = true;
> resctrl_file_fflags_init("io_alloc",
> RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE);
> + resctrl_file_fflags_init("io_alloc_cbm",
> + RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE);
> }
>
> static void rdt_get_cdp_l3_config(void)
> diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> index 536351159cc2..d272dea43924 100644
> --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> @@ -444,7 +444,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
> return hw_dom->ctrl_val[idx];
> }
>
> -static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
> +void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
> {
> struct rdt_resource *r = schema->res;
> struct rdt_ctrl_domain *dom;
> diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
> index 61bc609e932b..07cf8409174d 100644
> --- a/arch/x86/kernel/cpu/resctrl/internal.h
> +++ b/arch/x86/kernel/cpu/resctrl/internal.h
> @@ -668,4 +668,5 @@ void resctrl_file_fflags_init(const char *config, unsigned long fflags);
> void rdt_staged_configs_clear(void);
> bool closid_allocated(unsigned int closid);
> int resctrl_find_cleanest_closid(void);
> +void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid);
> #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 37295dd14abe..81b9d8c5dabf 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -1967,6 +1967,38 @@ static ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
> return ret ?: nbytes;
> }
>
> +static int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of,
> + struct seq_file *seq, void *v)
> +{
> + struct resctrl_schema *s = of->kn->parent->priv;
> + struct rdt_resource *r = s->res;
> + u32 io_alloc_closid;
> + int ret = 0;
> +
> + if (!r->cache.io_alloc_capable || s->conf_type == CDP_DATA) {
> + rdt_last_cmd_puts("io_alloc feature is not supported on the resource\n");
rdt_last_cmd_puts() has to be called with rdtgroup_mutex held; also clear the last-command buffer (rdt_last_cmd_clear()) before use.
> + return -EINVAL;
How about ENODEV?
> + }
> +
> + cpus_read_lock();
> + mutex_lock(&rdtgroup_mutex);
> +
> + if (!resctrl_arch_get_io_alloc_enabled(r)) {
> + rdt_last_cmd_puts("io_alloc feature is not enabled\n");
> + ret = -EINVAL;
> + goto cbm_show_out;
> + }
> +
> + io_alloc_closid = resctrl_io_alloc_closid_get(r, s);
> +
> + show_doms(seq, s, io_alloc_closid);
> +
> +cbm_show_out:
> + mutex_unlock(&rdtgroup_mutex);
> + cpus_read_unlock();
> + return ret;
> +}
> +
> /* rdtgroup information files for one cache resource. */
> static struct rftype res_common_files[] = {
> {
> @@ -2126,6 +2158,12 @@ static struct rftype res_common_files[] = {
> .seq_show = resctrl_io_alloc_show,
> .write = resctrl_io_alloc_write,
> },
> + {
> + .name = "io_alloc_cbm",
> + .mode = 0444,
> + .kf_ops = &rdtgroup_kf_single_ops,
> + .seq_show = resctrl_io_alloc_cbm_show,
> + },
> {
> .name = "mba_MBps_event",
> .mode = 0644,
Reinette
Powered by blists - more mailing lists