Message-ID: <94c855b4-1a30-48e7-a4a4-26a57bf4a5c5@intel.com>
Date: Thu, 14 Aug 2025 14:58:35 -0700
From: Reinette Chatre <reinette.chatre@...el.com>
To: Tony Luck <tony.luck@...el.com>, Fenghua Yu <fenghuay@...dia.com>, "Maciej
Wieczor-Retman" <maciej.wieczor-retman@...el.com>, Peter Newman
<peternewman@...gle.com>, James Morse <james.morse@....com>, Babu Moger
<babu.moger@....com>, Drew Fustini <dfustini@...libre.com>, Dave Martin
<Dave.Martin@....com>, Chen Yu <yu.c.chen@...el.com>
CC: <x86@...nel.org>, <linux-kernel@...r.kernel.org>,
<patches@...ts.linux.dev>
Subject: Re: [PATCH v8 28/32] fs/resctrl: Move RMID initialization to first
mount

Hi Tony,

On 8/11/25 11:17 AM, Tony Luck wrote:
> resctrl assumesthat the number of supported RMIDs is enumerated during
"assumesthat" -> "assumes that"
> early initialization.
The context can be made a bit more specific though. For example:
L3 monitor features are enumerated during resctrl initialization,
and rmid_ptrs[], which tracks all RMIDs and whose size depends on
the number of supported RMIDs, is allocated at that time.
Telemetry monitor features are enumerated during resctrl mount and may
support a different number of RMIDs compared to L3 monitor features.
>
> Telemetry monitor events break that assumption because they are not
> enumerated in early initialization and may support a different number
> of RMIDs compared to L3 monitor features.
>
> Delay allocation and initialization of rmid_ptrs[] until first mount.
>
> Rename routines to match what they now do:
> dom_data_init() -> setup_rmid_lru_list()
> dom_data_exit() -> free_rmid_lru_list()
>
> Signed-off-by: Tony Luck <tony.luck@...el.com>
> ---
...
> diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
> index 0d9c8d4e0e9c..4c13b55db995 100644
> --- a/fs/resctrl/monitor.c
> +++ b/fs/resctrl/monitor.c
> @@ -802,20 +802,19 @@ void mbm_setup_overflow_handler(struct rdt_l3_mon_domain *dom, unsigned long del
> schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
> }
>
> -static int dom_data_init(struct rdt_resource *r)
> +int setup_rmid_lru_list(void)
> {
> u32 idx_limit = resctrl_arch_system_num_rmid_idx();
> struct rmid_entry *entry = NULL;
> - int err = 0, i;
> u32 idx;
> + int i;
>
> - mutex_lock(&rdtgroup_mutex);
> + if (rmid_ptrs)
> + return 0;
>
> rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL);
> - if (!rmid_ptrs) {
> - err = -ENOMEM;
> - goto out_unlock;
> - }
> + if (!rmid_ptrs)
> + return -ENOMEM;
>
> for (i = 0; i < idx_limit; i++) {
> entry = &rmid_ptrs[i];
> @@ -828,31 +827,20 @@ static int dom_data_init(struct rdt_resource *r)
> /*
> * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and
> * are always allocated. These are used for the rdtgroup_default
> - * control group, which will be setup later in resctrl_init().
> + * control group, which was setup earlier in rdtgroup_setup_default().
> */
> idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
> RESCTRL_RESERVED_RMID);
> entry = __rmid_entry(idx);
> list_del(&entry->list);
>
> -out_unlock:
> - mutex_unlock(&rdtgroup_mutex);
> -
> - return err;
> + return 0;
> }
>
Removing the mutex lock/unlock from setup_rmid_lru_list() is correct since
setup_rmid_lru_list() is called with rdtgroup_mutex held.
free_rmid_lru_list() is not called with rdtgroup_mutex held, so why is it
ok to remove the mutex lock/unlock below?
> -static void dom_data_exit(struct rdt_resource *r)
> +void free_rmid_lru_list(void)
> {
> - mutex_lock(&rdtgroup_mutex);
> -
> - if (!r->mon_capable)
> - goto out_unlock;
> -
> kfree(rmid_ptrs);
> rmid_ptrs = NULL;
> -
> -out_unlock:
> - mutex_unlock(&rdtgroup_mutex);
> }
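If the expectation is that free_rmid_lru_list() is only ever called with
rdtgroup_mutex held, that could at least be documented and enforced with a
lockdep assertion, for example (just a sketch):

	void free_rmid_lru_list(void)
	{
		lockdep_assert_held(&rdtgroup_mutex);

		kfree(rmid_ptrs);
		rmid_ptrs = NULL;
	}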
>
> #define MON_EVENT(_eventid, _name, _res, _fp) \
> @@ -914,7 +902,8 @@ bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid)
> * resctrl_mon_l3_resource_init() - Initialise global monitoring structures.
> *
> * Allocate and initialise global monitor resources that do not belong to a
> - * specific domain. i.e. the rmid_ptrs[] used for the limbo and free lists.
> + * specific domain. i.e. the closid_num_dirty_rmid[] used to find the CLOSID
> + * with the cleanest set of RMIDs.
> * Called once during boot after the struct rdt_resource's have been configured
> * but before the filesystem is mounted.
> * Resctrl's cpuhp callbacks may be called before this point to bring a domain
> @@ -925,7 +914,6 @@ bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid)
> int resctrl_mon_l3_resource_init(void)
> {
> struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
> - int ret;
>
> if (!r->mon_capable)
> return 0;
> @@ -947,15 +935,6 @@ int resctrl_mon_l3_resource_init(void)
> closid_num_dirty_rmid = tmp;
> }
>
> - ret = dom_data_init(r);
> - if (ret) {
> - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
> - kfree(closid_num_dirty_rmid);
> - closid_num_dirty_rmid = NULL;
> - }
> - return ret;
> - }
> -
> if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
> mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].configurable = true;
> resctrl_file_fflags_init("mbm_total_bytes_config",
> @@ -977,12 +956,8 @@ int resctrl_mon_l3_resource_init(void)
>
> void resctrl_mon_l3_resource_exit(void)
> {
> - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
> -
> if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
> kfree(closid_num_dirty_rmid);
> closid_num_dirty_rmid = NULL;
> }
> -
> - dom_data_exit(r);
> }
> diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
> index 5352480eb55c..d1ecc5cd3089 100644
> --- a/fs/resctrl/rdtgroup.c
> +++ b/fs/resctrl/rdtgroup.c
> @@ -2599,6 +2599,12 @@ static int rdt_get_tree(struct fs_context *fc)
> goto out;
> }
>
> + if (resctrl_arch_mon_capable()) {
> + ret = setup_rmid_lru_list();
> + if (ret)
> + goto out;
> + }
> +
> ret = rdtgroup_setup_root(ctx);
> if (ret)
> goto out;
Shouldn't this be cleaned up on failure? I expected to see free_rmid_lru_list()
in rdt_get_tree()'s failure paths.
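For example, something along these lines (just a sketch, the exact labels
depend on how the rest of rdt_get_tree() unwinds on error):

	if (resctrl_arch_mon_capable()) {
		ret = setup_rmid_lru_list();
		if (ret)
			goto out;
	}

	ret = rdtgroup_setup_root(ctx);
	if (ret)
		goto out_free_rmid_lru;

	...

	/* Undo setup_rmid_lru_list() if a later step fails. */
out_free_rmid_lru:
	free_rmid_lru_list();
out:
	...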
> @@ -4408,4 +4414,5 @@ void resctrl_exit(void)
> */
>
> resctrl_mon_l3_resource_exit();
> + free_rmid_lru_list();
> }
Reinette