[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <895cee86-ac6e-43e7-aece-e283200384ef@intel.com>
Date: Tue, 2 Dec 2025 08:06:47 -0800
From: Reinette Chatre <reinette.chatre@...el.com>
To: Tony Luck <tony.luck@...el.com>, Fenghua Yu <fenghuay@...dia.com>, "Maciej
Wieczor-Retman" <maciej.wieczor-retman@...el.com>, Peter Newman
<peternewman@...gle.com>, James Morse <james.morse@....com>, Babu Moger
<babu.moger@....com>, Drew Fustini <dfustini@...libre.com>, Dave Martin
<Dave.Martin@....com>, Chen Yu <yu.c.chen@...el.com>
CC: <x86@...nel.org>, <linux-kernel@...r.kernel.org>,
<patches@...ts.linux.dev>
Subject: Re: [PATCH v14 07/32] x86,fs/resctrl: Use struct rdt_domain_hdr when
reading counters
Hi Tony,
On 11/24/25 10:53 AM, Tony Luck wrote:
...
> diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
> index 179962a81362..7765491ddb4c 100644
> --- a/fs/resctrl/monitor.c
> +++ b/fs/resctrl/monitor.c
> @@ -159,7 +159,7 @@ void __check_limbo(struct rdt_mon_domain *d, bool force_free)
> break;
>
> entry = __rmid_entry(idx);
> - if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid,
> + if (resctrl_arch_rmid_read(r, &d->hdr, entry->closid, entry->rmid,
> QOS_L3_OCCUP_EVENT_ID, &val,
> arch_mon_ctx)) {
> rmid_dirty = true;
> @@ -421,11 +421,16 @@ static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
> struct rdt_mon_domain *d;
> int cntr_id = -ENOENT;
> struct mbm_state *m;
> - int err, ret;
> u64 tval = 0;
>
> + if (!domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3)) {
> + rr->err = -EIO;
> + return -EINVAL;
> + }
> + d = container_of(rr->hdr, struct rdt_mon_domain, hdr);
> +
> if (rr->is_mbm_cntr) {
> - cntr_id = mbm_cntr_get(rr->r, rr->d, rdtgrp, rr->evtid);
> + cntr_id = mbm_cntr_get(rr->r, d, rdtgrp, rr->evtid);
> if (cntr_id < 0) {
> rr->err = -ENOENT;
> return -EINVAL;
> @@ -434,32 +439,41 @@ static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
>
> if (rr->first) {
> if (rr->is_mbm_cntr)
> - resctrl_arch_reset_cntr(rr->r, rr->d, closid, rmid, cntr_id, rr->evtid);
> + resctrl_arch_reset_cntr(rr->r, d, closid, rmid, cntr_id, rr->evtid);
> else
> - resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
> - m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
> + resctrl_arch_reset_rmid(rr->r, d, closid, rmid, rr->evtid);
> + m = get_mbm_state(d, closid, rmid, rr->evtid);
> if (m)
> memset(m, 0, sizeof(struct mbm_state));
> return 0;
> }
>
> - if (rr->d) {
> - /* Reading a single domain, must be on a CPU in that domain. */
> - if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask))
> - return -EINVAL;
> - if (rr->is_mbm_cntr)
> - rr->err = resctrl_arch_cntr_read(rr->r, rr->d, closid, rmid, cntr_id,
> - rr->evtid, &tval);
> - else
> - rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid,
> - rr->evtid, &tval, rr->arch_mon_ctx);
> - if (rr->err)
> - return rr->err;
> + /* Reading a single domain, must be on a CPU in that domain. */
> + if (!cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
> + return -EINVAL;
> + if (rr->is_mbm_cntr)
> + rr->err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
> + rr->evtid, &tval);
> + else
> + rr->err = resctrl_arch_rmid_read(rr->r, rr->hdr, closid, rmid,
> + rr->evtid, &tval, rr->arch_mon_ctx);
> + if (rr->err)
> + return rr->err;
>
> - rr->val += tval;
> + rr->val += tval;
>
> - return 0;
> - }
> + return 0;
> +}
> +
> +static int __l3_mon_event_count_sum(struct rdtgroup *rdtgrp, struct rmid_read *rr)
> +{
> + int cpu = smp_processor_id();
> + u32 closid = rdtgrp->closid;
> + u32 rmid = rdtgrp->mon.rmid;
> + struct rdt_mon_domain *d;
> + int cntr_id = -ENOENT;
> + u64 tval = 0;
> + int err, ret;
>
> /* Summing domains that share a cache, must be on a CPU for that cache. */
> if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
> @@ -480,7 +494,7 @@ static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
> err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
> rr->evtid, &tval);
This is not safe. The current __mon_event_count() implementation being refactored by this series
ensures that if rr->is_mbm_cntr is true then cntr_id is valid. This patch places that code in
__l3_mon_event_count() without an equivalent in the new __l3_mon_event_count_sum(). From what I
can tell, since __l3_mon_event_count_sum() initializes cntr_id to -ENOENT and never assigns it a
valid value, resctrl_arch_cntr_read() will be called with an invalid cntr_id that it cannot handle.
There is no overlap in support for SNC and assignable counters. Do you expect that this combination
should be supported? Even if it is, SNC is model-specific, so it may be reasonable to expect that when/if
a system supporting both features arrives it would need enabling work anyway. I thus propose, for simplicity,
that the handling of assignable counters by __l3_mon_event_count_sum() be dropped, albeit with a loud
complaint if it is ever called with rr->is_mbm_cntr set.
Reinette
Powered by blists - more mailing lists