[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CALPaoCjJXHD+HgFizzvNEvBorbUcJLTngLb7UJy-uMdybhCfrg@mail.gmail.com>
Date: Thu, 6 Nov 2025 17:15:25 +0100
From: Peter Newman <peternewman@...gle.com>
To: James Morse <james.morse@....com>
Cc: linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
linux-acpi@...r.kernel.org,
D Scott Phillips OS <scott@...amperecomputing.com>, carl@...amperecomputing.com,
lcherian@...vell.com, bobo.shaobowang@...wei.com, tan.shaopeng@...itsu.com,
baolin.wang@...ux.alibaba.com, Jamie Iles <quic_jiles@...cinc.com>,
Xin Hao <xhao@...ux.alibaba.com>, dfustini@...libre.com, amitsinght@...vell.com,
David Hildenbrand <david@...hat.com>, Dave Martin <dave.martin@....com>, Koba Ko <kobak@...dia.com>,
Shanker Donthineni <sdonthineni@...dia.com>, fenghuay@...dia.com, baisheng.gao@...soc.com,
Jonathan Cameron <jonathan.cameron@...wei.com>, Rob Herring <robh@...nel.org>,
Rohit Mathew <rohit.mathew@....com>, Rafael Wysocki <rafael@...nel.org>, Len Brown <lenb@...nel.org>,
Lorenzo Pieralisi <lpieralisi@...nel.org>, Hanjun Guo <guohanjun@...wei.com>,
Sudeep Holla <sudeep.holla@....com>, Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Danilo Krummrich <dakr@...nel.org>, Jeremy Linton <jeremy.linton@....com>, Gavin Shan <gshan@...hat.com>,
Ben Horgan <ben.horgan@....com>
Subject: Re: [PATCH v3 26/29] arm_mpam: Use long MBWU counters if supported
Hi Ben (and James),
On Fri, Oct 17, 2025 at 8:59 PM James Morse <james.morse@....com> wrote:
>
> From: Rohit Mathew <rohit.mathew@....com>
>
> Now that the larger counter sizes are probed, make use of them.
>
> Callers of mpam_msmon_read() may not know (or care!) about the different
> counter sizes. Allow them to specify mpam_feat_msmon_mbwu and have the
> driver pick the counter to use.
>
> Only 32bit accesses to the MSC are required to be supported by the
> spec, but these registers are 64bits. The lower half may overflow
> into the higher half between two 32bit reads. To avoid this, use
> a helper that reads the top half multiple times to check for overflow.
>
> Signed-off-by: Rohit Mathew <rohit.mathew@....com>
> [morse: merged multiple patches from Rohit, added explicit counter selection ]
> Signed-off-by: James Morse <james.morse@....com>
> Reviewed-by: Ben Horgan <ben.horgan@....com>
> Reviewed-by: Jonathan Cameron <jonathan.cameron@...wei.com>
> Reviewed-by: Fenghua Yu <fenghuay@...dia.com>
> Tested-by: Fenghua Yu <fenghuay@...dia.com>
> ---
> Changes since v2:
> * Removed mpam_feat_msmon_mbwu as a top-level bit for explicit 31bit counter
> selection.
> * Allow callers of mpam_msmon_read() to specify mpam_feat_msmon_mbwu and have
> the driver pick a supported counter size.
> * Rephrased commit message.
>
> Changes since v1:
> * Only clear OFLOW_STATUS_L on MBWU counters.
>
> Changes since RFC:
> * Commit message wrangling.
> * Refer to 31 bit counters as opposed to 32 bit (registers).
> ---
> drivers/resctrl/mpam_devices.c | 134 ++++++++++++++++++++++++++++-----
> 1 file changed, 116 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/resctrl/mpam_devices.c b/drivers/resctrl/mpam_devices.c
> index f4d07234ce10..c207a6d2832c 100644
> --- a/drivers/resctrl/mpam_devices.c
> +++ b/drivers/resctrl/mpam_devices.c
> @@ -897,6 +897,48 @@ struct mon_read {
> int err;
> };
>
> +static bool mpam_ris_has_mbwu_long_counter(struct mpam_msc_ris *ris)
> +{
> + return (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props) ||
> + mpam_has_feature(mpam_feat_msmon_mbwu_44counter, &ris->props));
> +}
> +
> +static u64 mpam_msc_read_mbwu_l(struct mpam_msc *msc)
> +{
> + int retry = 3;
> + u32 mbwu_l_low;
> + u64 mbwu_l_high1, mbwu_l_high2;
> +
> + mpam_mon_sel_lock_held(msc);
> +
> + WARN_ON_ONCE((MSMON_MBWU_L + sizeof(u64)) > msc->mapped_hwpage_sz);
> + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));
> +
> + mbwu_l_high2 = __mpam_read_reg(msc, MSMON_MBWU_L + 4);
> + do {
> + mbwu_l_high1 = mbwu_l_high2;
> + mbwu_l_low = __mpam_read_reg(msc, MSMON_MBWU_L);
> + mbwu_l_high2 = __mpam_read_reg(msc, MSMON_MBWU_L + 4);
> +
> + retry--;
> + } while (mbwu_l_high1 != mbwu_l_high2 && retry > 0);
> +
> + if (mbwu_l_high1 == mbwu_l_high2)
> + return (mbwu_l_high1 << 32) | mbwu_l_low;
> + return MSMON___NRDY_L;
> +}
> +
> +static void mpam_msc_zero_mbwu_l(struct mpam_msc *msc)
> +{
> + mpam_mon_sel_lock_held(msc);
> +
> + WARN_ON_ONCE((MSMON_MBWU_L + sizeof(u64)) > msc->mapped_hwpage_sz);
> + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));
> +
> + __mpam_write_reg(msc, MSMON_MBWU_L, 0);
> + __mpam_write_reg(msc, MSMON_MBWU_L + 4, 0);
> +}
> +
> static void gen_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val,
> u32 *flt_val)
> {
> @@ -924,7 +966,9 @@ static void gen_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val,
> ctx->csu_exclude_clean);
>
> break;
> - case mpam_feat_msmon_mbwu:
> + case mpam_feat_msmon_mbwu_31counter:
> + case mpam_feat_msmon_mbwu_44counter:
> + case mpam_feat_msmon_mbwu_63counter:
> *ctl_val |= MSMON_CFG_MBWU_CTL_TYPE_MBWU;
>
> if (mpam_has_feature(mpam_feat_msmon_mbwu_rwbw, &m->ris->props))
> @@ -946,7 +990,9 @@ static void read_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val,
> *ctl_val = mpam_read_monsel_reg(msc, CFG_CSU_CTL);
> *flt_val = mpam_read_monsel_reg(msc, CFG_CSU_FLT);
> return;
> - case mpam_feat_msmon_mbwu:
> + case mpam_feat_msmon_mbwu_31counter:
> + case mpam_feat_msmon_mbwu_44counter:
> + case mpam_feat_msmon_mbwu_63counter:
> *ctl_val = mpam_read_monsel_reg(msc, CFG_MBWU_CTL);
> *flt_val = mpam_read_monsel_reg(msc, CFG_MBWU_FLT);
> return;
> @@ -959,6 +1005,9 @@ static void read_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val,
> static void clean_msmon_ctl_val(u32 *cur_ctl)
> {
> *cur_ctl &= ~MSMON_CFG_x_CTL_OFLOW_STATUS;
> +
> + if (FIELD_GET(MSMON_CFG_x_CTL_TYPE, *cur_ctl) == MSMON_CFG_MBWU_CTL_TYPE_MBWU)
> + *cur_ctl &= ~MSMON_CFG_MBWU_CTL_OFLOW_STATUS_L;
> }
>
> static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val,
> @@ -978,10 +1027,15 @@ static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val,
> mpam_write_monsel_reg(msc, CSU, 0);
> mpam_write_monsel_reg(msc, CFG_CSU_CTL, ctl_val | MSMON_CFG_x_CTL_EN);
> break;
> - case mpam_feat_msmon_mbwu:
> + case mpam_feat_msmon_mbwu_44counter:
> + case mpam_feat_msmon_mbwu_63counter:
> + mpam_msc_zero_mbwu_l(m->ris->vmsc->msc);
> + fallthrough;
> + case mpam_feat_msmon_mbwu_31counter:
> mpam_write_monsel_reg(msc, CFG_MBWU_FLT, flt_val);
> mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val);
> mpam_write_monsel_reg(msc, MBWU, 0);
The fallthrough above seems problematic, assuming that writing MBWU=0
last in the 31-bit case was intentional. For long counters, this
zeroes the counter before updating the filter/control registers, and
then clears only the 32-bit version of the counter. This fails to
clear the NRDY bit on the long counter, which isn't cleared by
software anywhere else.
From section 10.3.2 of the MPAM spec shared:
"On a counting monitor, the NRDY bit remains set until it is reset by
software writing it as 0 in the monitor register, or automatically
after the monitor is captured in the capture register by a capture
event"
If I update the 63-bit case to call
mpam_msc_zero_mbwu_l(m->ris->vmsc->msc) after updating the
control/filter registers (in addition to the other items I pointed
out in my last reply), I'm able to read MBWU counts from my hardware
through mbm_total_bytes.
Thanks,
-Peter
Powered by blists - more mailing lists