[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <TYAPR01MB6330D9919539D40ADA52B6228B54A@TYAPR01MB6330.jpnprd01.prod.outlook.com>
Date: Mon, 12 Jun 2023 05:39:15 +0000
From: "Shaopeng Tan (Fujitsu)" <tan.shaopeng@...itsu.com>
To: 'James Morse' <james.morse@....com>,
"x86@...nel.org" <x86@...nel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
CC: Fenghua Yu <fenghua.yu@...el.com>,
Reinette Chatre <reinette.chatre@...el.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
H Peter Anvin <hpa@...or.com>,
Babu Moger <Babu.Moger@....com>,
"shameerali.kolothum.thodi@...wei.com"
<shameerali.kolothum.thodi@...wei.com>,
D Scott Phillips OS <scott@...amperecomputing.com>,
"carl@...amperecomputing.com" <carl@...amperecomputing.com>,
"lcherian@...vell.com" <lcherian@...vell.com>,
"bobo.shaobowang@...wei.com" <bobo.shaobowang@...wei.com>,
"xingxin.hx@...nanolis.org" <xingxin.hx@...nanolis.org>,
"baolin.wang@...ux.alibaba.com" <baolin.wang@...ux.alibaba.com>,
Jamie Iles <quic_jiles@...cinc.com>,
Xin Hao <xhao@...ux.alibaba.com>,
"peternewman@...gle.com" <peternewman@...gle.com>,
"dfustini@...libre.com" <dfustini@...libre.com>
Subject: RE: [PATCH v4 15/24] x86/resctrl: Allow arch to allocate memory
needed in resctrl_arch_rmid_read()
Hello James,
> Depending on the number of monitors available, Arm's MPAM may need to
> allocate a monitor prior to reading the counter value. Allocating a contended
> resource may involve sleeping.
>
> add_rmid_to_limbo() calls resctrl_arch_rmid_read() for multiple domains, the
> allocation should be valid for all domains.
>
> __check_limbo() and mon_event_count() each make multiple calls to
> resctrl_arch_rmid_read(), to avoid extra work on contended systems, the
> allocation should be valid for multiple invocations of resctrl_arch_rmid_read().
>
> Add arch hooks for this allocation, which need calling before
> resctrl_arch_rmid_read(). The allocated monitor is passed to
> resctrl_arch_rmid_read(), then freed again afterwards. The helper can be
> called on any CPU, and can sleep.
>
> Tested-by: Shaopeng Tan <tan.shaopeng@...itsu.com>
> Signed-off-by: James Morse <james.morse@....com>
> ----
> Changes since v3:
> * Expanded comment.
> * Removed stray header include.
> * Reworded commit message.
> * Made ctx a void * instead of an int.
> ---
> arch/x86/include/asm/resctrl.h | 11 ++++++++++
> arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 5 +++++
> arch/x86/kernel/cpu/resctrl/internal.h | 1 +
> arch/x86/kernel/cpu/resctrl/monitor.c | 26
> +++++++++++++++++++----
> include/linux/resctrl.h | 5 ++++-
> 5 files changed, 43 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
> index 78376c19ee6f..20729364982b 100644
> --- a/arch/x86/include/asm/resctrl.h
> +++ b/arch/x86/include/asm/resctrl.h
> @@ -136,6 +136,17 @@ static inline u32 resctrl_arch_rmid_idx_encode(u32
> ignored, u32 rmid)
> return rmid;
> }
>
> +/* x86 can always read an rmid, nothing needs allocating */ struct
> +rdt_resource; static inline void *resctrl_arch_mon_ctx_alloc(struct
> +rdt_resource *r, int evtid) {
> + might_sleep();
> + return NULL;
> +};
> +
> +static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid,
> + void *ctx) { };
> +
> void resctrl_cpu_detect(struct cpuinfo_x86 *c);
>
> #else
> diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> index 6eeccad192ee..280d66fae21c 100644
> --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> @@ -546,6 +546,9 @@ void mon_event_read(struct rmid_read *rr, struct
> rdt_resource *r,
> rr->d = d;
> rr->val = 0;
> rr->first = first;
> + rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
> + if (IS_ERR(rr->arch_mon_ctx))
> + return;
>
> cpu = cpumask_any_housekeeping(&d->cpu_mask);
>
> @@ -559,6 +562,8 @@ void mon_event_read(struct rmid_read *rr, struct
> rdt_resource *r,
> smp_call_function_any(&d->cpu_mask, mon_event_count, rr,
> 1);
> else
> smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);
> +
> + resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
> }
>
> int rdtgroup_mondata_show(struct seq_file *m, void *arg) diff --git
> a/arch/x86/kernel/cpu/resctrl/internal.h
> b/arch/x86/kernel/cpu/resctrl/internal.h
> index 7960366b9434..a7e025cffdbc 100644
> --- a/arch/x86/kernel/cpu/resctrl/internal.h
> +++ b/arch/x86/kernel/cpu/resctrl/internal.h
> @@ -136,6 +136,7 @@ struct rmid_read {
> bool first;
> int err;
> u64 val;
> + void *arch_mon_ctx;
> };
>
> extern bool rdt_alloc_capable;
> diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c
> b/arch/x86/kernel/cpu/resctrl/monitor.c
> index fb33100e172b..6d140018358a 100644
> --- a/arch/x86/kernel/cpu/resctrl/monitor.c
> +++ b/arch/x86/kernel/cpu/resctrl/monitor.c
> @@ -264,7 +264,7 @@ static u64 mbm_overflow_count(u64 prev_msr, u64
> cur_msr, unsigned int width)
>
> int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
> u32 closid, u32 rmid, enum resctrl_event_id
> eventid,
> - u64 *val)
> + u64 *val, void *ignored)
> {
> struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
> struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); @@
> -331,9 +331,14 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
> u32 idx_limit = resctrl_arch_system_num_rmid_idx();
> struct rmid_entry *entry;
> u32 idx, cur_idx = 1;
> + void *arch_mon_ctx;
> bool rmid_dirty;
> u64 val = 0;
>
> + arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r,
> QOS_L3_OCCUP_EVENT_ID);
> + if (arch_mon_ctx < 0)
> + return;
> +
Would it be better to use the IS_ERR() macro here, as is done in mon_event_read()? Comparing a void pointer with `< 0` does not correctly detect ERR_PTR-encoded errors.
> /*
> * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
> * are marked as busy for occupancy < threshold. If the occupancy @@
> -347,7 +352,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
>
> entry = __rmid_entry(idx);
> if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid,
> - QOS_L3_OCCUP_EVENT_ID,
> &val)) {
> + QOS_L3_OCCUP_EVENT_ID,
> &val,
> + arch_mon_ctx)) {
> rmid_dirty = true;
> } else {
> rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
> @@ -360,6 +366,8 @@ void __check_limbo(struct rdt_domain *d, bool
> force_free)
> }
> cur_idx = idx + 1;
> }
> +
> + resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID,
> arch_mon_ctx);
> }
>
> bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d) @@ -539,7
> +547,7 @@ static int __mon_event_count(u32 closid, u32 rmid, struct
> rmid_read *rr)
> }
>
> rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid,
> - &tval);
> + &tval, rr->arch_mon_ctx);
> if (rr->err)
> return rr->err;
>
> @@ -589,7 +597,6 @@ void mon_event_count(void *info)
> int ret;
>
> rdtgrp = rr->rgrp;
> -
> ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr);
>
> /*
> @@ -749,11 +756,21 @@ static void mbm_update(struct rdt_resource *r, struct
> rdt_domain *d,
> if (is_mbm_total_enabled()) {
> rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
> rr.val = 0;
> + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
> + if (rr.arch_mon_ctx < 0)
> + return;
> +
Same here: `rr.arch_mon_ctx < 0` compares a void pointer with 0; IS_ERR(rr.arch_mon_ctx) would be the correct check.
> __mon_event_count(closid, rmid, &rr);
> +
> + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
> }
> if (is_mbm_local_enabled()) {
> rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
> rr.val = 0;
> + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
> + if (rr.arch_mon_ctx < 0)
> + return;
> +
Same here: this check should also use IS_ERR(). Also, returning early at this point appears to skip freeing the context allocated for the total event above — please confirm whether a resctrl_arch_mon_ctx_free() is needed on this path.
Best regards,
Shaopeng TAN
Powered by blists - more mailing lists