Message-ID: <20250721124104.806120-6-quic_zhonhan@quicinc.com>
Date: Mon, 21 Jul 2025 20:41:04 +0800
From: Zhongqiu Han <quic_zhonhan@...cinc.com>
To: <rafael@...nel.org>, <lenb@...nel.org>, <pavel@...nel.org>,
<tony.luck@...el.com>, <reinette.chatre@...el.com>,
<Dave.Martin@....com>, <james.morse@....com>, <ulf.hansson@...aro.org>,
<amit.kucheria@...aro.org>, <christian.loehle@....com>
CC: <linux-pm@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<quic_zhonhan@...cinc.com>
Subject: [PATCH v2 5/5] resctrl: Replace PM QoS logic with cpu_affinity_latency_qos_* helpers

Use the cpu_affinity_latency_qos_* helpers to replace the open-coded
dev_pm_qos_request handling for CPU masks in pseudo-locking.

These helpers encapsulate latency QoS operations for a group of CPUs,
providing a consistent mechanism for adding and removing requests and for
error handling. This improves code clarity and maintainability. Since the
resctrl filesystem is used on different platforms, a unified interface
also makes it easier to adapt to platform-specific CPU masks and latency
QoS values.
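
As a minimal usage sketch, with the helper signatures inferred from the
call sites in the diff below (not taken verbatim from the earlier patches
in this series that introduce them):

	struct cpu_affinity_qos_req pm_reqs;
	int ret;

	/* Add one resume-latency request (30 us) per CPU in the mask. */
	ret = cpu_affinity_latency_qos_add(&pm_reqs, cpu_online_mask, 30);
	if (ret)
		return ret;

	/* Tear down every request added above in a single call. */
	cpu_affinity_latency_qos_remove(&pm_reqs);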

The only functional change, and a beneficial one, is that when the latency
QoS value is updated for a CPU in the mask, the interface actively wakes
that CPU so the new QoS setting takes effect immediately. This helps
enforce latency constraints in a timely manner on performance-critical
paths.
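
For illustration, that wake-up could be built on existing kernel
primitives; this is an assumption about the helper's internals, not code
from this patch, and the field/variable names below are hypothetical:

	/* Tighten the per-CPU resume-latency constraint... */
	dev_pm_qos_update_request(&req->dev_req, latency_us);

	/* ...and nudge the CPU out of any deep idle state so the new
	 * constraint is honoured immediately rather than on the next
	 * natural wakeup.
	 */
	wake_up_if_idle(cpu);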

Signed-off-by: Zhongqiu Han <quic_zhonhan@...cinc.com>
---
 fs/resctrl/pseudo_lock.c | 51 +++-------------------------------------
 include/linux/resctrl.h  |  3 ++-
 2 files changed, 5 insertions(+), 49 deletions(-)

diff --git a/fs/resctrl/pseudo_lock.c b/fs/resctrl/pseudo_lock.c
index 87bbc2605de1..8be3ae804af9 100644
--- a/fs/resctrl/pseudo_lock.c
+++ b/fs/resctrl/pseudo_lock.c
@@ -108,25 +108,9 @@ static struct rdtgroup *region_find_by_minor(unsigned int minor)
 	return rdtgrp_match;
 }
 
-/**
- * struct pseudo_lock_pm_req - A power management QoS request list entry
- * @list:	Entry within the @pm_reqs list for a pseudo-locked region
- * @req:	PM QoS request
- */
-struct pseudo_lock_pm_req {
-	struct list_head list;
-	struct dev_pm_qos_request req;
-};
-
 static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
 {
-	struct pseudo_lock_pm_req *pm_req, *next;
-
-	list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
-		dev_pm_qos_remove_request(&pm_req->req);
-		list_del(&pm_req->list);
-		kfree(pm_req);
-	}
+	cpu_affinity_latency_qos_remove(&plr->pm_reqs);
 }
 
 /**
@@ -149,36 +133,8 @@ static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
  */
 static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
 {
-	struct pseudo_lock_pm_req *pm_req;
-	int cpu;
-	int ret;
-
-	for_each_cpu(cpu, &plr->d->hdr.cpu_mask) {
-		pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
-		if (!pm_req) {
-			rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
-			ret = -ENOMEM;
-			goto out_err;
-		}
-		ret = dev_pm_qos_add_request(get_cpu_device(cpu),
-					     &pm_req->req,
-					     DEV_PM_QOS_RESUME_LATENCY,
-					     30);
-		if (ret < 0) {
-			rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
-					    cpu);
-			kfree(pm_req);
-			ret = -1;
-			goto out_err;
-		}
-		list_add(&pm_req->list, &plr->pm_reqs);
-	}
-
-	return 0;
-
-out_err:
-	pseudo_lock_cstates_relax(plr);
-	return ret;
+	return cpu_affinity_latency_qos_add(&plr->pm_reqs, &plr->d->hdr.cpu_mask,
+					    30);
 }
 
 /**
@@ -275,7 +231,6 @@ static int pseudo_lock_init(struct rdtgroup *rdtgrp)
 		return -ENOMEM;
 
 	init_waitqueue_head(&plr->lock_thread_wq);
-	INIT_LIST_HEAD(&plr->pm_reqs);
 	rdtgrp->plr = plr;
 	return 0;
 }
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 6fb4894b8cfd..521fe70b0425 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/pid.h>
+#include <linux/pm_qos.h>
 #include <linux/resctrl_types.h>
 
 #ifdef CONFIG_ARCH_HAS_CPU_RESCTRL
@@ -108,7 +109,7 @@ struct pseudo_lock_region {
 	void *kmem;
 	unsigned int minor;
 	struct dentry *debugfs_dir;
-	struct list_head pm_reqs;
+	struct cpu_affinity_qos_req pm_reqs;
 };
 
 /**
--
2.43.0