[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <CAJZ5v0iq7UODJ83fkwnzfFR3HpG2_R-YRnip_cLwyUHZZ+rXyg@mail.gmail.com>
Date: Mon, 21 Jul 2025 18:39:37 +0200
From: "Rafael J. Wysocki" <rafael@...nel.org>
To: Ulf Hansson <ulf.hansson@...aro.org>
Cc: "Rafael J . Wysocki" <rafael@...nel.org>, linux-pm@...r.kernel.org,
Kevin Hilman <khilman@...libre.com>, Pavel Machek <pavel@...nel.org>, Len Brown <len.brown@...el.com>,
Daniel Lezcano <daniel.lezcano@...aro.org>, Saravana Kannan <saravanak@...gle.com>,
Maulik Shah <quic_mkshah@...cinc.com>, Prasad Sodagudi <psodagud@...cinc.com>,
linux-kernel@...r.kernel.org
Subject: Re: [RFC/PATCH 1/3] PM: QoS: Introduce a system-wakeup QoS limit
On Wed, Jul 16, 2025 at 2:33 PM Ulf Hansson <ulf.hansson@...aro.org> wrote:
>
> Some platforms and devices support multiple low-power states that can be
> used for system-wide suspend. Today these states are selected on a
> per-subsystem basis and in most cases it's the deepest possible state that
> becomes selected.
>
> For some use-cases this is a problem, as entering these deeper states
> during system-wide suspend may not be suitable or may even break the
> system-wakeup latency constraint.
>
> Therefore, let's introduce an interface for user-space, allowing us to
> specify the system-wakeup QoS limit. Subsequent changes will start taking
> into account the QoS limit.
Well, this is not really a system-wakeup limit, but a CPU idle state
latency limit for states entered in the last step of suspend-to-idle.
It looks like the problem is that the existing CPU latency QoS is not
taken into account by suspend-to-idle, so instead of adding an
entirely new interface to overcome this, would it make sense to add an
ioctl() to the existing one that would allow the user of it to
indicate that the given request should also be respected by
suspend-to-idle?
There are two basic reasons why I think so:
(1) The requests that you want to be respected by suspend-to-idle
should also be respected by the regular "runtime" idle, or at least I
don't see a reason why it wouldn't be the case.
(2) The new interface introduced by this patch basically duplicates
the existing one.
The flow related to this I kind of envision would be as follows:
(1) User space opens /dev/cpu_dma_latency and a single CPU latency QoS
request is created via cpu_latency_qos_add_request().
(2) User space calls a new ioctl() on the open file descriptor to
indicate that the request should also apply to suspend-to-idle. A new
request is created with the same value and added to a new list of
constraints. That new list of constraints will be used by
suspend-to-idle.
(3) Writing to the open file descriptor causes both requests to be updated.
(4) If user space does not want the request to apply to
suspend-to-idle any more, it can use another new ioctl() to achieve
that. It would cause the second (suspend-to-idle) copy of the request
to be dropped.
(5) Closing the file descriptor causes both copies of the request to be dropped.
> Signed-off-by: Ulf Hansson <ulf.hansson@...aro.org>
> ---
> include/linux/pm_qos.h | 9 ++++
> kernel/power/qos.c | 114 +++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 123 insertions(+)
>
> diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
> index 4a69d4af3ff8..5f84084f19c8 100644
> --- a/include/linux/pm_qos.h
> +++ b/include/linux/pm_qos.h
> @@ -143,6 +143,15 @@ bool pm_qos_update_flags(struct pm_qos_flags *pqf,
> struct pm_qos_flags_request *req,
> enum pm_qos_req_action action, s32 val);
>
> +#ifdef CONFIG_PM_SLEEP
> +s32 system_wakeup_latency_qos_limit(void);
> +#else
> +static inline s32 system_wakeup_latency_qos_limit(void)
> +{
> + return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
> +}
> +#endif
> +
> #ifdef CONFIG_CPU_IDLE
> s32 cpu_latency_qos_limit(void);
> bool cpu_latency_qos_request_active(struct pm_qos_request *req);
> diff --git a/kernel/power/qos.c b/kernel/power/qos.c
> index 4244b069442e..fb496c220ffe 100644
> --- a/kernel/power/qos.c
> +++ b/kernel/power/qos.c
> @@ -209,6 +209,120 @@ bool pm_qos_update_flags(struct pm_qos_flags *pqf,
> return prev_value != curr_value;
> }
>
> +#ifdef CONFIG_PM_SLEEP
> +static struct pm_qos_constraints system_wakeup_latency_constraints = {
> + .list = PLIST_HEAD_INIT(system_wakeup_latency_constraints.list),
> + .target_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT,
> + .default_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT,
> + .no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT,
> + .type = PM_QOS_MIN,
> +};
> +
> +/**
> + * system_wakeup_latency_qos_limit - Current system wakeup latency QoS limit.
> + *
> + * Returns the current system wakeup latency QoS limit that may have been
> + * requested by user-space.
> + */
> +s32 system_wakeup_latency_qos_limit(void)
> +{
> + return pm_qos_read_value(&system_wakeup_latency_constraints);
> +}
> +
> +static int system_wakeup_latency_qos_open(struct inode *inode,
> + struct file *filp)
> +{
> + struct pm_qos_request *req;
> +
> + req = kzalloc(sizeof(*req), GFP_KERNEL);
> + if (!req)
> + return -ENOMEM;
> +
> + req->qos = &system_wakeup_latency_constraints;
> + pm_qos_update_target(req->qos, &req->node, PM_QOS_ADD_REQ,
> + PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
> + filp->private_data = req;
> +
> + return 0;
> +}
> +
> +static int system_wakeup_latency_qos_release(struct inode *inode,
> + struct file *filp)
> +{
> + struct pm_qos_request *req = filp->private_data;
> +
> + filp->private_data = NULL;
> + pm_qos_update_target(req->qos, &req->node, PM_QOS_REMOVE_REQ,
> + PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
> + kfree(req);
> +
> + return 0;
> +}
> +
> +static ssize_t system_wakeup_latency_qos_read(struct file *filp,
> + char __user *buf,
> + size_t count,
> + loff_t *f_pos)
> +{
> + s32 value = pm_qos_read_value(&system_wakeup_latency_constraints);
> +
> + return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
> +}
> +
> +static ssize_t system_wakeup_latency_qos_write(struct file *filp,
> + const char __user *buf,
> + size_t count, loff_t *f_pos)
> +{
> + struct pm_qos_request *req = filp->private_data;
> + s32 value;
> +
> + if (count == sizeof(s32)) {
> + if (copy_from_user(&value, buf, sizeof(s32)))
> + return -EFAULT;
> + } else {
> + int ret;
> +
> + ret = kstrtos32_from_user(buf, count, 16, &value);
> + if (ret)
> + return ret;
> + }
> +
> + if (value < 0)
> + return -EINVAL;
> +
> + pm_qos_update_target(req->qos, &req->node, PM_QOS_UPDATE_REQ, value);
> +
> + return count;
> +}
> +
> +static const struct file_operations system_wakeup_latency_qos_fops = {
> + .open = system_wakeup_latency_qos_open,
> + .release = system_wakeup_latency_qos_release,
> + .read = system_wakeup_latency_qos_read,
> + .write = system_wakeup_latency_qos_write,
> + .llseek = noop_llseek,
> +};
> +
> +static struct miscdevice system_wakeup_latency_qos_miscdev = {
> + .minor = MISC_DYNAMIC_MINOR,
> + .name = "system_wakeup_latency",
> + .fops = &system_wakeup_latency_qos_fops,
> +};
> +
> +static int __init system_wakeup_latency_qos_init(void)
> +{
> + int ret;
> +
> + ret = misc_register(&system_wakeup_latency_qos_miscdev);
> + if (ret < 0)
> + pr_err("%s: %s setup failed\n", __func__,
> + system_wakeup_latency_qos_miscdev.name);
> +
> + return ret;
> +}
> +late_initcall(system_wakeup_latency_qos_init);
> +#endif /* CONFIG_PM_SLEEP */
> +
> #ifdef CONFIG_CPU_IDLE
> /* Definitions related to the CPU latency QoS. */
>
> --
Powered by blists - more mailing lists