Message-Id: <20240911162316.516725-3-felix.moessbauer@siemens.com>
Date: Wed, 11 Sep 2024 18:23:16 +0200
From: Felix Moessbauer <felix.moessbauer@...mens.com>
To: axboe@...nel.dk
Cc: stable@...r.kernel.org,
asml.silence@...il.com,
linux-kernel@...r.kernel.org,
io-uring@...r.kernel.org,
cgroups@...r.kernel.org,
dqminh@...udflare.com,
longman@...hat.com,
adriaan.schmidt@...mens.com,
florian.bezdeka@...mens.com,
Felix Moessbauer <felix.moessbauer@...mens.com>
Subject: [PATCH 6.1 2/2] io_uring/io-wq: inherit cpuset of cgroup in io worker

commit 84eacf177faa605853c58e5b1c0d9544b88c16fd upstream.

The io worker threads are userland threads that just never return to
userland. As such, they are also assigned to a cgroup (the cgroup of the
creating task).

When creating a new io worker, this worker should inherit the cpuset
of that cgroup.

Fixes: da64d6db3bd3 ("io_uring: One wqe per wq")
Signed-off-by: Felix Moessbauer <felix.moessbauer@...mens.com>
---
 io_uring/io-wq.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index c74bcc8d2f06..04265bf8d319 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -1157,6 +1157,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 {
         int ret, node, i;
         struct io_wq *wq;
+        cpumask_var_t allowed_mask;
 
         if (WARN_ON_ONCE(!data->free_work || !data->do_work))
                 return ERR_PTR(-EINVAL);
@@ -1176,6 +1177,9 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
         wq->do_work = data->do_work;
 
         ret = -ENOMEM;
+        if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
+                goto err;
+        cpuset_cpus_allowed(current, allowed_mask);
         for_each_node(node) {
                 struct io_wqe *wqe;
                 int alloc_node = node;
@@ -1188,7 +1192,8 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
                 wq->wqes[node] = wqe;
                 if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
                         goto err;
-                cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
+                if (!cpumask_and(wqe->cpu_mask, cpumask_of_node(node), allowed_mask))
+                        cpumask_copy(wqe->cpu_mask, allowed_mask);
                 wqe->node = alloc_node;
                 wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
                 wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
@@ -1222,6 +1227,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
                 free_cpumask_var(wq->wqes[node]->cpu_mask);
                 kfree(wq->wqes[node]);
         }
+        free_cpumask_var(allowed_mask);
 err_wq:
         kfree(wq);
         return ERR_PTR(ret);
--
2.39.2
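
[Editorial note] For readers less familiar with the kernel cpumask API, the
following is a minimal userspace sketch of the same "intersect, then fall
back" pattern the patch applies per node. It is an illustration only, not
kernel code: sched_getaffinity() and the glibc CPU_* macros stand in for
cpuset_cpus_allowed(), cpumask_and() and cpumask_copy(), and the node mask
below is a made-up placeholder for cpumask_of_node(node).

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t allowed, node_cpus, worker_mask;

        /* Analogue of cpuset_cpus_allowed(current, allowed_mask):
         * the CPUs this task is actually permitted to run on.
         */
        if (sched_getaffinity(0, sizeof(allowed), &allowed))
                return 1;

        /* Hypothetical node mask: pretend this node holds CPUs 0 and 1. */
        CPU_ZERO(&node_cpus);
        CPU_SET(0, &node_cpus);
        CPU_SET(1, &node_cpus);

        /* Analogue of cpumask_and() with the cpumask_copy() fallback:
         * restrict the worker to the node's CPUs, but never end up with
         * an empty mask if the node and the allowed set do not overlap.
         */
        CPU_AND(&worker_mask, &node_cpus, &allowed);
        if (CPU_COUNT(&worker_mask) == 0)
                worker_mask = allowed;

        printf("io worker would be allowed on %d CPU(s)\n",
               CPU_COUNT(&worker_mask));
        return 0;
}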