Message-ID: <CADUfDZq+0H6nZ1vtw-URN_FbbU-Vkp8sXK_HKZpoXegAsT9_Pg@mail.gmail.com>
Date: Wed, 16 Apr 2025 11:48:45 -0700
From: Caleb Sander Mateos <csander@...estorage.com>
To: Uday Shankar <ushankar@...estorage.com>
Cc: Ming Lei <ming.lei@...hat.com>, Jens Axboe <axboe@...nel.dk>, linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 1/4] ublk: require unique task per io instead of unique
task per hctx
On Tue, Apr 15, 2025 at 6:00 PM Uday Shankar <ushankar@...estorage.com> wrote:
>
> Currently, ublk_drv associates with each hardware queue (hctx) a unique
> task (called the queue's ubq_daemon), which is the only task allowed to
> issue COMMIT_AND_FETCH commands against the hctx. If any other task
> attempts to do so, the command fails immediately with EINVAL. Combined
> with the block layer architecture, the result is that for each CPU C on
> the system, there is a unique ublk server thread allowed to handle I/O
> submitted on CPU C. This can lead to suboptimal performance under
> imbalanced load generation. As an extreme example, suppose all the load
> is generated on CPUs that map to a single ublk server thread. That
> thread may become fully utilized and be the bottleneck in the system,
> while the other ublk server threads sit totally idle.
>
> This issue can also be addressed directly in the ublk server, without
> kernel support, by having threads dequeue I/Os and pass them around to
> ensure even load. But that solution requires inter-thread communication
> at least twice per I/O (once at submission, once at completion), which
> is generally a bad pattern for performance. The problem gets even worse
> with zero copy, as additional inter-thread communication would be
> required to make the buffer register/unregister calls come from the
> correct thread.
>
> Therefore, address this issue in ublk_drv by requiring a unique task per
> I/O instead of per queue/hctx. Imbalanced load can then be balanced
> across all ublk server threads by having threads issue FETCH_REQs in a
> round-robin manner. As a small toy example, consider a system with a
> single ublk device having 2 queues, each of queue depth 4. A ublk server
> having 4 threads could issue its FETCH_REQs against this device as
> follows (where each entry is the qid,tag pair that the FETCH_REQ
> targets):
>
> poller thread:  T0    T1    T2    T3
>                 0,0   0,1   0,2   0,3
>                 1,3   1,0   1,1   1,2
>
> Since tags appear to be allocated in sequential chunks, this setup
> provides a rough approximation of distributing I/Os round-robin across
> all ublk server threads, while keeping each I/O fully thread-local.
>
> Signed-off-by: Uday Shankar <ushankar@...estorage.com>
> Reviewed-by: Caleb Sander Mateos <csander@...estorage.com>
> ---
> drivers/block/ublk_drv.c | 75 ++++++++++++++++++++++--------------------------
> 1 file changed, 34 insertions(+), 41 deletions(-)
>
> diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
> index cdb1543fa4a9817aa2ca2fca66720f589cf222be..9a0d2547512fc8119460739230599d48d2c2a306 100644
> --- a/drivers/block/ublk_drv.c
> +++ b/drivers/block/ublk_drv.c
> @@ -150,6 +150,7 @@ struct ublk_io {
> int res;
>
> struct io_uring_cmd *cmd;
> + struct task_struct *task;
> };
>
> struct ublk_queue {
> @@ -157,11 +158,9 @@ struct ublk_queue {
> int q_depth;
>
> unsigned long flags;
> - struct task_struct *ubq_daemon;
> struct ublksrv_io_desc *io_cmd_buf;
>
> bool force_abort;
> - bool timeout;
> bool canceling;
> bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
> unsigned short nr_io_ready; /* how many ios setup */
> @@ -1072,11 +1071,6 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
> return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
> }
>
> -static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
> -{
> - return ubq->ubq_daemon->flags & PF_EXITING;
> -}
> -
> /* todo: handle partial completion */
> static inline void __ublk_complete_rq(struct request *req)
> {
> @@ -1224,13 +1218,13 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
> /*
> * Task is exiting if either:
> *
> - * (1) current != ubq_daemon.
> + * (1) current != io->task.
> * io_uring_cmd_complete_in_task() tries to run task_work
> - * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
> + * in a workqueue if cmd's task is PF_EXITING.
> *
> * (2) current->flags & PF_EXITING.
> */
> - if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
> + if (unlikely(current != io->task || current->flags & PF_EXITING)) {
> __ublk_abort_rq(ubq, req);
> return;
> }
> @@ -1336,23 +1330,20 @@ static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l)
> static enum blk_eh_timer_return ublk_timeout(struct request *rq)
> {
> struct ublk_queue *ubq = rq->mq_hctx->driver_data;
> + struct ublk_io *io = &ubq->ios[rq->tag];
> unsigned int nr_inflight = 0;
> int i;
>
> if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
> - if (!ubq->timeout) {
> - send_sig(SIGKILL, ubq->ubq_daemon, 0);
> - ubq->timeout = true;
> - }
> -
> + send_sig(SIGKILL, io->task, 0);
> return BLK_EH_DONE;
> }
>
> - if (!ubq_daemon_is_dying(ubq))
> + if (!(io->task->flags & PF_EXITING))
> return BLK_EH_RESET_TIMER;
>
> for (i = 0; i < ubq->q_depth; i++) {
> - struct ublk_io *io = &ubq->ios[i];
> + io = &ubq->ios[i];
>
> if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
> nr_inflight++;
> @@ -1552,8 +1543,8 @@ static void ublk_commit_completion(struct ublk_device *ub,
> }
>
> /*
> - * Called from ubq_daemon context via cancel fn, meantime quiesce ublk
> - * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
> + * Called from io task context via cancel fn, meantime quiesce ublk
> + * blk-mq queue, so we are called exclusively with blk-mq and io task
> * context, so everything is serialized.
> */
> static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
> @@ -1669,13 +1660,13 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
> return;
>
> task = io_uring_cmd_get_task(cmd);
> - if (WARN_ON_ONCE(task && task != ubq->ubq_daemon))
> + io = &ubq->ios[pdu->tag];
> + if (WARN_ON_ONCE(task && task != io->task))
> return;
>
> ub = ubq->dev;
> need_schedule = ublk_abort_requests(ub, ubq);
>
> - io = &ubq->ios[pdu->tag];
> WARN_ON_ONCE(io->cmd != cmd);
> ublk_cancel_cmd(ubq, io, issue_flags);
>
> @@ -1836,8 +1827,6 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
> mutex_lock(&ub->mutex);
> ubq->nr_io_ready++;
> if (ublk_queue_ready(ubq)) {
> - ubq->ubq_daemon = current;
> - get_task_struct(ubq->ubq_daemon);
> ub->nr_queues_ready++;
>
> if (capable(CAP_SYS_ADMIN))
> @@ -1952,14 +1941,14 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
> if (!ubq || ub_cmd->q_id != ubq->q_id)
> goto out;
>
> - if (ubq->ubq_daemon && ubq->ubq_daemon != current)
> - goto out;
> -
> if (tag >= ubq->q_depth)
> goto out;
>
> io = &ubq->ios[tag];
>
> + if (io->task && io->task != current)
> + goto out;
> +
> /* there is pending io cmd, something must be wrong */
> if (io->flags & UBLK_IO_FLAG_ACTIVE) {
> ret = -EBUSY;
> @@ -2012,6 +2001,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
>
> ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
> ublk_mark_io_ready(ub, ubq);
> + io->task = get_task_struct(current);
Should io->task be set before ublk_mark_io_ready()? I worry that once
the ublk device is marked ready, it may be assumed that io->task is
constant.
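For example (just a sketch of the reordering I have in mind, untested):

        io->task = get_task_struct(current);
        ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
        ublk_mark_io_ready(ub, ubq);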
Best,
Caleb