[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <aV-xEjo5ejSM73DN@intel.com>
Date: Thu, 8 Jan 2026 08:28:50 -0500
From: Rodrigo Vivi <rodrigo.vivi@...el.com>
To: Marco Crivellari <marco.crivellari@...e.com>
CC: <linux-kernel@...r.kernel.org>, <intel-xe@...ts.freedesktop.org>,
<dri-devel@...ts.freedesktop.org>, Tejun Heo <tj@...nel.org>, Lai Jiangshan
<jiangshanlai@...il.com>, Frederic Weisbecker <frederic@...nel.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>, Michal Hocko
<mhocko@...e.com>, Lucas De Marchi <lucas.demarchi@...el.com>, "Thomas
Hellstrom" <thomas.hellstrom@...ux.intel.com>, David Airlie
<airlied@...il.com>, Simona Vetter <simona@...ll.ch>
Subject: Re: [PATCH v2 1/2] drm/xe: replace use of system_unbound_wq with
system_dfl_wq
On Mon, Nov 03, 2025 at 06:06:03PM +0100, Marco Crivellari wrote:
> Currently if a user enqueue a work item using schedule_delayed_work() the
> used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use
> WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to
> schedule_work() that is using system_wq and queue_work(), that makes use
> again of WORK_CPU_UNBOUND.
>
> This lack of consistency cannot be addressed without refactoring the API.
>
> The above change to the Workqueue API has been introduced by:
>
> commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
>
> system_unbound_wq should be the default workqueue so as not to enforce
> locality constraints for random work whenever it's not required.
>
> The old system_unbound_wq will be kept for a few release cycles.
I'm sorry for the delay here, but could you please refactor this commit
message?
The first part of this commit message is the true justification for your
original work, not for this patch here.
Except for your last phrase, which indicates some wish to remove
the unbound_wq, it doesn't state clearly why we should change
from unbound to dfl (default?).
Perhaps the authors of these cases below wanted them to be unbound,
but choosing the default will tie us to whatever the
default might become in the future.
Right now both unbound and dfl are identical. In the future
you are planning to remove the unbound one, but what about the dfl?
Any plans or possible changes? If no change is planned for dfl,
why create a default instead of simply staying with the unbound one,
which is much clearer about its intention?
Thanks,
Rodrigo.
>
> Suggested-by: Tejun Heo <tj@...nel.org>
> Signed-off-by: Marco Crivellari <marco.crivellari@...e.com>
> ---
> drivers/gpu/drm/xe/xe_devcoredump.c | 2 +-
> drivers/gpu/drm/xe/xe_execlist.c | 2 +-
> drivers/gpu/drm/xe/xe_guc_ct.c | 4 ++--
> drivers/gpu/drm/xe/xe_oa.c | 2 +-
> drivers/gpu/drm/xe/xe_vm.c | 4 ++--
> 5 files changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
> index 203e3038cc81..806335487021 100644
> --- a/drivers/gpu/drm/xe/xe_devcoredump.c
> +++ b/drivers/gpu/drm/xe/xe_devcoredump.c
> @@ -362,7 +362,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
>
> xe_engine_snapshot_capture_for_queue(q);
>
> - queue_work(system_unbound_wq, &ss->work);
> + queue_work(system_dfl_wq, &ss->work);
>
> xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
> dma_fence_end_signalling(cookie);
> diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> index f83d421ac9d3..99010709f0d2 100644
> --- a/drivers/gpu/drm/xe/xe_execlist.c
> +++ b/drivers/gpu/drm/xe/xe_execlist.c
> @@ -422,7 +422,7 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
> static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
> {
> INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
> - queue_work(system_unbound_wq, &q->execlist->destroy_async);
> + queue_work(system_dfl_wq, &q->execlist->destroy_async);
> }
>
> static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
> diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
> index 18f6327bf552..bc2ec3603e7b 100644
> --- a/drivers/gpu/drm/xe/xe_guc_ct.c
> +++ b/drivers/gpu/drm/xe/xe_guc_ct.c
> @@ -543,7 +543,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
> spin_lock_irq(&ct->dead.lock);
> if (ct->dead.reason) {
> ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
> - queue_work(system_unbound_wq, &ct->dead.worker);
> + queue_work(system_dfl_wq, &ct->dead.worker);
> }
> spin_unlock_irq(&ct->dead.lock);
> #endif
> @@ -2186,7 +2186,7 @@ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reaso
>
> spin_unlock_irqrestore(&ct->dead.lock, flags);
>
> - queue_work(system_unbound_wq, &(ct)->dead.worker);
> + queue_work(system_dfl_wq, &(ct)->dead.worker);
> }
>
> static void ct_dead_print(struct xe_dead_ct *dead)
> diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
> index a4894eb0d7f3..4e362cd43d51 100644
> --- a/drivers/gpu/drm/xe/xe_oa.c
> +++ b/drivers/gpu/drm/xe/xe_oa.c
> @@ -967,7 +967,7 @@ static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
> struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
>
> INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
> - queue_delayed_work(system_unbound_wq, &ofence->work,
> + queue_delayed_work(system_dfl_wq, &ofence->work,
> usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
> dma_fence_put(fence);
> }
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 63c65e3d207b..d3a0e0231cd1 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1064,7 +1064,7 @@ static void vma_destroy_cb(struct dma_fence *fence,
> struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
>
> INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
> - queue_work(system_unbound_wq, &vma->destroy_work);
> + queue_work(system_dfl_wq, &vma->destroy_work);
> }
>
> static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
> @@ -1823,7 +1823,7 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
> struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
>
> /* To destroy the VM we need to be able to sleep */
> - queue_work(system_unbound_wq, &vm->destroy_work);
> + queue_work(system_dfl_wq, &vm->destroy_work);
> }
>
> struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
> --
> 2.51.1
>
Powered by blists - more mailing lists