[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250814094824.217142-9-janusz.krzysztofik@linux.intel.com>
Date: Thu, 14 Aug 2025 10:16:14 +0200
From: Janusz Krzysztofik <janusz.krzysztofik@...ux.intel.com>
To: Christian König <christian.koenig@....com>
Cc: Sumit Semwal <sumit.semwal@...aro.org>,
Gustavo Padovan <gustavo@...ovan.org>,
Chris Wilson <chris.p.wilson@...ux.intel.com>,
linux-media@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
linaro-mm-sig@...ts.linaro.org,
linux-kernel@...r.kernel.org,
intel-gfx@...ts.freedesktop.org,
intel-xe@...ts.freedesktop.org,
Janusz Krzysztofik <janusz.krzysztofik@...ux.intel.com>
Subject: [PATCH 3/4] dma-buf/fence-chain: Wait on each tested chain link
Userspace may build dma_fence chains of arbitrary length step by step,
e.g. via drm_syncobj IOCTLs, and at each step it may start waiting on the
chain link it has just added. Adjust the wait_* selftests to cover such
extreme use cases.
With that done, don't enable signaling on each chain link when building
the chain. There is no point in doing that as long as no user is waiting
on the link, and once a user does start waiting on the fence, signaling
is enabled automatically. Let individual test cases decide which links of
the chain should be waited on / need signaling enabled.
Suggested-by: Christian König <christian.koenig@....com>
Signed-off-by: Janusz Krzysztofik <janusz.krzysztofik@...ux.intel.com>
---
drivers/dma-buf/st-dma-fence-chain.c | 120 ++++++++++++++++++++-------
1 file changed, 91 insertions(+), 29 deletions(-)
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
index bff4192420d8b..0e7a24ed7caeb 100644
--- a/drivers/dma-buf/st-dma-fence-chain.c
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -145,8 +145,6 @@ static int fence_chains_init(struct fence_chains *fc, unsigned int count,
}
fc->tail = fc->chains[i];
-
- dma_fence_enable_sw_signaling(fc->chains[i]);
}
fc->chain_length = i;
@@ -570,23 +568,34 @@ static int __wait_fence_chains(void *arg)
static int wait_forward(void *arg)
{
+ struct task_struct **tsk;
struct fence_chains fc;
- struct task_struct *tsk;
ktime_t dt;
+ int i = 0;
int err;
- int i;
err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
if (err)
return err;
- tsk = kthread_run(__wait_fence_chains, fc.tail, "dmabuf/wait");
- if (IS_ERR(tsk)) {
- err = PTR_ERR(tsk);
+ tsk = kmalloc_array(fc.chain_length, sizeof(*tsk), GFP_KERNEL);
+ if (!tsk) {
+ err = -ENOMEM;
goto err;
}
- get_task_struct(tsk);
- yield_to(tsk, true);
+
+ for (i = 0; i < fc.chain_length; i++) {
+ tsk[i] = kthread_run(__wait_fence_chains, fc.chains[i],
+ "dmabuf/wait-%llu", fc.fences[i]->seqno);
+ if (IS_ERR(tsk[i])) {
+ err = PTR_ERR(tsk[i]);
+ pr_err("Reported %d for kthread_run(%llu)!\n",
+ err, fc.fences[i]->seqno);
+ goto err;
+ }
+ get_task_struct(tsk[i]);
+ yield_to(tsk[i], true);
+ }
dt = -ktime_get();
for (i = 0; i < fc.chain_length; i++)
@@ -595,32 +604,53 @@ static int wait_forward(void *arg)
pr_info("%s: %d signals in %llu ns\n", __func__, fc.chain_length, ktime_to_ns(dt));
- err = kthread_stop_put(tsk);
-
err:
+ while (i--) {
+ int tsk_err = kthread_stop_put(tsk[i]);
+
+ if (tsk_err)
+ pr_err("Reported %d for kthread_stop_put(%llu)!\n",
+ tsk_err, fc.fences[i]->seqno);
+
+ if (!err)
+ err = tsk_err;
+ }
+ kfree(tsk);
+
fence_chains_fini(&fc);
return err;
}
static int wait_backward(void *arg)
{
+ struct task_struct **tsk;
struct fence_chains fc;
- struct task_struct *tsk;
ktime_t dt;
+ int i = 0;
int err;
- int i;
err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
if (err)
return err;
- tsk = kthread_run(__wait_fence_chains, fc.tail, "dmabuf/wait");
- if (IS_ERR(tsk)) {
- err = PTR_ERR(tsk);
+ tsk = kmalloc_array(fc.chain_length, sizeof(*tsk), GFP_KERNEL);
+ if (!tsk) {
+ err = -ENOMEM;
goto err;
}
- get_task_struct(tsk);
- yield_to(tsk, true);
+
+ for (i = 0; i < fc.chain_length; i++) {
+ tsk[i] = kthread_run(__wait_fence_chains, fc.chains[i],
+ "dmabuf/wait-%llu", fc.fences[i]->seqno);
+ if (IS_ERR(tsk[i])) {
+ err = PTR_ERR(tsk[i]);
+ pr_err("Reported %d for kthread_run(%llu)!\n",
+ err, fc.fences[i]->seqno);
+ goto err;
+ }
+ get_task_struct(tsk[i]);
+ yield_to(tsk[i], true);
+ }
dt = -ktime_get();
for (i = fc.chain_length; i--; )
@@ -629,9 +659,20 @@ static int wait_backward(void *arg)
pr_info("%s: %d signals in %llu ns\n", __func__, fc.chain_length, ktime_to_ns(dt));
- err = kthread_stop_put(tsk);
-
+ i = fc.chain_length;
err:
+ while (i--) {
+ int tsk_err = kthread_stop_put(tsk[i]);
+
+ if (tsk_err)
+ pr_err("Reported %d for kthread_stop_put(%llu)!\n",
+ tsk_err, fc.fences[i]->seqno);
+
+ if (!err)
+ err = tsk_err;
+ }
+ kfree(tsk);
+
fence_chains_fini(&fc);
return err;
}
@@ -654,11 +695,11 @@ static void randomise_fences(struct fence_chains *fc)
static int wait_random(void *arg)
{
+ struct task_struct **tsk;
struct fence_chains fc;
- struct task_struct *tsk;
ktime_t dt;
+ int i = 0;
int err;
- int i;
err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
if (err)
@@ -666,13 +707,24 @@ static int wait_random(void *arg)
randomise_fences(&fc);
- tsk = kthread_run(__wait_fence_chains, fc.tail, "dmabuf/wait");
- if (IS_ERR(tsk)) {
- err = PTR_ERR(tsk);
+ tsk = kmalloc_array(fc.chain_length, sizeof(*tsk), GFP_KERNEL);
+ if (!tsk) {
+ err = -ENOMEM;
goto err;
}
- get_task_struct(tsk);
- yield_to(tsk, true);
+
+ for (i = 0; i < fc.chain_length; i++) {
+ tsk[i] = kthread_run(__wait_fence_chains, fc.chains[i],
+ "dmabuf/wait-%llu", fc.fences[i]->seqno);
+ if (IS_ERR(tsk[i])) {
+ err = PTR_ERR(tsk[i]);
+ pr_err("Reported %d for kthread_run(%llu)!\n",
+ err, fc.fences[i]->seqno);
+ goto err;
+ }
+ get_task_struct(tsk[i]);
+ yield_to(tsk[i], true);
+ }
dt = -ktime_get();
for (i = 0; i < fc.chain_length; i++)
@@ -681,9 +733,19 @@ static int wait_random(void *arg)
pr_info("%s: %d signals in %llu ns\n", __func__, fc.chain_length, ktime_to_ns(dt));
- err = kthread_stop_put(tsk);
-
err:
+ while (i--) {
+ int tsk_err = kthread_stop_put(tsk[i]);
+
+ if (tsk_err)
+ pr_err("Reported %d for kthread_stop_put(%llu)!\n",
+ tsk_err, fc.fences[i]->seqno);
+
+ if (!err)
+ err = tsk_err;
+ }
+ kfree(tsk);
+
fence_chains_fini(&fc);
return err;
}
--
2.50.1
Powered by blists - more mailing lists