Message-ID: <aB8oOVLJ_k2YYMzP@gondor.apana.org.au>
Date: Sat, 10 May 2025 18:19:37 +0800
From: Herbert Xu <herbert@...dor.apana.org.au>
To: Klaus Kudielka <klaus.kudielka@...il.com>
Cc: Corentin Labbe <clabbe.montjoie@...il.com>, regressions@...ts.linux.dev,
linux-kernel@...r.kernel.org,
Linux Crypto Mailing List <linux-crypto@...r.kernel.org>,
Boris Brezillon <bbrezillon@...nel.org>,
EBALARD Arnaud <Arnaud.Ebalard@....gouv.fr>,
Romain Perier <romain.perier@...il.com>
Subject: Re: [v3 PATCH] crypto: marvell/cesa - Do not chain submitted requests

On Sat, May 10, 2025 at 11:38:16AM +0200, Klaus Kudielka wrote:
>
> Patch applied on top.
> On the first attempt all self-tests passed, but on the second and third unfortunately not.

OK, you can get more consistent errors by enabling extra testing with
CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y.
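
For reference, the relevant .config fragment would look roughly like the
following (a sketch only; it assumes the usual Kconfig dependencies of
this option, i.e. the extra tests also want DEBUG_KERNEL and the regular
self-tests left enabled):

    # sketch: option names as used in this thread's kernel tree
    CONFIG_DEBUG_KERNEL=y
    # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
    CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y
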
In any case, let's go back to the printk approach and see what's
going on. This patch could either go on top of the previous ones
or be applied by itself.

Thanks,
--
Email: Herbert Xu <herbert@...dor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 9c21f5d835d2..fd7f43575cb2 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -127,6 +127,8 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
 		if (!(status & mask))
 			break;
 
+		pr_err("mv_cesa_int: %d 0x%x 0x%x\n", engine->id, status, mask);
+
 		/*
 		 * TODO: avoid clearing the FPGA_INT_STATUS if this not
 		 * relevant on some platforms.
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index a46ae868110a..cc7efd9a099a 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -396,6 +396,8 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
 	}
 
 	atomic_sub(ahashreq->nbytes, &engine->load);
+
+	pr_err("mv_cesa_ahash_complete: %d 0x%lx\n", engine->id, (unsigned long)ahashreq);
 }
 
 static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
@@ -417,6 +419,8 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
 	struct ahash_request *ahashreq = ahash_request_cast(req);
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
 
+	pr_err("mv_cesa_ahash_req_cleanup: %d 0x%lx\n", creq->base.engine->id, (unsigned long)ahashreq);
+
 	if (creq->last_req)
 		mv_cesa_ahash_last_cleanup(ahashreq);
 
@@ -783,6 +787,7 @@ static int mv_cesa_ahash_queue_req(struct ahash_request *req)
 
 	engine = mv_cesa_select_engine(req->nbytes);
 	mv_cesa_ahash_prepare(&req->base, engine);
+	pr_err("mv_cesa_ahash_queue_req: %d 0x%lx\n", engine->id, (unsigned long)req);
 	ret = mv_cesa_queue_req(&req->base, &creq->base);
 
 	if (mv_cesa_req_needs_cleanup(&req->base, ret))
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 9b5fd957dde2..59be742c5a1c 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -47,6 +47,8 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
 	engine->chain_hw.last = dreq->chain.last;
 	spin_unlock_bh(&engine->lock);
 
+	pr_err("mv_cesa_dma_step: %d 0x%lx 0x%lx 0x%lx\n", engine->id, (unsigned long)dreq, (unsigned long)dreq->chain.first->cur_dma, (unsigned long)dreq->chain.last->cur_dma);
+
 	writel_relaxed(0, engine->regs + CESA_SA_CFG);
 
 	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
@@ -137,6 +139,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
 	int res = 0;
 
 	tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
+	pr_err("mv_cesa_tdma_process: %d 0x%lx\n", engine->id, (unsigned long)tdma_cur);
 
 	for (tdma = engine->chain_hw.first; tdma; tdma = next) {
 		spin_lock_bh(&engine->lock);
@@ -186,6 +189,8 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
 			break;
 	}
 
+	pr_err("mv_cesa_tdma_process: %d %d 0x%lx\n", engine->id, res, (unsigned long)req);
+
 	/*
 	 * Save the last request in error to engine->req, so that the core
 	 * knows which request was faulty