Message-ID: <20260203225821.3293475-3-tanmay.shah@amd.com>
Date: Tue, 3 Feb 2026 14:58:21 -0800
From: Tanmay Shah <tanmay.shah@....com>
To: <jassisinghbrar@...il.com>, <andersson@...nel.org>,
	<mathieu.poirier@...aro.org>
CC: <linux-kernel@...r.kernel.org>, <linux-remoteproc@...r.kernel.org>, Tanmay
 Shah <tanmay.shah@....com>
Subject: [PATCH v4 2/3] remoteproc: xlnx: assign mbox client per mbox channel

Sharing the mbox client data structure between the "tx" and "rx" channels
can lead to data corruption. Instead, each channel should have its own
mbox client data structure.

Signed-off-by: Tanmay Shah <tanmay.shah@....com>
---

Changes in v4:
  - split the remoteproc driver patch into two patches
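
To illustrate the per-channel client pattern (a sketch distilled from the
hunks below, not a drop-in snippet): each channel now requests its own
client, and the rx callback resolves the enclosing mbox_info against the
rx client only.

	/* tx channel: dedicated client, no rx callback needed */
	ipi->mbox_tx_cl.rx_callback = NULL;
	ipi->tx_chan = mbox_request_channel_byname(&ipi->mbox_tx_cl, "tx");

	/* rx channel: dedicated client carrying the rx callback */
	ipi->mbox_rx_cl.rx_callback = zynqmp_r5_mb_rx_cb;
	ipi->rx_chan = mbox_request_channel_byname(&ipi->mbox_rx_cl, "rx");

	/* in zynqmp_r5_mb_rx_cb(), container_of() is anchored on the rx
	 * client, so it no longer aliases state shared with the tx channel
	 */
	ipi = container_of(cl, struct mbox_info, mbox_rx_cl);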

 drivers/remoteproc/xlnx_r5_remoteproc.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index bd619a6c42aa..109831c5815c 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -74,7 +74,8 @@ struct zynqmp_sram_bank {
  * @tx_mc_buf: to copy data to mailbox tx channel
  * @r5_core: this mailbox's corresponding r5_core pointer
  * @mbox_work: schedule work after receiving data from mailbox
- * @mbox_cl: mailbox client
+ * @mbox_tx_cl: tx channel mailbox client
+ * @mbox_rx_cl: rx channel mailbox client
  * @tx_chan: mailbox tx channel
  * @rx_chan: mailbox rx channel
  */
@@ -83,7 +84,8 @@ struct mbox_info {
 	unsigned char tx_mc_buf[MBOX_CLIENT_BUF_MAX];
 	struct zynqmp_r5_core *r5_core;
 	struct work_struct mbox_work;
-	struct mbox_client mbox_cl;
+	struct mbox_client mbox_tx_cl;
+	struct mbox_client mbox_rx_cl;
 	struct mbox_chan *tx_chan;
 	struct mbox_chan *rx_chan;
 };
@@ -230,7 +232,7 @@ static void zynqmp_r5_mb_rx_cb(struct mbox_client *cl, void *msg)
 	struct mbox_info *ipi;
 	size_t len;
 
-	ipi = container_of(cl, struct mbox_info, mbox_cl);
+	ipi = container_of(cl, struct mbox_info, mbox_rx_cl);
 
 	/* copy data from ipi buffer to r5_core */
 	ipi_msg = (struct zynqmp_ipi_message *)msg;
@@ -269,8 +271,8 @@ static struct mbox_info *zynqmp_r5_setup_mbox(struct device *cdev)
 	if (!ipi)
 		return NULL;
 
-	mbox_cl = &ipi->mbox_cl;
-	mbox_cl->rx_callback = zynqmp_r5_mb_rx_cb;
+	mbox_cl = &ipi->mbox_tx_cl;
+	mbox_cl->rx_callback = NULL;
 	mbox_cl->tx_block = false;
 	mbox_cl->knows_txdone = false;
 	mbox_cl->tx_done = NULL;
@@ -285,6 +287,13 @@ static struct mbox_info *zynqmp_r5_setup_mbox(struct device *cdev)
 		return NULL;
 	}
 
+	mbox_cl = &ipi->mbox_rx_cl;
+	mbox_cl->rx_callback = zynqmp_r5_mb_rx_cb;
+	mbox_cl->tx_block = false;
+	mbox_cl->knows_txdone = false;
+	mbox_cl->tx_done = NULL;
+	mbox_cl->dev = cdev;
+
 	ipi->rx_chan = mbox_request_channel_byname(mbox_cl, "rx");
 	if (IS_ERR(ipi->rx_chan)) {
 		mbox_free_channel(ipi->tx_chan);
-- 
2.34.1

