Message-ID: <20251217151609.3162665-25-den@valinux.co.jp>
Date: Thu, 18 Dec 2025 00:15:58 +0900
From: Koichiro Den <den@...inux.co.jp>
To: Frank.Li@....com,
dave.jiang@...el.com,
ntb@...ts.linux.dev,
linux-pci@...r.kernel.org,
dmaengine@...r.kernel.org,
linux-renesas-soc@...r.kernel.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: mani@...nel.org,
kwilczynski@...nel.org,
kishon@...nel.org,
bhelgaas@...gle.com,
corbet@....net,
geert+renesas@...der.be,
magnus.damm@...il.com,
robh@...nel.org,
krzk+dt@...nel.org,
conor+dt@...nel.org,
vkoul@...nel.org,
joro@...tes.org,
will@...nel.org,
robin.murphy@....com,
jdmason@...zu.us,
allenbh@...il.com,
andrew+netdev@...n.ch,
davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
Basavaraj.Natikar@....com,
Shyam-sundar.S-k@....com,
kurt.schwemmer@...rosemi.com,
logang@...tatee.com,
jingoohan1@...il.com,
lpieralisi@...nel.org,
utkarsh02t@...il.com,
jbrunet@...libre.com,
dlemoal@...nel.org,
arnd@...db.de,
elfring@...rs.sourceforge.net,
den@...inux.co.jp
Subject: [RFC PATCH v3 24/35] NTB: ntb_transport: Add additional hooks for DW eDMA backend

Add the infrastructure needed by the upcoming DW eDMA-backed backend:

- add the hooks and their call sites
  (.enable/.disable/.pre_link_up/.post_link_up/.qp_init/.qp_free)
- store backend-private pointers in ctx/qp

No functional changes.

Signed-off-by: Koichiro Den <den@...inux.co.jp>
---
 drivers/ntb/ntb_transport_core.c     | 34 ++++++++++++++++++++++++++++
 drivers/ntb/ntb_transport_internal.h | 20 ++++++++++++++++
 2 files changed, 54 insertions(+)
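
A quick illustration for reviewers (not part of the patch): the hooks
fire in the order the call sites suggest, i.e. .enable once from
ntb_transport_probe(), .qp_init per queue at probe, .pre_link_up and
.post_link_up around the link-up handshake, .qp_free from
ntb_transport_free_queue(), and .disable on link cleanup or probe
failure. A backend would therefore wire them up roughly as below; this
is a minimal sketch with hypothetical dw_edma_* names, not the actual
eDMA backend from later in this series:

	/* Hypothetical backend-private state, kept in nt->priv. */
	struct dw_edma_backend_ctx {
		unsigned int reserved_mws;
	};

	static int dw_edma_backend_enable(struct ntb_transport_ctx *nt,
					  unsigned int *mw_count)
	{
		struct dw_edma_backend_ctx *bctx;

		bctx = kzalloc(sizeof(*bctx), GFP_KERNEL);
		if (!bctx)
			return -ENOMEM;

		/*
		 * mw_count is passed by reference, presumably so a
		 * backend can shrink it to reserve memory windows for
		 * its own use before the core claims them.
		 */
		nt->priv = bctx;
		return 0;
	}

	static void dw_edma_backend_disable(struct ntb_transport_ctx *nt)
	{
		/* Safe on the probe error path too: kfree(NULL) is a no-op. */
		kfree(nt->priv);
		nt->priv = NULL;
	}

	static const struct ntb_transport_backend_ops dw_edma_backend_ops = {
		.enable		= dw_edma_backend_enable,
		.disable	= dw_edma_backend_disable,
		/* .qp_init/.qp_free/.pre_link_up/.post_link_up as needed */
	};

Since backend_ops is embedded in struct ntb_transport_ctx rather than
referenced through a pointer, the chosen backend's ops would be copied
in (nt->backend_ops = dw_edma_backend_ops;) before any hook can fire.
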
diff --git a/drivers/ntb/ntb_transport_core.c b/drivers/ntb/ntb_transport_core.c
index bff8b41a0d3e..40c2548f5930 100644
--- a/drivers/ntb/ntb_transport_core.c
+++ b/drivers/ntb/ntb_transport_core.c
@@ -879,6 +879,9 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
 	count = ntb_spad_count(nt->ndev);
 	for (i = 0; i < count; i++)
 		ntb_spad_write(nt->ndev, i, 0);
+
+	if (nt->backend_ops.disable)
+		nt->backend_ops.disable(nt);
 }
 
 static void ntb_transport_link_cleanup_work(struct work_struct *work)
@@ -915,6 +918,12 @@ static void ntb_transport_link_work(struct work_struct *work)
 
 	/* send the local info, in the opposite order of the way we read it */
 
+	if (nt->backend_ops.pre_link_up) {
+		rc = nt->backend_ops.pre_link_up(nt);
+		if (rc)
+			return;
+	}
+
 	if (nt->use_msi) {
 		rc = ntb_msi_setup_mws(ndev);
 		if (rc) {
@@ -996,6 +1005,12 @@ static void ntb_transport_link_work(struct work_struct *work)
 
 	nt->link_is_up = true;
 
+	if (nt->backend_ops.post_link_up) {
+		rc = nt->backend_ops.post_link_up(nt);
+		if (rc)
+			return;
+	}
+
 	for (i = 0; i < nt->qp_count; i++) {
 		struct ntb_transport_qp *qp = &nt->qp_vec[i];
 
@@ -1178,6 +1193,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 	if (rc)
 		return rc;
 
+	if (nt->backend_ops.enable) {
+		rc = nt->backend_ops.enable(nt, &mw_count);
+		if (rc)
+			goto err;
+	}
+
 	/*
	 * If we are using MSI, and have at least one extra memory window,
	 * we will reserve the last MW for the MSI window.
@@ -1267,6 +1288,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 		rc = ntb_transport_init_queue(nt, i);
 		if (rc)
 			goto err2;
+
+		if (nt->backend_ops.qp_init) {
+			rc = nt->backend_ops.qp_init(nt, i);
+			if (rc)
+				goto err2;
+		}
 	}
 
 	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
@@ -1298,6 +1325,9 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 	}
 	kfree(nt->mw_vec);
 err:
+	if (nt->backend_ops.disable)
+		nt->backend_ops.disable(nt);
+
 	kfree(nt);
 	return rc;
 }
@@ -2021,6 +2051,7 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
+	struct ntb_transport_ctx *nt = qp->transport;
 	struct pci_dev *pdev;
 	struct ntb_queue_entry *entry;
 	u64 qp_bit;
@@ -2074,6 +2105,9 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 
 	cancel_delayed_work_sync(&qp->link_work);
 
+	if (nt->backend_ops.qp_free)
+		nt->backend_ops.qp_free(qp);
+
 	qp->cb_data = NULL;
 	qp->rx_handler = NULL;
 	qp->tx_handler = NULL;
diff --git a/drivers/ntb/ntb_transport_internal.h b/drivers/ntb/ntb_transport_internal.h
index 33c06be36dfd..51ff08062d73 100644
--- a/drivers/ntb/ntb_transport_internal.h
+++ b/drivers/ntb/ntb_transport_internal.h
@@ -106,6 +106,9 @@ struct ntb_transport_qp {
 	int msi_irq;
 	struct ntb_msi_desc msi_desc;
 	struct ntb_msi_desc peer_msi_desc;
+
+	/* Backend-specific */
+	void *priv;
 };
 
 struct ntb_transport_mw {
@@ -122,6 +125,14 @@ struct ntb_transport_mw {
 
 /**
  * struct ntb_transport_backend_ops - backend-specific transport hooks
+ * @enable: Optional. Enable backend. Called once on
+ *          ntb_transport_probe().
+ * @disable: Optional. Backend teardown hook.
+ * @qp_init: Optional. QP initialization hook called on
+ *          ntb_transport_probe().
+ * @qp_free: Optional. Undo qp_init.
+ * @pre_link_up: Optional. Called before link-up handshake.
+ * @post_link_up: Optional. Called after link-up handshake.
  * @setup_qp_mw: Set up memory windows for a given queue pair.
 * @tx_free_entry: Return the number of free TX entries for the queue pair.
 * @tx_enqueue: Backend-specific TX enqueue implementation.
@@ -130,6 +141,12 @@ struct ntb_transport_mw {
  * @debugfs_stats_show: Dump backend-specific statistics, if any.
  */
 struct ntb_transport_backend_ops {
+	int (*enable)(struct ntb_transport_ctx *nt, unsigned int *mw_count);
+	void (*disable)(struct ntb_transport_ctx *nt);
+	int (*qp_init)(struct ntb_transport_ctx *nt, unsigned int qp_num);
+	void (*qp_free)(struct ntb_transport_qp *qp);
+	int (*pre_link_up)(struct ntb_transport_ctx *nt);
+	int (*post_link_up)(struct ntb_transport_ctx *nt);
 	int (*setup_qp_mw)(struct ntb_transport_ctx *nt, unsigned int qp_num);
 	unsigned int (*tx_free_entry)(struct ntb_transport_qp *qp);
 	int (*tx_enqueue)(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
@@ -166,6 +183,9 @@ struct ntb_transport_ctx {
 
 	/* Make sure workq of link event be executed serially */
 	struct mutex link_event_lock;
+
+	/* Backend-specific context */
+	void *priv;
 };
 
 enum {
--
2.51.0