Message-ID: <20251129160405.2568284-19-den@valinux.co.jp>
Date: Sun, 30 Nov 2025 01:03:56 +0900
From: Koichiro Den <den@...inux.co.jp>
To: ntb@...ts.linux.dev,
linux-pci@...r.kernel.org,
dmaengine@...r.kernel.org,
linux-kernel@...r.kernel.org,
Frank.Li@....com
Cc: mani@...nel.org,
kwilczynski@...nel.org,
kishon@...nel.org,
bhelgaas@...gle.com,
corbet@....net,
vkoul@...nel.org,
jdmason@...zu.us,
dave.jiang@...el.com,
allenbh@...il.com,
Basavaraj.Natikar@....com,
Shyam-sundar.S-k@....com,
kurt.schwemmer@...rosemi.com,
logang@...tatee.com,
jingoohan1@...il.com,
lpieralisi@...nel.org,
robh@...nel.org,
jbrunet@...libre.com,
fancer.lancer@...il.com,
arnd@...db.de,
pstanner@...hat.com,
elfring@...rs.sourceforge.net
Subject: [RFC PATCH v2 18/27] NTB: ntb_transport: Introduce ntb_transport_backend_ops

Introduce struct ntb_transport_backend_ops to abstract the backend-specific
parts of the transport: queue-pair memory window setup, TX/RX enqueue,
RX polling, TX free-entry accounting, and debugfs statistics reporting.
Move the existing implementation behind this interface as the default
backend; a subsequent patch will add an eDMA-backed implementation.

No functional changes intended.

Signed-off-by: Koichiro Den <den@...inux.co.jp>
---
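A rough sketch of how a non-default backend is expected to plug into this
interface (the edma_* and use_edma names below are placeholders invented for
illustration here; they are not the identifiers used by the later eDMA patch):

  /* Hypothetical eDMA backend, for illustration only. */
  static int ntb_transport_edma_setup_qp_mw(struct ntb_transport_ctx *nt,
                                            unsigned int qp_num);
  static unsigned int ntb_transport_edma_tx_free_entry(struct ntb_transport_qp *qp);
  static int ntb_transport_edma_tx_enqueue(struct ntb_transport_qp *qp,
                                           struct ntb_queue_entry *entry,
                                           void *cb, void *data,
                                           unsigned int len, unsigned int flags);
  static int ntb_transport_edma_rx_enqueue(struct ntb_transport_qp *qp,
                                           struct ntb_queue_entry *entry);
  static void ntb_transport_edma_rx_poll(struct ntb_transport_qp *qp);
  static void ntb_transport_edma_debugfs_stats_show(struct seq_file *s,
                                                    struct ntb_transport_qp *qp);

  static const struct ntb_transport_backend_ops edma_backend_ops = {
          .setup_qp_mw            = ntb_transport_edma_setup_qp_mw,
          .tx_free_entry          = ntb_transport_edma_tx_free_entry,
          .tx_enqueue             = ntb_transport_edma_tx_enqueue,
          .rx_enqueue             = ntb_transport_edma_rx_enqueue,
          .rx_poll                = ntb_transport_edma_rx_poll,
          .debugfs_stats_show     = ntb_transport_edma_debugfs_stats_show,
  };

  /* In ntb_transport_probe(), select the backend instead of always taking
   * the default (use_edma stands in for whatever detection the later patch
   * adds):
   */
  nt->backend_ops = use_edma ? edma_backend_ops : default_backend_ops;
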
drivers/ntb/ntb_transport.c | 128 +++++++++++++++++++++++-----------
include/linux/ntb_transport.h | 21 ++++++
2 files changed, 107 insertions(+), 42 deletions(-)

diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index dad596e3a405..907db6c93d4d 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -228,6 +228,8 @@ struct ntb_transport_ctx {
struct ntb_dev *ndev;
+ struct ntb_transport_backend_ops backend_ops;
+
struct ntb_transport_mw *mw_vec;
struct ntb_transport_qp *qp_vec;
unsigned int mw_count;
@@ -488,15 +490,9 @@ void ntb_transport_unregister_client(struct ntb_transport_client *drv)
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
-static int ntb_qp_debugfs_stats_show(struct seq_file *s, void *v)
+static void ntb_transport_default_debugfs_stats_show(struct seq_file *s,
+ struct ntb_transport_qp *qp)
{
- struct ntb_transport_qp *qp = s->private;
-
- if (!qp || !qp->link_is_up)
- return 0;
-
- seq_puts(s, "\nNTB QP stats:\n\n");
-
seq_printf(s, "rx_bytes - \t%llu\n", qp->rx_bytes);
seq_printf(s, "rx_pkts - \t%llu\n", qp->rx_pkts);
seq_printf(s, "rx_memcpy - \t%llu\n", qp->rx_memcpy);
@@ -526,6 +522,17 @@ static int ntb_qp_debugfs_stats_show(struct seq_file *s, void *v)
seq_printf(s, "Using TX DMA - \t%s\n", qp->tx_dma_chan ? "Yes" : "No");
seq_printf(s, "Using RX DMA - \t%s\n", qp->rx_dma_chan ? "Yes" : "No");
seq_printf(s, "QP Link - \t%s\n", qp->link_is_up ? "Up" : "Down");
+}
+
+static int ntb_qp_debugfs_stats_show(struct seq_file *s, void *v)
+{
+ struct ntb_transport_qp *qp = s->private;
+
+ if (!qp || !qp->link_is_up)
+ return 0;
+
+ seq_puts(s, "\nNTB QP stats:\n\n");
+ qp->transport->backend_ops.debugfs_stats_show(s, qp);
seq_putc(s, '\n');
return 0;
@@ -583,8 +590,8 @@ static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
return entry;
}
-static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
- unsigned int qp_num)
+static int ntb_transport_default_setup_qp_mw(struct ntb_transport_ctx *nt,
+ unsigned int qp_num)
{
struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
struct ntb_transport_mw *mw;
@@ -1128,7 +1135,7 @@ static void ntb_transport_link_work(struct work_struct *work)
for (i = 0; i < nt->qp_count; i++) {
struct ntb_transport_qp *qp = &nt->qp_vec[i];
- ntb_transport_setup_qp_mw(nt, i);
+ nt->backend_ops.setup_qp_mw(nt, i);
ntb_transport_setup_qp_peer_msi(nt, i);
if (qp->client_ready)
@@ -1236,6 +1243,40 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
return 0;
}
+static unsigned int ntb_transport_default_tx_free_entry(struct ntb_transport_qp *qp)
+{
+ unsigned int head = qp->tx_index;
+ unsigned int tail = qp->remote_rx_info->entry;
+
+ return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
+}
+
+static int ntb_transport_default_rx_enqueue(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry)
+{
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
+
+ if (qp->active)
+ tasklet_schedule(&qp->rxc_db_work);
+
+ return 0;
+}
+
+static void ntb_transport_default_rx_poll(struct ntb_transport_qp *qp);
+static int ntb_transport_default_tx_enqueue(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry,
+ void *cb, void *data, unsigned int len,
+ unsigned int flags);
+
+static const struct ntb_transport_backend_ops default_backend_ops = {
+ .setup_qp_mw = ntb_transport_default_setup_qp_mw,
+ .tx_free_entry = ntb_transport_default_tx_free_entry,
+ .tx_enqueue = ntb_transport_default_tx_enqueue,
+ .rx_enqueue = ntb_transport_default_rx_enqueue,
+ .rx_poll = ntb_transport_default_rx_poll,
+ .debugfs_stats_show = ntb_transport_default_debugfs_stats_show,
+};
+
static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
struct ntb_transport_ctx *nt;
@@ -1270,6 +1311,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
nt->ndev = ndev;
+ nt->backend_ops = default_backend_ops;
+
/*
* If we are using MSI, and have at least one extra memory window,
* we will reserve the last MW for the MSI window.
@@ -1679,14 +1722,10 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
return 0;
}
-static void ntb_transport_rxc_db(unsigned long data)
+static void ntb_transport_default_rx_poll(struct ntb_transport_qp *qp)
{
- struct ntb_transport_qp *qp = (void *)data;
int rc, i;
- dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
- __func__, qp->qp_num);
-
/* Limit the number of packets processed in a single interrupt to
* provide fairness to others
*/
@@ -1718,6 +1757,17 @@ static void ntb_transport_rxc_db(unsigned long data)
}
}
+static void ntb_transport_rxc_db(unsigned long data)
+{
+ struct ntb_transport_qp *qp = (void *)data;
+ struct ntb_transport_ctx *nt = qp->transport;
+
+ dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
+ __func__, qp->qp_num);
+
+ nt->backend_ops.rx_poll(qp);
+}
+
static void ntb_tx_copy_callback(void *data,
const struct dmaengine_result *res)
{
@@ -1887,9 +1937,19 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
qp->tx_memcpy++;
}
-static int ntb_process_tx(struct ntb_transport_qp *qp,
- struct ntb_queue_entry *entry)
+static int ntb_transport_default_tx_enqueue(struct ntb_transport_qp *qp,
+ struct ntb_queue_entry *entry,
+ void *cb, void *data, unsigned int len,
+ unsigned int flags)
{
+ entry->cb_data = cb;
+ entry->buf = data;
+ entry->len = len;
+ entry->flags = flags;
+ entry->errors = 0;
+ entry->retries = 0;
+ entry->tx_index = 0;
+
if (!ntb_transport_tx_free_entry(qp)) {
qp->tx_ring_full++;
return -EAGAIN;
@@ -1916,6 +1975,7 @@ static int ntb_process_tx(struct ntb_transport_qp *qp,
static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
+ struct ntb_transport_ctx *nt = qp->transport;
struct pci_dev *pdev = qp->ndev->pdev;
struct ntb_queue_entry *entry;
int i, rc;
@@ -1935,12 +1995,7 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
if (!entry)
return;
- entry->cb_data = NULL;
- entry->buf = NULL;
- entry->len = 0;
- entry->flags = LINK_DOWN_FLAG;
-
- rc = ntb_process_tx(qp, entry);
+ rc = nt->backend_ops.tx_enqueue(qp, entry, NULL, NULL, 0, LINK_DOWN_FLAG);
if (rc)
dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
qp->qp_num);
@@ -2227,6 +2282,7 @@ EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
unsigned int len)
{
+ struct ntb_transport_ctx *nt = qp->transport;
struct ntb_queue_entry *entry;
if (!qp)
@@ -2244,12 +2300,7 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
entry->errors = 0;
entry->rx_index = 0;
- ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
-
- if (qp->active)
- tasklet_schedule(&qp->rxc_db_work);
-
- return 0;
+ return nt->backend_ops.rx_enqueue(qp, entry);
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
@@ -2269,6 +2320,7 @@ EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
unsigned int len)
{
+ struct ntb_transport_ctx *nt = qp->transport;
struct ntb_queue_entry *entry;
int rc;
@@ -2285,15 +2337,7 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
return -EBUSY;
}
- entry->cb_data = cb;
- entry->buf = data;
- entry->len = len;
- entry->flags = 0;
- entry->errors = 0;
- entry->retries = 0;
- entry->tx_index = 0;
-
- rc = ntb_process_tx(qp, entry);
+ rc = nt->backend_ops.tx_enqueue(qp, entry, cb, data, len, 0);
if (rc)
ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
&qp->tx_free_q);
@@ -2415,10 +2459,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_max_size);
unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
- unsigned int head = qp->tx_index;
- unsigned int tail = qp->remote_rx_info->entry;
+ struct ntb_transport_ctx *nt = qp->transport;
- return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
+ return nt->backend_ops.tx_free_entry(qp);
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h
index 7243eb98a722..297099d42370 100644
--- a/include/linux/ntb_transport.h
+++ b/include/linux/ntb_transport.h
@@ -49,6 +49,8 @@
*/
struct ntb_transport_qp;
+struct ntb_transport_ctx;
+struct ntb_queue_entry;
struct ntb_transport_client {
struct device_driver driver;
@@ -84,3 +86,22 @@ void ntb_transport_link_up(struct ntb_transport_qp *qp);
void ntb_transport_link_down(struct ntb_transport_qp *qp);
bool ntb_transport_link_query(struct ntb_transport_qp *qp);
unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp);
+
+/**
+ * struct ntb_transport_backend_ops - backend-specific transport hooks
+ * @setup_qp_mw: Set up memory windows for a given queue pair.
+ * @tx_free_entry: Return the number of free TX entries for the queue pair.
+ * @tx_enqueue: Backend-specific TX enqueue implementation.
+ * @rx_enqueue: Backend-specific RX enqueue implementation.
+ * @rx_poll: Poll for RX completions / push new RX buffers.
+ * @debugfs_stats_show: Dump backend-specific statistics, if any.
+ */
+struct ntb_transport_backend_ops {
+ int (*setup_qp_mw)(struct ntb_transport_ctx *nt, unsigned int qp_num);
+ unsigned int (*tx_free_entry)(struct ntb_transport_qp *qp);
+ int (*tx_enqueue)(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
+ void *cb, void *data, unsigned int len, unsigned int flags);
+ int (*rx_enqueue)(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry);
+ void (*rx_poll)(struct ntb_transport_qp *qp);
+ void (*debugfs_stats_show)(struct seq_file *s, struct ntb_transport_qp *qp);
+};
--
2.48.1