Message-ID: <20260118135440.1958279-18-den@valinux.co.jp>
Date: Sun, 18 Jan 2026 22:54:19 +0900
From: Koichiro Den <den@...inux.co.jp>
To: Frank.Li@....com,
dave.jiang@...el.com,
cassel@...nel.org,
mani@...nel.org,
kwilczynski@...nel.org,
kishon@...nel.org,
bhelgaas@...gle.com,
geert+renesas@...der.be,
robh@...nel.org,
vkoul@...nel.org,
jdmason@...zu.us,
allenbh@...il.com,
jingoohan1@...il.com,
lpieralisi@...nel.org
Cc: linux-pci@...r.kernel.org,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-renesas-soc@...r.kernel.org,
devicetree@...r.kernel.org,
dmaengine@...r.kernel.org,
iommu@...ts.linux.dev,
ntb@...ts.linux.dev,
netdev@...r.kernel.org,
linux-kselftest@...r.kernel.org,
arnd@...db.de,
gregkh@...uxfoundation.org,
joro@...tes.org,
will@...nel.org,
robin.murphy@....com,
magnus.damm@...il.com,
krzk+dt@...nel.org,
conor+dt@...nel.org,
corbet@....net,
skhan@...uxfoundation.org,
andriy.shevchenko@...ux.intel.com,
jbrunet@...libre.com,
utkarsh02t@...il.com
Subject: [RFC PATCH v4 17/38] NTB: ntb_transport: Dynamically determine qp count

One MW can host multiple queue pairs, so stop limiting qp_count to the
number of MWs.

Now that both TX and RX MW sizing are done in the same place, the MW
layout is derived from a single code path on both host and endpoint, so
the layout cannot diverge between the two sides.

Signed-off-by: Koichiro Den <den@...inux.co.jp>
---
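The clamping rule added to ntb_transport_link_work() below can be
exercised with a minimal userspace sketch (plain C, not kernel code;
clamp_qp_count() and peer_num_qps are made-up names for illustration):
when the peer advertises fewer queue pairs than were set up locally, the
surplus queues are dropped and their bitmap bits cleared, so both sides
converge on the same qp count.

/*
 * Sketch of the NUM_QPS clamping rule: queues beyond the peer-advertised
 * count are dropped and their bits cleared from the bitmap.
 */
#include <stdint.h>
#include <stdio.h>

static void clamp_qp_count(unsigned int *qp_count, uint64_t *qp_bitmap,
                           unsigned int peer_num_qps)
{
        unsigned int i;

        if (peer_num_qps == 0 || peer_num_qps >= *qp_count)
                return; /* peer not ready yet, or nothing to clamp */

        for (i = peer_num_qps; i < *qp_count; i++)
                *qp_bitmap &= ~(1ULL << i); /* stop exposing queue i */

        *qp_count = peer_num_qps;
}

int main(void)
{
        unsigned int qp_count = 8;
        uint64_t qp_bitmap = (1ULL << qp_count) - 1;      /* 0xff */

        clamp_qp_count(&qp_count, &qp_bitmap, 3);         /* peer advertises 3 */
        printf("qp_count=%u qp_bitmap=%#llx\n",
               qp_count, (unsigned long long)qp_bitmap);  /* prints 3, 0x7 */
        return 0;
}

With the mw_count clamp removed from ntb_transport_probe(), qp_count is no
longer tied to the number of MWs; it is limited by max_num_clients and the
peer-advertised NUM_QPS value handled as sketched above.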
drivers/ntb/ntb_transport.c | 26 +++++++++++++++++++++++---
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 57a21f2daac6..6ed680d0470f 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -1022,7 +1022,9 @@ static void ntb_transport_link_work(struct work_struct *work)
 		container_of(work, struct ntb_transport_ctx, link_work.work);
 	struct ntb_dev *ndev = nt->ndev;
 	struct pci_dev *pdev = ndev->pdev;
+	struct ntb_transport_qp *qp;
 	resource_size_t size;
+	u64 qp_bitmap_free;
 	u32 val;
 	int rc = 0, i, spad;
 
@@ -1070,8 +1072,28 @@ static void ntb_transport_link_work(struct work_struct *work)
 	val = ntb_spad_read(ndev, NUM_QPS);
 	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
 
-	if (val != nt->qp_count)
+	if (val == 0) {
 		goto out;
+	} else if (val < nt->qp_count) {
+		/*
+		 * Clamp local qp_count to peer-advertised NUM_QPS to avoid
+		 * mismatched queues.
+		 */
+		qp_bitmap_free = nt->qp_bitmap_free;
+		for (i = val; i < nt->qp_count; i++) {
+			qp = &nt->qp_vec[i];
+			ntb_transport_free_queue(qp);
+			debugfs_remove_recursive(qp->debugfs_dir);
+
+			/* Do not expose the queue any longer */
+			nt->qp_bitmap &= ~BIT_ULL(i);
+			nt->qp_bitmap_free &= ~BIT_ULL(i);
+		}
+		dev_warn(&pdev->dev,
+			 "Local number of qps is reduced: %d->%d (%#llx->%#llx)\n",
+			 nt->qp_count, val, qp_bitmap_free, nt->qp_bitmap_free);
+		nt->qp_count = val;
+	}
 
 	val = ntb_spad_read(ndev, NUM_MWS);
 	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
@@ -1300,8 +1322,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 	if (max_num_clients && max_num_clients < qp_count)
 		qp_count = max_num_clients;
-	else if (nt->mw_count < qp_count)
-		qp_count = nt->mw_count;
 
 	qp_bitmap &= BIT_ULL(qp_count) - 1;
--
2.51.0