Message-ID: <20251217151609.3162665-13-den@valinux.co.jp>
Date: Thu, 18 Dec 2025 00:15:46 +0900
From: Koichiro Den <den@...inux.co.jp>
To: Frank.Li@....com,
dave.jiang@...el.com,
ntb@...ts.linux.dev,
linux-pci@...r.kernel.org,
dmaengine@...r.kernel.org,
linux-renesas-soc@...r.kernel.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: mani@...nel.org,
kwilczynski@...nel.org,
kishon@...nel.org,
bhelgaas@...gle.com,
corbet@....net,
geert+renesas@...der.be,
magnus.damm@...il.com,
robh@...nel.org,
krzk+dt@...nel.org,
conor+dt@...nel.org,
vkoul@...nel.org,
joro@...tes.org,
will@...nel.org,
robin.murphy@....com,
jdmason@...zu.us,
allenbh@...il.com,
andrew+netdev@...n.ch,
davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
Basavaraj.Natikar@....com,
Shyam-sundar.S-k@....com,
kurt.schwemmer@...rosemi.com,
logang@...tatee.com,
jingoohan1@...il.com,
lpieralisi@...nel.org,
utkarsh02t@...il.com,
jbrunet@...libre.com,
dlemoal@...nel.org,
arnd@...db.de,
elfring@...rs.sourceforge.net,
den@...inux.co.jp
Subject: [RFC PATCH v3 12/35] NTB: ntb_transport: Dynamically determine qp count
A single memory window (MW) can host multiple queue pairs, so stop
limiting qp_count to the number of MWs.
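
For context, a minimal standalone sketch (not part of this patch; the
function name, parameters, and the 5-qps/2-MWs numbers are illustrative)
of the striping that mainline ntb_transport already uses, following the
logic of QP_TO_MW() and ntb_transport_setup_qp_mw(), which is what lets
several qps share one MW:

  #include <stdio.h>

  /*
   * Illustrative stand-in for the qp-to-MW striping in
   * ntb_transport: qps are assigned to MWs round-robin, and each
   * MW is divided into equal slices for the qps it hosts.
   */
  static unsigned long long qp_rx_offset(unsigned int qp_num,
                                         unsigned int qp_count,
                                         unsigned int mw_count,
                                         unsigned long long mw_size)
  {
          unsigned int mw_num = qp_num % mw_count;  /* QP_TO_MW() */
          unsigned int num_qps_mw;

          /* low-numbered MWs absorb the remainder qps */
          if (mw_num < qp_count % mw_count)
                  num_qps_mw = qp_count / mw_count + 1;
          else
                  num_qps_mw = qp_count / mw_count;

          /* each qp on this MW gets an equal slice */
          return (mw_size / num_qps_mw) * (qp_num / mw_count);
  }

  int main(void)
  {
          unsigned int qp;

          /* e.g. 5 qps over 2 MWs: MW0 hosts qps 0/2/4, MW1 hosts 1/3 */
          for (qp = 0; qp < 5; qp++)
                  printf("qp%u -> MW%u offset %llu\n", qp, qp % 2,
                         qp_rx_offset(qp, 5, 2, 1ULL << 20));
          return 0;
  }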
Now that both TX and RX MW sizing are done in the same place, host and
endpoint derive the MW layout from a single code path, so the layout
cannot diverge between the two sides. If the peer advertises fewer qps
via the NUM_QPS scratchpad, clamp the local qp_count and qp bitmaps to
the advertised value so both sides agree on the set of usable queues.
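
To make the clamp concrete, here is a standalone sketch (illustrative
only; the patch itself clears the bits one by one with BIT_ULL() so it
can log the before/after bitmaps) of trimming a qp bitmap to a
peer-advertised count:

  #include <stdio.h>

  /*
   * Keep only bits [0, peer_qps). Assumes peer_qps < 64 so the
   * shift is well defined, matching the u64 qp bitmaps used by
   * ntb_transport.
   */
  static unsigned long long clamp_qp_bitmap(unsigned long long bitmap,
                                            unsigned int peer_qps)
  {
          return bitmap & ((1ULL << peer_qps) - 1);
  }

  int main(void)
  {
          /* local side sized for 8 qps, peer advertises 5 */
          unsigned long long map = (1ULL << 8) - 1;

          printf("0x%llx -> 0x%llx\n", map, clamp_qp_bitmap(map, 5));
          /* prints: 0xff -> 0x1f */
          return 0;
  }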
Signed-off-by: Koichiro Den <den@...inux.co.jp>
---
drivers/ntb/ntb_transport.c | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 42abd1ce02d5..bac842177b55 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -1024,6 +1024,7 @@ static void ntb_transport_link_work(struct work_struct *work)
struct ntb_dev *ndev = nt->ndev;
struct pci_dev *pdev = ndev->pdev;
resource_size_t size;
+ u64 qp_bitmap_free;
u32 val;
int rc = 0, i, spad;
@@ -1071,8 +1072,23 @@ static void ntb_transport_link_work(struct work_struct *work)
val = ntb_spad_read(ndev, NUM_QPS);
dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
- if (val != nt->qp_count)
+ if (val == 0)
goto out;
+ else if (val < nt->qp_count) {
+ /*
+ * Clamp local qp_count to peer-advertised NUM_QPS to avoid
+ * mismatched queues.
+ */
+ qp_bitmap_free = nt->qp_bitmap_free;
+ for (i = val; i < nt->qp_count; i++) {
+ nt->qp_bitmap &= ~BIT_ULL(i);
+ nt->qp_bitmap_free &= ~BIT_ULL(i);
+ }
+ dev_warn(&pdev->dev,
+ "Local number of qps is reduced: %d->%d (qp_bitmap_free: 0x%llx->0x%llx)\n",
+ nt->qp_count, val, qp_bitmap_free, nt->qp_bitmap_free);
+ nt->qp_count = val;
+ }
val = ntb_spad_read(ndev, NUM_MWS);
dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
@@ -1301,8 +1317,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
if (max_num_clients && max_num_clients < qp_count)
qp_count = max_num_clients;
- else if (nt->mw_count < qp_count)
- qp_count = nt->mw_count;
qp_bitmap &= BIT_ULL(qp_count) - 1;
--
2.51.0