[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251217151609.3162665-12-den@valinux.co.jp>
Date: Thu, 18 Dec 2025 00:15:45 +0900
From: Koichiro Den <den@...inux.co.jp>
To: Frank.Li@....com,
dave.jiang@...el.com,
ntb@...ts.linux.dev,
linux-pci@...r.kernel.org,
dmaengine@...r.kernel.org,
linux-renesas-soc@...r.kernel.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: mani@...nel.org,
kwilczynski@...nel.org,
kishon@...nel.org,
bhelgaas@...gle.com,
corbet@....net,
geert+renesas@...der.be,
magnus.damm@...il.com,
robh@...nel.org,
krzk+dt@...nel.org,
conor+dt@...nel.org,
vkoul@...nel.org,
joro@...tes.org,
will@...nel.org,
robin.murphy@....com,
jdmason@...zu.us,
allenbh@...il.com,
andrew+netdev@...n.ch,
davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
Basavaraj.Natikar@....com,
Shyam-sundar.S-k@....com,
kurt.schwemmer@...rosemi.com,
logang@...tatee.com,
jingoohan1@...il.com,
lpieralisi@...nel.org,
utkarsh02t@...il.com,
jbrunet@...libre.com,
dlemoal@...nel.org,
arnd@...db.de,
elfring@...rs.sourceforge.net,
den@...inux.co.jp
Subject: [RFC PATCH v3 11/35] NTB: ntb_transport: Move TX memory window setup into setup_qp_mw()
Historically both TX and RX have assumed the same per-QP MW slice
(tx_max_entry == remote rx_max_entry), while those are calculated
separately in different places (pre and post the link-up negotiation
point). This has been safe because nt->link_is_up is never set to true
unless the pre-determined qp_count values are the same on both sides, and
qp_count is typically limited to nt->mw_count, which should be carefully
configured by the admin.
However, setup_qp_mw() can actually split an MW and properly handle
multiple QPs in one MW, so qp_count need not be limited by nt->mw_count.
Once we relax that limitation, the pre-determined qp_count can differ
between the host side and the endpoint, and link-up negotiation can easily fail.
Move the TX MW configuration (per-QP offset and size) into
ntb_transport_setup_qp_mw() so that both RX and TX layout decisions are
centralized in a single helper. ntb_transport_init_queue() now deals
only with per-QP software state, not with MW layout.
This keeps the previous behaviour, while preparing for relaxing the
qp_count limitation and improving readability.
No functional change is intended.
Signed-off-by: Koichiro Den <den@...inux.co.jp>
---
drivers/ntb/ntb_transport.c | 76 ++++++++++++++++---------------------
1 file changed, 32 insertions(+), 44 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 57b4c0511927..42abd1ce02d5 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -569,7 +569,10 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
struct ntb_transport_mw *mw;
struct ntb_dev *ndev = nt->ndev;
struct ntb_queue_entry *entry;
- unsigned int rx_size, num_qps_mw;
+ phys_addr_t mw_base;
+ resource_size_t mw_size;
+ unsigned int rx_size, tx_size, num_qps_mw;
+ u64 qp_offset;
unsigned int mw_num, mw_count, qp_count;
unsigned int i;
int node;
@@ -588,13 +591,38 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
else
num_qps_mw = qp_count / mw_count;
- rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
- qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
- rx_size -= sizeof(struct ntb_rx_info);
+ mw_base = nt->mw_vec[mw_num].phys_addr;
+ mw_size = nt->mw_vec[mw_num].phys_size;
+
+ if (mw_size > mw->xlat_size)
+ mw_size = mw->xlat_size;
+ if (max_mw_size && mw_size > max_mw_size)
+ mw_size = max_mw_size;
+
+ tx_size = (unsigned int)mw_size / num_qps_mw;
+ qp_offset = tx_size * (qp_num / mw_count);
+
+ qp->rx_buff = mw->virt_addr + qp_offset;
+
+ qp->tx_mw_size = tx_size;
+ qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
+ if (!qp->tx_mw)
+ return -EINVAL;
+
+ qp->tx_mw_phys = mw_base + qp_offset;
+ if (!qp->tx_mw_phys)
+ return -EINVAL;
+ rx_size = tx_size;
+ rx_size -= sizeof(struct ntb_rx_info);
qp->remote_rx_info = qp->rx_buff + rx_size;
+ tx_size -= sizeof(struct ntb_rx_info);
+ qp->rx_info = qp->tx_mw + tx_size;
+
/* Due to housekeeping, there must be atleast 2 buffs */
+ qp->tx_max_frame = min(transport_mtu, tx_size / 2);
+ qp->tx_max_entry = tx_size / qp->tx_max_frame;
qp->rx_max_frame = min(transport_mtu, rx_size / 2);
qp->rx_max_entry = rx_size / qp->rx_max_frame;
qp->rx_index = 0;
@@ -1133,16 +1161,6 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
struct ntb_transport_qp *qp;
- phys_addr_t mw_base;
- resource_size_t mw_size;
- unsigned int num_qps_mw, tx_size;
- unsigned int mw_num, mw_count, qp_count;
- u64 qp_offset;
-
- mw_count = nt->mw_count;
- qp_count = nt->qp_count;
-
- mw_num = QP_TO_MW(nt, qp_num);
qp = &nt->qp_vec[qp_num];
qp->qp_num = qp_num;
@@ -1152,36 +1170,6 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
qp->event_handler = NULL;
ntb_qp_link_context_reset(qp);
- if (mw_num < qp_count % mw_count)
- num_qps_mw = qp_count / mw_count + 1;
- else
- num_qps_mw = qp_count / mw_count;
-
- mw_base = nt->mw_vec[mw_num].phys_addr;
- mw_size = nt->mw_vec[mw_num].phys_size;
-
- if (max_mw_size && mw_size > max_mw_size)
- mw_size = max_mw_size;
-
- tx_size = (unsigned int)mw_size / num_qps_mw;
- qp_offset = tx_size * (qp_num / mw_count);
-
- qp->tx_mw_size = tx_size;
- qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
- if (!qp->tx_mw)
- return -EINVAL;
-
- qp->tx_mw_phys = mw_base + qp_offset;
- if (!qp->tx_mw_phys)
- return -EINVAL;
-
- tx_size -= sizeof(struct ntb_rx_info);
- qp->rx_info = qp->tx_mw + tx_size;
-
- /* Due to housekeeping, there must be atleast 2 buffs */
- qp->tx_max_frame = min(transport_mtu, tx_size / 2);
- qp->tx_max_entry = tx_size / qp->tx_max_frame;
-
if (nt->debugfs_node_dir) {
char debugfs_name[8];
--
2.51.0
Powered by blists - more mailing lists