Message-ID: <20251217151609.3162665-16-den@valinux.co.jp>
Date: Thu, 18 Dec 2025 00:15:49 +0900
From: Koichiro Den <den@...inux.co.jp>
To: Frank.Li@....com,
dave.jiang@...el.com,
ntb@...ts.linux.dev,
linux-pci@...r.kernel.org,
dmaengine@...r.kernel.org,
linux-renesas-soc@...r.kernel.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: mani@...nel.org,
kwilczynski@...nel.org,
kishon@...nel.org,
bhelgaas@...gle.com,
corbet@....net,
geert+renesas@...der.be,
magnus.damm@...il.com,
robh@...nel.org,
krzk+dt@...nel.org,
conor+dt@...nel.org,
vkoul@...nel.org,
joro@...tes.org,
will@...nel.org,
robin.murphy@....com,
jdmason@...zu.us,
allenbh@...il.com,
andrew+netdev@...n.ch,
davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
Basavaraj.Natikar@....com,
Shyam-sundar.S-k@....com,
kurt.schwemmer@...rosemi.com,
logang@...tatee.com,
jingoohan1@...il.com,
lpieralisi@...nel.org,
utkarsh02t@...il.com,
jbrunet@...libre.com,
dlemoal@...nel.org,
arnd@...db.de,
elfring@...rs.sourceforge.net,
den@...inux.co.jp
Subject: [RFC PATCH v3 15/35] NTB: ntb_transport: Move internal types to ntb_transport_internal.h
Move the ntb_transport internal data structures, flags and helper
declarations out of ntb_transport.c into a new ntb_transport_internal.h,
dropping the static qualifier from the symbols the header declares, so
that they can be shared with other files under drivers/ntb/.

No functional changes intended.
Signed-off-by: Koichiro Den <den@...inux.co.jp>
---
drivers/ntb/ntb_transport.c | 168 ++-------------------------
drivers/ntb/ntb_transport_internal.h | 164 ++++++++++++++++++++++++++
include/linux/ntb_transport.h | 5 +
3 files changed, 181 insertions(+), 156 deletions(-)
create mode 100644 drivers/ntb/ntb_transport_internal.h
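For context only (not part of this patch): once the declarations live in
ntb_transport_internal.h, another compilation unit under drivers/ntb/ could
reuse the shared list helpers roughly as in the hypothetical sketch below.
The file name and function are illustrative assumptions, not code from this
series.

#include <linux/spinlock.h>
#include <linux/list.h>

#include "ntb_transport_internal.h"

/*
 * Hypothetical helper: return a TX entry to the free list of a queue pair,
 * using ntb_list_add() and the struct layouts now exposed by the internal
 * header instead of duplicating them locally.
 */
static void example_requeue_tx_entry(struct ntb_transport_qp *qp,
				     struct ntb_queue_entry *entry)
{
	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}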
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 78d0469edbcc..3969fa29a5b9 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -65,6 +65,8 @@
#include "linux/ntb_transport.h"
#include <linux/pci-epc.h>
+#include "ntb_transport_internal.h"
+
#define NTB_TRANSPORT_VERSION 4
#define NTB_TRANSPORT_VER "4"
#define NTB_TRANSPORT_NAME "ntb_transport"
@@ -76,11 +78,11 @@ MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
-static unsigned long max_mw_size;
+unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
-static unsigned int transport_mtu = 0x10000;
+unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
@@ -96,7 +98,7 @@ static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
-static bool use_msi;
+bool use_msi;
#ifdef CONFIG_NTB_MSI
module_param(use_msi, bool, 0644);
MODULE_PARM_DESC(use_msi, "Use MSI interrupts instead of doorbells");
@@ -107,153 +109,12 @@ static struct dentry *nt_debugfs_dir;
/* Only two-ports NTB devices are supported */
#define PIDX NTB_DEF_PEER_IDX
-struct ntb_queue_entry {
- /* ntb_queue list reference */
- struct list_head entry;
- /* pointers to data to be transferred */
- void *cb_data;
- void *buf;
- unsigned int len;
- unsigned int flags;
- int retries;
- int errors;
- unsigned int tx_index;
- unsigned int rx_index;
-
- struct ntb_transport_qp *qp;
- union {
- struct ntb_payload_header __iomem *tx_hdr;
- struct ntb_payload_header *rx_hdr;
- };
-};
-
-struct ntb_rx_info {
- unsigned int entry;
-};
-
-struct ntb_transport_qp {
- struct ntb_transport_ctx *transport;
- struct ntb_dev *ndev;
- void *cb_data;
- struct dma_chan *tx_dma_chan;
- struct dma_chan *rx_dma_chan;
-
- bool client_ready;
- bool link_is_up;
- bool active;
-
- u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
- u64 qp_bit;
-
- struct ntb_rx_info __iomem *rx_info;
- struct ntb_rx_info *remote_rx_info;
-
- void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
- void *data, int len);
- struct list_head tx_free_q;
- spinlock_t ntb_tx_free_q_lock;
- void __iomem *tx_mw;
- phys_addr_t tx_mw_phys;
- size_t tx_mw_size;
- dma_addr_t tx_mw_dma_addr;
- unsigned int tx_index;
- unsigned int tx_max_entry;
- unsigned int tx_max_frame;
-
- void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
- void *data, int len);
- struct list_head rx_post_q;
- struct list_head rx_pend_q;
- struct list_head rx_free_q;
- /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
- spinlock_t ntb_rx_q_lock;
- void *rx_buff;
- unsigned int rx_index;
- unsigned int rx_max_entry;
- unsigned int rx_max_frame;
- unsigned int rx_alloc_entry;
- dma_cookie_t last_cookie;
- struct tasklet_struct rxc_db_work;
-
- void (*event_handler)(void *data, int status);
- struct delayed_work link_work;
- struct work_struct link_cleanup;
-
- struct dentry *debugfs_dir;
- struct dentry *debugfs_stats;
-
- /* Stats */
- u64 rx_bytes;
- u64 rx_pkts;
- u64 rx_ring_empty;
- u64 rx_err_no_buf;
- u64 rx_err_oflow;
- u64 rx_err_ver;
- u64 rx_memcpy;
- u64 rx_async;
- u64 tx_bytes;
- u64 tx_pkts;
- u64 tx_ring_full;
- u64 tx_err_no_buf;
- u64 tx_memcpy;
- u64 tx_async;
-
- bool use_msi;
- int msi_irq;
- struct ntb_msi_desc msi_desc;
- struct ntb_msi_desc peer_msi_desc;
-};
-
-struct ntb_transport_mw {
- phys_addr_t phys_addr;
- resource_size_t phys_size;
- void __iomem *vbase;
- size_t xlat_size;
- size_t buff_size;
- size_t alloc_size;
- void *alloc_addr;
- void *virt_addr;
- dma_addr_t dma_addr;
-};
-
struct ntb_transport_client_dev {
struct list_head entry;
struct ntb_transport_ctx *nt;
struct device dev;
};
-struct ntb_transport_ctx {
- struct list_head entry;
- struct list_head client_devs;
-
- struct ntb_dev *ndev;
-
- struct ntb_transport_mw *mw_vec;
- struct ntb_transport_qp *qp_vec;
- unsigned int mw_count;
- unsigned int qp_count;
- u64 qp_bitmap;
- u64 qp_bitmap_free;
-
- bool use_msi;
- unsigned int msi_spad_offset;
- u64 msi_db_mask;
-
- bool link_is_up;
- struct delayed_work link_work;
- struct work_struct link_cleanup;
-
- struct dentry *debugfs_node_dir;
-
- /* Make sure workq of link event be executed serially */
- struct mutex link_event_lock;
-};
-
-enum {
- DESC_DONE_FLAG = BIT(0),
- LINK_DOWN_FLAG = BIT(1),
-};
-
struct ntb_payload_header {
unsigned int ver;
unsigned int len;
@@ -268,7 +129,7 @@ struct ntb_payload_header {
* DMA capabilities and IOMMU configuration are taken from the
* controller rather than the virtual NTB PCI function.
*/
-static struct device *get_dma_dev(struct ntb_dev *ndev)
+struct device *get_dma_dev(struct ntb_dev *ndev)
{
struct device *dev = &ndev->pdev->dev;
struct pci_epc *epc;
@@ -295,7 +156,6 @@ enum {
#define drv_client(__drv) \
container_of((__drv), struct ntb_transport_client, driver)
-#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES 100
#define NTB_LINK_DOWN_TIMEOUT 10
@@ -532,8 +392,7 @@ static int ntb_qp_debugfs_stats_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(ntb_qp_debugfs_stats);
-static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
- struct list_head *list)
+void ntb_list_add(spinlock_t *lock, struct list_head *entry, struct list_head *list)
{
unsigned long flags;
@@ -542,8 +401,7 @@ static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
spin_unlock_irqrestore(lock, flags);
}
-static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
- struct list_head *list)
+struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock, struct list_head *list)
{
struct ntb_queue_entry *entry;
unsigned long flags;
@@ -562,9 +420,8 @@ static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
return entry;
}
-static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
- struct list_head *list,
- struct list_head *to_list)
+struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock, struct list_head *list,
+ struct list_head *to_list)
{
struct ntb_queue_entry *entry;
unsigned long flags;
@@ -982,7 +839,7 @@ static void ntb_qp_link_cleanup_work(struct work_struct *work)
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
-static void ntb_qp_link_down(struct ntb_transport_qp *qp)
+void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
schedule_work(&qp->link_cleanup);
}
@@ -1194,8 +1051,7 @@ static void ntb_qp_link_work(struct work_struct *work)
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}
-static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
- unsigned int qp_num)
+int ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num)
{
struct ntb_transport_qp *qp;
diff --git a/drivers/ntb/ntb_transport_internal.h b/drivers/ntb/ntb_transport_internal.h
new file mode 100644
index 000000000000..79c7dbcf6f91
--- /dev/null
+++ b/drivers/ntb/ntb_transport_internal.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _NTB_TRANSPORT_INTERNAL_H_
+#define _NTB_TRANSPORT_INTERNAL_H_
+
+#include <linux/ntb_transport.h>
+
+extern unsigned long max_mw_size;
+extern unsigned int transport_mtu;
+extern bool use_msi;
+
+#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
+
+struct ntb_queue_entry {
+ /* ntb_queue list reference */
+ struct list_head entry;
+ /* pointers to data to be transferred */
+ void *cb_data;
+ void *buf;
+ unsigned int len;
+ unsigned int flags;
+ int retries;
+ int errors;
+ unsigned int tx_index;
+ unsigned int rx_index;
+
+ struct ntb_transport_qp *qp;
+ union {
+ struct ntb_payload_header __iomem *tx_hdr;
+ struct ntb_payload_header *rx_hdr;
+ };
+};
+
+struct ntb_rx_info {
+ unsigned int entry;
+};
+
+struct ntb_transport_qp {
+ struct ntb_transport_ctx *transport;
+ struct ntb_dev *ndev;
+ void *cb_data;
+ struct dma_chan *tx_dma_chan;
+ struct dma_chan *rx_dma_chan;
+
+ bool client_ready;
+ bool link_is_up;
+ bool active;
+
+ u8 qp_num; /* Only 64 QP's are allowed. 0-63 */
+ u64 qp_bit;
+
+ struct ntb_rx_info __iomem *rx_info;
+ struct ntb_rx_info *remote_rx_info;
+
+ void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ struct list_head tx_free_q;
+ spinlock_t ntb_tx_free_q_lock;
+ void __iomem *tx_mw;
+ phys_addr_t tx_mw_phys;
+ size_t tx_mw_size;
+ dma_addr_t tx_mw_dma_addr;
+ unsigned int tx_index;
+ unsigned int tx_max_entry;
+ unsigned int tx_max_frame;
+
+ void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ struct list_head rx_post_q;
+ struct list_head rx_pend_q;
+ struct list_head rx_free_q;
+ /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+ spinlock_t ntb_rx_q_lock;
+ void *rx_buff;
+ unsigned int rx_index;
+ unsigned int rx_max_entry;
+ unsigned int rx_max_frame;
+ unsigned int rx_alloc_entry;
+ dma_cookie_t last_cookie;
+ struct tasklet_struct rxc_db_work;
+
+ void (*event_handler)(void *data, int status);
+ struct delayed_work link_work;
+ struct work_struct link_cleanup;
+
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_stats;
+
+ /* Stats */
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_ring_empty;
+ u64 rx_err_no_buf;
+ u64 rx_err_oflow;
+ u64 rx_err_ver;
+ u64 rx_memcpy;
+ u64 rx_async;
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_ring_full;
+ u64 tx_err_no_buf;
+ u64 tx_memcpy;
+ u64 tx_async;
+
+ bool use_msi;
+ int msi_irq;
+ struct ntb_msi_desc msi_desc;
+ struct ntb_msi_desc peer_msi_desc;
+};
+
+struct ntb_transport_mw {
+ phys_addr_t phys_addr;
+ resource_size_t phys_size;
+ void __iomem *vbase;
+ size_t xlat_size;
+ size_t buff_size;
+ size_t alloc_size;
+ void *alloc_addr;
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+struct ntb_transport_ctx {
+ struct list_head entry;
+ struct list_head client_devs;
+
+ struct ntb_dev *ndev;
+
+ struct ntb_transport_mw *mw_vec;
+ struct ntb_transport_qp *qp_vec;
+ unsigned int mw_count;
+ unsigned int qp_count;
+ u64 qp_bitmap;
+ u64 qp_bitmap_free;
+
+ bool use_msi;
+ unsigned int msi_spad_offset;
+ u64 msi_db_mask;
+
+ bool link_is_up;
+ struct delayed_work link_work;
+ struct work_struct link_cleanup;
+
+ struct dentry *debugfs_node_dir;
+
+ /* Make sure workq of link event be executed serially */
+ struct mutex link_event_lock;
+};
+
+enum {
+ DESC_DONE_FLAG = BIT(0),
+ LINK_DOWN_FLAG = BIT(1),
+};
+
+void ntb_list_add(spinlock_t *lock, struct list_head *entry, struct list_head *list);
+struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock, struct list_head *list);
+struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock, struct list_head *list,
+ struct list_head *to_list);
+void ntb_qp_link_down(struct ntb_transport_qp *qp);
+int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
+ unsigned int qp_num);
+struct device *get_dma_dev(struct ntb_dev *ndev);
+
+#endif /* _NTB_TRANSPORT_INTERNAL_H_ */
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h
index 7243eb98a722..b128ced77b39 100644
--- a/include/linux/ntb_transport.h
+++ b/include/linux/ntb_transport.h
@@ -48,6 +48,9 @@
* Jon Mason <jon.mason@...el.com>
*/
+#ifndef __LINUX_NTB_TRANSPORT_H
+#define __LINUX_NTB_TRANSPORT_H
+
struct ntb_transport_qp;
struct ntb_transport_client {
@@ -84,3 +87,5 @@ void ntb_transport_link_up(struct ntb_transport_qp *qp);
void ntb_transport_link_down(struct ntb_transport_qp *qp);
bool ntb_transport_link_query(struct ntb_transport_qp *qp);
unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp);
+
+#endif /* __LINUX_NTB_TRANSPORT_H */
--
2.51.0