Message-Id: <1700709010-22042-2-git-send-email-longli@linuxonhyperv.com>
Date: Wed, 22 Nov 2023 19:10:07 -0800
From: longli@...uxonhyperv.com
To: Jason Gunthorpe <jgg@...pe.ca>, Leon Romanovsky <leon@...nel.org>,
Ajay Sharma <sharmaajay@...rosoft.com>,
Dexuan Cui <decui@...rosoft.com>,
"K. Y. Srinivasan" <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Wei Liu <wei.liu@...nel.org>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: linux-rdma@...r.kernel.org, linux-hyperv@...r.kernel.org,
netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
Long Li <longli@...rosoft.com>
Subject: [Patch v1 1/4] RDMA/mana_ib: register RDMA device with GDMA
From: Long Li <longli@...rosoft.com>

MANA hardware supports RC queue pairs over RoCE. Software needs to register
with the RDMA management interface on the SoC to access this feature.

Signed-off-by: Long Li <longli@...rosoft.com>
---
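Note on the flow this patch sets up (a simplified sketch, not the literal
driver code; it only mirrors the calls in the diff below, with locals and
error labels reduced for readability):

	/* sketch: register the RDMA client with GDMA before exposing the ib_device */
	static int mana_ib_probe_sketch(struct mana_ib_dev *dev, struct gdma_dev *mdev)
	{
		struct gdma_context *gc = mdev->gdma_context;
		int ret;

		/* ask the RDMA management interface on the SoC for the RDMA device */
		ret = mana_gd_register_device(&gc->mana_ib);
		if (ret)
			return ret;

		/* dev->gdma_dev now refers to the RDMA device, not the ethernet one */
		dev->gdma_dev = &gc->mana_ib;

		ret = ib_register_device(&dev->ib_dev, "mana_%d", gc->dev);
		if (ret)
			mana_gd_deregister_device(dev->gdma_dev);

		return ret;
	}
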
drivers/infiniband/hw/mana/device.c | 25 ++++++++++++++++---
drivers/infiniband/hw/mana/main.c | 4 +--
drivers/infiniband/hw/mana/qp.c | 15 +++++------
.../net/ethernet/microsoft/mana/gdma_main.c | 5 ++++
include/net/mana/gdma.h | 4 +++
5 files changed, 41 insertions(+), 12 deletions(-)
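
Since dev->gdma_dev now points at the RDMA GDMA device, the mana_ib code that
needs the ethernet device (for vport configuration and net_device lookups)
reaches it through the shared gdma_context instead, which is what the main.c
and qp.c hunks below do. A minimal sketch of that access pattern (the helper
name is illustrative only and does not exist in the driver):

	/* sketch: resolve the ethernet gdma_dev from the RDMA device */
	static struct gdma_dev *mana_ib_eth_gd_sketch(struct mana_ib_dev *dev)
	{
		/* both devices hang off the same gdma_context */
		return &dev->gdma_dev->gdma_context->mana;
	}
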
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index d4541b8707e4..ee29ddf36cf3 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -68,7 +68,6 @@ static int mana_ib_probe(struct auxiliary_device *adev,
ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
- dev->gdma_dev = mdev;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
/*
@@ -78,16 +77,33 @@ static int mana_ib_probe(struct auxiliary_device *adev,
dev->ib_dev.num_comp_vectors = 1;
dev->ib_dev.dev.parent = mdev->gdma_context->dev;
- ret = ib_register_device(&dev->ib_dev, "mana_%d",
- mdev->gdma_context->dev);
+ ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
+ ret);
+ goto free_ib_device;
+ }
+ dev->gdma_dev = &mdev->gdma_context->mana_ib;
+
if (ret) {
ib_dealloc_device(&dev->ib_dev);
return ret;
}
+ ret = ib_register_device(&dev->ib_dev, "mana_%d",
+ mdev->gdma_context->dev);
+ if (ret)
+ goto destroy_adapter;
+
dev_set_drvdata(&adev->dev, dev);
return 0;
+
+destroy_adapter:
+ mana_gd_deregister_device(dev->gdma_dev);
+free_ib_device:
+ ib_dealloc_device(&dev->ib_dev);
+ return ret;
}
static void mana_ib_remove(struct auxiliary_device *adev)
@@ -95,6 +111,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
ib_unregister_device(&dev->ib_dev);
+
+ mana_gd_deregister_device(dev->gdma_dev);
+
ib_dealloc_device(&dev->ib_dev);
}
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 7be4c3adb4e2..53730306ed9b 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -8,7 +8,7 @@
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port)
{
- struct gdma_dev *gd = dev->gdma_dev;
+ struct gdma_dev *gd = &dev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
struct net_device *ndev;
struct mana_context *mc;
@@ -31,7 +31,7 @@ void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
u32 doorbell_id)
{
- struct gdma_dev *mdev = dev->gdma_dev;
+ struct gdma_dev *mdev = &dev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
struct mana_context *mc;
struct net_device *ndev;
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 4b3b5b274e84..ae45d28eef5e 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -21,8 +21,8 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
u32 req_buf_size;
int i, err;
- mdev = dev->gdma_dev;
- gc = mdev->gdma_context;
+ gc = dev->gdma_dev->gdma_context;
+ mdev = &gc->mana;
req_buf_size =
sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
@@ -102,20 +102,21 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
struct mana_ib_create_qp_rss_resp resp = {};
struct mana_ib_create_qp_rss ucmd = {};
- struct gdma_dev *gd = mdev->gdma_dev;
mana_handle_t *mana_ind_table;
struct mana_port_context *mpc;
+ unsigned int ind_tbl_size;
struct mana_context *mc;
struct net_device *ndev;
struct mana_ib_cq *cq;
struct mana_ib_wq *wq;
- unsigned int ind_tbl_size;
+ struct gdma_dev *gd;
struct ib_cq *ibcq;
struct ib_wq *ibwq;
int i = 0;
u32 port;
int ret;
+ gd = &mdev->gdma_dev->gdma_context->mana;
mc = gd->driver_data;
if (!udata || udata->inlen < sizeof(ucmd))
@@ -266,8 +267,8 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
struct mana_ib_ucontext *mana_ucontext =
rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
ibucontext);
+ struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
struct mana_ib_create_qp_resp resp = {};
- struct gdma_dev *gd = mdev->gdma_dev;
struct mana_ib_create_qp ucmd = {};
struct mana_obj_spec wq_spec = {};
struct mana_obj_spec cq_spec = {};
@@ -437,7 +438,7 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
{
struct mana_ib_dev *mdev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
- struct gdma_dev *gd = mdev->gdma_dev;
+ struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
struct mana_context *mc;
struct net_device *ndev;
@@ -464,7 +465,7 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
struct mana_ib_dev *mdev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
- struct gdma_dev *gd = mdev->gdma_dev;
+ struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
struct ib_pd *ibpd = qp->ibqp.pd;
struct mana_port_context *mpc;
struct mana_context *mc;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 6367de0c2c2e..02e50ed632ee 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -158,6 +158,9 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
if (dev_type == GDMA_DEVICE_MANA) {
gc->mana.gdma_context = gc;
gc->mana.dev_id = dev;
+ } else if (dev_type == GDMA_DEVICE_MANA_IB) {
+ gc->mana_ib.dev_id = dev;
+ gc->mana_ib.gdma_context = gc;
}
}
@@ -971,6 +974,7 @@ int mana_gd_register_device(struct gdma_dev *gd)
return 0;
}
+EXPORT_SYMBOL(mana_gd_register_device);
int mana_gd_deregister_device(struct gdma_dev *gd)
{
@@ -1001,6 +1005,7 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
return err;
}
+EXPORT_SYMBOL(mana_gd_deregister_device);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 88b6ef7ce1a6..000f0d7670f7 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -66,6 +66,7 @@ enum {
GDMA_DEVICE_NONE = 0,
GDMA_DEVICE_HWC = 1,
GDMA_DEVICE_MANA = 2,
+ GDMA_DEVICE_MANA_IB = 3,
};
struct gdma_resource {
@@ -387,6 +388,9 @@ struct gdma_context {
/* Azure network adapter */
struct gdma_dev mana;
+
+ /* Azure RDMA adapter */
+ struct gdma_dev mana_ib;
};
#define MAX_NUM_GDMA_DEVICES 4
--
2.34.1