Message-ID: <20260127141929.GV13967@unreal>
Date: Tue, 27 Jan 2026 16:19:29 +0200
From: Leon Romanovsky <leon@...nel.org>
To: Konstantin Taranov <kotaranov@...ux.microsoft.com>
Cc: kotaranov@...rosoft.com, shirazsaleem@...rosoft.com,
longli@...rosoft.com, jgg@...pe.ca, linux-rdma@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH rdma-next v2 1/1] RDMA/mana_ib: device memory support
On Tue, Jan 27, 2026 at 12:26:49AM -0800, Konstantin Taranov wrote:
> From: Konstantin Taranov <kotaranov@...rosoft.com>
>
> Basic implementation of DM (device memory), allowing DM memory to be
> created and registered, and its memory keys to be used for networking.
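
For context, the userspace flow this enables looks roughly like the
following (a sketch against rdma-core's ibv_alloc_dm()/ibv_memcpy_to_dm()/
ibv_reg_dm_mr(); "ctx" and "pd" stand for an already-opened device context
and PD, sizes are illustrative, error handling trimmed):

	struct ibv_alloc_dm_attr dm_attr = {
		.length = 4096,		/* illustrative size */
		.log_align_req = 12,	/* illustrative alignment */
	};
	struct ibv_dm *dm;
	struct ibv_mr *mr;
	char buf[64] = "payload";

	/* allocate on-device memory and copy host data into it */
	dm = ibv_alloc_dm(ctx, &dm_attr);
	ibv_memcpy_to_dm(dm, 0, buf, sizeof(buf));

	/* register it; DM MRs are zero-based per the verbs spec */
	mr = ibv_reg_dm_mr(pd, dm, 0, dm_attr.length,
			   IBV_ACCESS_ZERO_BASED | IBV_ACCESS_LOCAL_WRITE |
			   IBV_ACCESS_REMOTE_READ);

	/* mr->lkey/mr->rkey can then be used in WQEs as usual */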
>
> Signed-off-by: Konstantin Taranov <kotaranov@...rosoft.com>
> ---
> v2: removed debug prints. Removed a comment and allowed destroy DM to
> fail, which can be useful once mana adds support for binding to DM memory.
> drivers/infiniband/hw/mana/device.c | 7 ++
> drivers/infiniband/hw/mana/mana_ib.h | 12 +++
> drivers/infiniband/hw/mana/mr.c | 130 +++++++++++++++++++++++++++
> include/net/mana/gdma.h | 47 +++++++++-
> 4 files changed, 193 insertions(+), 3 deletions(-)
<...>
> + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
> + if (err || resp.hdr.status) {
> + if (!err)
> + err = -EPROTO;
> +
> + return err;
> + }
Please submit a patch that adds the `resp.hdr.status` check to
`mana_gd_send_request()`, and update all callers to rely solely on this
function's return value.
➜ kernel git:(wip/leon-for-next) git grep -A 1 mana_gd_send_request
...
drivers/infiniband/hw/mana/mr.c: err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
drivers/infiniband/hw/mana/mr.c- if (err || resp.hdr.status) {
--
drivers/infiniband/hw/mana/mr.c: err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
drivers/infiniband/hw/mana/mr.c- if (err || resp.hdr.status) {
--
drivers/infiniband/hw/mana/mr.c: err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
drivers/infiniband/hw/mana/mr.c- if (err || resp.hdr.status) {
....
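
Something like this, as a sketch (the status fold-in assumes every GDMA
response begins with struct gdma_resp_hdr, which the callers above
already rely on; callers that log the raw status value would need that
logging moved into the helper):

	int mana_gd_send_request(struct gdma_context *gc, u32 req_len,
				 const void *req, u32 resp_len, void *resp)
	{
		struct hw_channel_context *hwc = gc->hwc.driver_data;
		const struct gdma_resp_hdr *hdr = resp;
		int err;

		err = mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
		if (err)
			return err;

		/* Fold the HW status into the return value so callers
		 * only need to check err.
		 */
		return hdr->status ? -EPROTO : 0;
	}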
Thanks
> +
> + dm->dm_handle = resp.dm_handle;
> +
> + return 0;
> +}
> +
> +struct ib_dm *mana_ib_alloc_dm(struct ib_device *ibdev,
> + struct ib_ucontext *context,
> + struct ib_dm_alloc_attr *attr,
> + struct uverbs_attr_bundle *attrs)
> +{
> + struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
> + struct mana_ib_dm *dm;
> + int err;
> +
> + dm = kzalloc(sizeof(*dm), GFP_KERNEL);
> + if (!dm)
> + return ERR_PTR(-ENOMEM);
> +
> + err = mana_ib_gd_alloc_dm(dev, dm, attr);
> + if (err)
> + goto err_free;
> +
> + return &dm->ibdm;
> +
> +err_free:
> + kfree(dm);
> + return ERR_PTR(err);
> +}
> +
> +static int mana_ib_gd_destroy_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm)
> +{
> + struct gdma_context *gc = mdev_to_gc(mdev);
> + struct gdma_destroy_dm_resp resp = {};
> + struct gdma_destroy_dm_req req = {};
> + int err;
> +
> + mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DM, sizeof(req), sizeof(resp));
> + req.dm_handle = dm->dm_handle;
> +
> + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
> + if (err || resp.hdr.status) {
> + if (!err)
> + err = -EPROTO;
> +
> + return err;
> + }
> +
> + return 0;
> +}
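
Same pattern here; with the mana_gd_send_request() change suggested
above, the tail of this helper collapses to a single line:

	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);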
> +
> +int mana_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
> +{
> + struct mana_ib_dev *dev = container_of(ibdm->device, struct mana_ib_dev, ib_dev);
> + struct mana_ib_dm *dm = container_of(ibdm, struct mana_ib_dm, ibdm);
> + int err;
> +
> + err = mana_ib_gd_destroy_dm(dev, dm);
> + if (err)
> + return err;
> +
> + kfree(dm);
> + return 0;
> +}
> +
> +struct ib_mr *mana_ib_reg_dm_mr(struct ib_pd *ibpd, struct ib_dm *ibdm,
> + struct ib_dm_mr_attr *attr,
> + struct uverbs_attr_bundle *attrs)
> +{
> + struct mana_ib_dev *dev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
> + struct mana_ib_dm *mana_dm = container_of(ibdm, struct mana_ib_dm, ibdm);
> + struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
> + struct gdma_create_mr_params mr_params = {};
> + struct mana_ib_mr *mr;
> + int err;
> +
> + attr->access_flags &= ~IB_ACCESS_OPTIONAL;
> + if (attr->access_flags & ~VALID_MR_FLAGS)
> + return ERR_PTR(-EOPNOTSUPP);
> +
> + mr = kzalloc(sizeof(*mr), GFP_KERNEL);
> + if (!mr)
> + return ERR_PTR(-ENOMEM);
> +
> + mr_params.pd_handle = pd->pd_handle;
> + mr_params.mr_type = GDMA_MR_TYPE_DM;
> + mr_params.da.dm_handle = mana_dm->dm_handle;
> + mr_params.da.offset = attr->offset;
> + mr_params.da.length = attr->length;
> + mr_params.da.access_flags =
> + mana_ib_verbs_to_gdma_access_flags(attr->access_flags);
> +
> + err = mana_ib_gd_create_mr(dev, mr, &mr_params);
> + if (err)
> + goto err_free;
> +
> + return &mr->ibmr;
> +
> +err_free:
> + kfree(mr);
> + return ERR_PTR(err);
> +}
> diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
> index eaa27483f..8649eb789 100644
> --- a/include/net/mana/gdma.h
> +++ b/include/net/mana/gdma.h
> @@ -35,6 +35,8 @@ enum gdma_request_type {
> GDMA_CREATE_MR = 31,
> GDMA_DESTROY_MR = 32,
> GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
> + GDMA_ALLOC_DM = 96, /* 0x60 */
> + GDMA_DESTROY_DM = 97, /* 0x61 */
> };
>
> #define GDMA_RESOURCE_DOORBELL_PAGE 27
> @@ -861,6 +863,8 @@ enum gdma_mr_type {
> GDMA_MR_TYPE_GVA = 2,
> /* Guest zero-based address MRs */
> GDMA_MR_TYPE_ZBVA = 4,
> + /* Device address MRs */
> + GDMA_MR_TYPE_DM = 5,
> };
>
> struct gdma_create_mr_params {
> @@ -876,6 +880,12 @@ struct gdma_create_mr_params {
> u64 dma_region_handle;
> enum gdma_mr_access_flags access_flags;
> } zbva;
> + struct {
> + u64 dm_handle;
> + u64 offset;
> + u64 length;
> + enum gdma_mr_access_flags access_flags;
> + } da;
> };
> };
>
> @@ -890,13 +900,23 @@ struct gdma_create_mr_request {
> u64 dma_region_handle;
> u64 virtual_address;
> enum gdma_mr_access_flags access_flags;
> - } gva;
> + } __packed gva;
> struct {
> u64 dma_region_handle;
> enum gdma_mr_access_flags access_flags;
> - } zbva;
> - };
> + } __packed zbva;
> + struct {
> + u64 dm_handle;
> + u64 offset;
> + enum gdma_mr_access_flags access_flags;
> + } __packed da;
> + } __packed;
> u32 reserved_2;
> + union {
> + struct {
> + u64 length;
> + } da_ext;
> + };
> };/* HW DATA */
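
Since the wire layout is exactly what these __packed annotations pin
down, compile-time checks next to the struct may be worth adding; a
sketch (the relations below follow from the __packed layout itself, not
from a HW spec):

	static_assert(offsetof(struct gdma_create_mr_request, gva.virtual_address) ==
		      offsetof(struct gdma_create_mr_request, gva.dma_region_handle) +
		      sizeof(u64));
	static_assert(offsetof(struct gdma_create_mr_request, da.offset) ==
		      offsetof(struct gdma_create_mr_request, da.dm_handle) +
		      sizeof(u64));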
>
> struct gdma_create_mr_response {
> @@ -915,6 +935,27 @@ struct gdma_destroy_mr_response {
> struct gdma_resp_hdr hdr;
> };/* HW DATA */
>
> +struct gdma_alloc_dm_req {
> + struct gdma_req_hdr hdr;
> + u64 length;
> + u32 alignment;
> + u32 flags;
> +}; /* HW DATA */
> +
> +struct gdma_alloc_dm_resp {
> + struct gdma_resp_hdr hdr;
> + u64 dm_handle;
> +}; /* HW DATA */
> +
> +struct gdma_destroy_dm_req {
> + struct gdma_req_hdr hdr;
> + u64 dm_handle;
> +}; /* HW DATA */
> +
> +struct gdma_destroy_dm_resp {
> + struct gdma_resp_hdr hdr;
> +}; /* HW DATA */
> +
> int mana_gd_verify_vf_version(struct pci_dev *pdev);
>
> int mana_gd_register_device(struct gdma_dev *gd);
> --
> 2.43.0
>