Message-ID:
<SA6PR21MB4231D6EED2D7C1D4BC7FBCC2CEE02@SA6PR21MB4231.namprd21.prod.outlook.com>
Date: Thu, 23 Jan 2025 06:02:55 +0000
From: Long Li <longli@...rosoft.com>
To: Konstantin Taranov <kotaranov@...ux.microsoft.com>, Konstantin Taranov
<kotaranov@...rosoft.com>, Shiraz Saleem <shirazsaleem@...rosoft.com>,
"pabeni@...hat.com" <pabeni@...hat.com>, Haiyang Zhang
<haiyangz@...rosoft.com>, KY Srinivasan <kys@...rosoft.com>,
"edumazet@...gle.com" <edumazet@...gle.com>, "kuba@...nel.org"
<kuba@...nel.org>, "davem@...emloft.net" <davem@...emloft.net>, Dexuan Cui
<decui@...rosoft.com>, "wei.liu@...nel.org" <wei.liu@...nel.org>,
"sharmaajay@...rosoft.com" <sharmaajay@...rosoft.com>, "jgg@...pe.ca"
<jgg@...pe.ca>, "leon@...nel.org" <leon@...nel.org>
CC: "linux-rdma@...r.kernel.org" <linux-rdma@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"linux-hyperv@...r.kernel.org" <linux-hyperv@...r.kernel.org>
Subject: RE: [PATCH rdma-next 11/13] RDMA/mana_ib: extend mana QP table
> Subject: [PATCH rdma-next 11/13] RDMA/mana_ib: extend mana QP table
>
> From: Konstantin Taranov <kotaranov@...rosoft.com>
>
> Enable mana QP table to store UD/GSI QPs.
> For send queues, set the most significant bit to one, as send and receive WQs can
> have the same ID in mana.
>
> Signed-off-by: Konstantin Taranov <kotaranov@...rosoft.com>
> Reviewed-by: Shiraz Saleem <shirazsaleem@...rosoft.com>
Reviewed-by: Long Li <longli@...rosoft.com>
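
A note for readers following along: the table keying is the heart of this
patch. Send and receive WQ IDs share one xarray, and bit 31 tags the
send-side entries so a send WQ and a receive WQ with the same hardware ID
cannot collide. A minimal sketch of the key computation (mana_qp_table_key
is a hypothetical name for illustration; the patch open-codes this in
mana_get_qp_ref() and the store helpers below):

    /* Illustration only: how a (WQ ID, direction) pair maps to an xarray key. */
    static u32 mana_qp_table_key(u32 wq_id, bool is_sq)
    {
            /* Bit 31 (MANA_SENDQ_MASK) disambiguates send from receive WQs. */
            return is_sq ? (wq_id | MANA_SENDQ_MASK) : wq_id;
    }

For example, a send WQ with ID 5 is stored at key 0x80000005 while receive
WQ 5 stays at key 0x5, so both fit in qp_table_wq at once.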
> ---
>  drivers/infiniband/hw/mana/main.c    |  2 +-
>  drivers/infiniband/hw/mana/mana_ib.h |  8 ++-
>  drivers/infiniband/hw/mana/qp.c      | 78 ++++++++++++++++++++++++++--
>  3 files changed, 83 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
> index b0c55cb..114e391 100644
> --- a/drivers/infiniband/hw/mana/main.c
> +++ b/drivers/infiniband/hw/mana/main.c
> @@ -704,7 +704,7 @@ mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
>          switch (event->type) {
>          case GDMA_EQE_RNIC_QP_FATAL:
>                  qpn = event->details[0];
> -                qp = mana_get_qp_ref(mdev, qpn);
> +                qp = mana_get_qp_ref(mdev, qpn, false);
>                  if (!qp)
>                          break;
>                  if (qp->ibqp.event_handler) {
> diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
> index bd34ad6..5e4ca55 100644
> --- a/drivers/infiniband/hw/mana/mana_ib.h
> +++ b/drivers/infiniband/hw/mana/mana_ib.h
> @@ -23,6 +23,9 @@
>  /* MANA doesn't have any limit for MR size */
>  #define MANA_IB_MAX_MR_SIZE U64_MAX
>
> +/* Send queue ID mask */
> +#define MANA_SENDQ_MASK BIT(31)
> +
>  /*
>   * The hardware limit of number of MRs is greater than maximum number of MRs
>   * that can possibly represent in 24 bits
> @@ -438,11 +441,14 @@ static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
>  }
>
>  static inline struct mana_ib_qp *mana_get_qp_ref(struct mana_ib_dev *mdev,
> -                                                 uint32_t qid)
> +                                                 u32 qid, bool is_sq)
>  {
>          struct mana_ib_qp *qp;
>          unsigned long flag;
>
> +        if (is_sq)
> +                qid |= MANA_SENDQ_MASK;
> +
>          xa_lock_irqsave(&mdev->qp_table_wq, flag);
>          qp = xa_load(&mdev->qp_table_wq, qid);
>          if (qp)
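
A quick usage sketch for the new is_sq flag, in case it helps review (the
caller and the wq_id variable here are hypothetical, not from this series):

    /* Hypothetical caller: resolve the QP behind a send-side WQ ID.
     * mana_get_qp_ref() ORs in MANA_SENDQ_MASK itself, so callers pass
     * the raw hardware WQ ID plus a direction flag.
     */
    struct mana_ib_qp *qp = mana_get_qp_ref(mdev, wq_id, true);

    if (qp) {
            /* ... use the QP while holding the reference ... */
            mana_put_qp_ref(qp);    /* pairs with the ref taken by the lookup */
    }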
> diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
> index 051ea03..2528046 100644
> --- a/drivers/infiniband/hw/mana/qp.c
> +++ b/drivers/infiniband/hw/mana/qp.c
> @@ -444,18 +444,82 @@ static enum gdma_queue_type mana_ib_queue_type(struct ib_qp_init_attr *attr, u32
>          return type;
>  }
>
> +static int mana_table_store_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
> +{
> +        return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
> +                             GFP_KERNEL);
> +}
> +
> +static void mana_table_remove_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
> +{
> +        xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
> +}
> +
> +static int mana_table_store_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
> +{
> +        u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
> +        u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
> +        int err;
> +
> +        err = xa_insert_irq(&mdev->qp_table_wq, qids, qp, GFP_KERNEL);
> +        if (err)
> +                return err;
> +
> +        err = xa_insert_irq(&mdev->qp_table_wq, qidr, qp, GFP_KERNEL);
> +        if (err)
> +                goto remove_sq;
> +
> +        return 0;
> +
> +remove_sq:
> +        xa_erase_irq(&mdev->qp_table_wq, qids);
> +        return err;
> +}
> +
> +static void mana_table_remove_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
> +{
> +        u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
> +        u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
> +
> +        xa_erase_irq(&mdev->qp_table_wq, qids);
> +        xa_erase_irq(&mdev->qp_table_wq, qidr);
> +}
> +
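
One xarray property worth calling out for reviewers: xa_insert_irq()
returns -EBUSY when an entry already occupies the index, so a duplicate WQ
ID is reported as an error rather than silently overwriting an existing QP,
and the remove_sq label unwinds the send-side entry if the receive-side
insert fails. Illustration (not patch code):

    err = xa_insert_irq(&mdev->qp_table_wq, qids, qp, GFP_KERNEL); /* first insert: 0 */
    err = xa_insert_irq(&mdev->qp_table_wq, qids, qp, GFP_KERNEL); /* same key again: -EBUSY */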
>  static int mana_table_store_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
>  {
>          refcount_set(&qp->refcount, 1);
>          init_completion(&qp->free);
> -        return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
> -                             GFP_KERNEL);
> +
> +        switch (qp->ibqp.qp_type) {
> +        case IB_QPT_RC:
> +                return mana_table_store_rc_qp(mdev, qp);
> +        case IB_QPT_UD:
> +        case IB_QPT_GSI:
> +                return mana_table_store_ud_qp(mdev, qp);
> +        default:
> +                ibdev_dbg(&mdev->ib_dev, "Unknown QP type for storing in mana table, %d\n",
> +                          qp->ibqp.qp_type);
> +        }
> +
> +        return -EINVAL;
>  }
>
>  static void mana_table_remove_qp(struct mana_ib_dev *mdev,
>                                   struct mana_ib_qp *qp)
>  {
> -        xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
> +        switch (qp->ibqp.qp_type) {
> +        case IB_QPT_RC:
> +                mana_table_remove_rc_qp(mdev, qp);
> +                break;
> +        case IB_QPT_UD:
> +        case IB_QPT_GSI:
> +                mana_table_remove_ud_qp(mdev, qp);
> +                break;
> +        default:
> +                ibdev_dbg(&mdev->ib_dev, "Unknown QP type for removing from mana table, %d\n",
> +                          qp->ibqp.qp_type);
> +                return;
> +        }
>          mana_put_qp_ref(qp);
>          wait_for_completion(&qp->free);
>  }
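
The removal path relies on the driver's existing refcount-plus-completion
idiom: erase the table entries so no new lookup can find the QP, drop the
reference taken in mana_table_store_qp(), then block until every concurrent
mana_get_qp_ref() user has dropped theirs. For readers without mana_ib.h
open, the put side of that idiom looks roughly like this (a sketch; the
in-tree body may differ in detail):

    /* Sketch of the put side of the ref/completion pattern. */
    static inline void mana_put_qp_ref(struct mana_ib_qp *qp)
    {
            if (refcount_dec_and_test(&qp->refcount))
                    complete(&qp->free);    /* last ref: let the remover return */
    }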
> @@ -586,8 +650,14 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
>          for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
>                  qp->ud_qp.queues[i].kmem->id = qp->ud_qp.queues[i].id;
>
> +        err = mana_table_store_qp(mdev, qp);
> +        if (err)
> +                goto destroy_qp;
> +
>          return 0;
>
> +destroy_qp:
> +        mana_ib_gd_destroy_ud_qp(mdev, qp);
>  destroy_shadow_queues:
>          destroy_shadow_queue(&qp->shadow_rq);
>          destroy_shadow_queue(&qp->shadow_sq);
> @@ -770,6 +840,8 @@ static int mana_ib_destroy_ud_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
>                  container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
>          int i;
>
> +        mana_table_remove_qp(mdev, qp);
> +
>          destroy_shadow_queue(&qp->shadow_rq);
>          destroy_shadow_queue(&qp->shadow_sq);
>
> --
> 2.43.0