[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <F62CF3D3-E605-4CBA-B171-5BB98594C658@oracle.com>
Date: Tue, 11 May 2021 10:59:52 +0000
From: Haakon Bugge <haakon.bugge@...cle.com>
To: Leon Romanovsky <leon@...nel.org>
CC: Doug Ledford <dledford@...hat.com>,
Jason Gunthorpe <jgg@...dia.com>,
Dennis Dalessandro <dennis.dalessandro@...nelisnetworks.com>,
Leon Romanovsky <leonro@...dia.com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
OFED mailing list <linux-rdma@...r.kernel.org>,
Mike Marciniszyn <mike.marciniszyn@...nelisnetworks.com>
Subject: Re: [PATCH rdma-next] RDMA/rdmavt: Decouple QP and SGE lists
allocations
> On 11 May 2021, at 12:36, Leon Romanovsky <leon@...nel.org> wrote:
>
> From: Leon Romanovsky <leonro@...dia.com>
>
> The rdmavt QP has fields that are both needed for the control and data
> path. Such a mixed declaration caused a very specific allocation flow
> with kzalloc_node and the SGE list embedded into the struct rvt_qp.
>
> This patch separates QP creation into two parts: regular memory allocation
> for the control path and specific code for the SGE list, while the access
> to the latter is performed through a dereferenced pointer.
>
> Such pointer and its context are expected to be in the cache, so
> performance difference is expected to be negligible, if any exists.
>
> Signed-off-by: Leon Romanovsky <leonro@...dia.com>
> ---
> Hi,
>
> This change is needed to convert QP to core allocation scheme. In that
> scheme QP is allocated outside of the driver and size of such allocation
> is constant and can be calculated at the compile time.
>
> Thanks
> ---
> drivers/infiniband/sw/rdmavt/qp.c | 13 ++++++++-----
> include/rdma/rdmavt_qp.h | 2 +-
> 2 files changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
> index 9d13db68283c..4522071fc220 100644
> --- a/drivers/infiniband/sw/rdmavt/qp.c
> +++ b/drivers/infiniband/sw/rdmavt/qp.c
> @@ -1077,7 +1077,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
> int err;
> struct rvt_swqe *swq = NULL;
> size_t sz;
> - size_t sg_list_sz;
> + size_t sg_list_sz = 0;
> struct ib_qp *ret = ERR_PTR(-ENOMEM);
> struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
> void *priv = NULL;
> @@ -1125,8 +1125,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
> if (!swq)
> return ERR_PTR(-ENOMEM);
>
> - sz = sizeof(*qp);
> - sg_list_sz = 0;
> if (init_attr->srq) {
> struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
>
> @@ -1136,10 +1134,13 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
> } else if (init_attr->cap.max_recv_sge > 1)
> sg_list_sz = sizeof(*qp->r_sg_list) *
> (init_attr->cap.max_recv_sge - 1);
> - qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
> - rdi->dparms.node);
> + qp = kzalloc(sizeof(*qp), GFP_KERNEL);
Why not kzalloc_node() here?
Thxs, Håkon
> if (!qp)
> goto bail_swq;
> + qp->r_sg_list =
> + kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
> + if (!qp->r_sg_list)
> + goto bail_qp;
> qp->allowed_ops = get_allowed_ops(init_attr->qp_type);
>
> RCU_INIT_POINTER(qp->next, NULL);
> @@ -1327,6 +1328,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
>
> bail_qp:
> kfree(qp->s_ack_queue);
> + kfree(qp->r_sg_list);
> kfree(qp);
>
> bail_swq:
> @@ -1761,6 +1763,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
> kvfree(qp->r_rq.kwq);
> rdi->driver_f.qp_priv_free(rdi, qp);
> kfree(qp->s_ack_queue);
> + kfree(qp->r_sg_list);
> rdma_destroy_ah_attr(&qp->remote_ah_attr);
> rdma_destroy_ah_attr(&qp->alt_ah_attr);
> free_ud_wq_attr(qp);
> diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
> index 8275954f5ce6..2e58d5e6ac0e 100644
> --- a/include/rdma/rdmavt_qp.h
> +++ b/include/rdma/rdmavt_qp.h
> @@ -444,7 +444,7 @@ struct rvt_qp {
> /*
> * This sge list MUST be last. Do not add anything below here.
> */
> - struct rvt_sge r_sg_list[] /* verified SGEs */
> + struct rvt_sge *r_sg_list /* verified SGEs */
> ____cacheline_aligned_in_smp;
> };
>
> --
> 2.31.1
>
Powered by blists - more mailing lists