Message-ID: <20231209173352.GC5817@kernel.org>
Date: Sat, 9 Dec 2023 17:33:52 +0000
From: Simon Horman <horms@...nel.org>
To: longli@...uxonhyperv.com
Cc: Jason Gunthorpe <jgg@...pe.ca>, Leon Romanovsky <leon@...nel.org>,
	Ajay Sharma <sharmaajay@...rosoft.com>,
	Dexuan Cui <decui@...rosoft.com>,
	"K. Y. Srinivasan" <kys@...rosoft.com>,
	Haiyang Zhang <haiyangz@...rosoft.com>,
	Wei Liu <wei.liu@...nel.org>,
	"David S. Miller" <davem@...emloft.net>,
	Eric Dumazet <edumazet@...gle.com>,
	Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
	linux-rdma@...r.kernel.org, linux-hyperv@...r.kernel.org,
	netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
	Long Li <longli@...rosoft.com>
Subject: Re: [Patch v2 3/3] RDMA/mana_ib: Add CQ interrupt support for RAW QP

On Mon, Dec 04, 2023 at 03:02:59PM -0800, longli@...uxonhyperv.com wrote:
> From: Long Li <longli@...rosoft.com>
> 
> At probing time, the MANA core code allocates EQs for supporting interrupts
> on Ethernet queues. The same interrupt mechanism is used by RAW QP.
> 
> Use the same EQs for delivering interrupts on the CQ for the RAW QP.
> 
> Signed-off-by: Long Li <longli@...rosoft.com>

Hi Long Li,

some minor feedback from my side.

...

> diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
> index 4667b18ec1dd..186d9829bb93 100644
> --- a/drivers/infiniband/hw/mana/qp.c
> +++ b/drivers/infiniband/hw/mana/qp.c
> @@ -99,25 +99,34 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
>  	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
>  	struct mana_ib_dev *mdev =
>  		container_of(pd->device, struct mana_ib_dev, ib_dev);
> +	struct ib_ucontext *ib_ucontext = pd->uobject->context;
>  	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
>  	struct mana_ib_create_qp_rss_resp resp = {};
>  	struct mana_ib_create_qp_rss ucmd = {};
> +	struct mana_ib_ucontext *mana_ucontext;
> +	struct gdma_queue **gdma_cq_allocated;
>  	mana_handle_t *mana_ind_table;
>  	struct mana_port_context *mpc;
> +	struct gdma_queue *gdma_cq;
>  	unsigned int ind_tbl_size;
>  	struct mana_context *mc;
>  	struct net_device *ndev;
> +	struct gdma_context *gc;
>  	struct mana_ib_cq *cq;
>  	struct mana_ib_wq *wq;
>  	struct gdma_dev *gd;
> +	struct mana_eq *eq;
>  	struct ib_cq *ibcq;
>  	struct ib_wq *ibwq;
>  	int i = 0;
>  	u32 port;
>  	int ret;
>  
> -	gd = &mdev->gdma_dev->gdma_context->mana;
> +	gc = mdev->gdma_dev->gdma_context;
> +	gd = &gc->mana;
>  	mc = gd->driver_data;
> +	mana_ucontext =
> +		container_of(ib_ucontext, struct mana_ib_ucontext, ibucontext);
>  
>  	if (!udata || udata->inlen < sizeof(ucmd))
>  		return -EINVAL;

nit: mana_ucontext appears to be set but unused.

     Flagged by W=1 builds.
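
     In case it helps, a minimal standalone illustration (not the
     driver code itself) of the kind of local that W=1 flags via
     -Wunused-but-set-variable:

	/* compile with: gcc -Wall -c demo.c */
	int demo(void)
	{
		int ctx;	/* assigned below but never read: warns under -Wall */

		ctx = 42;
		return 0;
	}

     So one option might be to drop the local and its assignment here
     and only introduce them where the context is actually used.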

> @@ -179,6 +188,13 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
>  		goto fail;
>  	}
>  
> +	gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
> +				    GFP_KERNEL);
> +	if (!gdma_cq_allocated) {
> +		ret = -ENOMEM;
> +		goto fail;
> +	}
> +
>  	qp->port = port;
>  
>  	for (i = 0; i < ind_tbl_size; i++) {

...

> @@ -219,6 +236,21 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
>  		resp.entries[i].wqid = wq->id;
>  
>  		mana_ind_table[i] = wq->rx_object;
> +
> +		/* Create CQ table entry */
> +		WARN_ON(gc->cq_table[cq->id]);
> +		gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
> +		if (!gdma_cq) {
> +			ret = -ENOMEM;
> +			goto fail;
> +		}
> +		gdma_cq_allocated[i] = gdma_cq;
> +
> +		gdma_cq->cq.context = cq;
> +		gdma_cq->type = GDMA_CQ;
> +		gdma_cq->cq.callback = mana_ib_cq_handler;
> +		gdma_cq->id = cq->id;
> +		gc->cq_table[cq->id] = gdma_cq;
>  	}
>  	resp.num_entries = i;
>  
> @@ -238,6 +270,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
>  		goto fail;
>  	}
>  
> +	kfree(gdma_cq_allocated);
>  	kfree(mana_ind_table);
>  
>  	return 0;
> @@ -247,8 +280,15 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
>  		ibwq = ind_tbl->ind_tbl[i];
>  		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
>  		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
> +
> +		if (gdma_cq_allocated[i]) {

nit: It is not clear to me that this condition can ever be false.
     If we get here then gdma_cq_allocated[i] is a valid pointer.
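
     If that is indeed the case, the unwind could probably be
     simplified to something like the following (untested, based only
     on the hunks quoted above):

		gc->cq_table[gdma_cq_allocated[i]->id] = NULL;
		kfree(gdma_cq_allocated[i]);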

> +			gc->cq_table[gdma_cq_allocated[i]->id] =
> +				NULL;
> +			kfree(gdma_cq_allocated[i]);
> +		}
>  	}
>  
> +	kfree(gdma_cq_allocated);
>  	kfree(mana_ind_table);
>  
>  	return ret;

...
