Message-ID: <20250326101419.GZ892515@horms.kernel.org>
Date: Wed, 26 Mar 2025 10:14:19 +0000
From: Simon Horman <horms@...nel.org>
To: Xin Tian <tianx@...silicon.com>
Cc: netdev@...r.kernel.org, leon@...nel.org, andrew+netdev@...n.ch,
	kuba@...nel.org, pabeni@...hat.com, edumazet@...gle.com,
	davem@...emloft.net, jeff.johnson@....qualcomm.com,
	przemyslaw.kitszel@...el.com, weihg@...silicon.com,
	wanry@...silicon.com, jacky@...silicon.com,
	parthiban.veerasooran@...rochip.com, masahiroy@...nel.org,
	kalesh-anakkur.purayil@...adcom.com, geert+renesas@...der.be,
	geert@...ux-m68k.org
Subject: Re: [PATCH net-next v9 11/14] xsc: ndo_open and ndo_stop

On Tue, Mar 18, 2025 at 11:15:16PM +0800, Xin Tian wrote:

...

> diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/main.c b/drivers/net/ethernet/yunsilicon/xsc/net/main.c

...

> +static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter,
> +				   struct xsc_rq_param *prq_param,
> +				   struct xsc_eth_channels *chls,
> +				   unsigned int num_chl)
> +{
> +	u8 q_log_size = prq_param->rq_attr.q_log_size;
> +	struct xsc_create_multiqp_mbox_in *in;
> +	struct xsc_create_qp_request *req;
> +	unsigned int hw_npages;
> +	struct xsc_channel *c;
> +	int ret = 0, err = 0;
> +	struct xsc_rq *prq;
> +	int paslen = 0;
> +	int entry_len;
> +	u32 rqn_base;
> +	int i, j, n;
> +	int inlen;
> +
> +	for (i = 0; i < num_chl; i++) {
> +		c = &chls->c[i];
> +
> +		for (j = 0; j < c->qp.rq_num; j++) {
> +			prq = &c->qp.rq[j];
> +			ret = xsc_eth_alloc_rq(c, prq, prq_param);
> +			if (ret)
> +				goto err_alloc_rqs;
> +
> +			hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size,
> +						 PAGE_SIZE_4K);
> +			/*support different npages number smoothly*/
> +			entry_len = sizeof(struct xsc_create_qp_request) +
> +				sizeof(__be64) * hw_npages;

Hi Xin Tian,

Here entry_len is calculated for each entry, prq, of c->qp.rq,
based on prq->wq_ctrl.buf.size (via hw_npages), so it may differ
from one entry to the next.

> +
> +			paslen += entry_len;
> +		}
> +	}
> +
> +	inlen = sizeof(struct xsc_create_multiqp_mbox_in) + paslen;
> +	in = kvzalloc(inlen, GFP_KERNEL);
> +	if (!in) {
> +		ret = -ENOMEM;
> +		goto err_create_rss_rqs;
> +	}
> +
> +	in->qp_num = cpu_to_be16(num_chl);
> +	in->qp_type = XSC_QUEUE_TYPE_RAW;
> +	in->req_len = cpu_to_be32(inlen);
> +
> +	req = (struct xsc_create_qp_request *)&in->data[0];
> +	n = 0;
> +	for (i = 0; i < num_chl; i++) {
> +		c = &chls->c[i];
> +		for (j = 0; j < c->qp.rq_num; j++) {
> +			prq = &c->qp.rq[j];
> +
> +			hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size,
> +						 PAGE_SIZE_4K);
> +			/* no use for eth */
> +			req->input_qpn = cpu_to_be16(0);
> +			req->qp_type = XSC_QUEUE_TYPE_RAW;
> +			req->log_rq_sz = ilog2(adapter->xdev->caps.recv_ds_num)
> +						+ q_log_size;
> +			req->pa_num = cpu_to_be16(hw_npages);
> +			req->cqn_recv = cpu_to_be16(prq->cq.xcq.cqn);
> +			req->cqn_send = req->cqn_recv;
> +			req->glb_funcid =
> +				cpu_to_be16(adapter->xdev->glb_func_id);
> +
> +			xsc_core_fill_page_frag_array(&prq->wq_ctrl.buf,
> +						      &req->pas[0],
> +						      hw_npages);
> +			n++;
> +			req = (struct xsc_create_qp_request *)
> +				(&in->data[0] + entry_len * n);

But here the entry_len value computed for the last entry of c->qp.rq
of the last channel in the previous for loop is used to advance req
for every entry of every channel.

Is this correct?

Flagged by Smatch.

> +		}
> +	}
> +
> +	ret = xsc_core_eth_create_rss_qp_rqs(adapter->xdev, in, inlen,
> +					     &rqn_base);
> +	kvfree(in);
> +	if (ret)
> +		goto err_create_rss_rqs;
> +
> +	n = 0;
> +	for (i = 0; i < num_chl; i++) {
> +		c = &chls->c[i];
> +		for (j = 0; j < c->qp.rq_num; j++) {
> +			prq = &c->qp.rq[j];
> +			prq->rqn = rqn_base + n;
> +			prq->cqp.qpn = prq->rqn;
> +			prq->cqp.event = xsc_eth_qp_event;
> +			prq->cqp.eth_queue_type = XSC_RES_RQ;
> +			ret = xsc_core_create_resource_common(adapter->xdev,
> +							      &prq->cqp);
> +			if (ret) {
> +				err = ret;
> +				netdev_err(adapter->netdev,
> +					   "create resource common error qp:%d errno:%d\n",
> +					   prq->rqn, ret);
> +				continue;
> +			}
> +
> +			n++;
> +		}
> +	}
> +	if (err)
> +		return err;
> +
> +	adapter->channels.rqn_base = rqn_base;
> +	return 0;
> +
> +err_create_rss_rqs:
> +	i = num_chl;
> +err_alloc_rqs:
> +	for (--i; i >= 0; i--) {
> +		c = &chls->c[i];
> +		for (j = 0; j < c->qp.rq_num; j++) {
> +			prq = &c->qp.rq[j];
> +			xsc_free_qp_rq(prq);
> +		}
> +	}
> +	return ret;
> +}
