[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <cf5d9158-4833-4355-8e4d-0894411d0d46@linux.dev>
Date: Tue, 26 Aug 2025 17:42:46 +0100
From: Vadim Fedorenko <vadim.fedorenko@...ux.dev>
To: Fan Gong <gongfan1@...wei.com>, Zhu Yikai <zhuyikai1@...artners.com>
Cc: netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Simon Horman <horms@...nel.org>, Andrew Lunn <andrew+netdev@...n.ch>,
linux-doc@...r.kernel.org, Jonathan Corbet <corbet@....net>,
Bjorn Helgaas <helgaas@...nel.org>, luosifu <luosifu@...wei.com>,
Xin Guo <guoxin09@...wei.com>, Shen Chenyang <shenchenyang1@...ilicon.com>,
Zhou Shuai <zhoushuai28@...wei.com>, Wu Like <wulike1@...wei.com>,
Shi Jing <shijing34@...wei.com>, Meny Yossefi <meny.yossefi@...wei.com>,
Gur Stavi <gur.stavi@...wei.com>, Lee Trager <lee@...ger.us>,
Michael Ellerman <mpe@...erman.id.au>, Suman Ghosh <sumang@...vell.com>,
Przemek Kitszel <przemyslaw.kitszel@...el.com>,
Christophe JAILLET <christophe.jaillet@...adoo.fr>
Subject: Re: [PATCH net-next v01 08/12] hinic3: Queue pair context
initialization
On 26/08/2025 10:05, Fan Gong wrote:
> Initialize queue pair context of hardware interaction.
>
> Co-developed-by: Xin Guo <guoxin09@...wei.com>
> Signed-off-by: Xin Guo <guoxin09@...wei.com>
> Co-developed-by: Zhu Yikai <zhuyikai1@...artners.com>
> Signed-off-by: Zhu Yikai <zhuyikai1@...artners.com>
> Signed-off-by: Fan Gong <gongfan1@...wei.com>
> ---
A few styling nits below — since you'll be sending another version anyway,
it would be great to fix them there.
[...]
> +static int init_sq_ctxts(struct hinic3_nic_dev *nic_dev)
> +{
> + struct hinic3_nic_io *nic_io = nic_dev->nic_io;
> + struct hinic3_hwdev *hwdev = nic_dev->hwdev;
> + struct hinic3_sq_ctxt_block *sq_ctxt_block;
> + u16 q_id, curr_id, max_ctxts, i;
> + struct hinic3_sq_ctxt *sq_ctxt;
> + struct hinic3_cmd_buf *cmd_buf;
> + struct hinic3_io_queue *sq;
> + __le64 out_param;
> + int err = 0;
> +
> + cmd_buf = hinic3_alloc_cmd_buf(hwdev);
> + if (!cmd_buf) {
> + dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
> + return -ENOMEM;
> + }
> +
> + q_id = 0;
> + while (q_id < nic_io->num_qps) {
> + sq_ctxt_block = cmd_buf->buf;
> + sq_ctxt = sq_ctxt_block->sq_ctxt;
> +
> + max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
> + HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
> +
> + hinic3_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
> + HINIC3_QP_CTXT_TYPE_SQ, max_ctxts,
> + q_id);
> +
> + for (i = 0; i < max_ctxts; i++) {
> + curr_id = q_id + i;
> + sq = &nic_io->sq[curr_id];
> + hinic3_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]);
> + }
> +
> + hinic3_cmdq_buf_swab32(sq_ctxt_block, sizeof(*sq_ctxt_block));
> +
> + cmd_buf->size = cpu_to_le16(SQ_CTXT_SIZE(max_ctxts));
> + err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
> + L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
> + cmd_buf, &out_param);
> + if (err || out_param != 0) {
no need for "!= 0" ...
> + dev_err(hwdev->dev, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n",
> + err, out_param);
> + err = -EFAULT;
> + break;
> + }
> +
> + q_id += max_ctxts;
> + }
> +
> + hinic3_free_cmd_buf(hwdev, cmd_buf);
> +
> + return err;
> +}
> +
> +static int init_rq_ctxts(struct hinic3_nic_dev *nic_dev)
> +{
> + struct hinic3_nic_io *nic_io = nic_dev->nic_io;
> + struct hinic3_hwdev *hwdev = nic_dev->hwdev;
> + struct hinic3_rq_ctxt_block *rq_ctxt_block;
> + u16 q_id, curr_id, max_ctxts, i;
> + struct hinic3_rq_ctxt *rq_ctxt;
> + struct hinic3_cmd_buf *cmd_buf;
> + struct hinic3_io_queue *rq;
> + __le64 out_param;
> + int err = 0;
> +
> + cmd_buf = hinic3_alloc_cmd_buf(hwdev);
> + if (!cmd_buf) {
> + dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
> + return -ENOMEM;
> + }
> +
> + q_id = 0;
> + while (q_id < nic_io->num_qps) {
> + rq_ctxt_block = cmd_buf->buf;
> + rq_ctxt = rq_ctxt_block->rq_ctxt;
> +
> + max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
> + HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
> +
> + hinic3_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
> + HINIC3_QP_CTXT_TYPE_RQ, max_ctxts,
> + q_id);
> +
> + for (i = 0; i < max_ctxts; i++) {
> + curr_id = q_id + i;
> + rq = &nic_io->rq[curr_id];
> + hinic3_rq_prepare_ctxt(rq, &rq_ctxt[i]);
> + }
> +
> + hinic3_cmdq_buf_swab32(rq_ctxt_block, sizeof(*rq_ctxt_block));
> +
> + cmd_buf->size = cpu_to_le16(RQ_CTXT_SIZE(max_ctxts));
> +
> + err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
> + L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
> + cmd_buf, &out_param);
> + if (err || out_param != 0) {
... here as well
> + dev_err(hwdev->dev, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n",
> + err, out_param);
> + err = -EFAULT;
> + break;
> + }
> +
> + q_id += max_ctxts;
> + }
> +
> + hinic3_free_cmd_buf(hwdev, cmd_buf);
> +
> + return err;
> +}
[...]
> +static int clean_queue_offload_ctxt(struct hinic3_nic_dev *nic_dev,
> + enum hinic3_qp_ctxt_type ctxt_type)
> +{
> + struct hinic3_nic_io *nic_io = nic_dev->nic_io;
> + struct hinic3_hwdev *hwdev = nic_dev->hwdev;
> + struct hinic3_clean_queue_ctxt *ctxt_block;
> + struct hinic3_cmd_buf *cmd_buf;
> + __le64 out_param;
> + int err;
> +
> + cmd_buf = hinic3_alloc_cmd_buf(hwdev);
> + if (!cmd_buf) {
> + dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
> + return -ENOMEM;
> + }
> +
> + ctxt_block = cmd_buf->buf;
> + ctxt_block->cmdq_hdr.num_queues = cpu_to_le16(nic_io->max_qps);
> + ctxt_block->cmdq_hdr.queue_type = cpu_to_le16(ctxt_type);
> + ctxt_block->cmdq_hdr.start_qid = 0;
> + ctxt_block->cmdq_hdr.rsvd = 0;
> + ctxt_block->rsvd = 0;
> +
> + hinic3_cmdq_buf_swab32(ctxt_block, sizeof(*ctxt_block));
> +
> + cmd_buf->size = cpu_to_le16(sizeof(*ctxt_block));
> +
> + err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
> + L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX,
> + cmd_buf, &out_param);
> + if ((err) || (out_param)) {
no need for the extra parentheses
> + dev_err(hwdev->dev, "Failed to clean queue offload ctxts, err: %d,out_param: 0x%llx\n",
> + err, out_param);
> +
> + err = -EFAULT;
> + }
> +
> + hinic3_free_cmd_buf(hwdev, cmd_buf);
Powered by blists - more mailing lists