Message-ID: <f030aeab-b503-8381-53f5-15862e1333b0@linux.alibaba.com>
Date: Thu, 21 Jul 2022 17:17:03 +0800
From: Cheng Xu <chengyou@...ux.alibaba.com>
To: longli@...rosoft.com, "K. Y. Srinivasan" <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Stephen Hemminger <sthemmin@...rosoft.com>,
Wei Liu <wei.liu@...nel.org>, Dexuan Cui <decui@...rosoft.com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Jason Gunthorpe <jgg@...pe.ca>,
Leon Romanovsky <leon@...nel.org>, edumazet@...gle.com,
shiraz.saleem@...el.com, Ajay Sharma <sharmaajay@...rosoft.com>
Cc: linux-hyperv@...r.kernel.org, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-rdma@...r.kernel.org
Subject: Re: [Patch v4 12/12] RDMA/mana_ib: Add a driver for Microsoft Azure
Network Adapter
On 6/16/22 10:07 AM, longli@...uxonhyperv.com wrote:
> From: Long Li <longli@...rosoft.com>
>
<...>
> +
> +static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
> + struct ib_qp_init_attr *attr,
> + struct ib_udata *udata)
> +{
> + struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
> + struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
> + struct mana_ib_dev *mdev =
> + container_of(ibpd->device, struct mana_ib_dev, ib_dev);
> + struct mana_ib_cq *send_cq =
> + container_of(attr->send_cq, struct mana_ib_cq, ibcq);
> + struct ib_ucontext *ib_ucontext = ibpd->uobject->context;
> + struct mana_ib_create_qp_resp resp = {};
> + struct mana_ib_ucontext *mana_ucontext;
> + struct gdma_dev *gd = mdev->gdma_dev;
> + struct mana_ib_create_qp ucmd = {};
> + struct mana_obj_spec wq_spec = {};
> + struct mana_obj_spec cq_spec = {};
> + struct mana_port_context *mpc;
> + struct mana_context *mc;
> + struct net_device *ndev;
> + struct ib_umem *umem;
> + int err;
> + u32 port;
> +
> + mana_ucontext =
> + container_of(ib_ucontext, struct mana_ib_ucontext, ibucontext);
> + mc = gd->driver_data;
> +
> + if (udata->inlen < sizeof(ucmd))
> + return -EINVAL;
> +
> + err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
> + if (err) {
> + ibdev_dbg(&mdev->ib_dev,
> + "Failed to copy from udata create qp-raw, %d\n", err);
> + return -EFAULT;
> + }
> +
> + /* IB ports start with 1, MANA Ethernet ports start with 0 */
> + port = ucmd.port;
> + if (ucmd.port > mc->num_ports)
> + return -EINVAL;
> +
> + if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
> + ibdev_dbg(&mdev->ib_dev,
> + "Requested max_send_wr %d exceeding limit\n",
> + attr->cap.max_send_wr);
> + return -EINVAL;
> + }
> +
> + if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
> + ibdev_dbg(&mdev->ib_dev,
> + "Requested max_send_sge %d exceeding limit\n",
> + attr->cap.max_send_sge);
> + return -EINVAL;
> + }
> +
> + ndev = mc->ports[port - 1];
> + mpc = netdev_priv(ndev);
> + ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);
> +
> + err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell);
> + if (err)
> + return -ENODEV;
> +
> + qp->port = port;
> +
> + ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
> + ucmd.sq_buf_addr, ucmd.port);
> +
> + umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
> + IB_ACCESS_LOCAL_WRITE);
> + if (IS_ERR(umem)) {
> + err = PTR_ERR(umem);
> + ibdev_dbg(&mdev->ib_dev,
> + "Failed to get umem for create qp-raw, err %d\n",
> + err);
> + goto err_free_vport;
> + }
> + qp->sq_umem = umem;
> +
> + err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
> + &qp->sq_gdma_region, PAGE_SIZE);
> + if (err) {
> + ibdev_err(&mdev->ib_dev,
> + "Failed to create dma region for create qp-raw, %d\n",
> + err);
It is better not to print (at error level) in paths that userspace can trigger.
The same issue exists in some other paths as well.
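
For example (just an untested sketch to show what I mean; the exact wording
and the unwind label are whatever the patch already uses), the ibdev_err()
above could be downgraded to ibdev_dbg(), so userspace cannot flood the
kernel log by repeatedly triggering this failure:

	err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
					   &qp->sq_gdma_region, PAGE_SIZE);
	if (err) {
		/* dbg level: userspace can hit this path at will */
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create dma region for create qp-raw, %d\n",
			  err);
		/* ... error unwinding unchanged ... */
	}
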
Thanks,
Cheng Xu