Message-ID: <CALDO+SY1uUBC7DzxYUV3agxzYirrDe9PON+P0Wv-eh1Bhq+SVg@mail.gmail.com>
Date: Sat, 25 Mar 2023 10:02:22 -0700
From: William Tu <u9012063@...il.com>
To: Horatiu Vultur <horatiu.vultur@...rochip.com>
Cc: netdev@...r.kernel.org, jsankararama@...are.com, gyang@...are.com,
doshir@...are.com, alexander.duyck@...il.com,
alexandr.lobakin@...el.com, bang@...are.com,
maciej.fijalkowski@...el.com, witu@...dia.com,
Alexander Duyck <alexanderduyck@...com>
Subject: Re: [PATCH RFC v18 net-next] vmxnet3: Add XDP support.
Hi Horatiu,
Thanks for your review and for finding more issues!
On Sun, Mar 19, 2023 at 10:20 AM Horatiu Vultur
<horatiu.vultur@...rochip.com> wrote:
>
> The 03/18/2023 14:49, William Tu wrote:
>
> Hi William,
>
> ...
>
> > +
> > +static int
> > +vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
> > +                       struct xdp_frame *xdpf,
> > +                       struct vmxnet3_tx_queue *tq, bool dma_map)
> > +{
> > +        struct vmxnet3_tx_buf_info *tbi = NULL;
> > +        union Vmxnet3_GenericDesc *gdesc;
> > +        struct vmxnet3_tx_ctx ctx;
> > +        int tx_num_deferred;
> > +        struct page *page;
> > +        u32 buf_size;
> > +        int ret = 0;
>
> This doesn't seem to be used anywhere, so it can be removed.
Yes, I will drop it and just return 0 at the end.
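I.e., roughly this incremental change on top of the patch:

        -        int ret = 0;
        [...]
        -        return ret;
        +        return 0;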
>
> > +        u32 dw2;
> > +
> > +        dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
> > +        dw2 |= xdpf->len;
> > +        ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
> > +        gdesc = ctx.sop_txd;
> > +
> > +        buf_size = xdpf->len;
> > +        tbi = tq->buf_info + tq->tx_ring.next2fill;
> > +
> > +        if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
> > +                tq->stats.tx_ring_full++;
> > +                return -ENOSPC;
> > +        }
> > +
> > +        tbi->map_type = VMXNET3_MAP_XDP;
> > +        if (dma_map) { /* ndo_xdp_xmit */
> > +                tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
> > +                                               xdpf->data, buf_size,
> > +                                               DMA_TO_DEVICE);
> > +                if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
> > +                        return -EFAULT;
> > +                tbi->map_type |= VMXNET3_MAP_SINGLE;
> > +        } else { /* XDP buffer from page pool */
> > +                page = virt_to_page(xdpf->data);
> > +                tbi->dma_addr = page_pool_get_dma_addr(page) +
> > +                                XDP_PACKET_HEADROOM;
>
> Shouldn't this be VMXNET3_XDP_HEADROOM?
Yes, will fix it.
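So this line becomes (untested):

        tbi->dma_addr = page_pool_get_dma_addr(page) +
                        VMXNET3_XDP_HEADROOM;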
>
> > +                dma_sync_single_for_device(&adapter->pdev->dev,
> > +                                           tbi->dma_addr, buf_size,
> > +                                           DMA_BIDIRECTIONAL);
>
> Shouldn't this be DMA_TO_DEVICE instead of DMA_BIDIRECTIONAL?
Sure, will fix it.
Originally I looked at stmmac_main.c and thought this was an optimization,
since at page pool creation time we set the direction to DMA_BIDIRECTIONAL.
I guess that's not applicable here.
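So the sync here becomes (untested):

        dma_sync_single_for_device(&adapter->pdev->dev,
                                   tbi->dma_addr, buf_size,
                                   DMA_TO_DEVICE);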
>
> > +        }
> > +        tbi->xdpf = xdpf;
> > +        tbi->len = buf_size;
> > +
> > +        gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
> > +        WARN_ON_ONCE(gdesc->txd.gen == tq->tx_ring.gen);
> > +
> > +        gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
> > +        gdesc->dword[2] = cpu_to_le32(dw2);
> > +
> > +        /* Setup the EOP desc */
> > +        gdesc->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
> > +
> > +        gdesc->txd.om = 0;
> > +        gdesc->txd.msscof = 0;
> > +        gdesc->txd.hlen = 0;
> > +        gdesc->txd.ti = 0;
> > +
> > +        tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
> > +        le32_add_cpu(&tq->shared->txNumDeferred, 1);
> > +        tx_num_deferred++;
> > +
> > +        vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
> > +
> > +        /* set the last buf_info for the pkt */
> > +        tbi->sop_idx = ctx.sop_txd - tq->tx_ring.base;
> > +
> > +        dma_wmb();
> > +        gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
> > +                                      VMXNET3_TXD_GEN);
> > +
> > +        /* No need to handle the case when tx_num_deferred doesn't reach
> > +         * threshold. Backend driver at hypervisor side will poll and reset
> > +         * tq->shared->txNumDeferred to 0.
> > +         */
> > +        if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
> > +                tq->shared->txNumDeferred = 0;
> > +                VMXNET3_WRITE_BAR0_REG(adapter,
> > +                                       VMXNET3_REG_TXPROD + tq->qid * 8,
> > +                                       tq->tx_ring.next2fill);
> > +        }
> > +
> > +        return ret;
> > +}
>
> ...
>
> > +static int
> > +vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,
> > +                struct bpf_prog *prog)
> > +{
> > +        struct xdp_frame *xdpf;
> > +        struct page *page;
> > +        int err;
> > +        u32 act;
> > +
> > +        act = bpf_prog_run_xdp(prog, xdp);
> > +        rq->stats.xdp_packets++;
> > +        page = virt_to_page(xdp->data_hard_start);
> > +
> > +        switch (act) {
> > +        case XDP_PASS:
> > +                return act;
> > +        case XDP_REDIRECT:
> > +                err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);
> > +                if (!err)
> > +                        rq->stats.xdp_redirects++;
> > +                else
> > +                        rq->stats.xdp_drops++;
> > +                return act;
> > +        case XDP_TX:
> > +                xdpf = xdp_convert_buff_to_frame(xdp);
>
> If you want, I think you can drop xdp_convert_buff_to_frame() and pass
> directly the page. And then inside vmxnet3_unmap_pkt() you can use
> page_pool_recycle_direct().
> Of course this requires a few other changes (I think you need a new
> map_type), but in the end you might save some CPU usage.
Thanks for the suggestion.
Will leave this for future work.
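Just to capture the idea for later, I think the completion side would end up
looking roughly like this (VMXNET3_MAP_PAGE_POOL and tbi->page are
hypothetical names here, and this assumes the Tx completion path can reach
the Rx queue's page pool; untested sketch, not part of this patch):

        /* in vmxnet3_unmap_pkt(): recycle page-pool pages directly
         * instead of going through xdp_return_frame().
         * (tbi->page and VMXNET3_MAP_PAGE_POOL don't exist yet.)
         */
        if (tbi->map_type & VMXNET3_MAP_PAGE_POOL)
                page_pool_recycle_direct(rq->page_pool, tbi->page);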
Thanks!
William