Message-ID: <20191029081851.GA23615@netronome.com>
Date: Tue, 29 Oct 2019 09:18:52 +0100
From: Simon Horman <simon.horman@...ronome.com>
To: Yangchun Fu <yangchun@...gle.com>
Cc: netdev@...r.kernel.org, Catherine Sullivan <csully@...gle.com>
Subject: Re: [PATCH net] gve: Fixes DMA synchronization.
Hi Yangchun,

Thanks for your patch.
On Mon, Oct 28, 2019 at 11:23:09AM -0700, Yangchun Fu wrote:
> Syncs the DMA buffer properly in order for CPU and device to see
> the most up-to-date data.
>
> Signed-off-by: Yangchun Fu <yangchun@...gle.com>
> Reviewed-by: Catherine Sullivan <csully@...gle.com>
> ---
> drivers/net/ethernet/google/gve/gve_rx.c | 2 ++
> drivers/net/ethernet/google/gve/gve_tx.c | 26 ++++++++++++++++++++++--
> 2 files changed, 26 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
> index 59564ac99d2a..edec61dfc868 100644
> --- a/drivers/net/ethernet/google/gve/gve_rx.c
> +++ b/drivers/net/ethernet/google/gve/gve_rx.c
> @@ -289,6 +289,8 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
>
> len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
> page_info = &rx->data.page_info[idx];
> + dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
> + PAGE_SIZE, DMA_FROM_DEVICE);
>
> /* gvnic can only receive into registered segments. If the buffer
> * can't be recycled, our only choice is to copy the data out of
> diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
> index 778b87b5a06c..d8342b7b9764 100644
> --- a/drivers/net/ethernet/google/gve/gve_tx.c
> +++ b/drivers/net/ethernet/google/gve/gve_tx.c
> @@ -390,7 +390,23 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
> seg_desc->seg.seg_addr = cpu_to_be64(addr);
> }
>
> -static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
> +static inline void gve_dma_sync_for_device(struct gve_priv *priv,
It seems that only priv->pdev->dev is used in this function. Perhaps it
would be better to pass the struct device * to this function rather than
all of priv; a sketch follows after the end of the quoted function below.
> + dma_addr_t *page_buses,
> + u64 iov_offset, u64 iov_len)
> +{
> + u64 addr;
> + dma_addr_t dma;
> +
> + for (addr = iov_offset; addr < iov_offset + iov_len;
> + addr += PAGE_SIZE) {
> + dma = page_buses[addr / PAGE_SIZE];
> + dma_sync_single_for_device(&priv->pdev->dev, dma, PAGE_SIZE,
> + DMA_TO_DEVICE);
> + }
> +}
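Something like this, perhaps (untested sketch, same names as in your
patch with only the first parameter changed):

    static inline void gve_dma_sync_for_device(struct device *dev,
                                               dma_addr_t *page_buses,
                                               u64 iov_offset, u64 iov_len)
    {
            u64 addr;
            dma_addr_t dma;

            for (addr = iov_offset; addr < iov_offset + iov_len;
                 addr += PAGE_SIZE) {
                    dma = page_buses[addr / PAGE_SIZE];
                    /* Flush CPU writes to this FIFO page before
                     * the device reads it.
                     */
                    dma_sync_single_for_device(dev, dma, PAGE_SIZE,
                                               DMA_TO_DEVICE);
            }
    }

with the call sites in gve_tx_add_skb() passing &priv->pdev->dev.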
> +
> +static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
> + struct gve_priv *priv)
> {
> int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
> union gve_tx_desc *pkt_desc, *seg_desc;
> @@ -432,6 +448,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
> skb_copy_bits(skb, 0,
> tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
> hlen);
> + gve_dma_sync_for_device(priv, tx->tx_fifo.qpl->page_buses,
> + info->iov[hdr_nfrags - 1].iov_offset,
> + info->iov[hdr_nfrags - 1].iov_len);
> copy_offset = hlen;
>
> for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
> @@ -445,6 +464,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
> skb_copy_bits(skb, copy_offset,
> tx->tx_fifo.base + info->iov[i].iov_offset,
> info->iov[i].iov_len);
> + gve_dma_sync_for_device(priv, tx->tx_fifo.qpl->page_buses,
> + info->iov[i].iov_offset,
> + info->iov[i].iov_len);
> copy_offset += info->iov[i].iov_len;
> }
>
> @@ -473,7 +495,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
> gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
> return NETDEV_TX_BUSY;
> }
> - nsegs = gve_tx_add_skb(tx, skb);
> + nsegs = gve_tx_add_skb(tx, skb, priv);
>
> netdev_tx_sent_queue(tx->netdev_txq, skb->len);
> skb_tx_timestamp(skb);
> --
> 2.24.0.rc0.303.g954a862665-goog
>