Message-ID: <AM0PR04MB555448A226207D57557929B5EC680@AM0PR04MB5554.eurprd04.prod.outlook.com>
Date: Tue, 22 Oct 2019 09:10:16 +0000
From: Madalin-cristian Bucur <madalin.bucur@....com>
To: Laurentiu Tudor <laurentiu.tudor@....com>,
"davem@...emloft.net" <davem@...emloft.net>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>
CC: Roy Pledge <roy.pledge@....com>
Subject: RE: [PATCH net-next 5/6] dpaa_eth: change DMA device
> -----Original Message-----
> From: Laurentiu Tudor
> Sent: Tuesday, October 22, 2019 11:50 AM
> To: Madalin-cristian Bucur <madalin.bucur@....com>; davem@...emloft.net;
> netdev@...r.kernel.org
> Cc: Roy Pledge <roy.pledge@....com>
> Subject: Re: [PATCH net-next 5/6] dpaa_eth: change DMA device
>
> Hello,
>
> On 21.10.2019 15:28, Madalin-cristian Bucur wrote:
> > The DPAA Ethernet driver is using the FMan MAC as the device for DMA
> > mapping. This is not actually correct, as the real DMA device is the
> > FMan port (the FMan Rx port for reception and the FMan Tx port for
> > transmission). Change the device used for DMA mapping to the FMan
> > Rx and Tx port devices.
> >
> > Signed-off-by: Madalin Bucur <madalin.bucur@....com>
> > Signed-off-by: Laurentiu Tudor <laurentiu.tudor@....com>
> > ---
> > drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 105 +++++++++++++------------
> > drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 8 +-
> > 2 files changed, 62 insertions(+), 51 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
> > index 8d5686d88d30..639cafaa59b8 100644
> > --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
> > +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
> > @@ -1335,15 +1335,15 @@ static void dpaa_fd_release(const struct net_device *net_dev,
> > vaddr = phys_to_virt(qm_fd_addr(fd));
> > sgt = vaddr + qm_fd_get_offset(fd);
> >
> > - dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
> > - DMA_FROM_DEVICE);
> > + dma_unmap_single(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
> > + dpaa_bp->size, DMA_FROM_DEVICE);
> >
> > dpaa_release_sgt_members(sgt);
> >
> > - addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
> > - DMA_FROM_DEVICE);
> > - if (dma_mapping_error(dpaa_bp->dev, addr)) {
> > - dev_err(dpaa_bp->dev, "DMA mapping failed");
> > + addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, vaddr,
> > + dpaa_bp->size, DMA_FROM_DEVICE);
> > + if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
> > + netdev_err(net_dev, "DMA mapping failed");
> > return;
> > }
> > bm_buffer_set64(&bmb, addr);
> > @@ -1488,7 +1488,7 @@ static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
> >
> > static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
> > {
> > - struct device *dev = dpaa_bp->dev;
> > + struct net_device *net_dev = dpaa_bp->priv->net_dev;
> > struct bm_buffer bmb[8];
> > dma_addr_t addr;
> > void *new_buf;
> > @@ -1497,16 +1497,18 @@ static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
> > for (i = 0; i < 8; i++) {
> > new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
> > if (unlikely(!new_buf)) {
> > - dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
> > - dpaa_bp->raw_size);
> > + netdev_err(net_dev,
> > + "netdev_alloc_frag() failed, size %zu\n",
> > + dpaa_bp->raw_size);
> > goto release_previous_buffs;
> > }
> > new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
> >
> > - addr = dma_map_single(dev, new_buf,
> > + addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, new_buf,
> > dpaa_bp->size, DMA_FROM_DEVICE);
> > - if (unlikely(dma_mapping_error(dev, addr))) {
> > - dev_err(dpaa_bp->dev, "DMA map failed");
> > + if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
> > + addr))) {
> > + netdev_err(net_dev, "DMA map failed");
> > goto release_previous_buffs;
> > }
> >
> > @@ -1634,7 +1636,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
> >
> > if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
> > nr_frags = skb_shinfo(skb)->nr_frags;
> > - dma_unmap_single(dev, addr,
> > + dma_unmap_single(priv->tx_dma_dev, addr,
> > qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
> > dma_dir);
> >
> > @@ -1644,21 +1646,21 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
> > sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
> >
> > /* sgt[0] is from lowmem, was dma_map_single()-ed */
> > - dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
> > + dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
> > qm_sg_entry_get_len(&sgt[0]), dma_dir);
> >
> > /* remaining pages were mapped with skb_frag_dma_map() */
> > for (i = 1; i <= nr_frags; i++) {
> > WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
> >
> > - dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
> > + dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
> > qm_sg_entry_get_len(&sgt[i]), dma_dir);
> > }
> >
> > /* Free the page frag that we allocated on Tx */
> > skb_free_frag(phys_to_virt(addr));
> > } else {
> > - dma_unmap_single(dev, addr,
> > + dma_unmap_single(priv->tx_dma_dev, addr,
> > skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
> > }
> >
> > @@ -1762,8 +1764,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
> > goto free_buffers;
> >
> > count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
> > - dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
> > - DMA_FROM_DEVICE);
> > + dma_unmap_single(dpaa_bp->priv->rx_dma_dev, sg_addr,
> > + dpaa_bp->size, DMA_FROM_DEVICE);
> > if (!skb) {
> > sz = dpaa_bp->size +
> > SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
> > @@ -1853,7 +1855,6 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
> > int *offset)
> > {
> > struct net_device *net_dev = priv->net_dev;
> > - struct device *dev = net_dev->dev.parent;
> > enum dma_data_direction dma_dir;
> > unsigned char *buffer_start;
> > struct sk_buff **skbh;
> > @@ -1889,9 +1890,9 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
> > fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
> >
> > /* Map the entire buffer size that may be seen by FMan, but no more */
> > - addr = dma_map_single(dev, skbh,
> > + addr = dma_map_single(priv->tx_dma_dev, skbh,
> > skb_tail_pointer(skb) - buffer_start, dma_dir);
> > - if (unlikely(dma_mapping_error(dev, addr))) {
> > + if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
> > if (net_ratelimit())
> > netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
> > return -EINVAL;
> > @@ -1907,7 +1908,6 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
> > const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
> > const int nr_frags = skb_shinfo(skb)->nr_frags;
> > struct net_device *net_dev = priv->net_dev;
> > - struct device *dev = net_dev->dev.parent;
> > struct qm_sg_entry *sgt;
> > struct sk_buff **skbh;
> > int i, j, err, sz;
> > @@ -1946,10 +1946,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
> > qm_sg_entry_set_len(&sgt[0], frag_len);
> > sgt[0].bpid = FSL_DPAA_BPID_INV;
> > sgt[0].offset = 0;
> > - addr = dma_map_single(dev, skb->data,
> > + addr = dma_map_single(priv->tx_dma_dev, skb->data,
> > skb_headlen(skb), dma_dir);
> > - if (unlikely(dma_mapping_error(dev, addr))) {
> > - dev_err(dev, "DMA mapping failed");
> > + if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
> > + netdev_err(priv->net_dev, "DMA mapping failed");
> > err = -EINVAL;
> > goto sg0_map_failed;
> > }
> > @@ -1960,10 +1960,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
> > frag = &skb_shinfo(skb)->frags[i];
> > frag_len = skb_frag_size(frag);
> > WARN_ON(!skb_frag_page(frag));
> > - addr = skb_frag_dma_map(dev, frag, 0,
> > + addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
> > frag_len, dma_dir);
> > - if (unlikely(dma_mapping_error(dev, addr))) {
> > - dev_err(dev, "DMA mapping failed");
> > + if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
> > + netdev_err(priv->net_dev, "DMA mapping failed");
> > err = -EINVAL;
> > goto sg_map_failed;
> > }
> > @@ -1986,10 +1986,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
> > skbh = (struct sk_buff **)buffer_start;
> > *skbh = skb;
> >
> > - addr = dma_map_single(dev, buffer_start,
> > + addr = dma_map_single(priv->tx_dma_dev, buffer_start,
> > priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
> > - if (unlikely(dma_mapping_error(dev, addr))) {
> > - dev_err(dev, "DMA mapping failed");
> > + if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
> > + netdev_err(priv->net_dev, "DMA mapping failed");
> > err = -EINVAL;
> > goto sgt_map_failed;
> > }
> > @@ -2003,7 +2003,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
> > sgt_map_failed:
> > sg_map_failed:
> > for (j = 0; j < i; j++)
> > - dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
> > + dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
> > qm_sg_entry_get_len(&sgt[j]), dma_dir);
> > sg0_map_failed:
> > csum_failed:
> > @@ -2304,7 +2304,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
> > return qman_cb_dqrr_consume;
> > }
> >
> > - dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
> > + dma_unmap_single(dpaa_bp->priv->rx_dma_dev, addr, dpaa_bp->size,
> > + DMA_FROM_DEVICE);
> >
> > /* prefetch the first 64 bytes of the frame or the SGT start */
> > vaddr = phys_to_virt(addr);
> > @@ -2659,7 +2660,7 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
> > {
> > dma_addr_t addr = bm_buf_addr(bmb);
> >
> > - dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
> > + dma_unmap_single(bp->priv->rx_dma_dev, addr, bp->size, DMA_FROM_DEVICE);
> >
> > skb_free_frag(phys_to_virt(addr));
> > }
> > @@ -2769,25 +2770,27 @@ static int dpaa_eth_probe(struct platform_device *pdev)
> > int err = 0, i, channel;
> > struct device *dev;
> >
> > + dev = &pdev->dev;
> > +
> > err = bman_is_probed();
> > if (!err)
> > return -EPROBE_DEFER;
> > if (err < 0) {
> > - dev_err(&pdev->dev, "failing probe due to bman probe error\n");
> > + dev_err(dev, "failing probe due to bman probe error\n");
>
> These changes seem unrelated.
The &pdev->dev to dev replacement is not directly related to the incorrect
DMA mapping device; a device had to be used for the prints, and propagating
&pdev->dev everywhere did not look like a good idea. I initially had this in
a separate patch, but it seemed superfluous to add code in one patch and
remove it in another for a change that amounts to almost nothing.
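For reference, the probe code only hoists the device pointer once; a
simplified excerpt of dpaa_eth_probe() after this patch (elided parts
marked with "..."):

	static int dpaa_eth_probe(struct platform_device *pdev)
	{
		struct device *dev;
		int err;

		/* used for prints only; DMA mapping now goes through
		 * the FMan port devices, not this platform device
		 */
		dev = &pdev->dev;

		err = bman_is_probed();
		if (!err)
			return -EPROBE_DEFER;
		if (err < 0) {
			dev_err(dev, "failing probe due to bman probe error\n");
			return -ENODEV;
		}
		...
	}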
> > return -ENODEV;
> > }
> > err = qman_is_probed();
> > if (!err)
> > return -EPROBE_DEFER;
> > if (err < 0) {
> > - dev_err(&pdev->dev, "failing probe due to qman probe error\n");
> > + dev_err(dev, "failing probe due to qman probe error\n");
> > return -ENODEV;
> > }
> > err = bman_portals_probed();
> > if (!err)
> > return -EPROBE_DEFER;
> > if (err < 0) {
> > - dev_err(&pdev->dev,
> > + dev_err(dev,
> > "failing probe due to bman portals probe error\n");
> > return -ENODEV;
> > }
> > @@ -2795,19 +2798,11 @@ static int dpaa_eth_probe(struct platform_device *pdev)
> > if (!err)
> > return -EPROBE_DEFER;
> > if (err < 0) {
> > - dev_err(&pdev->dev,
> > + dev_err(dev,
> > "failing probe due to qman portals probe error\n");
> > return -ENODEV;
> > }
> >
> > - /* device used for DMA mapping */
> > - dev = pdev->dev.parent;
> > - err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
> > - if (err) {
> > - dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
> > - return err;
> > - }
>
> Why are we dropping this explicit setting of the dma mask?
>
> ---
> Best Regards, Laurentiu
Hi Laurentiu, you are probably reviewing these changes with your initial
patch in mind, which (incorrectly) used the same (Rx) port as the DMA
mapping device for both receive and transmit traffic. Please take a
second look at the changes in this patch.
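To make the split explicit: after this patch the receive path maps and
unmaps buffers against the Rx port device, while the transmit path uses
the Tx port device. Simplified excerpts from the diff above:

	/* Rx: buffer pool buffers, e.g. in dpaa_bp_add_8_bufs() */
	addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, new_buf,
			      dpaa_bp->size, DMA_FROM_DEVICE);

	/* Tx: frame data, e.g. in skb_to_contig_fd() */
	addr = dma_map_single(priv->tx_dma_dev, skbh,
			      skb_tail_pointer(skb) - buffer_start, dma_dir);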
Regards,
Madalin
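P.S. The dpaa_eth.h hunk is not quoted above; inferred from the usage in
the diff (this is a sketch, not the actual header), the structures change
roughly like this:

	struct dpaa_bp {
		...
		/* replaces the old 'dev' field: back-pointer used to
		 * reach the Rx DMA device via priv->rx_dma_dev
		 */
		struct dpaa_priv *priv;
		...
	};

	struct dpaa_priv {
		...
		struct device *rx_dma_dev;	/* FMan Rx port device */
		struct device *tx_dma_dev;	/* FMan Tx port device */
		...
	};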