Message-ID: <CANn89iJJWNemzxbyCD4hCZk75Uoxw1nnJ5vLAqM3JGhG_AfqbQ@mail.gmail.com>
Date:   Tue, 4 Apr 2023 07:09:40 +0200
From:   Eric Dumazet <edumazet@...gle.com>
To:     "Russell King (Oracle)" <rmk+kernel@...linux.org.uk>
Cc:     Marek Behún <kabel@...nel.org>,
        Thomas Petazzoni <thomas.petazzoni@...tlin.com>,
        "David S. Miller" <davem@...emloft.net>,
        Jakub Kicinski <kuba@...nel.org>,
        Paolo Abeni <pabeni@...hat.com>, netdev@...r.kernel.org
Subject: Re: [PATCH RFC net-next 1/5] net: mvneta: fix transmit path
 dma-unmapping on error

On Mon, Apr 3, 2023 at 8:30 PM Russell King (Oracle)
<rmk+kernel@...linux.org.uk> wrote:
>
> The transmit code assumes that the transmit descriptors that are used
> begin with the first descriptor in the ring, but this may not be the
> case. Fix this by providing a new function that dma-unmaps a range of
> numbered descriptor entries, and use that to do the unmapping.
>
> Signed-off-by: Russell King (Oracle) <rmk+kernel@...linux.org.uk>

Nice patch series!

I guess this one will need to be backported to stable versions. It
would be nice to add:

Fixes: 2adb719d74f6 ("net: mvneta: Implement software TSO")

Thanks.
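
(For reference, the Fixes: trailer typically goes in the commit's tag
block, just above the Signed-off-by line; using only the lines already
quoted in this thread, the tag block would look roughly like:

    Fixes: 2adb719d74f6 ("net: mvneta: Implement software TSO")
    Signed-off-by: Russell King (Oracle) <rmk+kernel@...linux.org.uk>
)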

> ---
>  drivers/net/ethernet/marvell/mvneta.c | 53 +++++++++++++++++----------
>  1 file changed, 33 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
> index 2cad76d0a50e..62400ff61e34 100644
> --- a/drivers/net/ethernet/marvell/mvneta.c
> +++ b/drivers/net/ethernet/marvell/mvneta.c
> @@ -2714,14 +2714,40 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
>         return 0;
>  }
>
> +static void mvneta_release_descs(struct mvneta_port *pp,
> +                                struct mvneta_tx_queue *txq,
> +                                int first, int num)
> +{
> +       int desc_idx, i;
> +
> +       desc_idx = first + num;
> +       if (desc_idx >= txq->size)
> +               desc_idx -= txq->size;
> +
> +       for (i = num; i >= 0; i--) {
> +               struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx;
> +
> +               if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
> +                       dma_unmap_single(pp->dev->dev.parent,
> +                                        tx_desc->buf_phys_addr,
> +                                        tx_desc->data_size,
> +                                        DMA_TO_DEVICE);
> +
> +               mvneta_txq_desc_put(txq);
> +
> +               if (desc_idx == 0)
> +                       desc_idx = txq->size;
> +               desc_idx -= 1;
> +       }
> +}
> +
>  static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
>                          struct mvneta_tx_queue *txq)
>  {
>         int hdr_len, total_len, data_left;
> -       int desc_count = 0;
> +       int first_desc, desc_count = 0;
>         struct mvneta_port *pp = netdev_priv(dev);
>         struct tso_t tso;
> -       int i;
>
>         /* Count needed descriptors */
>         if ((txq->count + tso_count_descs(skb)) >= txq->size)
> @@ -2732,6 +2758,8 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
>                 return 0;
>         }
>
> +       first_desc = txq->txq_put_index;
> +
>         /* Initialize the TSO handler, and prepare the first payload */
>         hdr_len = tso_start(skb, &tso);
>
> @@ -2772,15 +2800,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
>         /* Release all used data descriptors; header descriptors must not
>          * be DMA-unmapped.
>          */
> -       for (i = desc_count - 1; i >= 0; i--) {
> -               struct mvneta_tx_desc *tx_desc = txq->descs + i;
> -               if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
> -                       dma_unmap_single(pp->dev->dev.parent,
> -                                        tx_desc->buf_phys_addr,
> -                                        tx_desc->data_size,
> -                                        DMA_TO_DEVICE);
> -               mvneta_txq_desc_put(txq);
> -       }
> +       mvneta_release_descs(pp, txq, first_desc, desc_count - 1);
>         return 0;
>  }
>
> @@ -2790,6 +2810,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
>  {
>         struct mvneta_tx_desc *tx_desc;
>         int i, nr_frags = skb_shinfo(skb)->nr_frags;
> +       int first_desc = txq->txq_put_index;
>
>         for (i = 0; i < nr_frags; i++) {
>                 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
> @@ -2828,15 +2849,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
>         /* Release all descriptors that were used to map fragments of
>          * this packet, as well as the corresponding DMA mappings
>          */
> -       for (i = i - 1; i >= 0; i--) {
> -               tx_desc = txq->descs + i;
> -               dma_unmap_single(pp->dev->dev.parent,
> -                                tx_desc->buf_phys_addr,
> -                                tx_desc->data_size,
> -                                DMA_TO_DEVICE);
> -               mvneta_txq_desc_put(txq);
> -       }
> -
> +       mvneta_release_descs(pp, txq, first_desc, i - 1);
>         return -ENOMEM;
>  }
>
> --
> 2.30.2
>
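
To make the fix easier to follow outside the driver, here is a minimal
standalone sketch of the wrap-around index walk that the new
mvneta_release_descs() helper performs. The names here (release_range,
release_one, ring_size) are illustrative stand-ins, not the driver's
code; the real helper additionally skips TSO header buffers and calls
mvneta_txq_desc_put() for each entry.

    #include <stdio.h>

    /* Stand-in for the per-descriptor cleanup (dma_unmap_single() plus
     * mvneta_txq_desc_put() in the driver). */
    static void release_one(int idx)
    {
            printf("release descriptor %d\n", idx);
    }

    /* Release num + 1 descriptors starting at 'first', walking backwards
     * from the last used entry and wrapping around a ring of 'ring_size'
     * entries -- the same index arithmetic as mvneta_release_descs(),
     * which is called with desc_count - 1 as 'num'. */
    static void release_range(int ring_size, int first, int num)
    {
            int idx = first + num;      /* last descriptor used */
            int i;

            if (idx >= ring_size)
                    idx -= ring_size;   /* wrap past the end of the ring */

            for (i = num; i >= 0; i--) {
                    release_one(idx);

                    if (idx == 0)
                            idx = ring_size;
                    idx -= 1;
            }
    }

    int main(void)
    {
            /* Four descriptors used starting at entry 6 of an 8-entry
             * ring: this releases entries 1, 0, 7, 6. The old error-path
             * loop indexed from 0 and would have touched entries 3..0
             * instead. */
            release_range(8, 6, 3);
            return 0;
    }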
