Message-ID: <98bed219-5fb4-8376-e300-c77daf4549eb@wp.pl>
Date: Sat, 8 Jan 2022 14:27:13 +0100
From: Aleksander Bajkowski <olek2@...pl>
To: Eric Dumazet <eric.dumazet@...il.com>, hauke@...ke-m.de,
davem@...emloft.net, kuba@...nel.org, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH net-next] net: lantiq_xrx200: add ingress SG DMA support

Hi Eric,

On 1/4/22 18:41, Eric Dumazet wrote:
>
> On 1/3/22 11:43, Aleksander Jan Bajkowski wrote:
>> This patch adds support for scatter-gather DMA. The DMA in the PMAC
>> splits the packet into several buffers when the MTU on the CPU port
>> is less than the MTU of the switch. The first buffer starts at an
>> offset of NET_IP_ALIGN; in subsequent buffers, the DMA ignores the
>> offset. With this patch, the user can still connect to the device in
>> such a situation. For normal configurations, the patch has no effect
>> on performance.
>>
>> Signed-off-by: Aleksander Jan Bajkowski <olek2@...pl>
>> ---
>> drivers/net/ethernet/lantiq_xrx200.c | 47 +++++++++++++++++++++++-----
>> 1 file changed, 40 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
>> index 80bfaf2fec92..503fb99c5b90 100644
>> --- a/drivers/net/ethernet/lantiq_xrx200.c
>> +++ b/drivers/net/ethernet/lantiq_xrx200.c
>> @@ -27,6 +27,9 @@
>> #define XRX200_DMA_TX 1
>> #define XRX200_DMA_BURST_LEN 8
>> +#define XRX200_DMA_PACKET_COMPLETE 0
>> +#define XRX200_DMA_PACKET_IN_PROGRESS 1
>> +
>> /* cpu port mac */
>> #define PMAC_RX_IPG 0x0024
>> #define PMAC_RX_IPG_MASK 0xf
>> @@ -62,6 +65,9 @@ struct xrx200_chan {
>> struct ltq_dma_channel dma;
>> struct sk_buff *skb[LTQ_DESC_NUM];
>> + struct sk_buff *skb_head;
>> + struct sk_buff *skb_tail;
>> +
>> struct xrx200_priv *priv;
>> };
>> @@ -205,7 +211,8 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
>> struct xrx200_priv *priv = ch->priv;
>> struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
>> struct sk_buff *skb = ch->skb[ch->dma.desc];
>> - int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
>> + u32 ctl = desc->ctl;
>> + int len = (ctl & LTQ_DMA_SIZE_MASK);
>> struct net_device *net_dev = priv->net_dev;
>> int ret;
>> @@ -221,12 +228,36 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
>> }
>> skb_put(skb, len);
>> - skb->protocol = eth_type_trans(skb, net_dev);
>> - netif_receive_skb(skb);
>> - net_dev->stats.rx_packets++;
>> - net_dev->stats.rx_bytes += len;
>> - return 0;
>> + /* add buffers to skb via skb->frag_list */
>> + if (ctl & LTQ_DMA_SOP) {
>> + ch->skb_head = skb;
>> + ch->skb_tail = skb;
>> + } else if (ch->skb_head) {
>> + if (ch->skb_head == ch->skb_tail)
>> + skb_shinfo(ch->skb_tail)->frag_list = skb;
>> + else
>> + ch->skb_tail->next = skb;
>> + ch->skb_tail = skb;
>> + skb_reserve(ch->skb_tail, -NET_IP_ALIGN);
>> + ch->skb_head->len += skb->len;
>> + ch->skb_head->data_len += skb->len;
>> + ch->skb_head->truesize += skb->truesize;
>> + }
>> +
>> + if (ctl & LTQ_DMA_EOP) {
>> + ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
>> + netif_receive_skb(ch->skb_head);
>> + net_dev->stats.rx_packets++;
>> + net_dev->stats.rx_bytes += ch->skb_head->len;
>
>
> Use after free alert.
>
> Please add/test the following fix.
>
> (It is illegal to deref skb after netif_receive_skb())
>
>
> diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
> index 503fb99c5b90..bf7e3c7910d1 100644
> --- a/drivers/net/ethernet/lantiq_xrx200.c
> +++ b/drivers/net/ethernet/lantiq_xrx200.c
> @@ -247,9 +247,9 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
>
> if (ctl & LTQ_DMA_EOP) {
> ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
> - netif_receive_skb(ch->skb_head);
> net_dev->stats.rx_packets++;
> net_dev->stats.rx_bytes += ch->skb_head->len;
> + netif_receive_skb(ch->skb_head);
> ch->skb_head = NULL;
> ch->skb_tail = NULL;
> ret = XRX200_DMA_PACKET_COMPLETE;
>
>
>
Thanks for spotting this bug. I tested the fix and it works fine.
I will send it soon.
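
For reference, the EOP branch with that reordering applied would look
roughly like this (just a sketch combining the two diffs above, not the
final patch):

	if (ctl & LTQ_DMA_EOP) {
		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
		/* Read skb_head->len before handing the skb to the stack;
		 * netif_receive_skb() may free it immediately.
		 */
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += ch->skb_head->len;
		netif_receive_skb(ch->skb_head);
		ch->skb_head = NULL;
		ch->skb_tail = NULL;
		ret = XRX200_DMA_PACKET_COMPLETE;
	} else {
		ret = XRX200_DMA_PACKET_IN_PROGRESS;
	}
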
>> + ch->skb_head = NULL;
>> + ch->skb_tail = NULL;
>> + ret = XRX200_DMA_PACKET_COMPLETE;
>> + } else {
>> + ret = XRX200_DMA_PACKET_IN_PROGRESS;
>> + }
>> +
>> + return ret;
>> }
>> static int xrx200_poll_rx(struct napi_struct *napi, int budget)
>> @@ -241,7 +272,9 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
>> if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
>> ret = xrx200_hw_receive(ch);
>> - if (ret)
>> + if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
>> + continue;
>> + if (ret != XRX200_DMA_PACKET_COMPLETE)
>> return ret;
>> rx++;
>> } else {