Message-ID: <20250619094538.283723-2-fourier.thomas@gmail.com>
Date: Thu, 19 Jun 2025 11:45:30 +0200
From: Thomas Fourier <fourier.thomas@...il.com>
To:
Cc: Thomas Fourier <fourier.thomas@...il.com>,
Shannon Nelson <shannon.nelson@....com>,
Brett Creeley <brett.creeley@....com>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Stanislav Fomichev <sdf@...ichev.me>,
Vladimir Oltean <vladimir.oltean@....com>,
Caleb Sander Mateos <csander@...estorage.com>,
Taehee Yoo <ap420073@...il.com>,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
bpf@...r.kernel.org
Subject: [PATCH net v3] ethernet: ionic: Fix DMA mapping tests
Change error values of `ionic_tx_map_single()` and `ionic_tx_map_frag()`
from 0 to `DMA_MAPPING_ERROR` to prevent collision with 0 as a valid
address.
This also fixes `ionic_xdp_post_frame()`, which tested the return value of
`ionic_tx_map_frag()` with `dma_mapping_error()` even though the helper
reported failure by returning 0, so a failed mapping was not detected.
Fixes: 0f3154e6bcb3 ("ionic: Add Tx and Rx handling")
Fixes: 56e41ee12d2d ("ionic: better dma-map error handling")
Fixes: ac8813c0ab7d ("ionic: convert Rx queue buffers to use page_pool")
Signed-off-by: Thomas Fourier <fourier.thomas@...il.com>
---
drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
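For reference, a minimal sketch of the error-handling pattern the patch
moves to. The helper names below are hypothetical and only illustrate the
idea; dma_map_single(), dma_mapping_error() and DMA_MAPPING_ERROR are the
real DMA API from <linux/dma-mapping.h>:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a buffer for device TX; report failure with DMA_MAPPING_ERROR,
 * which cannot collide with a valid bus address (unlike 0).
 */
static dma_addr_t example_map_buf(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma_addr))
		return DMA_MAPPING_ERROR;

	return dma_addr;
}

static int example_post_buf(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma_addr = example_map_buf(dev, buf, len);

	/* Callers compare against DMA_MAPPING_ERROR, not 0. */
	if (dma_addr == DMA_MAPPING_ERROR)
		return -EIO;

	/* ... fill the hardware descriptor with dma_addr here ... */
	return 0;
}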
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 2ac59564ded1..d10b58ebf603 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -321,7 +321,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
len, DMA_TO_DEVICE);
} else /* XDP_REDIRECT */ {
dma_addr = ionic_tx_map_single(q, frame->data, len);
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
return -EIO;
}
@@ -357,7 +357,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
} else {
dma_addr = ionic_tx_map_frag(q, frag, 0,
skb_frag_size(frag));
- if (dma_mapping_error(q->dev, dma_addr)) {
+ if (dma_addr == DMA_MAPPING_ERROR) {
ionic_tx_desc_unmap_bufs(q, desc_info);
return -EIO;
}
@@ -1083,7 +1083,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1100,7 +1100,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1116,7 +1116,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
int frag_idx;
dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
return -EIO;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_headlen(skb);
@@ -1126,7 +1126,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
goto dma_fail;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_frag_size(frag);
--
2.43.0