[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <201912080836.xB88amHd015549@sdf.org>
Date: Sun, 8 Dec 2019 08:36:48 GMT
From: George Spelvin <lkml@....org>
To: michael.chan@...adcom.com, netdev@...r.kernel.org
Cc: hauke@...ke-m.de, lkml@....org
Subject: [RFC PATCH 2/4] b44: Fix off-by-one error in acceptable address range
The hardware requirement is dma_addr + size <= 0x40000000, not dma_addr + size <= 0x3fffffff, so the bounds check should compare against DMA_BIT_MASK(30) + 1 rather than DMA_BIT_MASK(30).
In a logically separate (but textually overlapping) change, this patch also
rearranges the failure-detection logic to use a goto rather than testing
dma_mapping_error() twice. The double test is expensive when
CONFIG_DMA_API_DEBUG is set; moreover, for bug-proofing reasons, I try to
avoid repeating the same condition in two places that must be kept in sync.
Signed-off-by: George Spelvin <lkml@....org>
---
drivers/net/ethernet/broadcom/b44.c | 42 ++++++++++++++++-------------
1 file changed, 24 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 394671230c1c..e540d5646aef 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -680,12 +680,13 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
/* Hardware bug work-around, the chip is unable to do PCI DMA
to/from anything above 1GB :-( */
- if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
- mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping))
+ goto workaround;
+ if (mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)+1) {
/* Sigh... */
- if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
- dma_unmap_single(bp->sdev->dma_dev, mapping,
- RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+ dma_unmap_single(bp->sdev->dma_dev, mapping,
+ RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+workaround:
dev_kfree_skb_any(skb);
skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
if (skb == NULL)
@@ -693,10 +694,12 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
RX_PKT_BUF_SZ,
DMA_FROM_DEVICE);
- if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
- mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
- if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
- dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping))
+ goto failed;
+ if (mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)+1) {
+ dma_unmap_single(bp->sdev->dma_dev, mapping,
+ RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+failed:
dev_kfree_skb_any(skb);
return -ENOMEM;
}
@@ -990,24 +993,27 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = dma_map_single_attrs(bp->sdev->dma_dev, skb->data, len,
DMA_TO_DEVICE, DMA_ATTR_NO_WARN);
- if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping))
+ goto workaround;
+ if (mapping + len > DMA_BIT_MASK(30)+1) {
struct sk_buff *bounce_skb;
/* Chip can't handle DMA to/from >1GB, use bounce buffer */
- if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
- dma_unmap_single(bp->sdev->dma_dev, mapping, len,
- DMA_TO_DEVICE);
-
+ dma_unmap_single(bp->sdev->dma_dev, mapping, len,
+ DMA_TO_DEVICE);
+workaround:
bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb)
goto err_out;
mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
- if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
- dma_unmap_single(bp->sdev->dma_dev, mapping,
- len, DMA_TO_DEVICE);
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping))
+ goto failed;
+ if (mapping + len > DMA_BIT_MASK(30)+1) {
+ dma_unmap_single(bp->sdev->dma_dev, mapping, len,
+ DMA_TO_DEVICE);
+failed:
dev_kfree_skb_any(bounce_skb);
goto err_out;
}
--
2.24.0
Powered by blists - more mailing lists