Message-Id: <20200110115415.75683-7-andre.przywara@arm.com>
Date: Fri, 10 Jan 2020 11:54:07 +0000
From: Andre Przywara <andre.przywara@....com>
To: "David S . Miller" <davem@...emloft.net>,
Radhey Shyam Pandey <radhey.shyam.pandey@...inx.com>
Cc: Michal Simek <michal.simek@...inx.com>,
Robert Hancock <hancock@...systems.ca>, netdev@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: [PATCH 06/14] net: axienet: Check for DMA mapping errors

Especially with the default 32-bit DMA mask, DMA buffers are a limited
resource, so their allocation can fail.
As the DMA API documentation requires, add error checking code after each
dma_map_single() call to catch the case where we run out of "low" memory.

Signed-off-by: Andre Przywara <andre.przywara@....com>
---
.../net/ethernet/xilinx/xilinx_axienet_main.c | 22 ++++++++++++++++++-
1 file changed, 21 insertions(+), 1 deletion(-)
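
For reference, the DMA API pattern added at each mapping site in this patch
looks roughly like the sketch below (illustrative only, not part of the
patch; 'dev', 'buf', 'len' and the function name are placeholders):

#include <linux/dma-mapping.h>

/*
 * Map a CPU buffer for device DMA and check the result, as the DMA API
 * documentation requires. Returns 0 on success, -ENOMEM if the mapping
 * could not be created (e.g. no bounce buffer / IOVA space left).
 */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;		/* never hand 'addr' to the device */

	*handle = addr;
	return 0;
}
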
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 82abe2b0f16a..8d2b67cbecf9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -248,6 +248,11 @@ static int axienet_dma_bd_init(struct net_device *ndev)
skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
@@ -668,6 +673,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dma_addr_t tail_p;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
+ u32 orig_tail_ptr = lp->tx_bd_tail;
num_frag = skb_shinfo(skb)->nr_frags;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -703,9 +709,11 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
- cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
+ return NETDEV_TX_BUSY;
+ cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -716,6 +724,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
+ axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
+ NULL);
+ lp->tx_bd_tail = orig_tail_ptr;
+
+ return NETDEV_TX_BUSY;
+ }
cur_p->cntrl = skb_frag_size(frag);
}
@@ -796,6 +811,11 @@ static void axienet_recv(struct net_device *ndev)
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
+ dev_kfree_skb(new_skb);
+ return;
+ }
+
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
cur_p->skb = new_skb;
--
2.17.1