lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240906063907.9591-1-amishin@t-argos.ru>
Date: Fri, 6 Sep 2024 09:39:07 +0300
From: Aleksandr Mishin <amishin@...rgos.ru>
To: Veerasenareddy Burru <vburru@...vell.com>
CC: Aleksandr Mishin <amishin@...rgos.ru>, Sathesh Edara <sedara@...vell.com>,
	"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
	Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, Abhijit
 Ayarekar <aayarekar@...vell.com>, Satananda Burla <sburla@...vell.com>,
	<netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
	<lvc-project@...uxtesting.org>
Subject: [PATCH net] octeon_ep: Add SKB allocation failures handling in __octep_oq_process_rx()

build_skb() returns NULL in case of a memory allocation failure so handle
it inside __octep_oq_process_rx() to avoid NULL pointer dereference.

__octep_oq_process_rx() is called during NAPI polling by the driver. If
skb allocation fails, keep on pulling packets out of the Rx DMA queue: we
shouldn't break the polling immediately and thus falsely indicate to the
octep_napi_poll() that the Rx pressure is going down. As there is no
associated skb in this case, don't process the packets and don't push them
up the network stack - they are skipped.

The common code performing the skb allocation and read index/descriptor
accounting is hoisted out of the two branches to make the fix more readable
and avoid code duplication. Also the 'alloc_failures' counter is incremented
to record the skb allocation error in the driver statistics.

The suggested approach for handling buffer allocation failures in the NAPI
polling functions is also implemented in the Cavium Liquidio driver. It
has the same structure, namely in octeon_droq_fast_process_packets().

A similar situation is present in __octep_vf_oq_process_rx() of the
Octeon VF driver; we first want to validate the fix on __octep_oq_process_rx().

Compile tested only. Marvell folks, could you review and test this, please?

Found by Linux Verification Center (linuxtesting.org) with SVACE.

Fixes: 37d79d059606 ("octeon_ep: add Tx/Rx processing and interrupt support")
Signed-off-by: Aleksandr Mishin <amishin@...rgos.ru>
---
 .../net/ethernet/marvell/octeon_ep/octep_rx.c | 44 ++++++++++---------
 1 file changed, 24 insertions(+), 20 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 4746a6b258f0..e92afd1a372a 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -394,32 +394,32 @@ static int __octep_oq_process_rx(struct octep_device *oct,
 			data_offset = OCTEP_OQ_RESP_HW_SIZE;
 			rx_ol_flags = 0;
 		}
+
+		skb = build_skb((void *)resp_hw, PAGE_SIZE);
+		if (skb)
+			skb_reserve(skb, data_offset);
+		else
+			oq->stats.alloc_failures++;
 		rx_bytes += buff_info->len;
+		read_idx++;
+		desc_used++;
+		if (read_idx == oq->max_count)
+			read_idx = 0;
 
 		if (buff_info->len <= oq->max_single_buffer_size) {
-			skb = build_skb((void *)resp_hw, PAGE_SIZE);
-			skb_reserve(skb, data_offset);
-			skb_put(skb, buff_info->len);
-			read_idx++;
-			desc_used++;
-			if (read_idx == oq->max_count)
-				read_idx = 0;
+			if (skb)
+				skb_put(skb, buff_info->len);
 		} else {
 			struct skb_shared_info *shinfo;
 			u16 data_len;
 
-			skb = build_skb((void *)resp_hw, PAGE_SIZE);
-			skb_reserve(skb, data_offset);
 			/* Head fragment includes response header(s);
 			 * subsequent fragments contains only data.
 			 */
-			skb_put(skb, oq->max_single_buffer_size);
-			read_idx++;
-			desc_used++;
-			if (read_idx == oq->max_count)
-				read_idx = 0;
-
-			shinfo = skb_shinfo(skb);
+			if (skb) {
+				skb_put(skb, oq->max_single_buffer_size);
+				shinfo = skb_shinfo(skb);
+			}
 			data_len = buff_info->len - oq->max_single_buffer_size;
 			while (data_len) {
 				dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
@@ -434,10 +434,11 @@ static int __octep_oq_process_rx(struct octep_device *oct,
 					data_len -= oq->buffer_size;
 				}
 
-				skb_add_rx_frag(skb, shinfo->nr_frags,
-						buff_info->page, 0,
-						buff_info->len,
-						buff_info->len);
+				if (skb)
+					skb_add_rx_frag(skb, shinfo->nr_frags,
+							buff_info->page, 0,
+							buff_info->len,
+							buff_info->len);
 				buff_info->page = NULL;
 				read_idx++;
 				desc_used++;
@@ -446,6 +447,9 @@ static int __octep_oq_process_rx(struct octep_device *oct,
 			}
 		}
 
+		if (!skb)
+			continue;
+
 		skb->dev = oq->netdev;
 		skb->protocol =  eth_type_trans(skb, skb->dev);
 		if (feat & NETIF_F_RXCSUM &&
-- 
2.30.2


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ