Message-Id: <20260103073457.189244-1-mahdifrmx@gmail.com>
Date: Sat,  3 Jan 2026 11:04:57 +0330
From: Mahdi Faramarzpour <mahdifrmx@...il.com>
To: netdev@...r.kernel.org,
	kuba@...nel.org,
	edumazet@...gle.com
Cc: Mahdi Faramarzpour <mahdifrmx@...il.com>
Subject: [PATCH net] udp: add drop count for packets in udp_prod_queue

Commit b650bf0977d3 removed the busylock and added per-NUMA queues to
__udp_enqueue_schedule_skb() for a performance boost, but left a TODO
for the SNMP accounting of packets dropped while flushing those queues.

Bump UDP_MIB_RCVBUFERRORS or UDP_MIB_MEMERRORS, plus UDP_MIB_INERRORS,
for each packet dropped on the flush path. The counts are batched per
address family and cause, so the per-cpu statistics are touched once
per flush rather than once per dropped packet.

Signed-off-by: Mahdi Faramarzpour <mahdifrmx@...il.com>
---
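The drops show up in the RcvbufErrors, MemErrors and InErrors columns
of the "Udp:" lines in /proc/net/snmp, and in the Udp6* counterparts
in /proc/net/snmp6. A small reader along the following lines can be
used to watch them (an illustrative sketch only; it picks the field
names out of the proc header line, so it assumes the running kernel
exposes the MemErrors column):

	#include <stdio.h>
	#include <string.h>

	/* udp_err_watch.c: dump UDP error counters from /proc/net/snmp. */
	int main(void)
	{
		char hdr[512], val[512];
		FILE *f = fopen("/proc/net/snmp", "r");

		if (!f)
			return 1;
		while (fgets(hdr, sizeof(hdr), f)) {
			/* Lines come in pairs: "Udp:" names, then "Udp:" values. */
			if (strncmp(hdr, "Udp:", 4) || !fgets(val, sizeof(val), f))
				continue;
			char *hs, *vs;
			char *h = strtok_r(hdr, " \n", &hs);	/* skip "Udp:" */
			char *v = strtok_r(val, " \n", &vs);
			while ((h = strtok_r(NULL, " \n", &hs)) &&
			       (v = strtok_r(NULL, " \n", &vs))) {
				if (strstr(h, "Errors"))
					printf("%s: %s\n", h, v);
			}
			break;
		}
		fclose(f);
		return 0;
	}

It can be built with "gcc -Wall -o udp_err_watch udp_err_watch.c" and
run before and after flooding a socket that is too slow to drain its
receive queue.
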
 net/ipv4/udp.c | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ffe074cb5..00a8aeda1 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1709,6 +1709,14 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	int dropcount;
 	int nb = 0;
 
+	/* Drop counts split by family and cause, flushed to SNMP in one batch. */
+	struct {
+		int rcvbuf4;
+		int rcvbuf6;
+		int mem4;
+		int mem6;
+	} err_count = {};
+
 	rmem = atomic_read(&sk->sk_rmem_alloc);
 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
 	size = skb->truesize;
@@ -1760,6 +1768,18 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 		total_size += size;
 		err = udp_rmem_schedule(sk, size);
 		if (unlikely(err)) {
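+			/* -ENOMEM is charged as a rcvbuf error, matching __udp_queue_rcv_skb() */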
+			if (err == -ENOMEM) {
+				if (skb->protocol == htons(ETH_P_IP))
+					err_count.rcvbuf4++;
+				else
+					err_count.rcvbuf6++;
+			} else {
+				if (skb->protocol == htons(ETH_P_IP))
+					err_count.mem4++;
+				else
+					err_count.mem6++;
+			}
 			/*  Free the skbs outside of locked section. */
 			skb->next = to_drop;
 			to_drop = skb;
@@ -1797,10 +1817,23 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 			skb = to_drop;
 			to_drop = skb->next;
 			skb_mark_not_on_list(skb);
-			/* TODO: update SNMP values. */
 			sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_PROTO_MEM);
 		}
 		numa_drop_add(&udp_sk(sk)->drop_counters, nb);
+
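+		/* Flush the batched counts; both causes also feed UDP_MIB_INERRORS. */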
+		SNMP_ADD_STATS(__UDPX_MIB(sk, true), UDP_MIB_RCVBUFERRORS,
+			       err_count.rcvbuf4);
+		SNMP_ADD_STATS(__UDPX_MIB(sk, true), UDP_MIB_MEMERRORS,
+			       err_count.mem4);
+		SNMP_ADD_STATS(__UDPX_MIB(sk, true), UDP_MIB_INERRORS,
+			       err_count.mem4 + err_count.rcvbuf4);
+		SNMP_ADD_STATS(__UDPX_MIB(sk, false), UDP_MIB_RCVBUFERRORS,
+			       err_count.rcvbuf6);
+		SNMP_ADD_STATS(__UDPX_MIB(sk, false), UDP_MIB_MEMERRORS,
+			       err_count.mem6);
+		SNMP_ADD_STATS(__UDPX_MIB(sk, false), UDP_MIB_INERRORS,
+			       err_count.mem6 + err_count.rcvbuf6);
 	}
 
 	atomic_sub(total_size, &udp_prod_queue->rmem_alloc);
-- 
2.34.1

