Message-ID: <60ed19a1-97e1-4342-8dfc-a5db684afa6f@redhat.com>
Date: Fri, 23 Jan 2026 09:14:06 +0100
From: Paolo Abeni <pabeni@...hat.com>
To: Willem de Bruijn <willemdebruijn.kernel@...il.com>,
 Mahdi Faramarzpour <mahdifrmx@...il.com>, netdev@...r.kernel.org
Cc: davem@...emloft.net, dsahern@...nel.org, edumazet@...gle.com,
 kuba@...nel.org, horms@...nel.org, kshitiz.bartariya@...omail.in
Subject: Re: [PATCH net-next] udp: add drop count for packets in
 udp_prod_queue

On 1/22/26 10:26 PM, Willem de Bruijn wrote:
> Mahdi Faramarzpour wrote:
>> This commit adds SNMP drop count increments for packets dropped from
>> the per-NUMA queues introduced in commit b650bf0977d3 ("udp: remove
>> busylock and add per NUMA queues"). Note that the SNMP counters are
>> currently incremented by the caller for the passed-in skb; skbs on
>> the intermediate queue cannot be counted there, so similar logic is
>> needed in their error path.
>>
>> Signed-off-by: Mahdi Faramarzpour <mahdifrmx@...il.com>
>> ---
>> v5:
>>   - check if drop counts are non-zero before incrementing counters
>> v4: https://lore.kernel.org/netdev/20260108102950.49417-1-mahdifrmx@gmail.com/
>>   - move all changes to unlikely(to_drop) branch
>> v3: https://lore.kernel.org/netdev/20260105114732.140719-1-mahdifrmx@gmail.com/
>>   - remove the unreachable UDP_MIB_RCVBUFERRORS code
>> v2: https://lore.kernel.org/netdev/20260105071218.10785-1-mahdifrmx@gmail.com/
>>   - change ENOMEM to ENOBUFS
>> v1: https://lore.kernel.org/netdev/20260104105732.427691-1-mahdifrmx@gmail.com/
>> ---
>>  net/ipv4/udp.c | 19 ++++++++++++++++++-
>>  1 file changed, 18 insertions(+), 1 deletion(-)
>>
>> diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
>> index ffe074cb5..41cf8f7ab 100644
>> --- a/net/ipv4/udp.c
>> +++ b/net/ipv4/udp.c
>> @@ -1793,14 +1793,31 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
>>  	}
>>  
>>  	if (unlikely(to_drop)) {
>> +		int err_ipv4 = 0;
>> +		int err_ipv6 = 0;
> 
> nit: whitespace between variable definition and code block
> 
>>  		for (nb = 0; to_drop != NULL; nb++) {
>>  			skb = to_drop;
>> +			if (skb->protocol == htons(ETH_P_IP))
>> +				err_ipv4++;
>> +			else
>> +				err_ipv6++;
> 
> No need for separate counters.
> 
> All counter updates will target either the IPv4 or the IPv6 stats,
> similar to how the caller of __udp_enqueue_schedule_skb is either
> __udp_queue_rcv_skb or __udpv6_queue_rcv_skb and chooses the SNMP
> stat based on that.
> 
> Can check sk_family == PF_INET6 once.

I think that doing the SNMP accounting in __udp_enqueue_schedule_skb()
(for `to_drop`) and in __udp_queue_rcv_skb()/__udpv6_queue_rcv_skb() (for
`skb`) is a little confusing and possibly error-prone in the long run.

I'm wondering if something like the following (completely untested, not
even built! just to give the idea) would be better?

Note that the additional argument to __udp_enqueue_schedule_skb() could
be avoided by piggybacking the `to_drop` count into the
__udp_enqueue_schedule_skb() return value.
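
E.g. (same disclaimers as above: untested, just to give the idea) the
enqueue helper could keep freeing the dropped skbs itself and report
their number via the return value, leaving only the SNMP update to the
callers:

	/* Returns the number of skbs dropped from the intermediate
	 * queue (>= 0), or a negative error for the skb being
	 * enqueued; the dropped skbs are still freed here, outside
	 * the locked section.
	 */
	int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);

	/* IPv4 caller side */
	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (unlikely(rc > 0)) {
		SNMP_ADD_STATS(__UDPX_MIB(sk, true), UDP_MIB_MEMERRORS, rc);
		SNMP_ADD_STATS(__UDPX_MIB(sk, true), UDP_MIB_INERRORS, rc);
		rc = 0;
	}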

---
diff --git a/include/net/udp.h b/include/net/udp.h
index a061d1b22ddc..673497eb79b7 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -399,7 +399,8 @@ static inline bool udp_sk_bound_dev_eq(const struct net *net, int bound_dev_if,
 /* net/ipv4/udp.c */
 void udp_destruct_common(struct sock *sk);
 void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
-int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
+int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb,
+			       struct sk_buff **to_drop);
 void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
 struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off,
 			       int *err);
@@ -411,6 +412,21 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
 	return __skb_recv_udp(sk, flags, &off, err);
 }

+static inline int udp_drop_skbs(struct sock *sk, struct sk_buff *to_drop)
+{
+	struct sk_buff *skb;
+	int nb;
+
+	for (nb = 0; to_drop != NULL; nb++) {
+		skb = to_drop;
+		to_drop = skb->next;
+		skb_mark_not_on_list(skb);
+		sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_PROTO_MEM);
+	}
+	numa_drop_add(&udp_sk(sk)->drop_counters, nb);
+	return nb;
+}
+
 enum skb_drop_reason udp_v4_early_demux(struct sk_buff *skb);
 bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
 int udp_err(struct sk_buff *, u32);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ee63af0ef42c..0869e115c0d3 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1696,14 +1696,15 @@ static int udp_rmem_schedule(struct sock *sk, int size)
 	return 0;
 }

-int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
+int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb,
+			       struct sk_buff **to_drop)
 {
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	struct udp_prod_queue *udp_prod_queue;
-	struct sk_buff *next, *to_drop = NULL;
 	struct llist_node *ll_list;
 	unsigned int rmem, rcvbuf;
 	int size, err = -ENOMEM;
+	struct sk_buff *next;
 	int total_size = 0;
 	int q_size = 0;
 	int dropcount;
@@ -1761,8 +1762,8 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 		err = udp_rmem_schedule(sk, size);
 		if (unlikely(err)) {
 			/*  Free the skbs outside of locked section. */
-			skb->next = to_drop;
-			to_drop = skb;
+			skb->next = *to_drop;
+			*to_drop = skb;
 			continue;
 		}

@@ -1792,17 +1793,6 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 		}
 	}

-	if (unlikely(to_drop)) {
-		for (nb = 0; to_drop != NULL; nb++) {
-			skb = to_drop;
-			to_drop = skb->next;
-			skb_mark_not_on_list(skb);
-			/* TODO: update SNMP values. */
-			sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_PROTO_MEM);
-		}
-		numa_drop_add(&udp_sk(sk)->drop_counters, nb);
-	}
-
 	atomic_sub(total_size, &udp_prod_queue->rmem_alloc);

 	return 0;
@@ -2333,6 +2323,7 @@ void udp_v4_rehash(struct sock *sk)

 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
+	struct sk_buff *to_drop = NULL;
 	int rc;

 	if (inet_sk(sk)->inet_daddr) {
@@ -2343,7 +2334,13 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		sk_mark_napi_id_once(sk, skb);
 	}

-	rc = __udp_enqueue_schedule_skb(sk, skb);
+	rc = __udp_enqueue_schedule_skb(sk, skb, &to_drop);
+	if (unlikely(to_drop)) {
+		int cnt = udp_drop_skbs(sk, to_drop);
+
+		SNMP_ADD_STATS(__UDPX_MIB(sk, true), UDP_MIB_MEMERRORS, cnt);
+		SNMP_ADD_STATS(__UDPX_MIB(sk, true), UDP_MIB_INERRORS, cnt);
+	}
 	if (rc < 0) {
 		int is_udplite = IS_UDPLITE(sk);
 		int drop_reason;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 794c13674e8a..5950ec699deb 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -782,6 +782,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,

 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
+	struct sk_buff *to_drop = NULL;
 	int rc;

 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
@@ -792,7 +793,13 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		sk_mark_napi_id_once(sk, skb);
 	}

-	rc = __udp_enqueue_schedule_skb(sk, skb);
+	rc = __udp_enqueue_schedule_skb(sk, skb, &to_drop);
+	if (unlikely(to_drop)) {
+		int cnt = udp_drop_skbs(sk, to_drop);
+
+		SNMP_ADD_STATS(__UDPX_MIB(sk, false), UDP_MIB_MEMERRORS, cnt);
+		SNMP_ADD_STATS(__UDPX_MIB(sk, false), UDP_MIB_INERRORS, cnt);
+	}
 	if (rc < 0) {
 		int is_udplite = IS_UDPLITE(sk);
 		enum skb_drop_reason drop_reason;

