Message-ID: <20251112072720.5076-10-mmietus97@yahoo.com>
Date: Wed, 12 Nov 2025 08:27:15 +0100
From: Marek Mietus <mmietus97@...oo.com>
To: netdev@...r.kernel.org,
sd@...asysnail.net,
kuba@...nel.org
Cc: Marek Mietus <mmietus97@...oo.com>
Subject: [PATCH net-next v4 09/14] net: wireguard: convert send{4,6} to use a noref dst when possible
send{4,6} unnecessarily take a reference on the dst_entry whenever they
retrieve it from the dst_cache. Reduce this overhead by avoiding the
redundant refcount increments.

This is only possible in flows where the cache is used; otherwise, we
fall back to a referenced dst.

These changes are safe as both the IPv4 and IPv6 paths support noref
xmit when running under RCU, which is already the case for the
wireguard send{4,6} functions.
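For illustration, the new cache-hit/cache-miss handling follows roughly
this shape (a simplified sketch of the send4() path, using the dstref_t
helpers added earlier in this series; error handling omitted, not the
verbatim driver code):

	if (cache) {
		/* Fast path: borrow the cached route without taking a
		 * reference; safe because the whole transmit runs inside
		 * the RCU read-side section. */
		rt = dst_cache_get_ip4_rcu(cache, &fl.saddr);
		if (rt)
			dstref = dst_to_dstref_noref(&rt->dst);
	}
	if (!rt) {
		/* Slow path: the route lookup returns a referenced dst. */
		rt = ip_route_output_flow(sock_net(sock), &fl, sock);
		if (cache) {
			/* Hand the reference over to the cache and keep
			 * using the dst as noref for this packet. */
			dst_cache_steal_ip4(cache, &rt->dst, fl.saddr);
			dstref = dst_to_dstref_noref(&rt->dst);
		} else {
			/* No cache: transmit with the reference we own. */
			dstref = dst_to_dstref(&rt->dst);
		}
	}
	udp_tunnel_xmit_skb(dstref, sock, skb, ...);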
Signed-off-by: Marek Mietus <mmietus97@...oo.com>
---
drivers/net/wireguard/socket.c | 34 ++++++++++++++++++++++++----------
1 file changed, 24 insertions(+), 10 deletions(-)
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 342247d324ab..51c61527da94 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -29,6 +29,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
};
struct rtable *rt = NULL;
struct sock *sock;
+ dstref_t dstref;
int ret = 0;
skb_mark_not_on_list(skb);
@@ -45,8 +46,10 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
fl.fl4_sport = inet_sk(sock)->inet_sport;
- if (cache)
- rt = dst_cache_get_ip4(cache, &fl.saddr);
+ if (cache) {
+ rt = dst_cache_get_ip4_rcu(cache, &fl.saddr);
+ dstref = dst_to_dstref_noref(&rt->dst);
+ }
if (!rt) {
security_sk_classify_flow(sock, flowi4_to_flowi_common(&fl));
@@ -77,12 +80,16 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
wg->dev->name, &endpoint->addr, ret);
goto err;
}
- if (cache)
- dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
+ if (cache) {
+ dst_cache_steal_ip4(cache, &rt->dst, fl.saddr);
+ dstref = dst_to_dstref_noref(&rt->dst);
+ } else {
+ dstref = dst_to_dstref(&rt->dst);
+ }
}
skb->ignore_df = 1;
- udp_tunnel_xmit_skb(dst_to_dstref(&rt->dst), sock, skb, fl.saddr, fl.daddr, ds,
+ udp_tunnel_xmit_skb(dstref, sock, skb, fl.saddr, fl.daddr, ds,
ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
fl.fl4_dport, false, false, 0);
goto out;
@@ -109,6 +116,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
};
struct dst_entry *dst = NULL;
struct sock *sock;
+ dstref_t dstref;
int ret = 0;
skb_mark_not_on_list(skb);
@@ -125,8 +133,10 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
fl.fl6_sport = inet_sk(sock)->inet_sport;
- if (cache)
- dst = dst_cache_get_ip6(cache, &fl.saddr);
+ if (cache) {
+ dst = dst_cache_get_ip6_rcu(cache, &fl.saddr);
+ dstref = dst_to_dstref_noref(dst);
+ }
if (!dst) {
security_sk_classify_flow(sock, flowi6_to_flowi_common(&fl));
@@ -144,12 +154,16 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
wg->dev->name, &endpoint->addr, ret);
goto err;
}
- if (cache)
- dst_cache_set_ip6(cache, dst, &fl.saddr);
+ if (cache) {
+ dst_cache_steal_ip6(cache, dst, &fl.saddr);
+ dstref = dst_to_dstref_noref(dst);
+ } else {
+ dstref = dst_to_dstref(dst);
+ }
}
skb->ignore_df = 1;
- udp_tunnel6_xmit_skb(dst_to_dstref(dst), sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds,
+ udp_tunnel6_xmit_skb(dstref, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds,
ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
fl.fl6_dport, false, 0);
goto out;
--
2.51.0