[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20260113162954.5948-8-mmietus97@yahoo.com>
Date: Tue, 13 Jan 2026 17:29:50 +0100
From: Marek Mietus <mmietus97@...oo.com>
To: netdev@...r.kernel.org,
sd@...asysnail.net,
kuba@...nel.org
Cc: Jason@...c4.com,
Marek Mietus <mmietus97@...oo.com>
Subject: [PATCH net-next v5 07/11] net: tunnel: convert ip_md_tunnel_xmit to use a noref dst when possible
ip_md_tunnel_xmit unnecessarily references the dst_entry from the
dst_cache when interacting with the cache.
Reduce this overhead by avoiding the redundant refcount increments.
This is only possible in flows where the dst_cache is used. Otherwise, we
fall back to a referenced dst.
This change is safe because IPv4 supports noref xmit under RCU, and
ip_md_tunnel_xmit already runs within an RCU read-side critical section.
Signed-off-by: Marek Mietus <mmietus97@...oo.com>
---
net/ipv4/ip_tunnel.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 8a0c611ab1bf..ab10759dd2e4 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -609,7 +609,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
if (use_cache)
- rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl4.saddr);
+ rt = dst_cache_get_ip4_rcu(&tun_info->dst_cache, &fl4.saddr);
if (!rt) {
rt = ip_route_output_key(tunnel->net, &fl4);
if (IS_ERR(rt)) {
@@ -617,11 +617,12 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
if (use_cache)
- dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
- fl4.saddr);
+ dst_cache_steal_ip4(&tun_info->dst_cache, &rt->dst,
+ fl4.saddr);
}
if (rt->dst.dev == dev) {
- ip_rt_put(rt);
+ if (!use_cache)
+ ip_rt_put(rt);
DEV_STATS_INC(dev, collisions);
goto tx_error;
}
@@ -630,7 +631,8 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
df = htons(IP_DF);
if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
key->u.ipv4.dst, true)) {
- ip_rt_put(rt);
+ if (!use_cache)
+ ip_rt_put(rt);
goto tx_error;
}
@@ -647,7 +649,8 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
if (skb_cow_head(skb, headroom)) {
- ip_rt_put(rt);
+ if (!use_cache)
+ ip_rt_put(rt);
goto tx_dropped;
}
@@ -655,7 +658,8 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)), 0);
- ip_rt_put(rt);
+ if (!use_cache)
+ ip_rt_put(rt);
return;
tx_error:
DEV_STATS_INC(dev, tx_errors);
--
2.51.0
Powered by blists - more mailing lists