Message-ID: <20251112073324.5301-3-mmietus97@yahoo.com>
Date: Wed, 12 Nov 2025 08:33:22 +0100
From: Marek Mietus <mmietus97@...oo.com>
To: netdev@...r.kernel.org,
sd@...asysnail.net,
kuba@...nel.org
Cc: Marek Mietus <mmietus97@...oo.com>
Subject: [PATCH net-next v4 12/14] net: sit: convert ipip6_tunnel_xmit to use a noref dst

ipip6_tunnel_xmit() unnecessarily takes a reference on the dst_entry
every time it interacts with the dst_cache.

Reduce this overhead by avoiding the redundant refcount increments:
look the route up with dst_cache_get_ip4_rcu(), hand freshly resolved
routes over to the cache with dst_cache_steal_ip4(), and pass a noref
dst to iptunnel_xmit(). The ip_rt_put() calls on the error paths go
away as well, since no reference is held anymore.

This is safe because IPv4 supports noref xmit when running under RCU,
which is already the case for ipip6_tunnel_xmit().
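
For reference, the resulting lookup path boils down to the sketch below
(simplified and trimmed; it assumes the semantics suggested by the helper
names used in this series: dst_cache_get_ip4_rcu() returns a pointer that
is only valid within the current RCU read-side section, and
dst_cache_steal_ip4() lets the cache take over the caller's reference):

	/* ndo_start_xmit already runs inside an RCU read-side section. */
	rt = dst_cache_get_ip4_rcu(&tunnel->dst_cache, &fl4.saddr);
	if (!rt) {
		rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
		if (IS_ERR(rt))
			goto tx_error_icmp;
		/* The cache keeps the reference; we go on with a noref pointer. */
		dst_cache_steal_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr);
	}
	/* No ip_rt_put() on the error paths below: no reference was taken. */
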
Signed-off-by: Marek Mietus <mmietus97@...oo.com>
---
net/ipv6/sit.c | 15 ++++-----------
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index ba65bb93b799..98f2e5fb5957 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -933,31 +933,28 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 			   IPPROTO_IPV6, 0, dst, tiph->saddr, 0, 0,
 			   sock_net_uid(tunnel->net, NULL));
 
-	rt = dst_cache_get_ip4(&tunnel->dst_cache, &fl4.saddr);
+	rt = dst_cache_get_ip4_rcu(&tunnel->dst_cache, &fl4.saddr);
 	if (!rt) {
 		rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
 		if (IS_ERR(rt)) {
 			DEV_STATS_INC(dev, tx_carrier_errors);
 			goto tx_error_icmp;
 		}
-		dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr);
+		dst_cache_steal_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr);
 	}
 
 	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
-		ip_rt_put(rt);
 		DEV_STATS_INC(dev, tx_carrier_errors);
 		goto tx_error_icmp;
 	}
 	tdev = rt->dst.dev;
 
 	if (tdev == dev) {
-		ip_rt_put(rt);
 		DEV_STATS_INC(dev, collisions);
 		goto tx_error;
 	}
 
 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4)) {
-		ip_rt_put(rt);
 		goto tx_error;
 	}
 
@@ -966,7 +963,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 
 	if (mtu < IPV4_MIN_MTU) {
 		DEV_STATS_INC(dev, collisions);
-		ip_rt_put(rt);
 		goto tx_error;
 	}
 
@@ -980,7 +976,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 
 		if (skb->len > mtu && !skb_is_gso(skb)) {
 			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-			ip_rt_put(rt);
 			goto tx_error;
 		}
 	}
@@ -1003,7 +998,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
 		if (!new_skb) {
-			ip_rt_put(rt);
 			DEV_STATS_INC(dev, tx_dropped);
 			kfree_skb(skb);
 			return NETDEV_TX_OK;
@@ -1020,14 +1014,13 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 
 	if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0) {
-		ip_rt_put(rt);
 		goto tx_error;
 	}
 
 	skb_set_inner_ipproto(skb, IPPROTO_IPV6);
 
-	iptunnel_xmit(NULL, dst_to_dstref(&rt->dst), skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
-		      df, !net_eq(tunnel->net, dev_net(dev)), 0);
+	iptunnel_xmit(NULL, dst_to_dstref_noref(&rt->dst), skb, fl4.saddr, fl4.daddr, protocol,
+		      tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)), 0);
 	return NETDEV_TX_OK;
 
 tx_error_icmp:
--
2.51.0