[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <d326726d-7050-4e88-b950-f49cf5901d34@uliege.be>
Date: Fri, 11 Apr 2025 20:34:54 +0200
From: Justin Iurman <justin.iurman@...ege.be>
To: Alexei Starovoitov <alexei.starovoitov@...il.com>
Cc: Sebastian Sewior <bigeasy@...utronix.de>,
Stanislav Fomichev <stfomichev@...il.com>,
Network Development <netdev@...r.kernel.org>,
"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Simon Horman <horms@...nel.org>, Kuniyuki Iwashima <kuniyu@...zon.com>,
bpf <bpf@...r.kernel.org>, Andrea Mayer <andrea.mayer@...roma2.it>
Subject: Re: [PATCH net] net: lwtunnel: disable preemption when required
On 4/7/25 19:54, Alexei Starovoitov wrote:
> On Sun, Apr 6, 2025 at 1:59 AM Justin Iurman <justin.iurman@...ege.be> wrote:
>>
>> On 4/4/25 16:19, Sebastian Sewior wrote:
>>> Alexei, thank you for the Cc.
>>>
>>> On 2025-04-03 13:35:10 [-0700], Alexei Starovoitov wrote:
>>>> Stating the obvious...
>>>> Sebastian did a lot of work removing preempt_disable from the networking
>>>> stack.
>>>> We're certainly not adding them back.
>>>> This patch is no go.
>>>
>>> While looking through the code, it looks as if lwtunnel_xmit() lacks a
>>> local_bh_disable().
>>
>> Thanks Sebastian for the confirmation, as the initial idea was to use
>> local_bh_disable() as well. Then I thought preempt_disable() would be
>> enough in this context, but I didn't realize you made efforts to remove
>> it from the networking stack.
>>
>> @Alexei, just to clarify: would you ACK this patch if we do
>> s/preempt_{disable|enable}()/local_bh_{disable|enable}()/g ?
>
> You need to think it through and not sprinkle local_bh_disable in
> every lwt related function.
> Like lwtunnel_input should be running with bh disabled already.
Having nested calls to local_bh_{disable|enable}() is fine (i.e.,
disabling BHs when they're already disabled), but I guess it's cleaner
to avoid it here as you suggest. And since lwtunnel_input() is indeed
(always) running with BHs disabled, no changes needed. Thanks for the
reminder.
> I don't remember the exact conditions where bh is disabled in xmit path.
Right. Not sure for lwtunnel_xmit(), but lwtunnel_output() can
definitely run with or without BHs disabled. So, what I propose is the
following logic (applied to lwtunnel_xmit() too): if BHs disabled then
NOP else local_bh_disable(). Thoughts on this new version? (sorry, my
mailer messes it up, but you get the idea):
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index e39a459540ec..d44d341683c5 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -331,8 +331,13 @@ int lwtunnel_output(struct net *net, struct sock
*sk, struct sk_buff *skb)
const struct lwtunnel_encap_ops *ops;
struct lwtunnel_state *lwtstate;
struct dst_entry *dst;
+ bool in_softirq;
int ret;
+ in_softirq = in_softirq();
+ if (!in_softirq)
+ local_bh_disable();
+
if (dev_xmit_recursion()) {
net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
__func__);
@@ -345,11 +350,13 @@ int lwtunnel_output(struct net *net, struct sock
*sk, struct sk_buff *skb)
ret = -EINVAL;
goto drop;
}
- lwtstate = dst->lwtstate;
+ lwtstate = dst->lwtstate;
if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
- lwtstate->type > LWTUNNEL_ENCAP_MAX)
- return 0;
+ lwtstate->type > LWTUNNEL_ENCAP_MAX) {
+ ret = 0;
+ goto out;
+ }
ret = -EOPNOTSUPP;
rcu_read_lock();
@@ -364,10 +371,12 @@ int lwtunnel_output(struct net *net, struct sock
*sk, struct sk_buff *skb)
if (ret == -EOPNOTSUPP)
goto drop;
- return ret;
-
+ goto out;
drop:
kfree_skb(skb);
+out:
+ if (!in_softirq)
+ local_bh_enable();
return ret;
}
@@ -378,8 +387,13 @@ int lwtunnel_xmit(struct sk_buff *skb)
const struct lwtunnel_encap_ops *ops;
struct lwtunnel_state *lwtstate;
struct dst_entry *dst;
+ bool in_softirq;
int ret;
+ in_softirq = in_softirq();
+ if (!in_softirq)
+ local_bh_disable();
+
if (dev_xmit_recursion()) {
net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
__func__);
@@ -394,10 +408,11 @@ int lwtunnel_xmit(struct sk_buff *skb)
}
lwtstate = dst->lwtstate;
-
if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
- lwtstate->type > LWTUNNEL_ENCAP_MAX)
- return 0;
+ lwtstate->type > LWTUNNEL_ENCAP_MAX) {
+ ret = 0;
+ goto out;
+ }
ret = -EOPNOTSUPP;
rcu_read_lock();
@@ -412,10 +427,12 @@ int lwtunnel_xmit(struct sk_buff *skb)
if (ret == -EOPNOTSUPP)
goto drop;
- return ret;
-
+ goto out;
drop:
kfree_skb(skb);
+out:
+ if (!in_softirq)
+ local_bh_enable();
return ret;
}
@@ -428,6 +445,8 @@ int lwtunnel_input(struct sk_buff *skb)
struct dst_entry *dst;
int ret;
+ WARN_ON_ONCE(!in_softirq());
+
if (dev_xmit_recursion()) {
net_crit_ratelimited("%s(): recursion limit reached on datapath\n",
__func__);
@@ -440,8 +459,8 @@ int lwtunnel_input(struct sk_buff *skb)
ret = -EINVAL;
goto drop;
}
- lwtstate = dst->lwtstate;
+ lwtstate = dst->lwtstate;
if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
lwtstate->type > LWTUNNEL_ENCAP_MAX)
return 0;
@@ -460,10 +479,8 @@ int lwtunnel_input(struct sk_buff *skb)
goto drop;
return ret;
-
drop:
kfree_skb(skb);
-
return ret;
}
EXPORT_SYMBOL_GPL(lwtunnel_input);
Powered by blists - more mailing lists