Message-ID: <20250827125349.3505302-5-edumazet@google.com>
Date: Wed, 27 Aug 2025 12:53:49 +0000
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: Simon Horman <horms@...nel.org>, Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>, Jiri Pirko <jiri@...nulli.us>, netdev@...r.kernel.org,
eric.dumazet@...il.com, Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH net-next 4/4] net_sched: act_skbmod: use RCU in tcf_skbmod_dump()
Use RCU protection instead of tcf_lock in tcf_skbmod_dump().

Also store tcf_action into struct tcf_skbmod_params so that
tcf_skbmod_act() reads the action and the parameters from the same
RCU-protected snapshot, with no possible discrepancy between them.

No longer block BH in tcf_skbmod_init() when acquiring tcf_lock.
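
For context, here is a condensed sketch of the publish/read scheme the
patch relies on. It is simplified from the hunks below (allocation and
error handling trimmed, surrounding locking omitted) and is not meant
to build on its own or be the literal act_skbmod code:

/* Writer side (control path, as in tcf_skbmod_init()): build a new
 * params struct, publish it with rcu_assign_pointer(), and let RCU
 * reclaim the old one once all readers have moved on.
 */
struct tcf_skbmod_params *p, *p_old;

p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
	return -ENOMEM;
p->flags = lflags;
p->action = parm->action;	/* action travels with its parameters */

p_old = rcu_dereference_protected(d->skbmod_p, 1);
rcu_assign_pointer(d->skbmod_p, p);
if (p_old)
	kfree_rcu(p_old, rcu);

/* Reader side (as in tcf_skbmod_dump()): a plain RCU read section,
 * no tcf_lock and no BH blocking needed; action and flags are read
 * from one snapshot, so they can never disagree.
 */
rcu_read_lock();
p = rcu_dereference(d->skbmod_p);
opt.action = p->action;
opt.flags = p->flags;
rcu_read_unlock();
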
Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
include/net/tc_act/tc_skbmod.h | 1 +
net/sched/act_skbmod.c | 26 ++++++++++++--------------
2 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/include/net/tc_act/tc_skbmod.h b/include/net/tc_act/tc_skbmod.h
index 7c240d2fed4e3cdf686016588cd78eb52b80765b..626704cd6241b37f20539f9dd1270275ba19e578 100644
--- a/include/net/tc_act/tc_skbmod.h
+++ b/include/net/tc_act/tc_skbmod.h
@@ -12,6 +12,7 @@
struct tcf_skbmod_params {
struct rcu_head rcu;
u64 flags; /*up to 64 types of operations; extend if needed */
+ int action;
u8 eth_dst[ETH_ALEN];
u16 eth_type;
u8 eth_src[ETH_ALEN];
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index dc022969346188c17a43f3ef40f3c203272954c4..fce625eafcb2b793cba7bebb740b136bf8498aa1 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -27,19 +27,18 @@ TC_INDIRECT_SCOPE int tcf_skbmod_act(struct sk_buff *skb,
struct tcf_result *res)
{
struct tcf_skbmod *d = to_skbmod(a);
- int action, max_edit_len, err;
struct tcf_skbmod_params *p;
+ int max_edit_len, err;
u64 flags;
tcf_lastuse_update(&d->tcf_tm);
bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
- action = READ_ONCE(d->tcf_action);
- if (unlikely(action == TC_ACT_SHOT))
+ p = rcu_dereference_bh(d->skbmod_p);
+ if (unlikely(p->action == TC_ACT_SHOT))
goto drop;
max_edit_len = skb_mac_header_len(skb);
- p = rcu_dereference_bh(d->skbmod_p);
flags = p->flags;
/* tcf_skbmod_init() guarantees "flags" to be one of the following:
@@ -85,7 +84,7 @@ TC_INDIRECT_SCOPE int tcf_skbmod_act(struct sk_buff *skb,
INET_ECN_set_ce(skb);
out:
- return action;
+ return p->action;
drop:
qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
@@ -193,9 +192,9 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
}
p->flags = lflags;
-
+ p->action = parm->action;
if (ovr)
- spin_lock_bh(&d->tcf_lock);
+ spin_lock(&d->tcf_lock);
/* Protected by tcf_lock if overwriting existing action. */
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
p_old = rcu_dereference_protected(d->skbmod_p, 1);
@@ -209,7 +208,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
rcu_assign_pointer(d->skbmod_p, p);
if (ovr)
- spin_unlock_bh(&d->tcf_lock);
+ spin_unlock(&d->tcf_lock);
if (p_old)
kfree_rcu(p_old, rcu);
@@ -248,10 +247,9 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
opt.index = d->tcf_index;
opt.refcnt = refcount_read(&d->tcf_refcnt) - ref;
opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind;
- spin_lock_bh(&d->tcf_lock);
- opt.action = d->tcf_action;
- p = rcu_dereference_protected(d->skbmod_p,
- lockdep_is_held(&d->tcf_lock));
+ rcu_read_lock();
+ p = rcu_dereference(d->skbmod_p);
+ opt.action = p->action;
opt.flags = p->flags;
if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -269,10 +267,10 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
goto nla_put_failure;
- spin_unlock_bh(&d->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&d->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
--
2.51.0.261.g7ce5a0a67e-goog