[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260121131954.2710459-5-p@1g4.org>
Date: Wed, 21 Jan 2026 13:20:50 +0000
From: Paul Moses <p@....org>
To: netdev@...r.kernel.org
Cc: Jamal Hadi Salim <jhs@...atatu.com>, Cong Wang <xiyou.wangcong@...il.com>, Jiri Pirko <jiri@...nulli.us>, "David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, Simon Horman <horms@...nel.org>, linux-kernel@...r.kernel.org, Paul Moses <p@....org>, stable@...r.kernel.org
Subject: [PATCH net v3 4/7] net/sched: act_gate: read schedule via RCU
Switch dump/accessor reads to RCU read-side sections. This matches other
actions that read params under rcu_read_lock(), e.g. act_tunnel_key dump
(commit e97ae742972f6c), act_ctinfo dump (commit 799c94178cf9c9), and
act_skbedit dump (commit 1f376373bd225c).
The dump path reads tcf_action via READ_ONCE(), following the lockless action
reads used in act_sample (commit 5c5670fae43027) and act_gact.
Timer logic stays under tcf_lock and uses rcu_dereference_protected(), keeping
RCU readers cheap while preserving lock-serialized timer updates.
Fixes: a51c328df310 ("net: qos: introduce a gate control flow action")
Signed-off-by: Paul Moses <p@....org>
Cc: stable@...r.kernel.org
---
include/net/tc_act/tc_gate.h | 38 +++++++++++++++++++++++-------------
net/sched/act_gate.c | 32 +++++++++++++++---------------
2 files changed, 40 insertions(+), 30 deletions(-)
diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h
index 05968b3822392..9587d9e9fa38f 100644
--- a/include/net/tc_act/tc_gate.h
+++ b/include/net/tc_act/tc_gate.h
@@ -57,9 +57,10 @@ static inline s32 tcf_gate_prio(const struct tc_action *a)
s32 tcfg_prio;
struct tcf_gate_params *p;
- p = rcu_dereference_protected(to_gate(a)->param,
- lockdep_rtnl_is_held());
+ rcu_read_lock();
+ p = rcu_dereference(to_gate(a)->param);
tcfg_prio = p->tcfg_priority;
+ rcu_read_unlock();
return tcfg_prio;
}
@@ -69,9 +70,10 @@ static inline u64 tcf_gate_basetime(const struct tc_action *a)
u64 tcfg_basetime;
struct tcf_gate_params *p;
- p = rcu_dereference_protected(to_gate(a)->param,
- lockdep_rtnl_is_held());
+ rcu_read_lock();
+ p = rcu_dereference(to_gate(a)->param);
tcfg_basetime = p->tcfg_basetime;
+ rcu_read_unlock();
return tcfg_basetime;
}
@@ -81,9 +83,10 @@ static inline u64 tcf_gate_cycletime(const struct tc_action *a)
u64 tcfg_cycletime;
struct tcf_gate_params *p;
- p = rcu_dereference_protected(to_gate(a)->param,
- lockdep_rtnl_is_held());
+ rcu_read_lock();
+ p = rcu_dereference(to_gate(a)->param);
tcfg_cycletime = p->tcfg_cycletime;
+ rcu_read_unlock();
return tcfg_cycletime;
}
@@ -93,9 +96,10 @@ static inline u64 tcf_gate_cycletimeext(const struct tc_action *a)
u64 tcfg_cycletimeext;
struct tcf_gate_params *p;
- p = rcu_dereference_protected(to_gate(a)->param,
- lockdep_rtnl_is_held());
+ rcu_read_lock();
+ p = rcu_dereference(to_gate(a)->param);
tcfg_cycletimeext = p->tcfg_cycletime_ext;
+ rcu_read_unlock();
return tcfg_cycletimeext;
}
@@ -105,9 +109,10 @@ static inline u32 tcf_gate_num_entries(const struct tc_action *a)
u32 num_entries;
struct tcf_gate_params *p;
- p = rcu_dereference_protected(to_gate(a)->param,
- lockdep_rtnl_is_held());
+ rcu_read_lock();
+ p = rcu_dereference(to_gate(a)->param);
num_entries = p->num_entries;
+ rcu_read_unlock();
return num_entries;
}
@@ -121,19 +126,23 @@ static inline struct action_gate_entry
u32 num_entries;
int i = 0;
- p = rcu_dereference_protected(to_gate(a)->param,
- lockdep_rtnl_is_held());
+ rcu_read_lock();
+ p = rcu_dereference(to_gate(a)->param);
num_entries = p->num_entries;
list_for_each_entry(entry, &p->entries, list)
i++;
- if (i != num_entries)
+ if (i != num_entries) {
+ rcu_read_unlock();
return NULL;
+ }
oe = kcalloc(num_entries, sizeof(*oe), GFP_ATOMIC);
- if (!oe)
+ if (!oe) {
+ rcu_read_unlock();
return NULL;
+ }
i = 0;
list_for_each_entry(entry, &p->entries, list) {
@@ -143,6 +152,7 @@ static inline struct action_gate_entry
oe[i].maxoctets = entry->maxoctets;
i++;
}
+ rcu_read_unlock();
return oe;
}
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index 016708c10a8e0..da4802bbaf4ca 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -624,66 +624,66 @@ static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_gate *gact = to_gate(a);
- struct tc_gate opt = { };
struct tcfg_gate_entry *entry;
struct tcf_gate_params *p;
struct nlattr *entry_list;
+ struct tc_gate opt = { };
struct tcf_t t;
opt.index = gact->tcf_index;
opt.refcnt = refcount_read(&gact->tcf_refcnt) - ref;
opt.bindcnt = atomic_read(&gact->tcf_bindcnt) - bind;
- spin_lock_bh(&gact->tcf_lock);
- opt.action = gact->tcf_action;
-
- p = rcu_dereference_protected(gact->param,
- lockdep_is_held(&gact->tcf_lock));
+ opt.action = READ_ONCE(gact->tcf_action);
if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
+ rcu_read_lock();
+ p = rcu_dereference(gact->param);
+
if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
p->tcfg_basetime, TCA_GATE_PAD))
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
p->tcfg_cycletime, TCA_GATE_PAD))
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
p->tcfg_cycletime_ext, TCA_GATE_PAD))
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
if (!entry_list)
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
list_for_each_entry(entry, &p->entries, list) {
if (dumping_entry(skb, entry) < 0)
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
}
nla_nest_end(skb, entry_list);
+ rcu_read_unlock();
tcf_tm_dump(&t, &gact->tcf_tm);
if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
goto nla_put_failure;
- spin_unlock_bh(&gact->tcf_lock);
return skb->len;
+nla_put_failure_rcu:
+ rcu_read_unlock();
nla_put_failure:
- spin_unlock_bh(&gact->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
--
2.52.GIT
Powered by blists - more mailing lists