Message-ID: <20260120004720.1886632-2-p@1g4.org>
Date: Tue, 20 Jan 2026 00:48:29 +0000
From: Paul Moses <p@....org>
To: netdev@...r.kernel.org
Cc: Jamal Hadi Salim <jhs@...atatu.com>, Cong Wang <xiyou.wangcong@...il.com>, Jiri Pirko <jiri@...nulli.us>, "David S . Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, Simon Horman <horms@...nel.org>, linux-kernel@...r.kernel.org, Paul Moses <p@....org>, stable@...r.kernel.org
Subject: [PATCH 1/2] net/sched: act_gate: fix schedule updates with RCU swap
Switch the act_gate parameters to an RCU-protected pointer and apply
schedule changes with a prepare-then-swap pattern. This avoids races
between the timer/data paths and configuration updates, and cancels the
hrtimer before swapping in a new schedule.

Previously, a gate action replace could free and swap schedules while the
hrtimer callback or data path was still dereferencing the old entries,
leaving a use-after-free window during updates. The deferred swap and RCU
free close that window. A reproducer is available on request.
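
For illustration, the replace path boils down to the shape below. This is
a simplified sketch only, not the exact patch code: allocation, validation,
field setup and timer re-arming are omitted, and the helper name
gate_swap_params_sketch() is hypothetical.

	/* Sketch (hypothetical helper) of the prepare-then-swap update:
	 * the new schedule is fully built before it becomes visible to
	 * the timer or the data path.
	 */
	static void gate_swap_params_sketch(struct tcf_gate *gact,
					    struct tcf_gate_params *new)
	{
		struct tcf_gate_params *old;

		/* Quiesce the timer so it cannot run against the old
		 * schedule while it is being replaced.
		 */
		hrtimer_cancel(&gact->hitimer);

		spin_lock_bh(&gact->tcf_lock);
		/* Publish the new schedule; concurrent readers see either
		 * the old or the new parameters, never a mix.
		 */
		old = rcu_replace_pointer(gact->param, new,
					  lockdep_is_held(&gact->tcf_lock));
		spin_unlock_bh(&gact->tcf_lock);

		/* Free the old schedule only after a grace period. */
		if (old)
			call_rcu(&old->rcu, tcf_gate_params_release);
	}
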
Also clear the params pointer on early errors for newly created actions to
avoid leaving a dangling reference.
Fixes: a51c328df310 ("net: qos: introduce a gate control flow action")
Cc: stable@...r.kernel.org
Signed-off-by: Paul Moses <p@....org>
---
 include/net/tc_act/tc_gate.h |  49 +++++-
 net/sched/act_gate.c         | 298 +++++++++++++++++++++++++++--------
 2 files changed, 270 insertions(+), 77 deletions(-)
diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h
index c1a67149c6b62..a2a24a62dff85 100644
--- a/include/net/tc_act/tc_gate.h
+++ b/include/net/tc_act/tc_gate.h
@@ -32,6 +32,7 @@ struct tcf_gate_params {
s32 tcfg_clockid;
size_t num_entries;
struct list_head entries;
+ struct rcu_head rcu;
};
#define GATE_ACT_GATE_OPEN BIT(0)
@@ -39,7 +40,7 @@ struct tcf_gate_params {
struct tcf_gate {
struct tc_action common;
- struct tcf_gate_params param;
+ struct tcf_gate_params __rcu *param;
u8 current_gate_status;
ktime_t current_close_time;
u32 current_entry_octets;
@@ -53,45 +54,75 @@ struct tcf_gate {
static inline s32 tcf_gate_prio(const struct tc_action *a)
{
+ struct tcf_gate *gact = to_gate(a);
+ struct tcf_gate_params *p;
s32 tcfg_prio;
- tcfg_prio = to_gate(a)->param.tcfg_priority;
+ p = rcu_dereference_protected(gact->param,
+ lockdep_is_held(&a->tcfa_lock) ||
+ lockdep_is_held(&gact->tcf_lock) ||
+ lockdep_rtnl_is_held());
+ tcfg_prio = p->tcfg_priority;
return tcfg_prio;
}
static inline u64 tcf_gate_basetime(const struct tc_action *a)
{
+ struct tcf_gate *gact = to_gate(a);
+ struct tcf_gate_params *p;
u64 tcfg_basetime;
- tcfg_basetime = to_gate(a)->param.tcfg_basetime;
+ p = rcu_dereference_protected(gact->param,
+ lockdep_is_held(&a->tcfa_lock) ||
+ lockdep_is_held(&gact->tcf_lock) ||
+ lockdep_rtnl_is_held());
+ tcfg_basetime = p->tcfg_basetime;
return tcfg_basetime;
}
static inline u64 tcf_gate_cycletime(const struct tc_action *a)
{
+ struct tcf_gate *gact = to_gate(a);
+ struct tcf_gate_params *p;
u64 tcfg_cycletime;
- tcfg_cycletime = to_gate(a)->param.tcfg_cycletime;
+ p = rcu_dereference_protected(gact->param,
+ lockdep_is_held(&a->tcfa_lock) ||
+ lockdep_is_held(&gact->tcf_lock) ||
+ lockdep_rtnl_is_held());
+ tcfg_cycletime = p->tcfg_cycletime;
return tcfg_cycletime;
}
static inline u64 tcf_gate_cycletimeext(const struct tc_action *a)
{
+ struct tcf_gate *gact = to_gate(a);
+ struct tcf_gate_params *p;
u64 tcfg_cycletimeext;
- tcfg_cycletimeext = to_gate(a)->param.tcfg_cycletime_ext;
+ p = rcu_dereference_protected(gact->param,
+ lockdep_is_held(&a->tcfa_lock) ||
+ lockdep_is_held(&gact->tcf_lock) ||
+ lockdep_rtnl_is_held());
+ tcfg_cycletimeext = p->tcfg_cycletime_ext;
return tcfg_cycletimeext;
}
static inline u32 tcf_gate_num_entries(const struct tc_action *a)
{
+ struct tcf_gate *gact = to_gate(a);
+ struct tcf_gate_params *p;
u32 num_entries;
- num_entries = to_gate(a)->param.num_entries;
+ p = rcu_dereference_protected(gact->param,
+ lockdep_is_held(&a->tcfa_lock) ||
+ lockdep_is_held(&gact->tcf_lock) ||
+ lockdep_rtnl_is_held());
+ num_entries = p->num_entries;
return num_entries;
}
@@ -100,12 +131,16 @@ static inline struct action_gate_entry
*tcf_gate_get_list(const struct tc_action *a)
{
struct action_gate_entry *oe;
+ struct tcf_gate *gact = to_gate(a);
struct tcf_gate_params *p;
struct tcfg_gate_entry *entry;
u32 num_entries;
int i = 0;
- p = &to_gate(a)->param;
+ p = rcu_dereference_protected(gact->param,
+ lockdep_is_held(&a->tcfa_lock) ||
+ lockdep_is_held(&gact->tcf_lock) ||
+ lockdep_rtnl_is_held());
num_entries = p->num_entries;
list_for_each_entry(entry, &p->entries, list)
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index c1f75f2727576..3ee07c3deaf97 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/limits.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
@@ -32,9 +33,10 @@ static ktime_t gate_get_time(struct tcf_gate *gact)
return KTIME_MAX;
}
-static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
+static void gate_get_start_time(struct tcf_gate *gact,
+ struct tcf_gate_params *param,
+ ktime_t *start)
{
- struct tcf_gate_params *param = &gact->param;
ktime_t now, base, cycle;
u64 n;
@@ -69,12 +71,14 @@ static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
struct tcf_gate *gact = container_of(timer, struct tcf_gate,
hitimer);
- struct tcf_gate_params *p = &gact->param;
+ struct tcf_gate_params *p;
struct tcfg_gate_entry *next;
ktime_t close_time, now;
spin_lock(&gact->tcf_lock);
+ p = rcu_dereference_protected(gact->param,
+ lockdep_is_held(&gact->tcf_lock));
next = gact->next_entry;
/* cycle start, clear pending bit, clear total octets */
@@ -225,6 +229,14 @@ static void release_entry_list(struct list_head *entries)
}
}
+static void tcf_gate_params_release(struct rcu_head *rcu)
+{
+ struct tcf_gate_params *p = container_of(rcu, struct tcf_gate_params, rcu);
+
+ release_entry_list(&p->entries);
+ kfree(p);
+}
+
static int parse_gate_list(struct nlattr *list_attr,
struct tcf_gate_params *sched,
struct netlink_ext_ack *extack)
@@ -270,24 +282,12 @@ static int parse_gate_list(struct nlattr *list_attr,
return err;
}
-static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
- enum tk_offsets tko, s32 clockid,
- bool do_init)
+static void gate_setup_timer(struct tcf_gate *gact,
+ enum tk_offsets tko, s32 clockid)
{
- if (!do_init) {
- if (basetime == gact->param.tcfg_basetime &&
- tko == gact->tk_offset &&
- clockid == gact->param.tcfg_clockid)
- return;
-
- spin_unlock_bh(&gact->tcf_lock);
- hrtimer_cancel(&gact->hitimer);
- spin_lock_bh(&gact->tcf_lock);
- }
- gact->param.tcfg_basetime = basetime;
- gact->param.tcfg_clockid = clockid;
gact->tk_offset = tko;
- hrtimer_setup(&gact->hitimer, gate_timer_func, clockid, HRTIMER_MODE_ABS_SOFT);
+ hrtimer_setup(&gact->hitimer, gate_timer_func, clockid,
+ HRTIMER_MODE_ABS_SOFT);
}
static int tcf_gate_init(struct net *net, struct nlattr *nla,
@@ -296,20 +296,26 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);
- enum tk_offsets tk_offset = TK_OFFS_TAI;
- bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_GATE_MAX + 1];
struct tcf_chain *goto_ch = NULL;
- u64 cycletime = 0, basetime = 0;
- struct tcf_gate_params *p;
- s32 clockid = CLOCK_TAI;
+ struct tcf_gate_params *p, *oldp;
struct tcf_gate *gact;
struct tc_gate *parm;
- int ret = 0, err;
- u32 gflags = 0;
- s32 prio = -1;
+ struct tcf_gate_params newp = { };
ktime_t start;
+ u64 cycletime = 0, basetime = 0, cycletime_ext = 0;
+ enum tk_offsets tk_offset = TK_OFFS_TAI;
+ s32 clockid = CLOCK_TAI;
+ u32 gflags = 0;
u32 index;
+ s32 prio = -1;
+ bool bind = flags & TCA_ACT_FLAGS_BIND;
+ bool clockid_set = false;
+ bool setup_timer = false;
+ bool update_timer = false;
+ int ret = 0, err;
+
+ INIT_LIST_HEAD(&newp.entries);
if (!nla)
return -EINVAL;
@@ -323,6 +329,7 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
if (tb[TCA_GATE_CLOCKID]) {
clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
+ clockid_set = true;
switch (clockid) {
case CLOCK_REALTIME:
tk_offset = TK_OFFS_REAL;
@@ -349,9 +356,6 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
if (err < 0)
return err;
- if (err && bind)
- return ACT_P_BOUND;
-
if (!err) {
ret = tcf_idr_create_from_flags(tn, index, est, a,
&act_gate_ops, bind, flags);
@@ -361,94 +365,245 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
}
ret = ACT_P_CREATED;
- } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
- tcf_idr_release(*a, bind);
- return -EEXIST;
+ gact = to_gate(*a);
+ } else {
+ if (bind)
+ return ACT_P_BOUND;
+
+ if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
+ tcf_idr_release(*a, bind);
+ return -EEXIST;
+ }
+ gact = to_gate(*a);
}
+ if (ret != ACT_P_CREATED)
+ oldp = rcu_dereference_protected(gact->param,
+ lockdep_is_held(&gact->common.tcfa_lock) ||
+ lockdep_is_held(&gact->tcf_lock) ||
+ lockdep_rtnl_is_held());
+ else
+ oldp = NULL;
+
if (tb[TCA_GATE_PRIORITY])
prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);
+ else if (ret != ACT_P_CREATED)
+ prio = oldp->tcfg_priority;
- if (tb[TCA_GATE_BASE_TIME])
+ if (tb[TCA_GATE_BASE_TIME]) {
basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);
+ if (basetime > (u64)S64_MAX) {
+ NL_SET_ERR_MSG(extack, "Base time out of range");
+ err = -EINVAL;
+ goto release_idr;
+ }
+ } else if (ret != ACT_P_CREATED) {
+ basetime = oldp->tcfg_basetime;
+ }
if (tb[TCA_GATE_FLAGS])
gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);
-
- gact = to_gate(*a);
- if (ret == ACT_P_CREATED)
- INIT_LIST_HEAD(&gact->param.entries);
+ else if (ret != ACT_P_CREATED)
+ gflags = oldp->tcfg_flags;
err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
if (err < 0)
goto release_idr;
- spin_lock_bh(&gact->tcf_lock);
- p = &gact->param;
+ if (!clockid_set) {
+ if (ret != ACT_P_CREATED)
+ clockid = oldp->tcfg_clockid;
+ else
+ clockid = CLOCK_TAI;
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ tk_offset = TK_OFFS_REAL;
+ break;
+ case CLOCK_MONOTONIC:
+ tk_offset = TK_OFFS_MAX;
+ break;
+ case CLOCK_BOOTTIME:
+ tk_offset = TK_OFFS_BOOT;
+ break;
+ case CLOCK_TAI:
+ tk_offset = TK_OFFS_TAI;
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
+ err = -EINVAL;
+ goto put_chain;
+ }
+ }
- if (tb[TCA_GATE_CYCLE_TIME])
+ if (ret == ACT_P_CREATED)
+ update_timer = true;
+ else if (basetime != oldp->tcfg_basetime ||
+ tk_offset != gact->tk_offset ||
+ clockid != oldp->tcfg_clockid)
+ update_timer = true;
+
+ if (ret == ACT_P_CREATED)
+ setup_timer = true;
+ else if (clockid != oldp->tcfg_clockid)
+ setup_timer = true;
+
+ if (tb[TCA_GATE_CYCLE_TIME]) {
cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
+ if (cycletime > (u64)S64_MAX) {
+ NL_SET_ERR_MSG(extack, "Cycle time out of range");
+ err = -EINVAL;
+ goto put_chain;
+ }
+ }
if (tb[TCA_GATE_ENTRY_LIST]) {
- err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
- if (err < 0)
- goto chain_put;
+ err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], &newp, extack);
+ if (err <= 0) {
+ if (!err)
+ NL_SET_ERR_MSG(extack,
+ "Missing gate schedule (entry list)");
+ err = -EINVAL;
+ goto put_chain;
+ }
+ newp.num_entries = err;
+ } else if (ret == ACT_P_CREATED) {
+ NL_SET_ERR_MSG(extack, "Missing schedule entry list");
+ err = -EINVAL;
+ goto put_chain;
}
+ if (tb[TCA_GATE_CYCLE_TIME_EXT])
+ cycletime_ext = nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
+ else if (ret != ACT_P_CREATED)
+ cycletime_ext = oldp->tcfg_cycletime_ext;
+
if (!cycletime) {
struct tcfg_gate_entry *entry;
- ktime_t cycle = 0;
+ struct list_head *entries;
+ u64 cycle = 0;
+
+ if (!list_empty(&newp.entries))
+ entries = &newp.entries;
+ else if (ret != ACT_P_CREATED)
+ entries = &oldp->entries;
+ else
+ entries = NULL;
+
+ if (!entries) {
+ NL_SET_ERR_MSG(extack, "Invalid cycle time");
+ err = -EINVAL;
+ goto release_new_entries;
+ }
+
+ list_for_each_entry(entry, entries, list) {
+ if (entry->interval > (u64)S64_MAX) {
+ NL_SET_ERR_MSG(extack,
+ "Cycle time out of range");
+ err = -EINVAL;
+ goto release_new_entries;
+ }
+ if (cycle > (u64)S64_MAX - entry->interval) {
+ NL_SET_ERR_MSG(extack,
+ "Cycle time out of range");
+ err = -EINVAL;
+ goto release_new_entries;
+ }
+ cycle += entry->interval;
+ }
- list_for_each_entry(entry, &p->entries, list)
- cycle = ktime_add_ns(cycle, entry->interval);
cycletime = cycle;
if (!cycletime) {
+ NL_SET_ERR_MSG(extack, "Invalid cycle time");
err = -EINVAL;
- goto chain_put;
+ goto release_new_entries;
}
}
- p->tcfg_cycletime = cycletime;
- if (tb[TCA_GATE_CYCLE_TIME_EXT])
- p->tcfg_cycletime_ext =
- nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
+ if (ret != ACT_P_CREATED &&
+ (tb[TCA_GATE_ENTRY_LIST] || tb[TCA_GATE_CYCLE_TIME] ||
+ cycletime != oldp->tcfg_cycletime))
+ update_timer = true;
- gate_setup_timer(gact, basetime, tk_offset, clockid,
- ret == ACT_P_CREATED);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ err = -ENOMEM;
+ goto release_new_entries;
+ }
+
+ INIT_LIST_HEAD(&p->entries);
p->tcfg_priority = prio;
+ p->tcfg_basetime = basetime;
+ p->tcfg_cycletime = cycletime;
+ p->tcfg_cycletime_ext = cycletime_ext;
p->tcfg_flags = gflags;
- gate_get_start_time(gact, &start);
+ p->tcfg_clockid = clockid;
+
+ if (!list_empty(&newp.entries)) {
+ list_splice_init(&newp.entries, &p->entries);
+ p->num_entries = newp.num_entries;
+ } else if (ret != ACT_P_CREATED) {
+ struct tcfg_gate_entry *entry, *ne;
+
+ list_for_each_entry(entry, &oldp->entries, list) {
+ ne = kmemdup(entry, sizeof(*ne), GFP_KERNEL);
+ if (!ne) {
+ err = -ENOMEM;
+ goto free_p;
+ }
+ INIT_LIST_HEAD(&ne->list);
+ list_add_tail(&ne->list, &p->entries);
+ }
+ p->num_entries = oldp->num_entries;
+ }
- gact->current_close_time = start;
- gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;
+ if (update_timer && ret != ACT_P_CREATED)
+ hrtimer_cancel(&gact->hitimer);
+
+ spin_lock_bh(&gact->tcf_lock);
+ if (setup_timer)
+ gate_setup_timer(gact, tk_offset, clockid);
+ gate_get_start_time(gact, p, &start);
+ gact->current_close_time = start;
gact->next_entry = list_first_entry(&p->entries,
struct tcfg_gate_entry, list);
+ gact->current_entry_octets = 0;
+ gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
gate_start_timer(gact, start);
+ oldp = rcu_replace_pointer(gact->param, p,
+ lockdep_is_held(&gact->tcf_lock));
+
spin_unlock_bh(&gact->tcf_lock);
+ if (oldp)
+ call_rcu(&oldp->rcu, tcf_gate_params_release);
+
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
return ret;
-chain_put:
- spin_unlock_bh(&gact->tcf_lock);
-
+free_p:
+ release_entry_list(&p->entries);
+ kfree(p);
+release_new_entries:
+ release_entry_list(&newp.entries);
+put_chain:
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
release_idr:
- /* action is not inserted in any list: it's safe to init hitimer
- * without taking tcf_lock.
- */
- if (ret == ACT_P_CREATED)
- gate_setup_timer(gact, gact->param.tcfg_basetime,
- gact->tk_offset, gact->param.tcfg_clockid,
- true);
+ if (ret == ACT_P_CREATED) {
+ p = rcu_dereference_protected(gact->param, 1);
+ if (p) {
+ release_entry_list(&p->entries);
+ kfree(p);
+ rcu_assign_pointer(gact->param, NULL);
+ }
+ }
tcf_idr_release(*a, bind);
return err;
}
@@ -458,9 +613,11 @@ static void tcf_gate_cleanup(struct tc_action *a)
struct tcf_gate *gact = to_gate(a);
struct tcf_gate_params *p;
- p = &gact->param;
hrtimer_cancel(&gact->hitimer);
- release_entry_list(&p->entries);
+
+ p = rcu_dereference_protected(gact->param, 1);
+ if (p)
+ call_rcu(&p->rcu, tcf_gate_params_release);
}
static int dumping_entry(struct sk_buff *skb,
@@ -512,7 +669,8 @@ static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
spin_lock_bh(&gact->tcf_lock);
opt.action = gact->tcf_action;
- p = &gact->param;
+ p = rcu_dereference_protected(gact->param,
+ lockdep_is_held(&gact->tcf_lock));
if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
--
2.52.GIT