Message-ID: <20260116113049.159824-1-p@1g4.org>
Date: Fri, 16 Jan 2026 11:30:56 +0000
From: Paul Moses <p@....org>
To: netdev@...r.kernel.org
Cc: Paul Moses <p@....org>
Subject: [PATCH net-next v1] net/sched: act_gate: pack schedule entries into a single blob
Store the gate schedule entries in a flexible array at the end of
struct tcf_gate_params, so each schedule takes a single allocation and
the entries sit contiguously for better cache locality. Convert the
timer, dump and parsing paths to index-based access, and reject
schedules whose summed intervals overflow the u64 cycle time.
Signed-off-by: Paul Moses <p@....org>
Depends-on: <20260116112522.159480-3-p@....org>
---
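Reviewer note, not part of the change itself: a minimal userspace sketch
of the layout this patch moves to (illustrative names only; plain calloc()
stands in for the kzalloc()/struct_size() calls used in the patch). It
shows the single-blob allocation of header plus entries and the
index-based wrap-around that replaces the linked-list walk.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Header and all schedule entries live in one allocation, addressed
 * by index, mirroring the new struct tcf_gate_params layout. */
struct gate_entry {
	uint8_t gate_state;
	uint32_t interval;
	int32_t ipv;
	int32_t maxoctets;
};

struct gate_params {
	uint32_t num_entries;
	struct gate_entry entries[];	/* flexible array member */
};

int main(void)
{
	uint32_t n = 3, i, next_idx;
	struct gate_params *p;

	/* one blob: header + n entries */
	p = calloc(1, sizeof(*p) + n * sizeof(p->entries[0]));
	if (!p)
		return 1;
	p->num_entries = n;
	for (i = 0; i < p->num_entries; i++)
		p->entries[i].interval = 1000u * (i + 1);

	/* index-based advance with wrap-around, as the timer path now does */
	next_idx = p->num_entries - 1;
	if (++next_idx >= p->num_entries)
		next_idx = 0;
	printf("after the last entry, next_idx wraps to %u\n",
	       (unsigned int)next_idx);

	free(p);
	return 0;
}
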
include/net/tc_act/tc_gate.h | 23 ++---
net/sched/act_gate.c | 184 +++++++++++++----------------------
2 files changed, 73 insertions(+), 134 deletions(-)
diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h
index 5fa6a500b9288..23510206b6c9e 100644
--- a/include/net/tc_act/tc_gate.h
+++ b/include/net/tc_act/tc_gate.h
@@ -14,13 +14,11 @@ struct action_gate_entry {
s32 maxoctets;
};
-struct tcfg_gate_entry {
- int index;
+struct gate_entry {
u8 gate_state;
u32 interval;
s32 ipv;
s32 maxoctets;
- struct list_head list;
};
struct tcf_gate_params {
@@ -30,9 +28,9 @@ struct tcf_gate_params {
u64 tcfg_cycletime_ext;
u32 tcfg_flags;
s32 tcfg_clockid;
- size_t num_entries;
- struct list_head entries;
+ u32 num_entries;
struct rcu_head rcu;
+ struct gate_entry entries[];
};
#define GATE_ACT_GATE_OPEN BIT(0)
@@ -45,7 +43,7 @@ struct tcf_gate {
ktime_t current_close_time;
u32 current_entry_octets;
s32 current_max_octets;
- struct tcfg_gate_entry *next_entry;
+ u32 next_idx;
struct hrtimer hitimer;
enum tk_offsets tk_offset;
};
@@ -128,7 +126,7 @@ static inline struct action_gate_entry
struct action_gate_entry *oe;
struct tcf_gate *gact = to_gate(a);
struct tcf_gate_params *p;
- struct tcfg_gate_entry *entry;
+ struct gate_entry *entry;
u32 num_entries;
int i = 0;
@@ -137,23 +135,18 @@ static inline struct action_gate_entry
lockdep_rtnl_is_held());
num_entries = p->num_entries;
- list_for_each_entry(entry, &p->entries, list)
- i++;
-
- if (i != num_entries)
- return NULL;
+ i = num_entries;
oe = kcalloc(num_entries, sizeof(*oe), GFP_ATOMIC);
if (!oe)
return NULL;
- i = 0;
- list_for_each_entry(entry, &p->entries, list) {
+ for (i = 0; i < num_entries; i++) {
+ entry = &p->entries[i];
oe[i].gate_state = entry->gate_state;
oe[i].interval = entry->interval;
oe[i].ipv = entry->ipv;
oe[i].maxoctets = entry->maxoctets;
- i++;
}
return oe;
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index 043ad856361d7..74394e3767387 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -72,14 +72,16 @@ static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
struct tcf_gate *gact = container_of(timer, struct tcf_gate,
hitimer);
struct tcf_gate_params *p;
- struct tcfg_gate_entry *next;
+ struct gate_entry *next;
+ u32 next_idx;
ktime_t close_time, now;
spin_lock(&gact->tcf_lock);
p = rcu_dereference_protected(gact->param,
lockdep_is_held(&gact->tcf_lock));
- next = gact->next_entry;
+ next_idx = gact->next_idx;
+ next = &p->entries[next_idx];
/* cycle start, clear pending bit, clear total octets */
gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0;
@@ -91,11 +93,9 @@ static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
close_time = gact->current_close_time;
- if (list_is_last(&next->list, &p->entries))
- next = list_first_entry(&p->entries,
- struct tcfg_gate_entry, list);
- else
- next = list_next_entry(next, list);
+ next_idx++;
+ if (next_idx >= p->num_entries)
+ next_idx = 0;
now = gate_get_time(gact);
@@ -109,7 +109,7 @@ static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
close_time = ktime_add_ns(base, (n + 1) * cycle);
}
- gact->next_entry = next;
+ gact->next_idx = next_idx;
hrtimer_set_expires(&gact->hitimer, close_time);
@@ -177,7 +177,7 @@ static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
[TCA_GATE_CLOCKID] = { .type = NLA_S32 },
};
-static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
+static int fill_gate_entry(struct nlattr **tb, struct gate_entry *entry,
struct netlink_ext_ack *extack)
{
u32 interval = 0;
@@ -202,8 +202,8 @@ static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
return 0;
}
-static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,
- int index, struct netlink_ext_ack *extack)
+static int parse_gate_entry(struct nlattr *n, struct gate_entry *entry,
+ struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { };
int err;
@@ -214,34 +214,20 @@ static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,
return -EINVAL;
}
- entry->index = index;
-
return fill_gate_entry(tb, entry, extack);
}
-static void release_entry_list(struct list_head *entries)
-{
- struct tcfg_gate_entry *entry, *e;
-
- list_for_each_entry_safe(entry, e, entries, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-}
-
static void tcf_gate_params_release(struct rcu_head *rcu)
{
struct tcf_gate_params *p = container_of(rcu, struct tcf_gate_params, rcu);
- release_entry_list(&p->entries);
kfree(p);
}
-static int parse_gate_list(struct nlattr *list_attr,
- struct tcf_gate_params *sched,
- struct netlink_ext_ack *extack)
+static int gate_setup_gate_list(struct nlattr *list_attr,
+ struct tcf_gate_params *p,
+ struct netlink_ext_ack *extack)
{
- struct tcfg_gate_entry *entry;
struct nlattr *n;
int err, rem;
int i = 0;
@@ -255,31 +241,13 @@ static int parse_gate_list(struct nlattr *list_attr,
continue;
}
- entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry) {
- NL_SET_ERR_MSG(extack, "Not enough memory for entry");
- err = -ENOMEM;
- goto release_list;
- }
-
- err = parse_gate_entry(n, entry, i, extack);
- if (err < 0) {
- kfree(entry);
- goto release_list;
- }
-
- list_add_tail(&entry->list, &sched->entries);
+ err = parse_gate_entry(n, &p->entries[i], extack);
+ if (err < 0)
+ return err;
i++;
}
- sched->num_entries = i;
-
- return i;
-
-release_list:
- release_entry_list(&sched->entries);
-
- return err;
+ return 0;
}
static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
@@ -304,13 +272,13 @@ static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
hrtimer_setup(&gact->hitimer, gate_timer_func, clockid, HRTIMER_MODE_ABS_SOFT);
}
-static int gate_calc_cycletime(struct list_head *entries, u64 *cycletime)
+static int gate_calc_cycletime(struct tcf_gate_params *p, u64 *cycletime)
{
- struct tcfg_gate_entry *entry;
u64 sum = 0;
+ u32 i;
- list_for_each_entry(entry, entries, list) {
- if (check_add_overflow(sum, (u64)entry->interval, &sum))
+ for (i = 0; i < p->num_entries; i++) {
+ if (check_add_overflow(sum, (u64)p->entries[i].interval, &sum))
return -EOVERFLOW;
}
@@ -332,20 +300,18 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
struct tcf_gate_params *p, *oldp;
struct tcf_gate *gact;
struct tc_gate *parm;
- struct tcf_gate_params newp = { };
ktime_t start;
u64 cycletime = 0, basetime = 0, cycletime_ext = 0;
enum tk_offsets tk_offset = TK_OFFS_TAI;
s32 clockid = CLOCK_TAI;
u32 gflags = 0;
u32 index;
+ u32 num_entries = 0;
s32 prio = -1;
bool bind = flags & TCA_ACT_FLAGS_BIND;
bool clockid_set = false;
int ret = 0, err;
- INIT_LIST_HEAD(&newp.entries);
-
if (!nla)
return -EINVAL;
@@ -395,12 +361,11 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
ret = ACT_P_CREATED;
gact = to_gate(*a);
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(struct_size(p, entries, 0), GFP_KERNEL);
if (!p) {
tcf_idr_release(*a, bind);
return -ENOMEM;
}
- INIT_LIST_HEAD(&p->entries);
rcu_assign_pointer(gact->param, p);
gate_setup_timer(gact, basetime, tk_offset, clockid, true);
} else {
@@ -469,16 +434,19 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
}
if (tb[TCA_GATE_ENTRY_LIST]) {
- INIT_LIST_HEAD(&newp.entries);
- err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], &newp, extack);
- if (err <= 0) {
- if (!err)
- NL_SET_ERR_MSG(extack,
- "Missing gate schedule (entry list)");
+ struct nlattr *n;
+ int rem;
+
+ nla_for_each_nested(n, tb[TCA_GATE_ENTRY_LIST], rem) {
+ if (nla_type(n) != TCA_GATE_ONE_ENTRY)
+ continue;
+ num_entries++;
+ }
+ if (!num_entries) {
+ NL_SET_ERR_MSG(extack, "Empty schedule entry list");
err = -EINVAL;
goto put_chain;
}
- newp.num_entries = err;
} else if (ret == ACT_P_CREATED) {
NL_SET_ERR_MSG(extack, "Missing schedule entry list");
err = -EINVAL;
@@ -493,39 +461,39 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
else if (ret != ACT_P_CREATED)
cycletime_ext = oldp->tcfg_cycletime_ext;
- if (!cycletime) {
- struct list_head *entries;
-
- if (!list_empty(&newp.entries))
- entries = &newp.entries;
- else if (ret != ACT_P_CREATED)
- entries = &oldp->entries;
- else
- entries = NULL;
+ if (ret != ACT_P_CREATED)
+ hrtimer_cancel(&gact->hitimer);
- if (!entries) {
- NL_SET_ERR_MSG(extack, "Invalid cycle time");
- err = -EINVAL;
- goto release_new_entries;
+ if (num_entries) {
+ p = kzalloc(struct_size(p, entries, num_entries), GFP_KERNEL);
+ if (!p) {
+ err = -ENOMEM;
+ goto put_chain;
}
+ p->num_entries = num_entries;
+ err = gate_setup_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
+ if (err < 0)
+ goto free_p;
+ } else {
+ num_entries = oldp->num_entries;
+ p = kzalloc(struct_size(p, entries, num_entries), GFP_KERNEL);
+ if (!p) {
+ err = -ENOMEM;
+ goto put_chain;
+ }
+ p->num_entries = num_entries;
+ memcpy(p->entries, oldp->entries,
+ flex_array_size(p, entries, num_entries));
+ }
- err = gate_calc_cycletime(entries, &cycletime);
+ if (!cycletime) {
+ err = gate_calc_cycletime(p, &cycletime);
if (err < 0) {
NL_SET_ERR_MSG(extack, "Invalid cycle time");
- goto release_new_entries;
+ goto free_p;
}
}
- if (ret != ACT_P_CREATED)
- hrtimer_cancel(&gact->hitimer);
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p) {
- err = -ENOMEM;
- goto release_new_entries;
- }
-
- INIT_LIST_HEAD(&p->entries);
p->tcfg_priority = prio;
p->tcfg_basetime = basetime;
p->tcfg_cycletime = cycletime;
@@ -533,31 +501,12 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
p->tcfg_flags = gflags;
p->tcfg_clockid = clockid;
- if (!list_empty(&newp.entries)) {
- list_splice_init(&newp.entries, &p->entries);
- p->num_entries = newp.num_entries;
- } else if (ret != ACT_P_CREATED) {
- struct tcfg_gate_entry *entry, *ne;
-
- list_for_each_entry(entry, &oldp->entries, list) {
- ne = kmemdup(entry, sizeof(*ne), GFP_KERNEL);
- if (!ne) {
- err = -ENOMEM;
- goto free_p;
- }
- INIT_LIST_HEAD(&ne->list);
- list_add_tail(&ne->list, &p->entries);
- }
- p->num_entries = oldp->num_entries;
- }
-
spin_lock_bh(&gact->tcf_lock);
gate_setup_timer(gact, basetime, tk_offset, clockid, ret == ACT_P_CREATED);
gate_get_start_time(gact, p, &start);
gact->current_close_time = start;
- gact->next_entry = list_first_entry(&p->entries,
- struct tcfg_gate_entry, list);
+ gact->next_idx = 0;
gact->current_entry_octets = 0;
gact->current_gate_status = GATE_ACT_PENDING;
@@ -580,8 +529,6 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
free_p:
kfree(p);
-release_new_entries:
- release_entry_list(&newp.entries);
put_chain:
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
@@ -589,7 +536,6 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
if (ret == ACT_P_CREATED) {
p = rcu_dereference_protected(gact->param, 1);
if (p) {
- release_entry_list(&p->entries);
kfree(p);
rcu_assign_pointer(gact->param, NULL);
}
@@ -611,7 +557,7 @@ static void tcf_gate_cleanup(struct tc_action *a)
}
static int dumping_entry(struct sk_buff *skb,
- struct tcfg_gate_entry *entry)
+ struct gate_entry *entry, u32 index)
{
struct nlattr *item;
@@ -619,7 +565,7 @@ static int dumping_entry(struct sk_buff *skb,
if (!item)
return -ENOSPC;
- if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
+ if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, index))
goto nla_put_failure;
if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
@@ -645,12 +591,12 @@ static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
struct tcf_gate *gact = to_gate(a);
- struct tcfg_gate_entry *entry;
struct tcf_gate_params *p;
struct nlattr *entry_list;
struct tc_gate opt = { };
struct tcf_t t;
unsigned char *b = skb_tail_pointer(skb);
+ u32 i;
spin_lock_bh(&gact->tcf_lock);
opt.index = gact->tcf_index;
@@ -689,8 +635,8 @@ static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
if (!entry_list)
goto nla_put_failure;
- list_for_each_entry(entry, &p->entries, list) {
- if (dumping_entry(skb, entry) < 0)
+ for (i = 0; i < p->num_entries; i++) {
+ if (dumping_entry(skb, &p->entries[i], i) < 0)
goto nla_put_failure;
}
--
2.52.GIT