Message-ID: <20250328201634.3876474-2-tavip@google.com>
Date: Fri, 28 Mar 2025 13:16:32 -0700
From: Octavian Purdila <tavip@google.com>
To: jhs@mojatatu.com, xiyou.wangcong@gmail.com, jiri@resnulli.us
Cc: davem@davemloft.net, edumazet@google.com, kuba@kernel.org,
	pabeni@redhat.com, horms@kernel.org, shuah@kernel.org, netdev@vger.kernel.org,
	Octavian Purdila <tavip@google.com>
Subject: [PATCH net 1/3] net_sched: sch_sfq: use a temporary work area for
 validating configuration

Many configuration parameters influence others (e.g. divisor ->
flows -> limit, depth -> limit), so it is difficult to do all of the
validation correctly before applying the configuration. And if a
validation error is detected late, it is difficult to roll back a
partially applied configuration.

To avoid these issues, use a temporary work area to update and
validate the requested changes and only then commit them to the
qdisc's internal state.

Signed-off-by: Octavian Purdila <tavip@google.com>
---
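For reference, the pattern in miniature, as a standalone sketch: the
struct, field and function names below are illustrative stand-ins for
the sch_sfq ones, not the actual kernel code, and the validation rule
is simplified.

/* Stage the request in a scratch copy, validate, then commit. */
struct cfg {
	unsigned int divisor;	/* hash buckets */
	unsigned int maxflows;	/* derived: never above divisor */
	unsigned int limit;	/* derived: never above maxflows */
};

static int cfg_change(struct cfg *live, const struct cfg *req)
{
	struct cfg tmp = *live;	/* work area seeded from live state */

	/* update the work area only; zero means "leave unchanged" */
	if (req->divisor)
		tmp.divisor = req->divisor;
	if (req->maxflows)
		tmp.maxflows = req->maxflows;
	if (tmp.maxflows > tmp.divisor)
		tmp.maxflows = tmp.divisor;
	if (req->limit)
		tmp.limit = req->limit;

	/* validate the combined result; failing here leaves *live intact */
	if (!tmp.limit || tmp.limit > tmp.maxflows)
		return -1;

	*live = tmp;		/* commit: cannot fail past this point */
	return 0;
}
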
net/sched/sch_sfq.c | 60 ++++++++++++++++++++++++++++++++++++---------
1 file changed, 48 insertions(+), 12 deletions(-)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 65d5b59da583..027a3fde2139 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -631,6 +631,18 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
 	struct red_parms *p = NULL;
 	struct sk_buff *to_free = NULL;
 	struct sk_buff *tail = NULL;
+	/* work area for validating changes before committing them */
+	struct {
+		int limit;
+		unsigned int divisor;
+		unsigned int maxflows;
+		int perturb_period;
+		unsigned int quantum;
+		u8 headdrop;
+		u8 maxdepth;
+		u8 flags;
+	} tmp;
+
 	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
 		return -EINVAL;
 
@@ -656,36 +668,60 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
 		NL_SET_ERR_MSG_MOD(extack, "invalid limit");
 		return -EINVAL;
 	}
+
 	sch_tree_lock(sch);
+
+	/* copy configuration to work area */
+	tmp.limit = q->limit;
+	tmp.divisor = q->divisor;
+	tmp.headdrop = q->headdrop;
+	tmp.maxdepth = q->maxdepth;
+	tmp.maxflows = q->maxflows;
+	tmp.perturb_period = q->perturb_period;
+	tmp.quantum = q->quantum;
+	tmp.flags = q->flags;
+
+	/* update and validate configuration */
 	if (ctl->quantum)
-		q->quantum = ctl->quantum;
-	WRITE_ONCE(q->perturb_period, ctl->perturb_period * HZ);
+		tmp.quantum = ctl->quantum;
+	tmp.perturb_period = ctl->perturb_period * HZ;
 	if (ctl->flows)
-		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
+		tmp.maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
 	if (ctl->divisor) {
-		q->divisor = ctl->divisor;
-		q->maxflows = min_t(u32, q->maxflows, q->divisor);
+		tmp.divisor = ctl->divisor;
+		tmp.maxflows = min_t(u32, tmp.maxflows, tmp.divisor);
 	}
 	if (ctl_v1) {
 		if (ctl_v1->depth)
-			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
+			tmp.maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
 		if (p) {
-			swap(q->red_parms, p);
-			red_set_parms(q->red_parms,
+			red_set_parms(p,
 				      ctl_v1->qth_min, ctl_v1->qth_max,
 				      ctl_v1->Wlog,
 				      ctl_v1->Plog, ctl_v1->Scell_log,
 				      NULL,
 				      ctl_v1->max_P);
 		}
-		q->flags = ctl_v1->flags;
-		q->headdrop = ctl_v1->headdrop;
+		tmp.flags = ctl_v1->flags;
+		tmp.headdrop = ctl_v1->headdrop;
 	}
 	if (ctl->limit) {
-		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
-		q->maxflows = min_t(u32, q->maxflows, q->limit);
+		tmp.limit = min_t(u32, ctl->limit, tmp.maxdepth * tmp.maxflows);
+		tmp.maxflows = min_t(u32, tmp.maxflows, tmp.limit);
 	}
 
+	/* commit configuration, no failures allowed past this point */
+	q->limit = tmp.limit;
+	q->divisor = tmp.divisor;
+	q->headdrop = tmp.headdrop;
+	q->maxdepth = tmp.maxdepth;
+	q->maxflows = tmp.maxflows;
+	WRITE_ONCE(q->perturb_period, tmp.perturb_period);
+	q->quantum = tmp.quantum;
+	q->flags = tmp.flags;
+	if (p)
+		swap(q->red_parms, p);
+
 	qlen = sch->q.qlen;
 	while (sch->q.qlen > q->limit) {
 		dropped += sfq_drop(sch, &to_free);
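
The red_parms pointer gets the same discipline: the replacement block
is filled in on the side (red_set_parms() on p) and only swapped into
place once the whole configuration has been validated. In miniature,
with illustrative types and names rather than the kernel API:

#include <stdlib.h>

struct parms { int qth_min, qth_max; };

static int parms_change(struct parms **live, int qth_min, int qth_max)
{
	struct parms *p = calloc(1, sizeof(*p));

	if (!p)
		return -1;	/* fail before touching *live */

	/* fill the side copy; the live block stays untouched */
	p->qth_min = qth_min;
	p->qth_max = qth_max;

	/* ... validate everything else here; on error free(p) and
	 * return, with still nothing to roll back ... */

	/* commit: publish the new block, retire the old one */
	struct parms *old = *live;

	*live = p;
	free(old);
	return 0;
}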
--
2.49.0.472.ge94155a9ec-goog