Message-Id: <20231219081453.718489-2-faizal.abdul.rahim@linux.intel.com>
Date: Tue, 19 Dec 2023 03:14:50 -0500
From: Faizal Rahim <faizal.abdul.rahim@...ux.intel.com>
To: Vladimir Oltean <vladimir.oltean@....com>,
Vinicius Costa Gomes <vinicius.gomes@...el.com>,
Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>,
Jiri Pirko <jiri@...nulli.us>,
"David S . Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 net 1/4] net/sched: taprio: fix too-early schedule switching
Switching schedules prematurely leads to a situation where the last
entry of the oper schedule is still running while q->oper_sched already
points to the new admin schedule. During this period, calls to
taprio_skb_exceeds_queue_max_sdu() on the enqueue path, for example
from taprio_enqueue_segmented(), inspect q->oper_sched, which at that
point refers to the new admin schedule instead of the still-running
oper schedule.
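
For illustration, the enqueue-path check looks roughly like the sketch
below (a simplified sketch, not the exact upstream code; only the
q->oper_sched dereference and the per-TC max_frm_len lookup matter
here):

static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch,
					     struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched;
	int tc = netdev_get_prio_tc_map(qdisc_dev(sch), skb->priority);
	bool exceeds = false;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	/* If the admin schedule was installed too early, max_frm_len[]
	 * here already belongs to the new schedule while the last entry
	 * of the old oper schedule is still running.
	 */
	if (sched && skb->len > sched->max_frm_len[tc])
		exceeds = true;
	rcu_read_unlock();

	return exceeds;
}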
Fixes: a878fd46fe43 ("net/sched: keep the max_frm_len information inside struct sched_gate_list")
Signed-off-by: Faizal Rahim <faizal.abdul.rahim@...ux.intel.com>
---
net/sched/sch_taprio.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
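
Note: the approach in the hunks below is to defer the actual pointer
swap. Reaching the admin schedule's base time now only marks the switch
as pending (via the new cycle_time_correction field), and the next
advance_sched() run performs the swap, gated by sched_switch_pending().
A condensed sketch of the changed logic (simplified from the hunks that
follow):

	/* top of advance_sched(): swap only once a switch is pending */
	if (!oper || sched_switch_pending(oper))
		switch_schedules(q, &admin, &oper);

	/* ... later, when the admin schedule's base time is reached ... */
	end_time = sched_base_time(admin);
	oper->cycle_time_correction = 0;	/* mark the switch as pending */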
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 31a8252bd09c..bbcaf05d40ba 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -41,6 +41,7 @@ static struct static_key_false taprio_have_working_mqprio;
#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
+#define CYCLE_TIME_CORRECTION_UNSPEC S64_MIN
struct sched_entry {
/* Durations between this GCL entry and the GCL entry where the
@@ -75,6 +76,7 @@ struct sched_gate_list {
ktime_t cycle_end_time;
s64 cycle_time;
s64 cycle_time_extension;
+ s64 cycle_time_correction;
s64 base_time;
};
@@ -213,6 +215,11 @@ static void switch_schedules(struct taprio_sched *q,
*admin = NULL;
}
+static bool sched_switch_pending(const struct sched_gate_list *oper)
+{
+ return oper->cycle_time_correction != CYCLE_TIME_CORRECTION_UNSPEC;
+}
+
/* Get how much time has been already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
@@ -940,7 +947,7 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
admin = rcu_dereference_protected(q->admin_sched,
lockdep_is_held(&q->current_entry_lock));
- if (!oper)
+ if (!oper || sched_switch_pending(oper))
switch_schedules(q, &admin, &oper);
/* This can happen in two cases: 1. this is the very first run
@@ -981,7 +988,7 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
* schedule runs.
*/
end_time = sched_base_time(admin);
- switch_schedules(q, &admin, &oper);
+ oper->cycle_time_correction = 0;
}
next->end_time = end_time;
@@ -1174,6 +1181,7 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
}
taprio_calculate_gate_durations(q, new);
+ new->cycle_time_correction = CYCLE_TIME_CORRECTION_UNSPEC;
return 0;
}
--
2.25.1