Message-Id: <20190410003305.24646-4-vinicius.gomes@intel.com>
Date: Tue, 9 Apr 2019 17:33:02 -0700
From: Vinicius Costa Gomes <vinicius.gomes@...el.com>
To: netdev@...r.kernel.org
Cc: Vinicius Costa Gomes <vinicius.gomes@...el.com>, jhs@...atatu.com,
xiyou.wangcong@...il.com, jiri@...nulli.us, olteanv@...il.com,
timo.koskiahde@...ech.com, m-karicheri2@...com
Subject: [RFC net-next v1 3/6] taprio: Add support for setting the cycle-time manually

IEEE 802.1Q-2018 specifies that the cycle-time of a schedule may be
overridden, in which case the schedule is truncated to the configured
"width".
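
For example (illustrative values), take a schedule with two entries
of 300us each, so the intervals sum to 600us, and override the
cycle-time to 500us. The second entry is cut short and the cycle
restarts from the first entry at the 500us mark:

    entry 0: S 01 300000   -> gates active [0us, 300us)
    entry 1: S 02 300000   -> gates active [300us, 500us), truncated

If no cycle-time is specified, it defaults to the sum of all the
intervals of the entries, which preserves the current behavior.
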
Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@...el.com>
---
 include/uapi/linux/pkt_sched.h |  1 +
 net/sched/sch_taprio.c         | 56 ++++++++++++++++++++++++++++------
 2 files changed, 47 insertions(+), 10 deletions(-)

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index d59770d0eb84..7a32276838e1 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1167,6 +1167,7 @@ enum {
 	TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
 	TCA_TAPRIO_PAD,
 	TCA_TAPRIO_ATTR_ADMIN_SCHED, /* The admin sched, only used in dump */
+	TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, /* s64 */
 	__TCA_TAPRIO_ATTR_MAX,
 };
 
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index af12353232a5..70bba9782449 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -42,6 +42,8 @@ struct sched_gate_list {
 	struct rcu_head rcu;
 	struct list_head entries;
 	size_t num_entries;
+	ktime_t cycle_close_time;
+	s64 cycle_time;
 	s64 base_time;
 };
 
@@ -250,7 +252,13 @@ static bool should_restart_cycle(const struct sched_gate_list *oper,
 {
 	WARN_ON(!entry);
 
-	return list_is_last(&entry->list, &oper->entries);
+	if (list_is_last(&entry->list, &oper->entries))
+		return true;
+
+	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
+		return true;
+
+	return false;
 }
 
 static bool should_change_schedules(const struct sched_gate_list *admin,
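
The cycle now restarts in two cases: the running entry is the last
one in the list, or its close_time lands exactly on cycle_close_time.
The equality test covers schedules whose cycle-time truncates a
mid-list entry, and it is reliable because advance_sched() clamps
every close_time to cycle_close_time before arming the hrtimer, as
traced after the next hunk.
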
@@ -306,13 +314,17 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 		goto first_run;
 	}
 
-	if (should_restart_cycle(oper, entry))
+	if (should_restart_cycle(oper, entry)) {
 		next = list_first_entry(&oper->entries, struct sched_entry,
 					list);
-	else
+		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
+						      oper->cycle_time);
+	} else {
 		next = list_next_entry(entry, list);
+	}
 
 	close_time = ktime_add_ns(entry->close_time, next->interval);
+	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);
 
 	if (should_change_schedules(admin, oper, close_time)) {
 		/* Set things so the next time this runs, the new
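
On a restart, cycle_close_time advances by one full cycle; otherwise
the clamp guarantees the timer never fires past the end of the
current cycle. Tracing the illustrative 500us override from the
commit message (base = 0, two 300us entries):

	/* Advancing from entry 0 at t = 300us:
	 *
	 *   nominal close of entry 1: 300us + 300us = 600us
	 *   clamped: min_t(ktime_t, 600us, 500us)   = 500us
	 *
	 * The timer therefore fires at the 500us cycle boundary,
	 * should_restart_cycle() returns true, and cycle_close_time
	 * advances to 1000us for the next cycle.
	 */
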
@@ -358,6 +370,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
 	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
 	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
 	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
+	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
 };
 
 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
@@ -459,6 +472,9 @@ static int parse_taprio_schedule(struct nlattr **tb,
 	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
 		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
 
+	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
+		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
+
 	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
 		err = parse_sched_list(
 			tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
@@ -530,22 +546,32 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
 	return 0;
 }
 
+static ktime_t get_cycle_time(struct sched_gate_list *sched)
+{
+	struct sched_entry *entry;
+	ktime_t cycle = 0;
+
+	if (sched->cycle_time != 0)
+		return sched->cycle_time;
+
+	list_for_each_entry(entry, &sched->entries, list)
+		cycle = ktime_add_ns(cycle, entry->interval);
+
+	sched->cycle_time = cycle;
+
+	return cycle;
+}
+
 static ktime_t taprio_get_start_time(struct Qdisc *sch,
 				     struct sched_gate_list *sched)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
-	struct sched_entry *entry;
 	ktime_t now, base, cycle;
 	s64 n;
 
 	base = ns_to_ktime(sched->base_time);
-	cycle = 0;
-
-	/* Calculate the cycle_time, by summing all the intervals.
-	 */
-	list_for_each_entry(entry, &sched->entries, list)
-		cycle = ktime_add_ns(cycle, entry->interval);
+	cycle = get_cycle_time(sched);
 
 	if (!cycle)
 		return base;
 
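
get_cycle_time() gives a user-supplied cycle-time precedence and only
falls back to summing the intervals when none was set, caching the
result so the list walk happens at most once per schedule. A rough
sketch of the two cases, with illustrative values:

	/* Override present: returned as-is. */
	sched->cycle_time = 500000;
	cycle = get_cycle_time(sched);	/* -> 500000 */

	/* No override: the intervals (300000ns + 300000ns) are summed
	 * and the result is cached in sched->cycle_time.
	 */
	sched->cycle_time = 0;
	cycle = get_cycle_time(sched);	/* -> 600000 */
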
@@ -566,10 +592,16 @@ static void setup_first_close_time(struct taprio_sched *q,
 				   struct sched_gate_list *sched, ktime_t base)
 {
 	struct sched_entry *first;
+	ktime_t cycle;
 
 	first = list_first_entry(&sched->entries,
 				 struct sched_entry, list);
 
+	cycle = get_cycle_time(sched);
+
+	/* FIXME: find a better place to do this */
+	sched->cycle_close_time = ktime_add_ns(base, cycle);
+
 	first->close_time = ktime_add_ns(base, first->interval);
 	atomic_set(&first->budget,
 		   (first->interval * 1000) / q->picos_per_byte);
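
Seeding cycle_close_time to base + cycle means the first cycle
already honors the override; advance_sched() then pushes the boundary
forward by one cycle on every restart:

	/* With base = B and cycle-time = C, the cycle boundaries are
	 * B + C, B + 2C, B + 3C, ..., each produced by
	 * ktime_add_ns(oper->cycle_close_time, oper->cycle_time).
	 */
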
@@ -912,6 +944,10 @@ static int dump_schedule(struct sk_buff *msg,
 			 root->base_time, TCA_TAPRIO_PAD))
 		return -1;
 
+	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
+			root->cycle_time, TCA_TAPRIO_PAD))
+		return -1;
+
 	entry_list = nla_nest_start(msg, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
 	if (!entry_list)
 		goto error_nest;
--
2.21.0