Message-Id: <20190313224312.6735-2-leandro.maciel.dorileo@intel.com>
Date: Wed, 13 Mar 2019 15:43:11 -0700
From: Leandro Dorileo <leandro.maciel.dorileo@...el.com>
To: netdev@...r.kernel.org
Cc: Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>,
Jiri Pirko <jiri@...nulli.us>,
"David S . Miller" <davem@...emloft.net>,
Vinicius Costa Gomes <vinicius.gomes@...el.com>
Subject: [PATCH net 1/2] net/sched: taprio: fix picos_per_byte miscalculation

The Time Aware Priority Scheduler is heavily dependent on the link speed:
it relies on it to calculate the number of bytes that can be transmitted
per cycle. If the device has failed to report its link speed we can't
properly calculate the so-called budget, and in that case we must not
dequeue packets based on a wrong budget.

This patch makes sure dequeue fails when: 1) __ethtool_get_link_ksettings()
reports an error, or 2) the ethernet driver failed to set the ksettings'
speed value (leaving the link speed as SPEED_UNKNOWN).

Additionally, the budget is recalculated whenever the link speed changes.
Fixes: 5a781ccbd19e4 ("tc: Add support for configuring the taprio scheduler")
Signed-off-by: Leandro Dorileo <leandro.maciel.dorileo@...el.com>
---
net/sched/sch_taprio.c | 90 ++++++++++++++++++++++++++++++++++--------
1 file changed, 74 insertions(+), 16 deletions(-)
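
[Not part of the patch -- a quick user-space sketch of the math above, for
reviewers. The helper names picos_per_byte() and entry_budget() are made up
for illustration; the constants mirror the ones used in sch_taprio.c. For
SPEED_1000 this gives 8000 ps per byte, so a 100 us gate interval yields a
budget of 12500 bytes.]

#include <stdio.h>
#include <stdint.h>

/* picoseconds needed to transmit one byte at the given link speed (Mbit/s) */
static int64_t picos_per_byte(int64_t speed_mbps)
{
	if (speed_mbps <= 0)		/* SPEED_UNKNOWN -> no dequeue */
		return -1;
	/* 1e9 ns/s * 1000 ps/ns * 8 bits, divided by the speed in bit/s */
	return (1000000000LL * 1000 * 8) / (speed_mbps * 1000 * 1000);
}

/* bytes that fit into a gate interval given in nanoseconds */
static int64_t entry_budget(int64_t interval_ns, int64_t ppb)
{
	return (interval_ns * 1000) / ppb;
}

int main(void)
{
	int64_t ppb = picos_per_byte(1000);	/* SPEED_1000 */

	printf("picos_per_byte: %lld\n", (long long)ppb);	/* 8000 */
	printf("budget(100us):  %lld\n",
	       (long long)entry_budget(100000, ppb));		/* 12500 */
	return 0;
}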
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 206e4dbed12f..20c7a6779cc9 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -20,6 +20,9 @@
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
+static LIST_HEAD(taprio_list);
+static DEFINE_SPINLOCK(taprio_list_lock);
+
#define TAPRIO_ALL_GATES_OPEN -1
struct sched_entry {
@@ -42,9 +45,9 @@ struct taprio_sched {
struct Qdisc *root;
s64 base_time;
int clockid;
- int picos_per_byte; /* Using picoseconds because for 10Gbps+
- * speeds it's sub-nanoseconds per byte
- */
+ atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
+ * speeds it's sub-nanoseconds per byte
+ */
size_t num_entries;
/* Protects the update side of the RCU protected current_entry */
@@ -53,6 +56,7 @@ struct taprio_sched {
struct list_head entries;
ktime_t (*get_time)(void);
struct hrtimer advance_timer;
+ struct list_head taprio_list;
};
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -117,7 +121,7 @@ static struct sk_buff *taprio_peek(struct Qdisc *sch)
static inline int length_to_duration(struct taprio_sched *q, int len)
{
- return (len * q->picos_per_byte) / 1000;
+ return (len * atomic64_read(&q->picos_per_byte)) / 1000;
}
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
@@ -129,6 +133,11 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
u32 gate_mask;
int i;
+ if (atomic64_read(&q->picos_per_byte) == -1) {
+ WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
+ return NULL;
+ }
+
rcu_read_lock();
entry = rcu_dereference(q->current_entry);
/* if there's no entry, it means that the schedule didn't
@@ -233,7 +242,7 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
next->close_time = close_time;
atomic_set(&next->budget,
- (next->interval * 1000) / q->picos_per_byte);
+ (next->interval * 1000) / atomic64_read(&q->picos_per_byte));
first_run:
rcu_assign_pointer(q->current_entry, next);
@@ -567,7 +576,8 @@ static void taprio_start_sched(struct Qdisc *sch, ktime_t start)
first->close_time = ktime_add_ns(start, first->interval);
atomic_set(&first->budget,
- (first->interval * 1000) / q->picos_per_byte);
+ (first->interval * 1000) /
+ atomic64_read(&q->picos_per_byte));
rcu_assign_pointer(q->current_entry, NULL);
spin_unlock_irqrestore(&q->current_entry_lock, flags);
@@ -575,6 +585,45 @@ static void taprio_start_sched(struct Qdisc *sch, ktime_t start)
hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}
+static void taprio_set_picos_per_byte(struct net_device *dev,
+ struct taprio_sched *q)
+{
+ struct ethtool_link_ksettings ecmd;
+ int picos_per_byte = -1;
+
+ if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
+ ecmd.base.speed != SPEED_UNKNOWN)
+ picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
+ ecmd.base.speed * 1000 * 1000);
+
+ atomic64_set(&q->picos_per_byte, picos_per_byte);
+ pr_info("taprio: set %s's picos_per_byte to: %llu, linkspeed: %d\n",
+ dev->name, atomic64_read(&q->picos_per_byte), ecmd.base.speed);
+}
+
+static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct taprio_sched *q;
+ struct net_device *qdev;
+
+ ASSERT_RTNL();
+
+ if (event != NETDEV_UP && event != NETDEV_CHANGE)
+ return NOTIFY_DONE;
+
+ spin_lock(&taprio_list_lock);
+ list_for_each_entry(q, &taprio_list, taprio_list) {
+ qdev = qdisc_dev(q->root);
+ if (qdev == dev)
+ taprio_set_picos_per_byte(dev, q);
+ }
+ spin_unlock(&taprio_list_lock);
+
+ return NOTIFY_DONE;
+}
+
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@@ -582,9 +631,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct taprio_sched *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
struct tc_mqprio_qopt *mqprio = NULL;
- struct ethtool_link_ksettings ecmd;
int i, err, size;
- s64 link_speed;
ktime_t start;
err = nla_parse_nested(tb, TCA_TAPRIO_ATTR_MAX, opt,
@@ -657,14 +704,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
mqprio->prio_tc_map[i]);
}
- if (!__ethtool_get_link_ksettings(dev, &ecmd))
- link_speed = ecmd.base.speed;
- else
- link_speed = SPEED_1000;
-
- q->picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
- link_speed * 1000 * 1000);
-
+ taprio_set_picos_per_byte(dev, q);
start = taprio_get_start_time(sch);
if (!start)
return 0;
@@ -681,6 +721,10 @@ static void taprio_destroy(struct Qdisc *sch)
struct sched_entry *entry, *n;
unsigned int i;
+ spin_lock(&taprio_list_lock);
+ list_del(&q->taprio_list);
+ spin_unlock(&taprio_list_lock);
+
hrtimer_cancel(&q->advance_timer);
if (q->qdiscs) {
@@ -735,6 +779,10 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
if (!opt)
return -EINVAL;
+ spin_lock(&taprio_list_lock);
+ list_add(&q->taprio_list, &taprio_list);
+ spin_unlock(&taprio_list_lock);
+
return taprio_change(sch, opt, extack);
}
@@ -947,14 +995,24 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
.owner = THIS_MODULE,
};
+static struct notifier_block taprio_device_notifier = {
+ .notifier_call = taprio_dev_notifier,
+};
+
static int __init taprio_module_init(void)
{
+ int err = register_netdevice_notifier(&taprio_device_notifier);
+
+ if (err)
+ return err;
+
return register_qdisc(&taprio_qdisc_ops);
}
static void __exit taprio_module_exit(void)
{
unregister_qdisc(&taprio_qdisc_ops);
+ unregister_netdevice_notifier(&taprio_device_notifier);
}
module_init(taprio_module_init);
--
2.21.0