Message-Id: <20201012235642.1384318-3-vinicius.gomes@intel.com>
Date: Mon, 12 Oct 2020 16:56:42 -0700
From: Vinicius Costa Gomes <vinicius.gomes@...el.com>
To: netdev@...r.kernel.org
Cc: Vinicius Costa Gomes <vinicius.gomes@...el.com>, jhs@...atatu.com,
xiyou.wangcong@...il.com, jiri@...nulli.us, kuba@...nel.org,
m-karicheri2@...com, vladimir.oltean@....com,
Jose.Abreu@...opsys.com, po.liu@....com
Subject: [RFC net-next v2 2/2] taprio: Add support for frame preemption offload

This adds a way to configure which queues are marked as preemptible
and which are marked as express.

Even though this is not a "real" offload, because it cannot be executed
purely in software, having this information near where the mapping of
queues is specified hopefully makes it easier to understand.

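As an illustration of the intended semantics (the helper below is not
part of this patch; it only assumes that the attribute carries a u32
bitmask in which bit i marks TX queue i as preemptible, matching the
U32_MAX check added to taprio_change()):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper only: queue i is preemptible iff bit i is set.
 * An all-ones mask is invalid because at least one queue must remain
 * express, which is what taprio_change() rejects with U32_MAX.
 */
static int preempt_mask_valid(uint32_t preemptible_queues)
{
	return preemptible_queues != UINT32_MAX;
}

int main(void)
{
	/* queues 1-3 preemptible, queue 0 stays express */
	uint32_t mask = (1u << 1) | (1u << 2) | (1u << 3);

	printf("mask=0x%x valid=%d\n", mask, preempt_mask_valid(mask));
	return 0;
}
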
Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@...el.com>
---
 include/linux/netdevice.h      |  1 +
 include/net/pkt_sched.h        |  4 ++++
 include/uapi/linux/pkt_sched.h |  1 +
 net/sched/sch_taprio.c         | 41 ++++++++++++++++++++++++++++++----
 4 files changed, 43 insertions(+), 4 deletions(-)

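Not part of the patch below, but for context: a hypothetical sketch of
how a driver's ndo_setup_tc() could consume the new TC_SETUP_PREEMPT
type and struct tc_preempt_qopt_offload introduced in this patch.
foo_priv and foo_write_preempt_mask() are made-up names used only for
illustration; a real driver would also validate the mask against its
number of TX queues before programming the hardware.

#include <linux/netdevice.h>
#include <net/pkt_sched.h>

/* Hypothetical driver private data, for illustration only. */
struct foo_priv {
	u32 preemptible_queues;
};

static void foo_write_preempt_mask(struct foo_priv *priv, u32 mask)
{
	/* A real driver would program the MAC's frame preemption
	 * registers here; this sketch only caches the mask.
	 */
	priv->preemptible_queues = mask;
}

static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	struct foo_priv *priv = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_PREEMPT: {
		struct tc_preempt_qopt_offload *qopt = type_data;

		/* Bit i set => TX queue i preemptible, clear => express. */
		foo_write_preempt_mask(priv, qopt->preemptible_queues);
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}
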
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a0df43b13839..99589945bb10 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -852,6 +852,7 @@ enum tc_setup_type {
 	TC_SETUP_QDISC_ETS,
 	TC_SETUP_QDISC_TBF,
 	TC_SETUP_QDISC_FIFO,
+	TC_SETUP_PREEMPT,
 };
 
 /* These structures hold the attributes of bpf state that are being passed
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 4ed32e6b0201..71b50b644cfa 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -178,6 +178,10 @@ struct tc_taprio_qopt_offload {
 	struct tc_taprio_sched_entry entries[];
 };
 
+struct tc_preempt_qopt_offload {
+	u32 preemptible_queues;
+};
+
 /* Reference counting */
 struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
 						  *offload);
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 9e7c2c607845..f0240ddaeee3 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1240,6 +1240,7 @@ enum {
 	TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
 	TCA_TAPRIO_ATTR_FLAGS, /* u32 */
 	TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
+	TCA_TAPRIO_ATTR_PREEMPT_QUEUES, /* u32 */
 	__TCA_TAPRIO_ATTR_MAX,
 };
 
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index b0ad7687ee2c..f9aa3f26aad9 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -63,6 +63,7 @@ struct taprio_sched {
 	struct Qdisc **qdiscs;
 	struct Qdisc *root;
 	u32 flags;
+	u32 preemptible_queues;
 	enum tk_offsets tk_offset;
 	int clockid;
 	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
@@ -775,6 +776,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
 	[TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
 	[TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
+	[TCA_TAPRIO_ATTR_PREEMPT_QUEUES] = { .type = NLA_U32 },
 };
 
 static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
@@ -1267,6 +1269,7 @@ static int taprio_disable_offload(struct net_device *dev,
 				  struct netlink_ext_ack *extack)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
+	struct tc_preempt_qopt_offload preempt = { };
 	struct tc_taprio_qopt_offload *offload;
 	int err;
 
@@ -1285,13 +1288,15 @@ static int taprio_disable_offload(struct net_device *dev,
 
 	offload->enable = 0;
 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
-	if (err < 0) {
+	if (err < 0)
+		NL_SET_ERR_MSG(extack,
+			       "Device failed to disable offload");
+
+	err = ops->ndo_setup_tc(dev, TC_SETUP_PREEMPT, &preempt);
+	if (err < 0)
 		NL_SET_ERR_MSG(extack,
 			       "Device failed to disable offload");
-		goto out;
-	}
 
-out:
 	taprio_offload_free(offload);
 
 	return err;
@@ -1508,6 +1513,29 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 					       mqprio->prio_tc_map[i]);
 	}
 
+	/* It's valid to enable frame preemption without any kind of
+	 * offloading being enabled, so keep it separated.
+	 */
+	if (tb[TCA_TAPRIO_ATTR_PREEMPT_QUEUES]) {
+		u32 preempt = nla_get_u32(tb[TCA_TAPRIO_ATTR_PREEMPT_QUEUES]);
+		struct tc_preempt_qopt_offload qopt = { };
+
+		if (preempt == U32_MAX) {
+			NL_SET_ERR_MSG(extack, "At least one queue must not be preemptible");
+			err = -EINVAL;
+			goto free_sched;
+		}
+
+		qopt.preemptible_queues = preempt;
+
+		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_PREEMPT,
+						    &qopt);
+		if (err)
+			goto free_sched;
+
+		q->preemptible_queues = preempt;
+	}
+
 	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
 		err = taprio_enable_offload(dev, q, new_admin, extack);
 	else
@@ -1649,6 +1677,7 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
 	 */
 	q->clockid = -1;
 	q->flags = TAPRIO_FLAGS_INVALID;
+	q->preemptible_queues = U32_MAX;
 
 	spin_lock(&taprio_list_lock);
 	list_add(&q->taprio_list, &taprio_list);
@@ -1832,6 +1861,10 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
 		goto options_error;
 
+	if (q->preemptible_queues != U32_MAX &&
+	    nla_put_u32(skb, TCA_TAPRIO_ATTR_PREEMPT_QUEUES, q->preemptible_queues))
+		goto options_error;
+
 	if (q->txtime_delay &&
 	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
 		goto options_error;
--
2.28.0