[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20200311150920.306de7c6@kicinski-fedora-PC1C0HJN>
Date: Wed, 11 Mar 2020 15:09:20 -0700
From: Jakub Kicinski <kuba@...nel.org>
To: Petr Machata <petrm@...lanox.com>
Cc: netdev@...r.kernel.org, Roman Mashak <mrv@...atatu.com>,
jhs@...atatu.com, xiyou.wangcong@...il.com, davem@...emloft.net,
jiri@...lanox.com, mlxsw@...lanox.com
Subject: Re: [PATCH net-next v2 2/6] net: sched: Allow extending set of
supported RED flags
On Wed, 11 Mar 2020 19:33:52 +0200 Petr Machata wrote:
> diff --git a/include/net/red.h b/include/net/red.h
> index 9665582c4687..5718d2b25637 100644
> --- a/include/net/red.h
> +++ b/include/net/red.h
> @@ -179,6 +179,31 @@ static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
> return true;
> }
>
> +static inline bool red_get_flags(unsigned char flags,
> + unsigned char historic_mask,
> + struct nlattr *flags_attr,
> + unsigned int supported_mask,
> + unsigned int *p_flags, unsigned char *p_userbits,
> + struct netlink_ext_ack *extack)
> +{
> + if (flags && flags_attr) {
> + NL_SET_ERR_MSG_MOD(extack, "flags should be passed either through qopt, or through a dedicated attribute");
> + return false;
> + }
> +
> + *p_flags = flags & historic_mask;
> + if (flags_attr)
> + *p_flags |= nla_get_u32(flags_attr);
It's less error-prone for callers if we don't modify the output parameters
until we're sure the call won't fail.
> + if (*p_flags & ~supported_mask) {
> + NL_SET_ERR_MSG_MOD(extack, "unsupported RED flags specified");
> + return false;
> + }
> +
> + *p_userbits = flags & ~historic_mask;
> + return true;
> +}
> +
> +#define TC_RED_HISTORIC_FLAGS (TC_RED_ECN | TC_RED_HARDDROP | TC_RED_ADAPTATIVE)
> +
> struct tc_red_xstats {
> __u32 early; /* Early drops */
> __u32 pdrop; /* Drops due to queue limits */
> diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
> index 1695421333e3..61d7c5a61279 100644
> --- a/net/sched/sch_red.c
> +++ b/net/sched/sch_red.c
> @@ -35,7 +35,11 @@
>
> struct red_sched_data {
> u32 limit; /* HARD maximal queue length */
> - unsigned char flags;
> +
> + u32 flags;
Can we stick to uchar until the number of flags grows?
> + /* Non-flags in tc_red_qopt.flags. */
> + unsigned char userbits;
> +
> struct timer_list adapt_timer;
> struct Qdisc *sch;
> struct red_parms parms;
> @@ -44,6 +48,8 @@ struct red_sched_data {
> struct Qdisc *qdisc;
> };
>
> +#define RED_SUPPORTED_FLAGS TC_RED_HISTORIC_FLAGS
> +
> static inline int red_use_ecn(struct red_sched_data *q)
> {
> return q->flags & TC_RED_ECN;
> @@ -186,6 +192,7 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
> [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
> [TCA_RED_STAB] = { .len = RED_STAB_SIZE },
> [TCA_RED_MAX_P] = { .type = NLA_U32 },
> + [TCA_RED_FLAGS] = { .type = NLA_U32 },
BITFIELD32? And then perhaps turn the define into const validation
data?
Also policy needs a .strict_start_type now.
> };
>
> static int red_change(struct Qdisc *sch, struct nlattr *opt,
> @@ -302,7 +317,8 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
> struct nlattr *opts = NULL;
> struct tc_red_qopt opt = {
> .limit = q->limit,
> - .flags = q->flags,
> + .flags = ((q->flags & TC_RED_HISTORIC_FLAGS) |
> + q->userbits),
nit: parens unnecessary
> .qth_min = q->parms.qth_min >> q->parms.Wlog,
> .qth_max = q->parms.qth_max >> q->parms.Wlog,
> .Wlog = q->parms.Wlog,
> @@ -321,6 +337,8 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
> if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
> nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
> goto nla_put_failure;
> + if (q->flags & ~TC_RED_HISTORIC_FLAGS)
> + nla_put_u32(skb, TCA_RED_FLAGS, q->flags);
Not 100% sure if conditional is needed, but please check the return
code.
> return nla_nest_end(skb, opts);
>
> nla_put_failure:
Powered by blists - more mailing lists