[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CANn89i+MF3F8Q063USmLYvyrffBKQRvb3ZM2c1MhAbFOwk9B-A@mail.gmail.com>
Date: Wed, 17 Apr 2024 10:54:30 +0200
From: Eric Dumazet <edumazet@...gle.com>
To: Simon Horman <horms@...nel.org>
Cc: "David S. Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>, Jiri Pirko <jiri@...nulli.us>, netdev@...r.kernel.org,
eric.dumazet@...il.com, Toke Høiland-Jørgensen <toke@...e.dk>,
cake@...ts.bufferbloat.net
Subject: Re: [PATCH net-next 02/14] net_sched: cake: implement lockless cake_dump()
On Wed, Apr 17, 2024 at 10:35 AM Simon Horman <horms@...nel.org> wrote:
>
> + Toke Høiland-Jørgensen <toke@...e.dk>
> cake@...ts.bufferbloat.net
>
> On Mon, Apr 15, 2024 at 01:20:42PM +0000, Eric Dumazet wrote:
> > Instead of relying on RTNL, cake_dump() can use READ_ONCE()
> > annotations, paired with WRITE_ONCE() ones in cake_change().
> >
> > Signed-off-by: Eric Dumazet <edumazet@...gle.com>
>
> ...
>
> > @@ -2774,68 +2783,71 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
> > {
> > struct cake_sched_data *q = qdisc_priv(sch);
> > struct nlattr *opts;
> > + u16 rate_flags;
> >
> > opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
> > if (!opts)
> > goto nla_put_failure;
> >
> > - if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
> > - TCA_CAKE_PAD))
> > + if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64,
> > + READ_ONCE(q->rate_bps), TCA_CAKE_PAD))
> > goto nla_put_failure;
> >
> > if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
> > - q->flow_mode & CAKE_FLOW_MASK))
> > + READ_ONCE(q->flow_mode) & CAKE_FLOW_MASK))
> > goto nla_put_failure;
>
> Hi Eric,
>
> q->flow_mode is read twice in this function. Once here...
>
> >
> > - if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
> > + if (nla_put_u32(skb, TCA_CAKE_RTT, READ_ONCE(q->interval)))
> > goto nla_put_failure;
> >
> > - if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
> > + if (nla_put_u32(skb, TCA_CAKE_TARGET, READ_ONCE(q->target)))
> > goto nla_put_failure;
> >
> > - if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
> > + if (nla_put_u32(skb, TCA_CAKE_MEMORY,
> > + READ_ONCE(q->buffer_config_limit)))
> > goto nla_put_failure;
> >
> > + rate_flags = READ_ONCE(q->rate_flags);
> > if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
> > - !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
> > + !!(rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
> > goto nla_put_failure;
> >
> > if (nla_put_u32(skb, TCA_CAKE_INGRESS,
> > - !!(q->rate_flags & CAKE_FLAG_INGRESS)))
> > + !!(rate_flags & CAKE_FLAG_INGRESS)))
> > goto nla_put_failure;
> >
> > - if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
> > + if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, READ_ONCE(q->ack_filter)))
> > goto nla_put_failure;
> >
> > if (nla_put_u32(skb, TCA_CAKE_NAT,
> > - !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
> > + !!(READ_ONCE(q->flow_mode) & CAKE_FLOW_NAT_FLAG)))
> > goto nla_put_failure;
>
> ... and once here.
>
> I am assuming that it isn't a big deal, but perhaps it is better to save
> q->flow_mode into a local variable.
>
> Also, more importantly, q->flow_mode does not seem to be handled
> using WRITE_ONCE() in cake_change(). It's a non-trivial case,
> which I guess is well served by a mechanism built around a local variable.
Thanks !
I will squash in v2:
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index bb37a0dedcc1e4b3418f6681d87108aad7ea066f..9602dafe32e61d38dc00b0a35e1ee3f530989610
100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -2573,6 +2573,7 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
struct cake_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CAKE_MAX + 1];
u16 rate_flags;
+ u8 flow_mode;
int err;
err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
@@ -2580,10 +2581,11 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
return err;
+ flow_mode = q->flow_mode;
if (tb[TCA_CAKE_NAT]) {
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
- q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+ flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+ flow_mode |= CAKE_FLOW_NAT_FLAG *
!!nla_get_u32(tb[TCA_CAKE_NAT]);
#else
NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
@@ -2609,7 +2611,7 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
}
if (tb[TCA_CAKE_FLOW_MODE])
- q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
+ flow_mode = ((flow_mode & CAKE_FLOW_NAT_FLAG) |
(nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
CAKE_FLOW_MASK));
@@ -2689,6 +2691,7 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
}
WRITE_ONCE(q->rate_flags, rate_flags);
+ WRITE_ONCE(q->flow_mode, flow_mode);
if (q->tins) {
sch_tree_lock(sch);
cake_reconfigure(sch);
@@ -2784,6 +2787,7 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
struct cake_sched_data *q = qdisc_priv(sch);
struct nlattr *opts;
u16 rate_flags;
+ u8 flow_mode;
opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (!opts)
@@ -2793,8 +2797,8 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
READ_ONCE(q->rate_bps), TCA_CAKE_PAD))
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
- READ_ONCE(q->flow_mode) & CAKE_FLOW_MASK))
+ flow_mode = READ_ONCE(q->flow_mode);
+ if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE, flow_mode & CAKE_FLOW_MASK))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_RTT, READ_ONCE(q->interval)))
@@ -2820,7 +2824,7 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_NAT,
- !!(READ_ONCE(q->flow_mode) & CAKE_FLOW_NAT_FLAG)))
+ !!(flow_mode & CAKE_FLOW_NAT_FLAG)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, READ_ONCE(q->tin_mode)))
Powered by blists - more mailing lists