Message-ID: <DB5PR05MB18955C246FD11968AA6F6CF5ACE90@DB5PR05MB1895.eurprd05.prod.outlook.com>
Date: Wed, 17 Jan 2018 07:33:38 +0000
From: Nogah Frankel <nogahf@...lanox.com>
To: Jakub Kicinski <jakub.kicinski@...ronome.com>,
"davem@...emloft.net" <davem@...emloft.net>,
"jiri@...nulli.us" <jiri@...nulli.us>
CC: "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"oss-drivers@...ronome.com" <oss-drivers@...ronome.com>,
"xiyou.wangcong@...il.com" <xiyou.wangcong@...il.com>,
"edumazet@...gle.com" <edumazet@...gle.com>,
Yuval Mintz <yuvalm@...lanox.com>
Subject: RE: [PATCH net-next v2] net: sched: red: don't reset the backlog on
every stat dump
> -----Original Message-----
> From: Jakub Kicinski [mailto:jakub.kicinski@...ronome.com]
> Sent: Monday, January 15, 2018 6:01 AM
> To: davem@...emloft.net; jiri@...nulli.us; Nogah Frankel
> <nogahf@...lanox.com>
> Cc: netdev@...r.kernel.org; oss-drivers@...ronome.com;
> xiyou.wangcong@...il.com; edumazet@...gle.com; Yuval Mintz
> <yuvalm@...lanox.com>; Jakub Kicinski <jakub.kicinski@...ronome.com>
> Subject: [PATCH net-next v2] net: sched: red: don't reset the backlog on every
> stat dump
>
> Commit 0dfb33a0d7e2 ("sch_red: report backlog information") copied
> child's backlog into RED's backlog. Back then RED did not maintain
> its own backlog counts. This has changed after commit 2ccccf5fb43f
> ("net_sched: update hierarchical backlog too") and commit d7f4f332f082
> ("sch_red: update backlog as well"). Copying is no longer necessary.
>
> Tested:
>
> $ tc -s qdisc show dev veth0
> qdisc red 1: root refcnt 2 limit 400000b min 30000b max 30000b ecn
> Sent 20942 bytes 221 pkt (dropped 0, overlimits 0 requeues 0)
> backlog 1260b 14p requeues 14
> marked 0 early 0 pdrop 0 other 0
> qdisc tbf 2: parent 1: rate 1Kbit burst 15000b lat 3585.0s
> Sent 20942 bytes 221 pkt (dropped 0, overlimits 138 requeues 0)
> backlog 1260b 14p requeues 14
>
> Recently RED offload was added. We need to make sure drivers don't
> depend on resetting the stats. This means backlog should be treated
> like any other statistic:
>
> total_stat = new_hw_stat - prev_hw_stat;
>
> Adjust mlxsw.
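Restating the accounting, since it is the crux of the driver change: below
is a minimal standalone C sketch of the "report the delta against a cached
base" pattern. The names hw_stats, stats_cache and report_stats are made up
for illustration and are not the mlxsw API.

#include <stdint.h>

struct hw_stats {	/* counters as currently read from the device */
	uint64_t drops;
	uint64_t backlog;
};

struct stats_cache {	/* values already folded into the qdisc stats */
	uint64_t drops;
	uint64_t backlog;
};

/* Fold in only what changed since the previous dump, then move the base
 * forward.  Nothing is ever reset; for a gauge such as backlog the delta
 * may be "negative", which the unsigned wrap-around handles.
 */
static void report_stats(const struct hw_stats *hw, struct stats_cache *base,
			 uint64_t *total_drops, uint64_t *total_backlog)
{
	*total_drops += hw->drops - base->drops;
	*total_backlog += hw->backlog - base->backlog;
	base->drops = hw->drops;
	base->backlog = hw->backlog;
}

In the patch itself this is the stats_base.backlog value cached in
mlxsw_sp_qdisc_get_red_stats() and subtracted back out of sch->qstats in
mlxsw_sp_qdisc_red_unoffload().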
>
> Signed-off-by: Jakub Kicinski <jakub.kicinski@...ronome.com>
Acked-by: Nogah Frankel <nogahf@...lanox.com>
Thanks
Nogah
> ---
> v2:
> - reuse the mlxsw infra added for prio;
> - align the way qstats are passed with prio.
>
> .../net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 26 +++++++++++++++++++---
> include/net/pkt_cls.h | 1 +
> net/sched/sch_red.c | 2 +-
> 3 files changed, 25 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
> index e11a0abfc663..8cac5202b913 100644
> --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
> +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
> @@ -247,6 +247,8 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
>
> stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
> stats_base->drops = red_base->prob_drop + red_base->pdrop;
> +
> + stats_base->backlog = 0;
> }
>
> static int
> @@ -306,6 +308,19 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
> max, prob, p->is_ecn);
> }
>
> +static void
> +mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
> + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
> + void *params)
> +{
> + struct tc_red_qopt_offload_params *p = params;
> + u64 backlog;
> +
> + backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
> + mlxsw_sp_qdisc->stats_base.backlog);
> + p->qstats->backlog -= backlog;
> +}
> +
> static int
> mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
> struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
> @@ -338,7 +353,7 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
> struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
> struct tc_qopt_offload_stats *stats_ptr)
> {
> - u64 tx_bytes, tx_packets, overlimits, drops;
> + u64 tx_bytes, tx_packets, overlimits, drops, backlog;
> u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
> struct mlxsw_sp_qdisc_stats *stats_base;
> struct mlxsw_sp_port_xstats *xstats;
> @@ -354,14 +369,18 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
> stats_base->overlimits;
> drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
> stats_base->drops;
> + backlog = xstats->backlog[tclass_num];
>
> _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
> stats_ptr->qstats->overlimits += overlimits;
> stats_ptr->qstats->drops += drops;
> stats_ptr->qstats->backlog +=
> - mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
> - xstats->backlog[tclass_num]);
> + mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
> + backlog) -
> + mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
> + stats_base->backlog);
>
> + stats_base->backlog = backlog;
> stats_base->drops += drops;
> stats_base->overlimits += overlimits;
> stats_base->tx_bytes += tx_bytes;
> @@ -375,6 +394,7 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
> .type = MLXSW_SP_QDISC_RED,
> .check_params = mlxsw_sp_qdisc_red_check_params,
> .replace = mlxsw_sp_qdisc_red_replace,
> + .unoffload = mlxsw_sp_qdisc_red_unoffload,
> .destroy = mlxsw_sp_qdisc_red_destroy,
> .get_stats = mlxsw_sp_qdisc_get_red_stats,
> .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
> diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
> index 9c341f003091..cc23c041a6d7 100644
> --- a/include/net/pkt_cls.h
> +++ b/include/net/pkt_cls.h
> @@ -748,6 +748,7 @@ struct tc_red_qopt_offload_params {
> u32 max;
> u32 probability;
> bool is_ecn;
> + struct gnet_stats_queue *qstats;
> };
>
> struct tc_red_qopt_offload {
> diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
> index 0af1c1254e0b..16644b3d2362 100644
> --- a/net/sched/sch_red.c
> +++ b/net/sched/sch_red.c
> @@ -167,6 +167,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
> opt.set.max = q->parms.qth_max >> q->parms.Wlog;
> opt.set.probability = q->parms.max_P;
> opt.set.is_ecn = red_use_ecn(q);
> + opt.set.qstats = &sch->qstats;
> } else {
> opt.command = TC_RED_DESTROY;
> }
> @@ -322,7 +323,6 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
> };
> int err;
>
> - sch->qstats.backlog = q->qdisc->qstats.backlog;
> err = red_dump_offload_stats(sch, &opt);
> if (err)
> goto nla_put_failure;
> --
> 2.15.1