Message-ID: <CAHp75VcdUoNMxzoQ4n2y4LrbYX5nTh3Y8rFh=5J9cv7iU-V=Hg@mail.gmail.com>
Date: Sun, 30 Aug 2020 23:02:19 +0300
From: Andy Shevchenko <andy.shevchenko@...il.com>
To: trix@...hat.com
Cc: pshelar@....org, "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Nathan Chancellor <natechancellor@...il.com>,
Nick Desaulniers <ndesaulniers@...gle.com>,
netdev <netdev@...r.kernel.org>, dev@...nvswitch.org,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] net: openvswitch: pass NULL for unused parameters
On Sun, Aug 30, 2020 at 6:17 PM <trix@...hat.com> wrote:
>
> From: Tom Rix <trix@...hat.com>
>
> clang static analysis flags these problems
>
> flow_table.c:713:2: warning: The expression is an uninitialized
> value. The computed value will also be garbage
> (*n_mask_hit)++;
> ^~~~~~~~~~~~~~~
> flow_table.c:748:5: warning: The expression is an uninitialized
> value. The computed value will also be garbage
> (*n_cache_hit)++;
> ^~~~~~~~~~~~~~~~
>
> These are not problems because neither pararmeter is used
parameter
> by the calling function.
>
> Looking at all of the calling functions, there are many
> cases where the results are unused. Passing unused
> parameters is a waste.
>
> To avoid passing unused parameters, rework the
> masked_flow_lookup() and flow_lookup() routines to check
> for NULL parameters and change the unused parameters to NULL.
>
> Signed-off-by: Tom Rix <trix@...hat.com>
> ---
> net/openvswitch/flow_table.c | 16 +++++++---------
> 1 file changed, 7 insertions(+), 9 deletions(-)
>
> diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
> index e2235849a57e..18e7fa3aa67e 100644
> --- a/net/openvswitch/flow_table.c
> +++ b/net/openvswitch/flow_table.c
> @@ -710,7 +710,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
> ovs_flow_mask_key(&masked_key, unmasked, false, mask);
> hash = flow_hash(&masked_key, &mask->range);
> head = find_bucket(ti, hash);
> - (*n_mask_hit)++;
> + if (n_mask_hit)
> + (*n_mask_hit)++;
>
> hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
> lockdep_ovsl_is_held()) {
> @@ -745,7 +746,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
> u64_stats_update_begin(&ma->syncp);
> usage_counters[*index]++;
> u64_stats_update_end(&ma->syncp);
> - (*n_cache_hit)++;
> + if (n_cache_hit)
> + (*n_cache_hit)++;
> return flow;
> }
> }
> @@ -798,9 +800,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
> *n_cache_hit = 0;
> if (unlikely(!skb_hash || mc->cache_size == 0)) {
> u32 mask_index = 0;
> - u32 cache = 0;
>
> - return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
> + return flow_lookup(tbl, ti, ma, key, n_mask_hit, NULL,
> &mask_index);
Can it be done for mask_index as well? (A rough sketch of what I mean follows below, after the patch.)
> }
>
> @@ -849,11 +850,9 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
> {
> struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
> struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
> - u32 __always_unused n_mask_hit;
> - u32 __always_unused n_cache_hit;
> u32 index = 0;
>
> - return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
> + return flow_lookup(tbl, ti, ma, key, NULL, NULL, &index);
Ditto.
> }
>
> struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
> @@ -865,7 +864,6 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
> /* Always called under ovs-mutex. */
> for (i = 0; i < ma->max; i++) {
> struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
> - u32 __always_unused n_mask_hit;
> struct sw_flow_mask *mask;
> struct sw_flow *flow;
>
> @@ -873,7 +871,7 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
> if (!mask)
> continue;
>
> - flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
> + flow = masked_flow_lookup(ti, match->key, mask, NULL);
> if (flow && ovs_identifier_is_key(&flow->id) &&
> ovs_flow_cmp_unmasked_key(flow, match)) {
> return flow;
> --
> 2.18.1
>
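To expand on the mask_index question above: the same NULL tolerance could
cover the index hint, so callers that want neither the stats nor the cached
mask index pass NULL instead of declaring dummy locals. Below is a minimal,
self-contained userspace sketch of that "optional out-parameter" shape; all
names here (demo_lookup, demo_table, ...) are made up for illustration and
this is not the real flow_lookup() body.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_table {
	const char *entries[4];		/* stand-in for the per-mask tables */
	unsigned int n;
};

/* Look up @key; every out-parameter is optional and may be NULL. */
static const char *demo_lookup(const struct demo_table *tbl, const char *key,
			       uint32_t *n_mask_hit, uint32_t *index)
{
	uint32_t start = index ? *index : 0;	/* use the hint only if given */
	unsigned int i;

	for (i = 0; i < tbl->n; i++) {
		uint32_t slot = (start + i) % tbl->n;

		if (n_mask_hit)			/* count probes only when asked */
			(*n_mask_hit)++;

		if (tbl->entries[slot] && !strcmp(tbl->entries[slot], key)) {
			if (index)		/* write the hint back only when asked */
				*index = slot;
			return tbl->entries[slot];
		}
	}
	return NULL;
}

int main(void)
{
	struct demo_table tbl = { { "icmp", "tcp", "udp", NULL }, 4 };
	uint32_t hits = 0, idx = 0;
	const char *res;

	/* Caller that wants both the probe count and the index hint. */
	res = demo_lookup(&tbl, "udp", &hits, &idx);
	printf("%s after %u probes, idx=%u\n", res ? res : "miss", hits, idx);

	/* Caller that cares about neither: no dummy locals, just NULL. */
	res = demo_lookup(&tbl, "tcp", NULL, NULL);
	printf("%s\n", res ? res : "miss");

	return 0;
}

In the real function the index hint is also dereferenced for the usage
counters (see the hunk quoted above), so NULL checks would be needed in a
couple more places than for n_mask_hit/n_cache_hit, but the pattern is the
same.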
--
With Best Regards,
Andy Shevchenko