[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <ECDAB6E2-EBFE-435C-B5E5-0E27BABA822F@redhat.com>
Date: Fri, 03 Feb 2023 12:23:49 +0100
From: Eelco Chaudron <echaudro@...hat.com>
To: Eddy Tao <taoyuan_eddy@...mail.com>
Cc: netdev@...r.kernel.org, Pravin B Shelar <pshelar@....org>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, dev@...nvswitch.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH net-next v6 1/1] net:openvswitch:reduce cpu_used_mask
memory
On 3 Feb 2023, at 10:51, Eddy Tao wrote:
> Use actual CPU number instead of hardcoded value to decide the size
> of 'cpu_used_mask' in 'struct sw_flow'. Below is the reason.
>
> 'struct cpumask cpu_used_mask' is embedded in struct sw_flow.
> Its size is hardcoded to CONFIG_NR_CPUS bits, which can be
> 8192 by default; this costs memory and slows down ovs_flow_alloc().
> To address this, redefine 'cpu_used_mask' as a pointer and
> append cpumask_size() bytes after 'stats' to hold the cpumask.
>
> cpumask APIs like cpumask_next and cpumask_set_cpu never access
> bits beyond the CPU count, so cpumask_size() bytes of memory are enough.
>
> Signed-off-by: Eddy Tao <taoyuan_eddy@...mail.com>
Hi Eddy,
Thanks for this patch, I have one small nit, but the rest looks good.
Acked-by: Eelco Chaudron <echaudro@...hat.com>
> ---
> net/openvswitch/flow.c | 9 ++++++---
> net/openvswitch/flow.h | 2 +-
> net/openvswitch/flow_table.c | 8 +++++---
> 3 files changed, 12 insertions(+), 7 deletions(-)
>
> diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
> index e20d1a973417..416976f70322 100644
> --- a/net/openvswitch/flow.c
> +++ b/net/openvswitch/flow.c
> @@ -107,7 +107,8 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
>
> rcu_assign_pointer(flow->stats[cpu],
> new_stats);
> - cpumask_set_cpu(cpu, &flow->cpu_used_mask);
> + cpumask_set_cpu(cpu,
> + flow->cpu_used_mask);
> goto unlock;
> }
> }
> @@ -135,7 +136,8 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
> memset(ovs_stats, 0, sizeof(*ovs_stats));
>
> /* We open code this to make sure cpu 0 is always considered */
> - for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
> + for (cpu = 0; cpu < nr_cpu_ids;
> + cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
> struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
>
> if (stats) {
> @@ -159,7 +161,8 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
> int cpu;
>
> /* We open code this to make sure cpu 0 is always considered */
> - for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
> + for (cpu = 0; cpu < nr_cpu_ids;
> + cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
> struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
>
> if (stats) {
> diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
> index 073ab73ffeaa..b5711aff6e76 100644
> --- a/net/openvswitch/flow.h
> +++ b/net/openvswitch/flow.h
> @@ -229,7 +229,7 @@ struct sw_flow {
> */
> struct sw_flow_key key;
> struct sw_flow_id id;
> - struct cpumask cpu_used_mask;
> + struct cpumask *cpu_used_mask;
> struct sw_flow_mask *mask;
> struct sw_flow_actions __rcu *sf_acts;
> struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one
> diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
> index 0a0e4c283f02..dc6a174c3194 100644
> --- a/net/openvswitch/flow_table.c
> +++ b/net/openvswitch/flow_table.c
> @@ -87,11 +87,12 @@ struct sw_flow *ovs_flow_alloc(void)
> if (!stats)
> goto err;
>
> + flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];
nit: I would move this up with the other flow structure initialisation.
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index dc6a174c3194..791504b7f42b 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -79,6 +79,7 @@ struct sw_flow *ovs_flow_alloc(void)
return ERR_PTR(-ENOMEM);
flow->stats_last_writer = -1;
+ flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];
/* Initialize the default stat node. */
stats = kmem_cache_alloc_node(flow_stats_cache,
@@ -87,7 +88,6 @@ struct sw_flow *ovs_flow_alloc(void)
if (!stats)
goto err;
- flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];
spin_lock_init(&stats->lock);
> spin_lock_init(&stats->lock);
>
> RCU_INIT_POINTER(flow->stats[0], stats);
>
> - cpumask_set_cpu(0, &flow->cpu_used_mask);
> + cpumask_set_cpu(0, flow->cpu_used_mask);
>
> return flow;
> err:
> @@ -115,7 +116,7 @@ static void flow_free(struct sw_flow *flow)
> flow->sf_acts);
> /* We open code this to make sure cpu 0 is always considered */
> for (cpu = 0; cpu < nr_cpu_ids;
> - cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
> + cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
> if (flow->stats[cpu])
> kmem_cache_free(flow_stats_cache,
> (struct sw_flow_stats __force *)flow->stats[cpu]);
> @@ -1196,7 +1197,8 @@ int ovs_flow_init(void)
>
> flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
> + (nr_cpu_ids
> - * sizeof(struct sw_flow_stats *)),
> + * sizeof(struct sw_flow_stats *))
> + + cpumask_size(),
> 0, 0, NULL);
> if (flow_cache == NULL)
> return -ENOMEM;
> --
> 2.27.0
Powered by blists - more mailing lists