Date:   Wed, 3 Mar 2021 17:11:47 +0100
From:   Pablo Neira Ayuso <pablo@...filter.org>
To:     Oz Shlomo <ozsh@...dia.com>
Cc:     netdev@...r.kernel.org, netfilter-devel@...r.kernel.org,
        Saeed Mahameed <saeedm@...dia.com>,
        Paul Blakey <paulb@...dia.com>
Subject: Re: [PATCH nf-next] netfilter: flowtable: separate replace, destroy
 and stats to different workqueues

Hi,

On Wed, Mar 03, 2021 at 02:59:53PM +0200, Oz Shlomo wrote:
> Currently the flow table offload replace, destroy and stats work items are
> executed on a single workqueue. As such, DESTROY and STATS commands may
> be backlogged after a burst of REPLACE work items. This scenario can bloat
> up memory and may cause active connections to age out.
> 
> Instantiate add, del and stats workqueues to avoid backlogs of non-dependent
> actions. Provide sysfs control over the workqueue attributes, allowing
> userspace applications to control the workqueue cpumask.

It would probably be good to place REPLACE and DESTROY in one single
workqueue so the workqueues don't race. If connections are quickly
created and destroyed, we might get out-of-order execution. Instead
of:

  REPLACE -> DESTROY -> REPLACE

events could be reordered to:

  REPLACE -> REPLACE -> DESTROY

So would it work for you if REPLACE and DESTROY go into one single
workqueue and stats go into another?
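
Roughly something like this, keeping one workqueue for REPLACE and
DESTROY and only splitting out stats (untested sketch, just to
illustrate the idea; I'm reusing the existing nf_flow_offload_wq name
for the shared queue):

static void flow_offload_queue_work(struct flow_offload_work *offload)
{
	/* Route REPLACE and DESTROY to the same workqueue so they
	 * cannot be reordered across queues; only STATS goes to a
	 * separate workqueue.
	 */
	if (offload->cmd == FLOW_CLS_STATS)
		queue_work(nf_flow_offload_stats_wq, &offload->work);
	else
		queue_work(nf_flow_offload_wq, &offload->work);
}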

Or perhaps making the cookie unique is sufficient? The cookie
currently refers to the memory address, but memory can be recycled
very quickly. If the cookie helps to catch the reorder scenario, then
the conntrack id could be used as the cookie instead of the memory
address.
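
As a rough sketch of the cookie idea (untested, assuming the spot
where the cookie is set still has the conntrack entry at hand via
flow->ct, variable names are only illustrative):

	/* Use the conntrack id as cookie instead of the flow memory
	 * address, so a quickly recycled address cannot be confused
	 * with the previous entry.
	 */
	cls_flow->cookie = (unsigned long)nf_ct_get_id(flow->ct);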

Regarding the sysfs toggles, what kind of tuning are you expecting
from users? I'd prefer that the workqueue subsystem selects what is
best for me (autotuning). I'm not a fan of exposing toggles to
userspace when I don't know what users would do with them.

Let me know, thanks.

> Signed-off-by: Oz Shlomo <ozsh@...dia.com>
> Reviewed-by: Paul Blakey <paulb@...dia.com>
> ---
>  net/netfilter/nf_flow_table_offload.c | 44 ++++++++++++++++++++++++++++-------
>  1 file changed, 36 insertions(+), 8 deletions(-)
> 
> diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
> index 2a6993fa40d7..1b979c8b3ba0 100644
> --- a/net/netfilter/nf_flow_table_offload.c
> +++ b/net/netfilter/nf_flow_table_offload.c
> @@ -13,7 +13,9 @@
>  #include <net/netfilter/nf_conntrack_core.h>
>  #include <net/netfilter/nf_conntrack_tuple.h>
>  
> -static struct workqueue_struct *nf_flow_offload_wq;
> +static struct workqueue_struct *nf_flow_offload_add_wq;
> +static struct workqueue_struct *nf_flow_offload_del_wq;
> +static struct workqueue_struct *nf_flow_offload_stats_wq;
>  
>  struct flow_offload_work {
>  	struct list_head	list;
> @@ -826,7 +828,12 @@ static void flow_offload_work_handler(struct work_struct *work)
>  
>  static void flow_offload_queue_work(struct flow_offload_work *offload)
>  {
> -	queue_work(nf_flow_offload_wq, &offload->work);
> +	if (offload->cmd == FLOW_CLS_REPLACE)
> +		queue_work(nf_flow_offload_add_wq, &offload->work);
> +	else if (offload->cmd == FLOW_CLS_DESTROY)
> +		queue_work(nf_flow_offload_del_wq, &offload->work);
> +	else
> +		queue_work(nf_flow_offload_stats_wq, &offload->work);
>  }
>  
>  static struct flow_offload_work *
> @@ -898,8 +905,11 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
>  
>  void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
>  {
> -	if (nf_flowtable_hw_offload(flowtable))
> -		flush_workqueue(nf_flow_offload_wq);
> +	if (nf_flowtable_hw_offload(flowtable)) {
> +		flush_workqueue(nf_flow_offload_add_wq);
> +		flush_workqueue(nf_flow_offload_del_wq);
> +		flush_workqueue(nf_flow_offload_stats_wq);
> +	}
>  }
>  
>  static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
> @@ -1011,15 +1021,33 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
>  
>  int nf_flow_table_offload_init(void)
>  {
> -	nf_flow_offload_wq  = alloc_workqueue("nf_flow_table_offload",
> -					      WQ_UNBOUND, 0);
> -	if (!nf_flow_offload_wq)
> +	nf_flow_offload_add_wq  = alloc_workqueue("nf_ft_offload_add",
> +						  WQ_UNBOUND | WQ_SYSFS, 0);
> +	if (!nf_flow_offload_add_wq)
>  		return -ENOMEM;
>  
> +	nf_flow_offload_del_wq  = alloc_workqueue("nf_ft_offload_del",
> +						  WQ_UNBOUND | WQ_SYSFS, 0);
> +	if (!nf_flow_offload_del_wq)
> +		goto err_del_wq;
> +
> +	nf_flow_offload_stats_wq  = alloc_workqueue("nf_ft_offload_stats",
> +						    WQ_UNBOUND | WQ_SYSFS, 0);
> +	if (!nf_flow_offload_stats_wq)
> +		goto err_stats_wq;
> +
>  	return 0;
> +
> +err_stats_wq:
> +	destroy_workqueue(nf_flow_offload_del_wq);
> +err_del_wq:
> +	destroy_workqueue(nf_flow_offload_add_wq);
> +	return -ENOMEM;
>  }
>  
>  void nf_flow_table_offload_exit(void)
>  {
> -	destroy_workqueue(nf_flow_offload_wq);
> +	destroy_workqueue(nf_flow_offload_add_wq);
> +	destroy_workqueue(nf_flow_offload_del_wq);
> +	destroy_workqueue(nf_flow_offload_stats_wq);
>  }
> -- 
> 1.8.3.1
> 
