[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20200615142434.GS47542@localhost.localdomain>
Date: Mon, 15 Jun 2020 11:24:34 -0300
From: Marcelo Ricardo Leitner <mleitner@...hat.com>
To: Roi Dayan <roid@...lanox.com>
Cc: netdev@...r.kernel.org, pablo@...filter.org, davem@...emloft.net,
Jiri Pirko <jiri@...lanox.com>,
Paul Blakey <paulb@...lanox.com>,
Oz Shlomo <ozsh@...lanox.com>, Alaa Hleihel <alaa@...lanox.com>
Subject: Re: [PATCH net 2/2] netfilter: flowtable: Make
nf_flow_table_offload_add/del_cb inline
On Sun, Jun 14, 2020 at 02:12:49PM +0300, Roi Dayan wrote:
> From: Alaa Hleihel <alaa@...lanox.com>
>
> Currently, nf_flow_table_offload_add/del_cb are exported by nf_flow_table
> module, therefore modules using them will have hard-dependency
> on nf_flow_table and will require loading it all the time.
>
> This can lead to unnecessary overhead on systems that do not
> use this API.
>
> To relax the hard-dependency between the modules, we unexport these
> functions and make them static inline.
>
> Fixes: 978703f42549 ("netfilter: flowtable: Add API for registering to flow table events")
> Signed-off-by: Alaa Hleihel <alaa@...lanox.com>
> Reviewed-by: Roi Dayan <roid@...lanox.com>
Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@...il.com>
> ---
> include/net/netfilter/nf_flow_table.h | 49 ++++++++++++++++++++++++++++++++---
> net/netfilter/nf_flow_table_core.c | 45 --------------------------------
> 2 files changed, 45 insertions(+), 49 deletions(-)
>
> diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
> index c54a7f707e50..8a8f0e64edc3 100644
> --- a/include/net/netfilter/nf_flow_table.h
> +++ b/include/net/netfilter/nf_flow_table.h
> @@ -161,10 +161,51 @@ struct nf_flow_route {
> struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
> void flow_offload_free(struct flow_offload *flow);
>
> -int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
> - flow_setup_cb_t *cb, void *cb_priv);
> -void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
> - flow_setup_cb_t *cb, void *cb_priv);
> +static inline int
> +nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
> + flow_setup_cb_t *cb, void *cb_priv)
> +{
> + struct flow_block *block = &flow_table->flow_block;
> + struct flow_block_cb *block_cb;
> + int err = 0;
> +
> + down_write(&flow_table->flow_block_lock);
> + block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> + if (block_cb) {
> + err = -EEXIST;
> + goto unlock;
> + }
> +
> + block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
> + if (IS_ERR(block_cb)) {
> + err = PTR_ERR(block_cb);
> + goto unlock;
> + }
> +
> + list_add_tail(&block_cb->list, &block->cb_list);
> +
> +unlock:
> + up_write(&flow_table->flow_block_lock);
> + return err;
> +}
> +
> +static inline void
> +nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
> + flow_setup_cb_t *cb, void *cb_priv)
> +{
> + struct flow_block *block = &flow_table->flow_block;
> + struct flow_block_cb *block_cb;
> +
> + down_write(&flow_table->flow_block_lock);
> + block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> + if (block_cb) {
> + list_del(&block_cb->list);
> + flow_block_cb_free(block_cb);
> + } else {
> + WARN_ON(true);
> + }
> + up_write(&flow_table->flow_block_lock);
> +}
>
> int flow_offload_route_init(struct flow_offload *flow,
> const struct nf_flow_route *route);
> diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
> index 42da6e337276..647680175213 100644
> --- a/net/netfilter/nf_flow_table_core.c
> +++ b/net/netfilter/nf_flow_table_core.c
> @@ -387,51 +387,6 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
> queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
> }
>
> -int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
> - flow_setup_cb_t *cb, void *cb_priv)
> -{
> - struct flow_block *block = &flow_table->flow_block;
> - struct flow_block_cb *block_cb;
> - int err = 0;
> -
> - down_write(&flow_table->flow_block_lock);
> - block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> - if (block_cb) {
> - err = -EEXIST;
> - goto unlock;
> - }
> -
> - block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
> - if (IS_ERR(block_cb)) {
> - err = PTR_ERR(block_cb);
> - goto unlock;
> - }
> -
> - list_add_tail(&block_cb->list, &block->cb_list);
> -
> -unlock:
> - up_write(&flow_table->flow_block_lock);
> - return err;
> -}
> -EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
> -
> -void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
> - flow_setup_cb_t *cb, void *cb_priv)
> -{
> - struct flow_block *block = &flow_table->flow_block;
> - struct flow_block_cb *block_cb;
> -
> - down_write(&flow_table->flow_block_lock);
> - block_cb = flow_block_cb_lookup(block, cb, cb_priv);
> - if (block_cb) {
> - list_del(&block_cb->list);
> - flow_block_cb_free(block_cb);
> - } else {
> - WARN_ON(true);
> - }
> - up_write(&flow_table->flow_block_lock);
> -}
> -EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
>
> static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
> __be16 port, __be16 new_port)
> --
> 2.8.4
>
Powered by blists - more mailing lists