Message-ID: <20231103173442.GB768996@kernel.org>
Date: Fri, 3 Nov 2023 17:34:42 +0000
From: Simon Horman <horms@...nel.org>
To: Pablo Neira Ayuso <pablo@...filter.org>
Cc: netfilter-devel@...r.kernel.org, davem@...emloft.net,
netdev@...r.kernel.org, kuba@...nel.org, pabeni@...hat.com,
edumazet@...gle.com, fw@...len.de
Subject: Re: [PATCH net-next 02/19] netfilter: nft_set_rbtree: prefer sync gc
to async worker
On Wed, Oct 25, 2023 at 11:25:38PM +0200, Pablo Neira Ayuso wrote:
> From: Florian Westphal <fw@...len.de>
>
> There is no need for asynchronous garbage collection, rbtree inserts
> can only happen from the netlink control plane.
>
> We already perform on-demand gc on insertion, in the area of the
> tree where the insertion takes place, but we don't do a full tree
> walk there for performance reasons.
>
> Do a full gc walk at the end of the transaction instead and
> remove the async worker.
>
> Signed-off-by: Florian Westphal <fw@...len.de>
> Signed-off-by: Pablo Neira Ayuso <pablo@...filter.org>
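
For context, a minimal sketch of how such a sync gc walk can be driven
from the end of the transaction; the .commit hook and the last_gc rate
limit below are an illustration of the description above, assumed
rather than quoted from the series:

  /* Sketch: run the full gc walk when the transaction commits,
   * rate-limited by the set's gc interval (illustrative only).
   */
  static void nft_rbtree_commit(struct nft_set *set)
  {
  	struct nft_rbtree *priv = nft_set_priv(set);

  	if (time_after_eq(jiffies,
  			  priv->last_gc + nft_set_gc_interval(set)))
  		nft_rbtree_gc(set);
  }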
...
> @@ -515,11 +523,7 @@ static void nft_rbtree_remove(const struct net *net,
> struct nft_rbtree *priv = nft_set_priv(set);
> struct nft_rbtree_elem *rbe = elem->priv;
>
> - write_lock_bh(&priv->lock);
> - write_seqcount_begin(&priv->count);
> - rb_erase(&rbe->node, &priv->root);
> - write_seqcount_end(&priv->count);
> - write_unlock_bh(&priv->lock);
> + nft_rbtree_erase(priv, rbe);
> }
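
The new nft_rbtree_erase() helper is not shown in this hunk; judging
by the lines removed above, it presumably just factors out the locked
erase, along these lines:

  static void nft_rbtree_erase(struct nft_rbtree *priv,
  			     struct nft_rbtree_elem *rbe)
  {
  	/* serialize against readers and other writers */
  	write_lock_bh(&priv->lock);
  	write_seqcount_begin(&priv->count);
  	rb_erase(&rbe->node, &priv->root);
  	write_seqcount_end(&priv->count);
  	write_unlock_bh(&priv->lock);
  }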
>
> static void nft_rbtree_activate(const struct net *net,
> @@ -613,45 +617,40 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
> read_unlock_bh(&priv->lock);
> }
>
> -static void nft_rbtree_gc(struct work_struct *work)
> +static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
> + struct nft_rbtree *priv,
> + struct nft_rbtree_elem *rbe)
> {
> + struct nft_set_elem elem = {
> + .priv = rbe,
> + };
> +
> + nft_setelem_data_deactivate(net, set, &elem);
> + nft_rbtree_erase(priv, rbe);
> +}
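
Deactivating before erasing keeps the teardown ordered: the element's
data is released from the ruleset first, then the node is unlinked
under the write lock. The caller is then expected to queue the element
on the gc transaction so it is only freed after the walk, roughly (a
sketch assuming the nft_trans_gc_* helpers from this series):

  /* sketch of the expected caller pattern (illustrative) */
  nft_rbtree_gc_remove(net, set, priv, rbe);
  nft_trans_gc_elem_add(gc, rbe);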
> +
> +static void nft_rbtree_gc(struct nft_set *set)
> +{
> + struct nft_rbtree *priv = nft_set_priv(set);
> struct nft_rbtree_elem *rbe, *rbe_end = NULL;
> struct nftables_pernet *nft_net;
Hi Florian and Pablo,

I understand that this patch has already been accepted upstream, so
this feedback is rather late, but I noticed that with this patch
nft_net is now set but otherwise unused in this function.

This is flagged by clang-16 and gcc-13 W=1 builds.
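For reference, the warning can be reproduced with a W=1 build of this
object, e.g. "make W=1 net/netfilter/nft_set_rbtree.o". The obvious
cleanup would be to drop the variable and its assignment, along these
lines (untested sketch):

  -	struct nftables_pernet *nft_net;
  [...]
  -	nft_net = nft_pernet(net);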
> - struct nft_rbtree *priv;
> + struct rb_node *node, *next;
> struct nft_trans_gc *gc;
> - struct rb_node *node;
> - struct nft_set *set;
> - unsigned int gc_seq;
> struct net *net;
>
> - priv = container_of(work, struct nft_rbtree, gc_work.work);
> set = nft_set_container_of(priv);
> net = read_pnet(&set->net);
> nft_net = nft_pernet(net);
> - gc_seq = READ_ONCE(nft_net->gc_seq);
>
> - if (nft_set_gc_is_pending(set))
> - goto done;
> -
> - gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
> + gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
> if (!gc)
> - goto done;
> -
> - read_lock_bh(&priv->lock);
> - for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
> + return;
>
> - /* Ruleset has been updated, try later. */
> - if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
> - nft_trans_gc_destroy(gc);
> - gc = NULL;
> - goto try_later;
> - }
> + for (node = rb_first(&priv->root); node ; node = next) {
> + next = rb_next(node);
>
> rbe = rb_entry(node, struct nft_rbtree_elem, node);
>
> - if (nft_set_elem_is_dead(&rbe->ext))
> - goto dead_elem;
> -
> /* elements are reversed in the rbtree for historical reasons,
> * from highest to lowest value, that is why end element is
> * always visited before the start element.
...
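
To illustrate the quoted comment (a sketch of the pairing pattern, not
the elided remainder of the hunk): since the tree is ordered from
highest to lowest value, the end element of an interval is visited
first and has to be cached until its start element turns up:

  /* illustrative: cache the end element of the reversed interval */
  if (nft_rbtree_interval_end(rbe)) {
  	rbe_end = rbe;	/* remember the end element ... */
  	continue;	/* ... its start element follows later */
  }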