Message-ID: <20231010145343.12551-2-fw@strlen.de>
Date: Tue, 10 Oct 2023 16:53:31 +0200
From: Florian Westphal <fw@...len.de>
To: <netdev@...r.kernel.org>
Cc: Paolo Abeni <pabeni@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
<netfilter-devel@...r.kernel.org>,
Phil Sutter <phil@....cc>,
Pablo Neira Ayuso <pablo@...filter.org>
Subject: [PATCH net-next 1/8] netfilter: nf_tables: Always allocate nft_rule_dump_ctx
From: Phil Sutter <phil@....cc>
The dump context will move into struct netlink_callback's scratch area in a
later patch; put nf_tables_dump_rules_start() in shape now so that conversion
causes less churn.
Suggested-by: Pablo Neira Ayuso <pablo@...filter.org>
Signed-off-by: Phil Sutter <phil@....cc>
Signed-off-by: Florian Westphal <fw@...len.de>
---
net/netfilter/nf_tables_api.c | 48 +++++++++++++++--------------------
1 file changed, 21 insertions(+), 27 deletions(-)
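For readers unfamiliar with the netlink dump callback pattern, here is a
minimal userspace sketch (illustration only, not kernel code; all names are
invented) of what this patch establishes: the start callback allocates the
per-dump context unconditionally, so the dump and done callbacks can
dereference it without NULL checks, which is exactly what the hunks below do
for nf_tables_dump_rules() and nf_tables_dump_rules_done().

/*
 * Minimal userspace model of the "always allocate the dump context"
 * pattern (illustration only; not kernel code, names invented here).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rule_dump_ctx {
	char *table;	/* optional table filter, NULL when unset */
	char *chain;	/* optional chain filter, NULL when unset */
};

/* start: always returns a context, even when no filters were requested */
static struct rule_dump_ctx *dump_start(const char *table, const char *chain)
{
	struct rule_dump_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;
	if (table && !(ctx->table = strdup(table)))
		goto err;
	if (chain && !(ctx->chain = strdup(chain)))
		goto err;
	return ctx;
err:
	free(ctx->table);
	free(ctx);
	return NULL;
}

/* done: no "if (ctx)" guard needed, the context always exists */
static void dump_done(struct rule_dump_ctx *ctx)
{
	free(ctx->table);
	free(ctx->chain);
	free(ctx);
}

int main(void)
{
	struct rule_dump_ctx *ctx = dump_start(NULL, "output");

	if (!ctx)
		return 1;
	printf("table=%s chain=%s\n",
	       ctx->table ? ctx->table : "(any)",
	       ctx->chain ? ctx->chain : "(any)");
	dump_done(ctx);
	return 0;
}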
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index b4405db710b0..ea30bee41a6e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3521,10 +3521,10 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
 		if (family != NFPROTO_UNSPEC && family != table->family)
 			continue;
 
-		if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
+		if (ctx->table && strcmp(ctx->table, table->name) != 0)
 			continue;
 
-		if (ctx && ctx->table && ctx->chain) {
+		if (ctx->table && ctx->chain) {
 			struct rhlist_head *list, *tmp;
 
 			list = rhltable_lookup(&table->chains_ht, ctx->chain,
@@ -3548,7 +3548,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
 			goto done;
 		}
 
-		if (ctx && ctx->table)
+		if (ctx->table)
 			break;
 	}
 done:
@@ -3563,27 +3563,23 @@ static int nf_tables_dump_rules_start(struct netlink_callback *cb)
 	const struct nlattr * const *nla = cb->data;
 	struct nft_rule_dump_ctx *ctx = NULL;
 
-	if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
-		ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
-		if (!ctx)
-			return -ENOMEM;
+	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+	if (!ctx)
+		return -ENOMEM;
 
-		if (nla[NFTA_RULE_TABLE]) {
-			ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
-						GFP_ATOMIC);
-			if (!ctx->table) {
-				kfree(ctx);
-				return -ENOMEM;
-			}
+	if (nla[NFTA_RULE_TABLE]) {
+		ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], GFP_ATOMIC);
+		if (!ctx->table) {
+			kfree(ctx);
+			return -ENOMEM;
 		}
-		if (nla[NFTA_RULE_CHAIN]) {
-			ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
-						GFP_ATOMIC);
-			if (!ctx->chain) {
-				kfree(ctx->table);
-				kfree(ctx);
-				return -ENOMEM;
-			}
+	}
+	if (nla[NFTA_RULE_CHAIN]) {
+		ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN], GFP_ATOMIC);
+		if (!ctx->chain) {
+			kfree(ctx->table);
+			kfree(ctx);
+			return -ENOMEM;
 		}
 	}
 
@@ -3595,11 +3591,9 @@ static int nf_tables_dump_rules_done(struct netlink_callback *cb)
 {
 	struct nft_rule_dump_ctx *ctx = cb->data;
 
-	if (ctx) {
-		kfree(ctx->table);
-		kfree(ctx->chain);
-		kfree(ctx);
-	}
+	kfree(ctx->table);
+	kfree(ctx->chain);
+	kfree(ctx);
 	return 0;
 }
--
2.41.0