Message-Id: <20220307091645.425786626@linuxfoundation.org>
Date: Mon, 7 Mar 2022 10:18:47 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Eric Dumazet <eric.dumazet@...il.com>,
Eric Dumazet <edumazet@...gle.com>,
Florian Westphal <fw@...len.de>
Subject: [PATCH 5.10 044/105] netfilter: nf_queue: fix possible use-after-free

From: Florian Westphal <fw@...len.de>

commit c3873070247d9e3c7a6b0cf9bf9b45e8018427b1 upstream.

Eric Dumazet says:
  The sock_hold() side seems suspect, because there is no guarantee
  that sk_refcnt is not already 0.

On failure, we cannot queue the packet and need to indicate an
error. The packet will be dropped by the caller.
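As a rough userspace illustration of the inc-not-zero pattern the fix relies on
(struct sock_ref and ref_get_unless_zero() are made-up stand-ins for the kernel's
struct sock and refcount_inc_not_zero(), not real kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct sock_ref {
		atomic_int refcnt;
	};

	/* Take a reference only if the count has not already hit zero.
	 * An unconditional increment (what sock_hold() did here) would
	 * "resurrect" an object that is already being freed. */
	static bool ref_get_unless_zero(struct sock_ref *s)
	{
		int old = atomic_load(&s->refcnt);

		do {
			if (old == 0)
				return false;	/* too late, caller must drop the packet */
		} while (!atomic_compare_exchange_weak(&s->refcnt, &old, old + 1));

		return true;
	}

	int main(void)
	{
		struct sock_ref live  = { .refcnt = 1 };
		struct sock_ref dying = { .refcnt = 0 };

		printf("live:  %s\n", ref_get_unless_zero(&live)  ? "ref taken" : "refused");
		printf("dying: %s\n", ref_get_unless_zero(&dying) ? "ref taken" : "refused");
		return 0;
	}

When the reference cannot be taken, the caller simply drops the skb, which is
what the -ENOTCONN path added to __nf_queue() below does.
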
v2: split skb prefetch hunk into separate change
Fixes: 271b72c7fa82c ("udp: RCU handling for Unicast packets.")
Reported-by: Eric Dumazet <eric.dumazet@...il.com>
Reviewed-by: Eric Dumazet <edumazet@...gle.com>
Signed-off-by: Florian Westphal <fw@...len.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 include/net/netfilter/nf_queue.h |    2 +-
 net/netfilter/nf_queue.c         |   13 +++++++++----
 net/netfilter/nfnetlink_queue.c  |   12 +++++++++---
 3 files changed, 19 insertions(+), 8 deletions(-)

--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -37,7 +37,7 @@ void nf_register_queue_handler(struct ne
void nf_unregister_queue_handler(struct net *net);
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
-void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
void nf_queue_entry_free(struct nf_queue_entry *entry);
static inline void init_hashrandom(u32 *jhash_initval)
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -100,16 +100,17 @@ static void __nf_queue_entry_init_physde
}
/* Bump dev refs so they don't vanish while packet is out */
-void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
struct nf_hook_state *state = &entry->state;
+ if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
+ return false;
+
if (state->in)
dev_hold(state->in);
if (state->out)
dev_hold(state->out);
- if (state->sk)
- sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (entry->physin)
@@ -117,6 +118,7 @@ void nf_queue_entry_get_refs(struct nf_q
if (entry->physout)
dev_hold(entry->physout);
#endif
+ return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
@@ -205,7 +207,10 @@ static int __nf_queue(struct sk_buff *sk
__nf_queue_entry_init_physdevs(entry);
- nf_queue_entry_get_refs(entry);
+ if (!nf_queue_entry_get_refs(entry)) {
+ kfree(entry);
+ return -ENOTCONN;
+ }
switch (entry->state.pf) {
case AF_INET:
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -712,9 +712,15 @@ static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
- if (entry)
- nf_queue_entry_get_refs(entry);
- return entry;
+
+ if (!entry)
+ return NULL;
+
+ if (nf_queue_entry_get_refs(entry))
+ return entry;
+
+ kfree(entry);
+ return NULL;
}
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)