Message-ID: <lsq.1471110178.698656387@decadent.org.uk>
Date: Sat, 13 Aug 2016 18:42:58 +0100
From: Ben Hutchings <ben@...adent.org.uk>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
CC: akpm@...ux-foundation.org,
"Marek Lindner" <mareklindner@...mailbox.ch>,
"Amadeus Alfa" <amadeus@...mnitz.freifunk.net>,
"Sven Eckelmann" <sven@...fation.org>,
"Martin Weinelt" <martin@...mstadt.freifunk.net>,
"David S. Miller" <davem@...emloft.net>
Subject: [PATCH 3.2 75/94] batman-adv: Fix use-after-free/double-free of
tt_req_node
3.2.82-rc1 review patch. If anyone has any objections, please let me know.
------------------
From: Sven Eckelmann <sven@...fation.org>
commit 9c4604a298e0a9807eaf2cd912d1ebf24d98fbeb upstream.
The tt_req_node is added to and removed from a list under a spinlock. But the
lock is sometimes released while the object is still referenced and will be
used later via this reference. For example, batadv_send_tt_request can create
a new tt_req_node (adding it to the list) and later re-acquire the lock to
remove it from the list and free it. But by that time another context may
already have removed this tt_req_node from the list and freed it.
CPU#0

  batadv_batman_skb_recv from net_device 0
  -> batadv_iv_ogm_receive
    -> batadv_iv_ogm_process
      -> batadv_iv_ogm_process_per_outif
        -> batadv_tvlv_ogm_receive
          -> batadv_tvlv_containers_process
            -> batadv_tvlv_call_handler
              -> batadv_tt_tvlv_ogm_handler_v1
                -> batadv_tt_update_orig
                  -> batadv_send_tt_request
                    -> batadv_tt_req_node_new
                       spin_lock(...)
                       allocates new tt_req_node and adds it to list
                       spin_unlock(...)
                       return tt_req_node

CPU#1

  batadv_batman_skb_recv from net_device 1
  -> batadv_recv_unicast_tvlv
    -> batadv_tvlv_containers_process
      -> batadv_tvlv_call_handler
        -> batadv_tt_tvlv_unicast_handler_v1
          -> batadv_handle_tt_response
             spin_lock(...)
             tt_req_node gets removed from list and is freed
             spin_unlock(...)

CPU#0

  <- returned to batadv_send_tt_request
     spin_lock(...)
     tt_req_node gets removed from list and is freed
     MEMORY CORRUPTION/SEGFAULT/...
     spin_unlock(...)
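
Reduced to code, the interleaving above boils down to roughly the following
unsafe pattern (condensed from the pre-patch code paths changed by the diff
below; error handling and unrelated details are omitted, so this is a sketch,
not the verbatim source):

    /* CPU#0, batadv_send_tt_request(): allocate and publish the node,
     * then keep using the bare pointer after dropping the lock.
     */
    spin_lock_bh(&bat_priv->tt_req_list_lock);
    tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
    list_add(&tt_req_node->list, &bat_priv->tt_req_list);
    spin_unlock_bh(&bat_priv->tt_req_list_lock);

    /* CPU#1, handle_tt_response(), may now run:
     *   list_del(&node->list);
     *   kfree(node);
     */

    /* CPU#0 error path, still holding the stale pointer: */
    spin_lock_bh(&bat_priv->tt_req_list_lock);
    list_del(&tt_req_node->list);          /* use-after-free */
    spin_unlock_bh(&bat_priv->tt_req_list_lock);
    kfree(tt_req_node);                    /* double free */
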
This can only be solved via reference counting to allow multiple contexts
to handle the list manipulation while making sure that only the last
context holding a reference will free the object.
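
As a rough, self-contained sketch of this scheme (kernel-style C; the struct
and helper names here are illustrative, not the exact batman-adv symbols, and
the real conversion is in the patch below):

    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct req_node {
            struct list_head list;
            struct kref refcount;
    };

    static void req_node_release(struct kref *ref)
    {
            /* called only when the last reference is dropped */
            kfree(container_of(ref, struct req_node, refcount));
    }

    /* creator: one reference for the caller, one for the list */
    static struct req_node *req_node_new(struct list_head *head,
                                         spinlock_t *lock)
    {
            struct req_node *node = kmalloc(sizeof(*node), GFP_ATOMIC);

            if (!node)
                    return NULL;
            kref_init(&node->refcount);     /* caller's reference */
            spin_lock_bh(lock);
            kref_get(&node->refcount);      /* list's reference */
            list_add(&node->list, head);
            spin_unlock_bh(lock);
            return node;
    }

    /* any context: unlink under the lock and drop the list's reference;
     * list_del_init() keeps list_empty() meaningful so a second caller
     * skips the put, and whoever drops the last reference frees the node
     */
    static void req_node_unlink(struct req_node *node, spinlock_t *lock)
    {
            spin_lock_bh(lock);
            if (!list_empty(&node->list)) {
                    list_del_init(&node->list);
                    kref_put(&node->refcount, req_node_release);
            }
            spin_unlock_bh(lock);
    }

    /* the creator drops its own reference once it is done with the
     * pointer:  kref_put(&node->refcount, req_node_release);
     */

The patch below follows the same idea: batadv_send_tt_request only unlinks
the node on its error path, otherwise it leaves the node on the list for
handle_tt_response() or tt_req_purge() to remove, and it unconditionally
drops its own reference before returning.
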
Fixes: a73105b8d4c7 ("batman-adv: improved client announcement mechanism")
Signed-off-by: Sven Eckelmann <sven@...fation.org>
Tested-by: Martin Weinelt <martin@...mstadt.freifunk.net>
Tested-by: Amadeus Alfa <amadeus@...mnitz.freifunk.net>
Signed-off-by: Marek Lindner <mareklindner@...mailbox.ch>
Signed-off-by: David S. Miller <davem@...emloft.net>
[bwh: Backported to 3.2:
- Adjust context
- Use struct tt_req_node instead of struct batadv_tt_req_node
- Use list_empty() instead of hlist_unhashed()
- Drop kernel-doc change]
Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
net/batman-adv/translation-table.c | 43 ++++++++++++++++++++++++++++++++------
net/batman-adv/types.h | 2 ++
2 files changed, 39 insertions(+), 6 deletions(-)
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -952,6 +952,29 @@ uint16_t tt_local_crc(struct bat_priv *b
return total;
}
+/**
+ * batadv_tt_req_node_release - free tt_req node entry
+ * @ref: kref pointer of the tt req_node entry
+ */
+static void batadv_tt_req_node_release(struct kref *ref)
+{
+ struct tt_req_node *tt_req_node;
+
+ tt_req_node = container_of(ref, struct tt_req_node, refcount);
+
+ kfree(tt_req_node);
+}
+
+/**
+ * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ * possibly release it
+ * @tt_req_node: tt_req_node to be free'd
+ */
+static void batadv_tt_req_node_put(struct tt_req_node *tt_req_node)
+{
+ kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
+}
+
static void tt_req_list_free(struct bat_priv *bat_priv)
{
struct tt_req_node *node, *safe;
@@ -960,7 +983,7 @@ static void tt_req_list_free(struct bat_
list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
list_del(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt_req_list_lock);
@@ -995,7 +1018,7 @@ static void tt_req_purge(struct bat_priv
if (is_out_of_time(node->issued_at,
TT_REQUEST_TIMEOUT * 1000)) {
list_del(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
}
spin_unlock_bh(&bat_priv->tt_req_list_lock);
@@ -1020,9 +1043,11 @@ static struct tt_req_node *new_tt_req_no
if (!tt_req_node)
goto unlock;
+ kref_init(&tt_req_node->refcount);
memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
tt_req_node->issued_at = jiffies;
+ kref_get(&tt_req_node->refcount);
list_add(&tt_req_node->list, &bat_priv->tt_req_list);
unlock:
spin_unlock_bh(&bat_priv->tt_req_list_lock);
@@ -1174,12 +1199,19 @@ out:
hardif_free_ref(primary_if);
if (ret)
kfree_skb(skb);
+
if (ret && tt_req_node) {
spin_lock_bh(&bat_priv->tt_req_list_lock);
- list_del(&tt_req_node->list);
+ if (!list_empty(&tt_req_node->list)) {
+ list_del(&tt_req_node->list);
+ batadv_tt_req_node_put(tt_req_node);
+ }
spin_unlock_bh(&bat_priv->tt_req_list_lock);
- kfree(tt_req_node);
}
+
+ if (tt_req_node)
+ batadv_tt_req_node_put(tt_req_node);
+
return ret;
}
@@ -1552,7 +1584,7 @@ void handle_tt_response(struct bat_priv
if (!compare_eth(node->addr, tt_response->src))
continue;
list_del(&node->list);
- kfree(node);
+ batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt_req_list_lock);
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -250,6 +250,7 @@ struct tt_change_node {
struct tt_req_node {
uint8_t addr[ETH_ALEN];
unsigned long issued_at;
+ struct kref refcount;
struct list_head list;
};