Message-Id: <20161209161748.418529610@linuxfoundation.org>
Date: Fri, 9 Dec 2016 17:17:54 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Herbert Xu <herbert@...dor.apana.org.au>,
Andrey Konovalov <andreyknvl@...gle.com>,
"David S. Miller" <davem@...emloft.net>
Subject: [PATCH 4.4 12/28] netlink: Do not schedule work from sk_destruct
4.4-stable review patch. If anyone has any objections, please let me know.
------------------
From: Herbert Xu <herbert@...dor.apana.org.au>
[ Upstream commit ed5d7788a934a4b6d6d025e948ed4da496b4f12e ]

It is wrong to schedule work from sk_destruct using the socket as the
memory reserve, because the socket will be freed immediately after
sk_destruct returns.

Instead, the deferral should be done prior to sk_free.

This patch does just that.
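
For review purposes only, a minimal user-space sketch of the ordering
problem described above; this is not kernel code, and every name in it
(sock_model, cb_done, destruct_then_free, defer_then_free) is invented
for illustration:

/*
 * Illustrative user-space model of the ordering bug; not kernel code.
 * The structure and helper names below exist only in this sketch.
 */
#include <stdio.h>
#include <stdlib.h>

struct sock_model {
	int cb_running;				/* a dump callback is still pending */
	void (*done)(struct sock_model *);	/* its ->done() hook */
};

static void cb_done(struct sock_model *sk)
{
	printf("done() ran while %p is still valid\n", (void *)sk);
}

/*
 * Buggy shape (what the old code effectively did): the destruct hook
 * queues work that will later dereference sk, but the caller frees the
 * socket as soon as the hook returns, so the worker touches freed memory.
 */
void destruct_then_free(struct sock_model *sk)
{
	/* imagine scheduling work here that calls sk->done(sk) later ... */
	free(sk);		/* ... after this free: use-after-free */
}

/*
 * Fixed shape (the intent of the patch): finish or defer the callback
 * first, and only do the final free once nothing can touch sk again.
 */
void defer_then_free(struct sock_model *sk)
{
	if (sk->cb_running && sk->done)
		sk->done(sk);	/* runs while the memory is still live */
	free(sk);		/* the equivalent of the final sk_free() */
}

int main(void)
{
	struct sock_model *sk = calloc(1, sizeof(*sk));

	if (!sk)
		return 1;
	sk->cb_running = 1;
	sk->done = cb_done;
	defer_then_free(sk);
	return 0;
}

Compile with any C compiler (e.g. cc -Wall sketch.c). The only point is
the ordering: in the fixed shape the pending callback runs before
free(), mirroring how the patch arranges for the deferral to happen
before the final sk_free().
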
Fixes: 707693c8a498 ("netlink: Call cb->done from a worker thread")
Signed-off-by: Herbert Xu <herbert@...dor.apana.org.au>
Tested-by: Andrey Konovalov <andreyknvl@...gle.com>
Signed-off-by: David S. Miller <davem@...emloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
net/netlink/af_netlink.c | 32 +++++++++++++++-----------------
1 file changed, 15 insertions(+), 17 deletions(-)

--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -924,11 +924,13 @@ static void netlink_skb_set_owner_r(stru
 	sk_mem_charge(sk, skb->truesize);
 }
 
-static void __netlink_sock_destruct(struct sock *sk)
+static void netlink_sock_destruct(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
 	if (nlk->cb_running) {
+		if (nlk->cb.done)
+			nlk->cb.done(&nlk->cb);
 		module_put(nlk->cb.module);
 		kfree_skb(nlk->cb.skb);
 	}
@@ -962,21 +964,7 @@ static void netlink_sock_destruct_work(s
 	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
 						work);
 
-	nlk->cb.done(&nlk->cb);
-	__netlink_sock_destruct(&nlk->sk);
-}
-
-static void netlink_sock_destruct(struct sock *sk)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-
-	if (nlk->cb_running && nlk->cb.done) {
-		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
-		schedule_work(&nlk->work);
-		return;
-	}
-
-	__netlink_sock_destruct(sk);
+	sk_free(&nlk->sk);
 }
 
 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
@@ -1284,8 +1272,18 @@ out_module:
 static void deferred_put_nlk_sk(struct rcu_head *head)
 {
 	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
+	struct sock *sk = &nlk->sk;
+
+	if (!atomic_dec_and_test(&sk->sk_refcnt))
+		return;
+
+	if (nlk->cb_running && nlk->cb.done) {
+		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
+		schedule_work(&nlk->work);
+		return;
+	}
 
-	sock_put(&nlk->sk);
+	sk_free(sk);
 }
 
 static int netlink_release(struct socket *sock)