Message-Id: <20190111131049.284504602@linuxfoundation.org>
Date: Fri, 11 Jan 2019 15:07:50 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Cong Wang <xiyou.wangcong@...il.com>,
"David S. Miller" <davem@...emloft.net>,
syzbot+f621cda8b7e598908efa@...kaller.appspotmail.com
Subject: [PATCH 4.4 21/88] netrom: fix locking in nr_find_socket()
4.4-stable review patch. If anyone has any objections, please let me know.
------------------
From: Cong Wang <xiyou.wangcong@...il.com>
[ Upstream commit 7314f5480f3e37e570104dc5e0f28823ef849e72 ]
nr_find_socket(), nr_find_peer() and nr_find_listener() lock the
sock after finding it in the global list. However, the call path
requires BH to be disabled consistently while the sock lock is held.
Actually, the locking is unnecessary at this point; we can just hold
the sock refcnt to make sure it is not gone after we unlock the global
list, and lock it later only when needed.
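
For readers skimming the diff, here is a minimal userspace C sketch of
the hold-then-lock pattern the patch adopts: the lookup takes a
reference while the list lock is held instead of locking the socket
there, and the caller locks the socket only at the point it is needed,
then drops the reference. All names (conn, conn_find, conn_hold,
conn_put) and the pthread/atomic primitives are hypothetical stand-ins
for sock, nr_find_socket(), sock_hold(), bh_lock_sock() and sock_put();
this is a model of the pattern, not kernel code.

/*
 * Minimal userspace model of the hold-then-lock pattern.
 * Build:  cc -o holdlock holdlock.c -pthread
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	int index, id;
	atomic_int refcnt;            /* models sk_refcnt     */
	pthread_mutex_t lock;         /* models the sock lock */
	struct conn *next;
};

static struct conn *conn_list;                                /* models nr_list      */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* models nr_list_lock */

static void conn_hold(struct conn *c)         /* models sock_hold() */
{
	atomic_fetch_add(&c->refcnt, 1);
}

static void conn_put(struct conn *c)          /* models sock_put() */
{
	if (atomic_fetch_sub(&c->refcnt, 1) == 1) {
		pthread_mutex_destroy(&c->lock);
		free(c);
	}
}

/*
 * Old scheme: return the object with its per-object lock already held.
 * New scheme: return it with only a reference held, so the caller can
 * take the lock later, in a context that is allowed to take it.
 */
static struct conn *conn_find(int index, int id)
{
	struct conn *c;

	pthread_mutex_lock(&list_lock);
	for (c = conn_list; c; c = c->next)
		if (c->index == index && c->id == id) {
			conn_hold(c);   /* was: lock the object here */
			break;
		}
	pthread_mutex_unlock(&list_lock);
	return c;                       /* NULL if not found */
}

static void rx_frame(int index, int id)
{
	struct conn *c = conn_find(index, id);

	if (!c)
		return;
	pthread_mutex_lock(&c->lock);   /* lock only where it is needed */
	printf("processing conn %d/%d\n", c->index, c->id);
	pthread_mutex_unlock(&c->lock);
	conn_put(c);                    /* drop the lookup reference */
}

int main(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	c->index = 1;
	c->id = 2;
	atomic_init(&c->refcnt, 1);     /* the list's reference */
	pthread_mutex_init(&c->lock, NULL);
	conn_list = c;

	rx_frame(1, 2);

	conn_list = NULL;               /* unlink and drop the list's ref */
	conn_put(c);
	return 0;
}

Holding only a reference across the list walk means the per-socket lock
is no longer taken from the lookup helpers themselves, which is the
inconsistency the syzbot report pointed at; the caller takes it later,
with BH disabled, as the hunks in nr_rx_frame() below show.
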
Reported-and-tested-by: syzbot+f621cda8b7e598908efa@...kaller.appspotmail.com
Signed-off-by: Cong Wang <xiyou.wangcong@...il.com>
Signed-off-by: David S. Miller <davem@...emloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
net/netrom/af_netrom.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax2
sk_for_each(s, &nr_list)
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
s->sk_state == TCP_LISTEN) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
s = NULL;
@@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsig
struct nr_sock *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigne
if (nr->your_index == index && nr->your_id == id &&
!ax25cmp(&nr->dest_addr, dest)) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -224,7 +224,7 @@ static unsigned short nr_find_next_circu
if (i != 0 && j != 0) {
if ((sk=nr_find_socket(i, j)) == NULL)
break;
- bh_unlock_sock(sk);
+ sock_put(sk);
}
id++;
@@ -918,6 +918,7 @@ int nr_rx_frame(struct sk_buff *skb, str
}
if (sk != NULL) {
+ bh_lock_sock(sk);
skb_reset_transport_header(skb);
if (frametype == NR_CONNACK && skb->len == 22)
@@ -927,6 +928,7 @@ int nr_rx_frame(struct sk_buff *skb, str
ret = nr_process_rx_frame(sk, skb);
bh_unlock_sock(sk);
+ sock_put(sk);
return ret;
}
@@ -958,10 +960,12 @@ int nr_rx_frame(struct sk_buff *skb, str
(make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
if (sk)
- bh_unlock_sock(sk);
+ sock_put(sk);
return 0;
}
+ bh_lock_sock(sk);
+
window = skb->data[20];
skb->sk = make;
@@ -1014,6 +1018,7 @@ int nr_rx_frame(struct sk_buff *skb, str
sk->sk_data_ready(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
nr_insert_socket(make);