Message-Id: <20190107104455.202756371@linuxfoundation.org>
Date: Mon, 7 Jan 2019 13:30:45 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Cong Wang <xiyou.wangcong@...il.com>,
"David S. Miller" <davem@...emloft.net>,
syzbot+f621cda8b7e598908efa@...kaller.appspotmail.com
Subject: [PATCH 4.19 018/170] netrom: fix locking in nr_find_socket()
4.19-stable review patch. If anyone has any objections, please let me know.
------------------
From: Cong Wang <xiyou.wangcong@...il.com>
[ Upstream commit 7314f5480f3e37e570104dc5e0f28823ef849e72 ]
nr_find_socket(), nr_find_peer() and nr_find_listener() lock the
sock after finding it in the global list. However, the call path
requires BH to be disabled for the sock lock consistently.
Actually, the locking is unnecessary at this point; we can just hold
the sock refcnt to make sure it is not gone after we unlock the global
list, and lock it later only when needed.
Reported-and-tested-by: syzbot+f621cda8b7e598908efa@...kaller.appspotmail.com
Signed-off-by: Cong Wang <xiyou.wangcong@...il.com>
Signed-off-by: David S. Miller <davem@...emloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
net/netrom/af_netrom.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
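
[Editorial note: the pattern the patch switches to is to take a reference
on the sock while the list spinlock is held, drop the list lock, and take
the sock lock only at the point where the sock is actually used. Below is
a minimal sketch of that idea, not the exact code in this diff; the helper
names nr_find_and_hold() and nr_use_sock() are invented for illustration,
while the list, lock and accessor names follow net/netrom/af_netrom.c.]

	/*
	 * Sketch: pin the sock with a refcount under the list lock
	 * instead of locking it there.
	 */
	static struct sock *nr_find_and_hold(ax25_address *addr)
	{
		struct sock *s;

		spin_lock_bh(&nr_list_lock);
		sk_for_each(s, &nr_list)
			if (!ax25cmp(&nr_sk(s)->source_addr, addr)) {
				sock_hold(s);	/* pin the sock; do not lock it here */
				spin_unlock_bh(&nr_list_lock);
				return s;
			}
		spin_unlock_bh(&nr_list_lock);
		return NULL;
	}

	/* Caller, later in the receive path, where BH is already disabled: */
	static void nr_use_sock(struct sock *sk)
	{
		bh_lock_sock(sk);	/* lock only when the sock state is needed */
		/* ... process the frame ... */
		bh_unlock_sock(sk);
		sock_put(sk);		/* drop the reference from nr_find_and_hold() */
	}
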
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax2
sk_for_each(s, &nr_list)
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
s->sk_state == TCP_LISTEN) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
s = NULL;
@@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsig
struct nr_sock *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigne
if (nr->your_index == index && nr->your_id == id &&
!ax25cmp(&nr->dest_addr, dest)) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -224,7 +224,7 @@ static unsigned short nr_find_next_circu
if (i != 0 && j != 0) {
if ((sk=nr_find_socket(i, j)) == NULL)
break;
- bh_unlock_sock(sk);
+ sock_put(sk);
}
id++;
@@ -920,6 +920,7 @@ int nr_rx_frame(struct sk_buff *skb, str
}
if (sk != NULL) {
+ bh_lock_sock(sk);
skb_reset_transport_header(skb);
if (frametype == NR_CONNACK && skb->len == 22)
@@ -929,6 +930,7 @@ int nr_rx_frame(struct sk_buff *skb, str
ret = nr_process_rx_frame(sk, skb);
bh_unlock_sock(sk);
+ sock_put(sk);
return ret;
}
@@ -960,10 +962,12 @@ int nr_rx_frame(struct sk_buff *skb, str
(make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
if (sk)
- bh_unlock_sock(sk);
+ sock_put(sk);
return 0;
}
+ bh_lock_sock(sk);
+
window = skb->data[20];
skb->sk = make;
@@ -1016,6 +1020,7 @@ int nr_rx_frame(struct sk_buff *skb, str
sk->sk_data_ready(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
nr_insert_socket(make);