Date:	Thu, 30 Jun 2016 14:02:02 +0300
From:	"Pranay Kr. Srivastava" <pranjas@...il.com>
To:	mpa@...gutronix.de, nbd-general@...ts.sourceforge.net,
	linux-kernel@...r.kernel.org
Cc:	"Pranay Kr. Srivastava" <pranjas@...il.com>
Subject: [PATCH v4 2/5] nbd: fix might_sleep warning on socket shutdown

Spinlocked regions should be small and should not contain calls into huge
subfunctions. Fix my mistake and just take the socket pointer under the
lock instead of doing everything with the spinlock held.

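A minimal sketch of that pattern, for illustration only (the demo_* names
are made up and are not part of the driver): detach the pointer inside the
tiny locked region, then make the calls that may sleep with no lock held.

#include <linux/spinlock.h>
#include <linux/net.h>	/* struct socket, kernel_sock_shutdown(), SHUT_RDWR */
#include <linux/file.h>	/* sockfd_put() */

struct demo_dev {
	spinlock_t sock_lock;
	struct socket *sock;
};

/* Hypothetical example, not the nbd code itself. */
static void demo_sock_shutdown(struct demo_dev *dev)
{
	struct socket *sock;

	/* Keep the locked region small: just swap the pointer out. */
	spin_lock(&dev->sock_lock);
	sock = dev->sock;
	dev->sock = NULL;
	spin_unlock(&dev->sock_lock);

	if (!sock)
		return;

	/* These calls may sleep, so they run without the spinlock. */
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sockfd_put(sock);
}
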
Reported-by: Mikulas Patocka <mikulas@...bright.com>
Signed-off-by: Markus Pargmann <mpa@...gutronix.de>

Changelog:
Pranay Kr. Srivastava <pranjas@...il.com>:

1) Use spin_lock() instead of the irq variant for sock_shutdown().

2) Use the system work queue to actually trigger the shutdown of the
socket. This solves the issue where kernel_sendmsg() is blocked at the
moment a timeout occurs (a minimal sketch of the pattern follows this
changelog).

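For reference, a minimal sketch of that deferral pattern, with made-up
demo_* names and assuming the old (pre-4.15) timer callback signature used
by this driver: the timer callback runs in atomic context and only queues
work; the blocking shutdown runs later in process context on the system
workqueue.

#include <linux/kernel.h>	/* container_of() */
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct timer_list timeout_timer;
	struct work_struct shutdown_work;
};

static void demo_shutdown_work(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev,
					    shutdown_work);

	/*
	 * Process context: blocking calls such as kernel_sock_shutdown()
	 * and sockfd_put() are safe here.
	 */
	(void)dev;
}

static void demo_xmit_timeout(unsigned long arg)
{
	struct demo_dev *dev = (struct demo_dev *)arg;

	/* Timer callbacks must not sleep; just queue the work. */
	schedule_work(&dev->shutdown_work);
}

/*
 * Setup, e.g. during device init:
 *	INIT_WORK(&dev->shutdown_work, demo_shutdown_work);
 *	setup_timer(&dev->timeout_timer, demo_xmit_timeout,
 *		    (unsigned long)dev);
 */
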
Signed-off-by: Pranay Kr. Srivastava <pranjas@...il.com>
---
 drivers/block/nbd.c | 57 +++++++++++++++++++++++++++++++++--------------------
 1 file changed, 36 insertions(+), 21 deletions(-)

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 766c401..e362d44 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -39,6 +39,7 @@
 #include <asm/types.h>
 
 #include <linux/nbd.h>
+#include <linux/workqueue.h>
 
 struct nbd_device {
 	u32 flags;
@@ -69,6 +70,8 @@ struct nbd_device {
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	struct dentry *dbg_dir;
 #endif
+	/* This is specifically for calling sock_shutdown, for now. */
+	struct work_struct ws_shutdown;
 };
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -95,6 +98,8 @@ static int max_part;
  */
 static DEFINE_SPINLOCK(nbd_lock);
 
+static void nbd_ws_func_shutdown(struct work_struct *);
+
 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {
 	return disk_to_dev(nbd->disk);
@@ -172,39 +177,36 @@ static void nbd_end_request(struct nbd_device *nbd, struct request *req)
  */
 static void sock_shutdown(struct nbd_device *nbd)
 {
-	spin_lock_irq(&nbd->sock_lock);
-
-	if (!nbd->sock) {
-		spin_unlock_irq(&nbd->sock_lock);
-		return;
-	}
+	struct socket *sock;
 
-	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
-	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
-	sockfd_put(nbd->sock);
+	spin_lock(&nbd->sock_lock);
+	sock = nbd->sock;
 	nbd->sock = NULL;
-	spin_unlock_irq(&nbd->sock_lock);
+	spin_unlock(&nbd->sock_lock);
+
+	if (!sock)
+		return;
 
 	del_timer(&nbd->timeout_timer);
+	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
+	kernel_sock_shutdown(sock, SHUT_RDWR);
+	sockfd_put(sock);
 }
 
 static void nbd_xmit_timeout(unsigned long arg)
 {
 	struct nbd_device *nbd = (struct nbd_device *)arg;
-	unsigned long flags;
 
 	if (list_empty(&nbd->queue_head))
 		return;
 
-	spin_lock_irqsave(&nbd->sock_lock, flags);
-
 	nbd->timedout = true;
-
-	if (nbd->sock)
-		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
-
-	spin_unlock_irqrestore(&nbd->sock_lock, flags);
-
+	/*
+	 * Make sure sender thread sees nbd->timedout.
+	 */
+	smp_wmb();
+	schedule_work(&nbd->ws_shutdown);
+	wake_up(&nbd->waiting_wq);
 	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
 }
 
@@ -588,7 +590,11 @@ static int nbd_thread_send(void *data)
 		spin_unlock_irq(&nbd->queue_lock);
 
 		/* handle request */
-		nbd_handle_req(nbd, req);
+		if (nbd->timedout) {
+			req->errors++;
+			nbd_end_request(nbd, req);
+		} else
+			nbd_handle_req(nbd, req);
 	}
 
 	nbd->task_send = NULL;
@@ -663,6 +669,7 @@ static void nbd_reset(struct nbd_device *nbd)
 	set_capacity(nbd->disk, 0);
 	nbd->flags = 0;
 	nbd->xmit_timeout = 0;
+	INIT_WORK(&nbd->ws_shutdown, nbd_ws_func_shutdown);
 	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 	del_timer_sync(&nbd->timeout_timer);
 }
@@ -797,11 +804,11 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		error = nbd_thread_recv(nbd, bdev);
 		nbd_dev_dbg_close(nbd);
 		kthread_stop(thread);
+		sock_shutdown(nbd);
 
 		mutex_lock(&nbd->tx_lock);
 		nbd->task_recv = NULL;
 
-		sock_shutdown(nbd);
 		nbd_clear_que(nbd);
 		kill_bdev(bdev);
 		nbd_bdev_reset(bdev);
@@ -857,6 +864,14 @@ static const struct block_device_operations nbd_fops = {
 	.compat_ioctl =	nbd_ioctl,
 };
 
+static void nbd_ws_func_shutdown(struct work_struct *ws_nbd)
+{
+	struct nbd_device *nbd_dev = container_of(ws_nbd, struct nbd_device,
+			ws_shutdown);
+
+	sock_shutdown(nbd_dev);
+}
+
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 
 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
-- 
1.9.1
