[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1464089188-6155-4-git-send-email-pranjas@gmail.com>
Date: Tue, 24 May 2016 14:26:27 +0300
From: "Pranay Kr. Srivastava" <pranjas@...il.com>
To: mpa@...gutronix.de, nbd-general@...ts.sourceforge.net,
linux-kernel@...r.kernel.org
Cc: "Pranay Kr. Srivastava" <pranjas@...il.com>
Subject: [PATCH 3/4] make nbd device wait for its users.
When a timeout occurs or a recv fails, then
instead of abruptly killing the nbd block device,
wait for its users to finish.
This is especially required for filesystems such as
ext2 or ext3, which don't expect their buffer heads to
disappear while the filesystem is mounted.
Use a kref to track the users of this device. The device
will be released only when the kref count is exactly 2 —
no less, no more.
Signed-off-by: Pranay Kr. Srivastava <pranjas@...il.com>
---
drivers/block/nbd.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 51 insertions(+)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index af86c9b..59db890 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -71,6 +71,8 @@ struct nbd_device {
struct dentry *dbg_dir;
#endif
struct work_struct ws_nbd;
+ struct kref users;
+ struct completion user_completion;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -674,6 +676,7 @@ static void nbd_reset(struct nbd_device *nbd)
nbd->flags = 0;
nbd->xmit_timeout = 0;
INIT_WORK(&nbd->ws_nbd, nbd_work_func);
+ init_completion(&nbd->user_completion);
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
del_timer_sync(&nbd->timeout_timer);
}
@@ -807,6 +810,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
kthread_stop(thread);
sock_shutdown(nbd);
+ wait_for_completion(&nbd->user_completion);
mutex_lock(&nbd->tx_lock);
nbd_clear_que(nbd);
kill_bdev(bdev);
@@ -858,12 +862,58 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
return error;
}
+/*
+ * nbd_kref_release - kref release callback for nbd_device->users.
+ *
+ * Invoked when the user refcount drops to zero; wakes the disconnect
+ * path blocked in wait_for_completion(&nbd->user_completion) so the
+ * device is only torn down after its last user is gone.
+ */
+static void nbd_kref_release(struct kref *kref_users)
+{
+	struct nbd_device *nbd = container_of(kref_users, struct nbd_device,
+					      users);
+	/* __func__ is the kernel-preferred spelling (__FUNCTION__ is deprecated). */
+	pr_debug("Releasing kref [%s]\n", __func__);
+	complete(&nbd->user_completion);
+}
+
+/*
+ * nbd_open - block_device_operations.open hook.
+ *
+ * Takes one user reference per opener so that a disconnect/timeout
+ * will block (on nbd->user_completion) until every opener has gone
+ * away. Always succeeds.
+ *
+ * NOTE(review): the pr_debug() below reads users.refcount directly,
+ * peeking into kref internals; debug-only here, but fragile if the
+ * kref implementation changes — confirm no accessor is available on
+ * this kernel version.
+ */
+static int nbd_open(struct block_device *bdev, fmode_t mode)
+{
+ struct nbd_device *nbd_dev = bdev->bd_disk->private_data;
+
+ kref_get(&nbd_dev->users);
+ pr_debug("Opening nbd_dev %s. Active users = %u\n",
+ bdev->bd_disk->disk_name,
+ atomic_read(&nbd_dev->users.refcount) - 1);
+ return 0;
+}
+
+/*
+ * nbd_release - block_device_operations.release hook.
+ *
+ * Drops the reference taken in nbd_open(). When only the baseline
+ * references remain (count == 2), the kref is dropped to zero —
+ * firing nbd_kref_release(), which completes user_completion — and
+ * then re-initialized back to the "one builtin user" state for the
+ * next attach cycle.
+ *
+ * NOTE(review): the atomic_read()==2 check and the kref_sub() below
+ * are not one atomic step; a concurrent nbd_open() (kref_get) between
+ * them could race and corrupt the count — presumably this is meant to
+ * be serialized by an outer lock, verify against the callers.
+ */
+static void nbd_release(struct gendisk *disk, fmode_t mode)
+{
+ struct nbd_device *nbd_dev = disk->private_data;
+ /*
+ *kref_init initializes ref count to 1, so we
+ *check for refcount to be 2 for a final put.
+ *
+ *kref needs to be re-initialized just here as the
+ *other process holding it must see the ref count as 2.
+ */
+ kref_put(&nbd_dev->users, nbd_kref_release);
+
+ if (atomic_read(&nbd_dev->users.refcount) == 2) {
+ /* Drop both remaining refs: this calls nbd_kref_release(). */
+ kref_sub(&nbd_dev->users, 2, nbd_kref_release);
+ /* Reset to 1 (builtin ref), then to 2 as seen by other holders. */
+ kref_init(&nbd_dev->users);
+ kref_get(&nbd_dev->users);
+ }
+
+ pr_debug("Closing nbd_dev %s. Active users = %u\n",
+ disk->disk_name,
+ atomic_read(&nbd_dev->users.refcount) - 1);
+}
+
/* Block-device entry points; open/release track users via nbd->users. */
static const struct block_device_operations nbd_fops = {
.owner = THIS_MODULE,
.ioctl = nbd_ioctl,
.compat_ioctl = nbd_ioctl,
+ .open = nbd_open,
+ .release = nbd_release,
};
+
static void nbd_work_func(struct work_struct *ws_nbd)
{
struct nbd_device *nbd_dev = container_of(ws_nbd, struct nbd_device,
@@ -1098,6 +1148,7 @@ static int __init nbd_init(void)
disk->first_minor = i << part_shift;
disk->fops = &nbd_fops;
disk->private_data = &nbd_dev[i];
+ kref_init(&nbd_dev[i].users);
sprintf(disk->disk_name, "nbd%d", i);
nbd_reset(&nbd_dev[i]);
add_disk(disk);
--
1.7.9.5
Powered by blists - more mailing lists