Message-ID: <20210630073310epcms1p2ad6803cfd9dbc8ab501c4c99f799f4da@epcms1p2>
Date:   Wed, 30 Jun 2021 16:33:10 +0900
From:   권오훈 <ohoono.kwon@...sung.com>
To:     "akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
        "konrad.wilk@...cle.com" <konrad.wilk@...cle.com>,
        "gregkh@...uxfoundation.org" <gregkh@...uxfoundation.org>
CC:     권오훈 <ohoono.kwon@...sung.com>,
        "ohkwon1043@...il.com" <ohkwon1043@...il.com>,
        "linux-mm@...ck.org" <linux-mm@...ck.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: [PATCH] mm: cleancache: fix potential race in cleancache apis

The current cleancache API implementation has a potential race, shown below,
which might lead to corruption in filesystems that use cleancache.

thread 0                thread 1                        thread 2

                        in put_page
                        get pool_id K for fs1
invalidate_fs on fs1
frees pool_id K
                                                        init_fs for fs2
                                                        allocates pool_id K
                        put_page puts page
                        which belongs to fs1
                        into cleancache pool for fs2

At this point, a page cache page that originally belongs to fs1 may be
copied into the cleancache pool of fs2, later treated as if it were valid
cleancache data for fs2, and eventually corrupt fs2 when it is retrieved.

Add an rwlock in order to synchronize invalidate_fs with the other
cleancache operations.

In normal situations, where filesystems are not mounted or unmounted
frequently, the performance impact should be small, since the hot paths
only take read_lock/read_unlock.
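
The resulting locking rule, in outline (with sb the relevant super_block):

	/*
	 * Readers (hot paths): __cleancache_get_page(), __cleancache_put_page(),
	 * __cleancache_invalidate_page(), __cleancache_invalidate_inode().
	 */
	read_lock(&sb->cleancache_pool_lock);
	pool_id = sb->cleancache_poolid;
	/* ... call into cleancache_ops; the pool cannot be freed meanwhile ... */
	read_unlock(&sb->cleancache_pool_lock);

	/*
	 * Writers (mount/umount paths): __cleancache_init_fs(),
	 * __cleancache_init_shared_fs(), __cleancache_invalidate_fs().
	 */
	write_lock(&sb->cleancache_pool_lock);
	/* ... allocate or free the pool and update sb->cleancache_poolid ... */
	write_unlock(&sb->cleancache_pool_lock);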

Signed-off-by: Ohhoon Kwon <ohoono.kwon@...sung.com>
---
 fs/super.c         |  1 +
 include/linux/fs.h |  1 +
 mm/cleancache.c    | 29 ++++++++++++++++++++++++++---
 3 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/fs/super.c b/fs/super.c
index 11b7e7213fd1..6810b685490c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -261,6 +261,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
 	s->s_time_min = TIME64_MIN;
 	s->s_time_max = TIME64_MAX;
 	s->cleancache_poolid = CLEANCACHE_NO_POOL;
+	rwlock_init(&s->cleancache_pool_lock);
 
 	s->s_shrink.seeks = DEFAULT_SEEKS;
 	s->s_shrink.scan_objects = super_cache_scan;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c3c88fdb9b2a..f61008c9e8fc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1501,6 +1501,7 @@ struct super_block {
 	 * Saved pool identifier for cleancache (-1 means none)
 	 */
 	int cleancache_poolid;
+	rwlock_t cleancache_pool_lock;
 
 	struct shrinker s_shrink;	/* per-sb shrinker handle */
 
diff --git a/mm/cleancache.c b/mm/cleancache.c
index db7eee9c0886..10b436a28219 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -114,12 +114,14 @@ void __cleancache_init_fs(struct super_block *sb)
 {
 	int pool_id = CLEANCACHE_NO_BACKEND;
 
+	write_lock(&sb->cleancache_pool_lock);
 	if (cleancache_ops) {
 		pool_id = cleancache_ops->init_fs(PAGE_SIZE);
 		if (pool_id < 0)
 			pool_id = CLEANCACHE_NO_POOL;
 	}
 	sb->cleancache_poolid = pool_id;
+	write_unlock(&sb->cleancache_pool_lock);
 }
 EXPORT_SYMBOL(__cleancache_init_fs);
 
@@ -128,12 +130,14 @@ void __cleancache_init_shared_fs(struct super_block *sb)
 {
 	int pool_id = CLEANCACHE_NO_BACKEND_SHARED;
 
+	write_lock(&sb->cleancache_pool_lock);
 	if (cleancache_ops) {
 		pool_id = cleancache_ops->init_shared_fs(&sb->s_uuid, PAGE_SIZE);
 		if (pool_id < 0)
 			pool_id = CLEANCACHE_NO_POOL;
 	}
 	sb->cleancache_poolid = pool_id;
+	write_unlock(&sb->cleancache_pool_lock);
 }
 EXPORT_SYMBOL(__cleancache_init_shared_fs);
 
@@ -185,6 +189,7 @@ int __cleancache_get_page(struct page *page)
 	}
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	read_lock(&page->mapping->host->i_sb->cleancache_pool_lock);
 	pool_id = page->mapping->host->i_sb->cleancache_poolid;
 	if (pool_id < 0)
 		goto out;
@@ -198,6 +203,7 @@ int __cleancache_get_page(struct page *page)
 	else
 		cleancache_failed_gets++;
 out:
+	read_unlock(&page->mapping->host->i_sb->cleancache_pool_lock);
 	return ret;
 }
 EXPORT_SYMBOL(__cleancache_get_page);
@@ -223,12 +229,14 @@ void __cleancache_put_page(struct page *page)
 	}
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	read_lock(&page->mapping->host->i_sb->cleancache_pool_lock);
 	pool_id = page->mapping->host->i_sb->cleancache_poolid;
 	if (pool_id >= 0 &&
 		cleancache_get_key(page->mapping->host, &key) >= 0) {
 		cleancache_ops->put_page(pool_id, key, page->index, page);
 		cleancache_puts++;
 	}
+	read_unlock(&page->mapping->host->i_sb->cleancache_pool_lock);
 }
 EXPORT_SYMBOL(__cleancache_put_page);
 
@@ -244,12 +252,15 @@ void __cleancache_invalidate_page(struct address_space *mapping,
 					struct page *page)
 {
 	/* careful... page->mapping is NULL sometimes when this is called */
-	int pool_id = mapping->host->i_sb->cleancache_poolid;
+	int pool_id;
 	struct cleancache_filekey key = { .u.key = { 0 } };
 
 	if (!cleancache_ops)
 		return;
 
+	read_lock(&mapping->host->i_sb->cleancache_pool_lock);
+	pool_id = mapping->host->i_sb->cleancache_poolid;
+
 	if (pool_id >= 0) {
 		VM_BUG_ON_PAGE(!PageLocked(page), page);
 		if (cleancache_get_key(mapping->host, &key) >= 0) {
@@ -258,6 +269,7 @@ void __cleancache_invalidate_page(struct address_space *mapping,
 			cleancache_invalidates++;
 		}
 	}
+	read_unlock(&mapping->host->i_sb->cleancache_pool_lock);
 }
 EXPORT_SYMBOL(__cleancache_invalidate_page);
 
@@ -272,14 +284,19 @@ EXPORT_SYMBOL(__cleancache_invalidate_page);
  */
 void __cleancache_invalidate_inode(struct address_space *mapping)
 {
-	int pool_id = mapping->host->i_sb->cleancache_poolid;
+	int pool_id;
 	struct cleancache_filekey key = { .u.key = { 0 } };
 
 	if (!cleancache_ops)
 		return;
 
+	read_lock(&mapping->host->i_sb->cleancache_pool_lock);
+	pool_id = mapping->host->i_sb->cleancache_poolid;
+
 	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
 		cleancache_ops->invalidate_inode(pool_id, key);
+
+	read_unlock(&mapping->host->i_sb->cleancache_pool_lock);
 }
 EXPORT_SYMBOL(__cleancache_invalidate_inode);
 
@@ -292,11 +309,17 @@ void __cleancache_invalidate_fs(struct super_block *sb)
 {
 	int pool_id;
 
+	if (!cleancache_ops)
+		return;
+
+	write_lock(&sb->cleancache_pool_lock);
 	pool_id = sb->cleancache_poolid;
 	sb->cleancache_poolid = CLEANCACHE_NO_POOL;
 
-	if (cleancache_ops && pool_id >= 0)
+	if (pool_id >= 0)
 		cleancache_ops->invalidate_fs(pool_id);
+
+	write_unlock(&sb->cleancache_pool_lock);
 }
 EXPORT_SYMBOL(__cleancache_invalidate_fs);
 
-- 
2.17.1
