Message-ID: <20191008125616.183715-2-gaoxiang25@huawei.com>
Date:   Tue, 8 Oct 2019 20:56:13 +0800
From:   Gao Xiang <gaoxiang25@...wei.com>
To:     Chao Yu <yuchao0@...wei.com>, <linux-erofs@...ts.ozlabs.org>
CC:     LKML <linux-kernel@...r.kernel.org>, Miao Xie <miaoxie@...wei.com>,
        "Li Guifu" <bluce.liguifu@...wei.com>,
        Gao Xiang <gaoxiang25@...wei.com>
Subject: [PATCH for-next 2/5] erofs: remove dead code since managed cache is now built-in

After commit 4279f3f9889f ("staging: erofs: turn cache
strategies into mount options"), cache strategies are
selected per mount at runtime rather than by the old
build-time configuration, and the managed cache is always
built in.

Let's remove the dead code left over from the obsolete
build options.

Signed-off-by: Gao Xiang <gaoxiang25@...wei.com>
---
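(Not part of the patch -- an illustrative sketch of the shape of the
change, for reviewers. Before commit 4279f3f9889f the managed cache
was a build-time decision, roughly:

	#ifdef EROFS_FS_HAS_MANAGED_CACHE
		/* managed-cache code */
	#endif

Since that commit the cache code is always built in and the strategy
is chosen per mount while parsing options, along the lines of:

	/* helper name here is hypothetical; `cache_strategy' and the
	 * EROFS_ZIP_CACHE_* values follow the mount-option code */
	static bool erofs_use_managed_pages(struct erofs_sb_info *sbi)
	{
		return sbi->cache_strategy != EROFS_ZIP_CACHE_DISABLED;
	}

so the compile-time `nocache' path in pickup_page_for_submission()
is dead and can go.)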
 fs/erofs/utils.c | 13 ++++++-------
 fs/erofs/zdata.c | 25 ++++---------------------
 2 files changed, 10 insertions(+), 28 deletions(-)

diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index d92b3e753a6f..f66043ee16b9 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -149,8 +149,7 @@ static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
 }
 
 static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
-					   struct erofs_workgroup *grp,
-					   bool cleanup)
+					   struct erofs_workgroup *grp)
 {
 	/*
 	 * If managed cache is on, refcount of workgroups
@@ -188,8 +187,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 }
 
 static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
-					      unsigned long nr_shrink,
-					      bool cleanup)
+					      unsigned long nr_shrink)
 {
 	pgoff_t first_index = 0;
 	void *batch[PAGEVEC_SIZE];
@@ -208,7 +206,7 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
 		first_index = grp->index + 1;
 
 		/* try to shrink each valid workgroup */
-		if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
+		if (!erofs_try_to_release_workgroup(sbi, grp))
 			continue;
 
 		++freed;
@@ -245,7 +243,8 @@ void erofs_shrinker_unregister(struct super_block *sb)
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 
 	mutex_lock(&sbi->umount_mutex);
-	erofs_shrink_workstation(sbi, ~0UL, true);
+	/* clean up all remaining workgroups in memory */
+	erofs_shrink_workstation(sbi, ~0UL);
 
 	spin_lock(&erofs_sb_list_lock);
 	list_del(&sbi->list);
@@ -294,7 +293,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
 		spin_unlock(&erofs_sb_list_lock);
 		sbi->shrinker_run_no = run_no;
 
-		freed += erofs_shrink_workstation(sbi, nr, false);
+		freed += erofs_shrink_workstation(sbi, nr);
 
 		spin_lock(&erofs_sb_list_lock);
 		/* Get the next list element before we move this one */
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index ef32757d1aac..93f8bc1a64f6 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -574,7 +574,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 				struct list_head *pagepool)
 {
 	struct inode *const inode = fe->inode;
-	struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode);
+	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 	struct erofs_map_blocks *const map = &fe->map;
 	struct z_erofs_collector *const clt = &fe->clt;
 	const loff_t offset = page_offset(page);
@@ -997,8 +997,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 					       struct address_space *mc,
 					       gfp_t gfp)
 {
-	/* determined at compile time to avoid too many #ifdefs */
-	const bool nocache = __builtin_constant_p(mc) ? !mc : false;
 	const pgoff_t index = pcl->obj.index;
 	bool tocache = false;
 
@@ -1019,7 +1017,7 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 	 * the cached page has not been allocated and
 	 * a placeholder is out there, prepare it now.
 	 */
-	if (!nocache && page == PAGE_UNALLOCATED) {
+	if (page == PAGE_UNALLOCATED) {
 		tocache = true;
 		goto out_allocpage;
 	}
@@ -1031,21 +1029,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 
 	mapping = READ_ONCE(page->mapping);
 
-	/*
-	 * if managed cache is disabled, it's no way to
-	 * get such a cached-like page.
-	 */
-	if (nocache) {
-		/* if managed cache is disabled, it is impossible `justfound' */
-		DBG_BUGON(justfound);
-
-		/* and it should be locked, not uptodate, and not truncated */
-		DBG_BUGON(!PageLocked(page));
-		DBG_BUGON(PageUptodate(page));
-		DBG_BUGON(!mapping);
-		goto out;
-	}
-
 	/*
 	 * unmanaged (file) pages are all locked solidly,
 	 * therefore it is impossible for `mapping' to be NULL.
@@ -1102,7 +1085,7 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 		cpu_relax();
 		goto repeat;
 	}
-	if (nocache || !tocache)
+	if (!tocache)
 		goto out;
 	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
 		page->mapping = Z_EROFS_MAPPING_STAGING;
@@ -1208,7 +1191,7 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
 				   struct z_erofs_unzip_io *fgq,
 				   bool force_fg)
 {
-	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
+	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
 	struct z_erofs_unzip_io *q[NR_JOBQUEUES];
 	struct bio *bio;
-- 
2.17.1
