Message-Id: <20250605221037.7872-8-shikemeng@huaweicloud.com>
Date: Fri,  6 Jun 2025 06:10:37 +0800
From: Kemeng Shi <shikemeng@...weicloud.com>
To: hughd@...gle.com,
	baolin.wang@...ux.alibaba.com,
	willy@...radead.org,
	akpm@...ux-foundation.org
Cc: linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	linux-fsdevel@...r.kernel.org
Subject: [PATCH 7/7] mm: shmem: eliminate unneeded page counting in shmem_unuse_swap_entries()

The caller of shmem_unuse_swap_entries() does not use the count of pages
swapped in, so drop the unneeded page counting from
shmem_unuse_swap_entries().

Signed-off-by: Kemeng Shi <shikemeng@...weicloud.com>
---
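For reviewers: a minimal, self-contained userspace sketch of the calling
pattern this change relies on, namely that the caller only distinguishes
success from error and never consumes a count of pages swapped in. The
names unuse_swap_entries(), unuse_inode() and the fail parameter below are
illustrative stand-ins for the shmem functions touched by the hunks, not
kernel code.

/*
 * Model of the pattern: the callee reports 0 or a negative errno, and the
 * caller checks ret < 0 only, so a positive page count would go unused.
 */
#include <errno.h>
#include <stdio.h>

/* Stand-in for shmem_unuse_swap_entries(): 0 on success, -ENOMEM on failure. */
static int unuse_swap_entries(int fail)
{
	return fail ? -ENOMEM : 0;
}

/*
 * Stand-in for shmem_unuse_inode(): the return value is used only as an
 * error indicator, never as a count.
 */
static int unuse_inode(int fail)
{
	int ret = unuse_swap_entries(fail);

	if (ret < 0)
		return ret;
	return 0;
}

int main(void)
{
	printf("success path: %d, failure path: %d\n",
	       unuse_inode(0), unuse_inode(1));
	return 0;
}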
 mm/shmem.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index c6ea45d542d2..c83baabc169d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1480,14 +1480,13 @@ static unsigned int shmem_find_swap_entries(struct address_space *mapping,
 }
 
 /*
- * Move the swapped pages for an inode to page cache. Returns the count
- * of pages swapped in, or the error in case of failure.
+ * Move the swapped pages for an inode to page cache. Returns 0 on success,
+ * or the error in case of failure.
  */
 static int shmem_unuse_swap_entries(struct inode *inode,
 		struct folio_batch *fbatch, pgoff_t *indices)
 {
 	int i = 0;
-	int ret = 0;
 	int error = 0;
 	struct address_space *mapping = inode->i_mapping;
 
@@ -1499,13 +1498,11 @@ static int shmem_unuse_swap_entries(struct inode *inode,
 		if (error == 0) {
 			folio_unlock(folio);
 			folio_put(folio);
-			ret++;
 		}
 		if (error == -ENOMEM)
-			break;
-		error = 0;
+			return error;
 	}
-	return error ? error : ret;
+	return 0;
 }
 
 /*
@@ -1517,24 +1514,20 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type)
 	pgoff_t start = 0;
 	struct folio_batch fbatch;
 	pgoff_t indices[PAGEVEC_SIZE];
-	int ret = 0;
+	int ret;
 
 	do {
 		folio_batch_init(&fbatch);
 		if (!shmem_find_swap_entries(mapping, start, &fbatch,
-					     indices, type)) {
-			ret = 0;
-			break;
-		}
+					     indices, type))
+			return 0;
 
 		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
 		if (ret < 0)
-			break;
+			return ret;
 
 		start = indices[folio_batch_count(&fbatch) - 1];
 	} while (true);
-
-	return ret;
 }
 
 /*
-- 
2.30.0

