Message-ID: <20260108075938.360282-2-chizhiling@163.com>
Date: Thu,  8 Jan 2026 15:59:37 +0800
From: Chi Zhiling <chizhiling@....com>
To: linux-fsdevel@...r.kernel.org,
	linux-kernel@...r.kernel.org
Cc: Namjae Jeon <linkinjeon@...nel.org>,
	Sungjong Seo <sj1557.seo@...sung.com>,
	Yuezhang Mo <yuezhang.mo@...y.com>,
	Chi Zhiling <chizhiling@...inos.cn>
Subject: [PATCH v2 12/13] exfat: return the start of the next cache in exfat_cache_lookup

From: Chi Zhiling <chizhiling@...inos.cn>

Change exfat_cache_lookup() to return the number of the last cluster
before the next cache entry (i.e., one less than the cluster at which
the next cache starts), or the given 'end' if there is no next cache.
This allows the caller to tell whether the clusters following the
current cache are already covered by another cache entry.

The function signature is changed to take an 'end' parameter, the
upper bound of the search range. The search now stops early either
when a cache covers the whole range [fclus, end], or when the next
cache starts at or before the tail of the current hit, meaning the
caches are contiguous. The return value is one less than the cluster
at which the next cache starts, or the original 'end' if no next
cache is found.

The new behavior is illustrated as follows:

cache:  [ccccccc-------ccccccccc]
search: [..................]
return:               ^
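
For illustration only (not part of this patch), here is a minimal
sketch of how a caller could use the new return value. It assumes the
existing cache_init() helper and EXFAT_EOF_CLUSTER definition from
fs/exfat; example_walk_range() is a hypothetical name:

static int example_walk_range(struct inode *inode, unsigned int fclus,
			      unsigned int end)
{
	struct exfat_cache_id cid;
	unsigned int cached_fclus = EXFAT_EOF_CLUSTER;
	unsigned int cached_dclus = EXFAT_EOF_CLUSTER;
	unsigned int boundary;

	cache_init(&cid, EXFAT_EOF_CLUSTER, EXFAT_EOF_CLUSTER);

	/* Ask the cache about the range [fclus, end]. */
	boundary = exfat_cache_lookup(inode, &cid, fclus, end,
				      &cached_fclus, &cached_dclus);

	/* A hit that covers fclus yields its data cluster directly. */
	if (cached_fclus == fclus)
		return 0;

	/*
	 * Otherwise a FAT walk is needed, but it can stop at 'boundary':
	 * when boundary < end, another cache entry begins at
	 * boundary + 1, so the walk does not need to continue through
	 * clusters that are already cached.
	 */
	return 0;
}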

Signed-off-by: Chi Zhiling <chizhiling@...inos.cn>
---
 fs/exfat/cache.c | 49 ++++++++++++++++++++++++++++++++++++------------
 1 file changed, 37 insertions(+), 12 deletions(-)

diff --git a/fs/exfat/cache.c b/fs/exfat/cache.c
index 73147e153c2c..5cdeac014a3d 100644
--- a/fs/exfat/cache.c
+++ b/fs/exfat/cache.c
@@ -80,41 +80,66 @@ static inline void exfat_cache_update_lru(struct inode *inode,
 		list_move(&cache->cache_list, &ei->cache_lru);
 }
 
-static unsigned int exfat_cache_lookup(struct inode *inode,
-		unsigned int fclus, struct exfat_cache_id *cid,
+/*
+ * Find the cache that covers or precedes 'fclus' and return the last
+ * cluster before the next cache range.
+ */
+static inline unsigned int
+exfat_cache_lookup(struct inode *inode, struct exfat_cache_id *cid,
+		unsigned int fclus, unsigned int end,
 		unsigned int *cached_fclus, unsigned int *cached_dclus)
 {
 	struct exfat_inode_info *ei = EXFAT_I(inode);
 	static struct exfat_cache nohit = { .fcluster = 0, };
 	struct exfat_cache *hit = &nohit, *p;
-	unsigned int offset = EXFAT_EOF_CLUSTER;
+	unsigned int tail = 0;		/* End boundary of hit cache */
 
+	/*
+	 * Search range [fclus, end]. Stop early if:
+	 * 1. Cache covers entire range, or
+	 * 2. Next cache starts at current cache tail
+	 */
 	spin_lock(&ei->cache_lru_lock);
 	list_for_each_entry(p, &ei->cache_lru, cache_list) {
 		/* Find the cache of "fclus" or nearest cache. */
-		if (p->fcluster <= fclus && hit->fcluster <= p->fcluster) {
+		if (p->fcluster <= fclus) {
+			if (p->fcluster < hit->fcluster)
+				continue;
+
 			hit = p;
-			if (hit->fcluster + hit->nr_contig < fclus) {
-				offset = hit->nr_contig;
-			} else {
-				offset = fclus - hit->fcluster;
+			tail = hit->fcluster + hit->nr_contig;
+
+			/* Current cache covers [fclus, end] completely */
+			if (tail >= end)
+				break;
+		} else if (p->fcluster <= end) {
+			end = p->fcluster - 1;
+
+			/*
+			 * If we have a hit and next cache starts within/at
+			 * its tail, caches are contiguous, stop searching.
+			 */
+			if (tail && tail >= end)
 				break;
-			}
 		}
 	}
 	if (hit != &nohit) {
-		exfat_cache_update_lru(inode, hit);
+		unsigned int offset;
 
+		exfat_cache_update_lru(inode, hit);
 		cid->id = ei->cache_valid_id;
 		cid->nr_contig = hit->nr_contig;
 		cid->fcluster = hit->fcluster;
 		cid->dcluster = hit->dcluster;
+
+		offset = min(cid->nr_contig, fclus - cid->fcluster);
 		*cached_fclus = cid->fcluster + offset;
 		*cached_dclus = cid->dcluster + offset;
 	}
 	spin_unlock(&ei->cache_lru_lock);
 
-	return offset;
+	/* Return the cluster just before the next cache, or 'end' if none */
+	return end;
 }
 
 static struct exfat_cache *exfat_cache_merge(struct inode *inode,
@@ -260,7 +285,7 @@ int exfat_get_cluster(struct inode *inode, unsigned int cluster,
 		return 0;
 
 	cache_init(&cid, fclus, *dclus);
-	exfat_cache_lookup(inode, cluster, &cid, &fclus, dclus);
+	exfat_cache_lookup(inode, &cid, cluster, cluster, &fclus, dclus);
 
 	if (fclus == cluster)
 		return 0;
-- 
2.43.0

