Date: Fri,  2 Feb 2024 10:20:29 +0800
From: Ming Lei <ming.lei@...hat.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
	linux-mm@...ck.org,
	linux-fsdevel@...r.kernel.org
Cc: linux-kernel@...r.kernel.org,
	Ming Lei <ming.lei@...hat.com>,
	David Hildenbrand <david@...hat.com>,
	Matthew Wilcox <willy@...radead.org>,
	Alexander Viro <viro@...iv.linux.org.uk>,
	Christian Brauner <brauner@...nel.org>,
	Don Dutile <ddutile@...hat.com>,
	Rafael Aquini <raquini@...hat.com>,
	Dave Chinner <david@...morbit.com>,
	Mike Snitzer <snitzer@...nel.org>
Subject: [PATCH] mm/madvise: set ra_pages to device max request size during MADV_POPULATE_READ

madvise(MADV_POPULATE_READ) tries to populate all page tables in the
specified range, so the resulting IO is usually sequential when the VMA
is backed by a file.

Set ra_pages to the device max request size for the readahead issued
during MADV_POPULATE_READ. This reduces the latency of
madvise(MADV_POPULATE_READ) to 1/10 of the original when running it over
a 1GB file with the usual (default) 128KB read_ahead_kb.
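
For reference, here is a minimal userspace sketch of the kind of workload
described above: mmap a file read-only and prefault it with
madvise(MADV_POPULATE_READ), which triggers the sequential readahead this
patch tunes. The file path and the fallback MADV_POPULATE_READ definition
are illustrative assumptions, not part of this patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#ifndef MADV_POPULATE_READ
#define MADV_POPULATE_READ 22	/* value from asm-generic/mman-common.h */
#endif

int main(void)
{
	const char *path = "/mnt/test/1g.img";	/* hypothetical 1GB test file */
	struct stat st;
	void *addr;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0) {
		perror("open/fstat");
		return 1;
	}

	addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Populate all page tables in the range; with this patch the
	 * readahead window is raised to the device max request size
	 * for the duration of the call. */
	if (madvise(addr, st.st_size, MADV_POPULATE_READ) != 0)
		perror("madvise(MADV_POPULATE_READ)");

	munmap(addr, st.st_size);
	close(fd);
	return 0;
}

Timing this program with the default 128KB read_ahead_kb, before and
after applying the patch, is the kind of comparison behind the 1/10
latency figure above.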

Cc: David Hildenbrand <david@...hat.com>
Cc: Matthew Wilcox <willy@...radead.org>
Cc: Alexander Viro <viro@...iv.linux.org.uk>
Cc: Christian Brauner <brauner@...nel.org>
Cc: Don Dutile <ddutile@...hat.com>
Cc: Rafael Aquini <raquini@...hat.com>
Cc: Dave Chinner <david@...morbit.com>
Cc: Mike Snitzer <snitzer@...nel.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Signed-off-by: Ming Lei <ming.lei@...hat.com>
---
 mm/madvise.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 51 insertions(+), 1 deletion(-)

diff --git a/mm/madvise.c b/mm/madvise.c
index 912155a94ed5..db5452c8abdd 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -900,6 +900,37 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
 		return -EINVAL;
 }
 
+static void madvise_restore_ra_win(struct file **file, unsigned int ra_pages)
+{
+	if (*file) {
+		struct file *f = *file;
+
+		f->f_ra.ra_pages = ra_pages;
+		fput(f);
+		*file = NULL;
+	}
+}
+
+static struct file *madvise_override_ra_win(struct file *f,
+		unsigned long start, unsigned long end,
+		unsigned int *old_ra_pages)
+{
+	unsigned int io_pages;
+
+	if (!f || !f->f_mapping || !f->f_mapping->host)
+		return NULL;
+
+	io_pages = inode_to_bdi(f->f_mapping->host)->io_pages;
+	if (((end - start) >> PAGE_SHIFT) < io_pages)
+		return NULL;
+
+	f = get_file(f);
+	*old_ra_pages = f->f_ra.ra_pages;
+	f->f_ra.ra_pages = io_pages;
+
+	return f;
+}
+
 static long madvise_populate(struct vm_area_struct *vma,
 			     struct vm_area_struct **prev,
 			     unsigned long start, unsigned long end,
@@ -908,9 +939,21 @@ static long madvise_populate(struct vm_area_struct *vma,
 	const bool write = behavior == MADV_POPULATE_WRITE;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long tmp_end;
+	unsigned int ra_pages;
+	struct file *file;
 	int locked = 1;
 	long pages;
 
+	/*
+	 * For a file-backed mapping, increase the readahead window to
+	 * reduce the overall populate latency, and restore it after
+	 * the populate is done.
+	 */
+	if (behavior == MADV_POPULATE_READ)
+		file = madvise_override_ra_win(vma->vm_file, start, end,
+				&ra_pages);
+	else
+		file = NULL;
 	*prev = vma;
 
 	while (start < end) {
@@ -920,8 +963,10 @@ static long madvise_populate(struct vm_area_struct *vma,
 		 */
 		if (!vma || start >= vma->vm_end) {
 			vma = vma_lookup(mm, start);
-			if (!vma)
+			if (!vma) {
+				madvise_restore_ra_win(&file, ra_pages);
 				return -ENOMEM;
+			}
 		}
 
 		tmp_end = min_t(unsigned long, end, vma->vm_end);
@@ -935,6 +980,9 @@ static long madvise_populate(struct vm_area_struct *vma,
 			vma = NULL;
 		}
 		if (pages < 0) {
+			/* restore ra_pages in case of any failure */
+			madvise_restore_ra_win(&file, ra_pages);
+
 			switch (pages) {
 			case -EINTR:
 				return -EINTR;
@@ -954,6 +1002,8 @@ static long madvise_populate(struct vm_area_struct *vma,
 		}
 		start += pages * PAGE_SIZE;
 	}
+
+	madvise_restore_ra_win(&file, ra_pages);
 	return 0;
 }
 
-- 
2.41.0

