Message-Id: <20250616052223.723982-14-ankur.a.arora@oracle.com>
Date: Sun, 15 Jun 2025 22:22:23 -0700
From: Ankur Arora <ankur.a.arora@...cle.com>
To: linux-kernel@...r.kernel.org, linux-mm@...ck.org, x86@...nel.org
Cc: akpm@...ux-foundation.org, bp@...en8.de, dave.hansen@...ux.intel.com,
hpa@...or.com, mingo@...hat.com, mjguzik@...il.com, luto@...nel.org,
peterz@...radead.org, acme@...nel.org, namhyung@...nel.org,
tglx@...utronix.de, willy@...radead.org, jon.grimm@....com,
bharata@....com, raghavendra.kt@....com, boris.ostrovsky@...cle.com,
konrad.wilk@...cle.com, ankur.a.arora@...cle.com
Subject: [PATCH v4 13/13] x86/folio_zero_user: Add multi-page clearing
Override the common code version of folio_zero_user() so we can use
clear_pages() to do multi-page clearing instead of the standard
page-at-a-time clearing. This allows us to advertise the full
region-size to the processor, which, when using string instructions
(REP; STOS), can use the knowledge of the extent to optimize the
clearing.
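(For illustration only, not part of the patch: the primitive underneath
boils down to a REP STOS over the full byte count, so the CPU sees the
whole extent in RCX rather than one page at a time. A minimal sketch
with a hypothetical helper name; the actual clear_page()/clear_pages()
implementations live in the arch code and the earlier patches of this
series:

  static inline void rep_stos_clear(void *dst, unsigned long bytes)
  {
          /* AL=0, RDI=dst, RCX=bytes: store zero over the full extent. */
          asm volatile("rep stosb"
                       : "+D" (dst), "+c" (bytes)
                       : "a" (0)
                       : "memory");
  }
)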
Apart from this, we have two other considerations: cache locality when
clearing 2MB pages, and preemption latency when clearing GB pages.

The first is handled by breaking the clearing into three parts: the
faulting page and its immediate locality, and the regions to its left
and right; with the local neighbourhood cleared last.
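As a concrete example (using the two-page neighbourhood width from the
code below): for a fault at page index 200 of a 512-page (2MB, 4KB base
pages) folio, the three parts are pages [203, 511] (right), [0, 197]
(left) and [198, 202] (the faulting page and its neighbourhood), cleared
in that order.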
The second is only an issue for kernels running under cooperative
(preempt=none|voluntary) preemption. Limit the worst case preemption
latency by clearing in PAGE_RESCHED_CHUNK (8MB) units.
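For reference, with 4KB base pages PAGE_RESCHED_CHUNK works out to
8 << (20 - 12) = 2048 pages = 8MB; at the ~10GB/s clearing bandwidth
assumed in the code below, that bounds the time between cond_resched()
calls to roughly 8MB / 10GB/s ~= 0.8ms.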
The resultant performance depends on the kinds of optimizations that
the uarch can apply to the clearing extent. Two classes of
optimizations:

 - amortize each clearing iteration over a large range instead of at
   a page granularity.

 - cacheline allocation elision (seen only on AMD Zen models)

A demand fault workload shows that the resultant performance falls in
two buckets depending on whether the extent being zeroed is large
enough to allow for cacheline allocation elision.
AMD Milan (EPYC 7J13, boost=0, region=64GB on the local NUMA node):
$ perf bench mem map -p $page-size -f demand -s 64GB -l 5
                   mm/folio_zero_user      x86/folio_zero_user       change
                    (GB/s  +-  %stdev)       (GB/s  +-  %stdev)

   pg-sz=2MB        11.82  +-  0.67%         16.48  +-  0.30%         + 39.4%
   pg-sz=1GB        17.51  +-  1.19%         40.03  +-  7.26% [#]     +129.9%
   pg-sz=1GB        17.14  +-  1.39%         17.42  +-  0.98%         +  1.6%

  [#] Milan uses a threshold of LLC-size (~32MB) for eliding cacheline
      allocation, which is higher than PAGE_RESCHED_CHUNK, so
      preempt=none|voluntary sees no improvement for this test (the
      last pg-sz=1GB row above).
The dropoff in cacheline allocations for pg-sz=1GB can be seen with
perf-stat:
- 44,513,459,667 cycles # 2.420 GHz ( +- 0.44% ) (35.71%)
- 1,378,032,592 instructions # 0.03 insn per cycle
- 11,224,288,082 L1-dcache-loads # 610.187 M/sec ( +- 0.08% ) (35.72%)
- 5,373,473,118 L1-dcache-load-misses # 47.87% of all L1-dcache accesses ( +- 0.00% ) (35.71%)
+ 20,093,219,076 cycles # 2.421 GHz ( +- 3.64% ) (35.69%)
+ 1,378,032,592 instructions # 0.03 insn per cycle
+ 186,525,095 L1-dcache-loads # 22.479 M/sec ( +- 2.11% ) (35.74%)
+ 73,479,687 L1-dcache-load-misses # 39.39% of all L1-dcache accesses ( +- 3.03% ) (35.74%)
Also note that, as mentioned earlier, this improvement is not specific
to AMD Zen*. Intel Icelakex (pg-sz=2MB|1GB) sees an improvement similar
to the Milan pg-sz=2MB workload above (~35%).
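(For context, a minimal sketch, not part of the patch or of perf, of the
kind of demand-fault access pattern the benchmark above exercises; the
region size, hugepage size, and the MAP_HUGETLB fallback value are
illustrative assumptions:

  #include <stdio.h>
  #include <sys/mman.h>

  #ifndef MAP_HUGETLB
  #define MAP_HUGETLB   0x40000         /* x86 value, fallback only */
  #endif

  #define REGION_SIZE   (1UL << 30)     /* 1GB region, for illustration */
  #define HPAGE_SIZE    (2UL << 20)     /* 2MB hugepages */

  int main(void)
  {
          unsigned char *p = mmap(NULL, REGION_SIZE, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
                                  -1, 0);

          if (p == MAP_FAILED) {
                  perror("mmap"); /* needs hugepages reserved via vm.nr_hugepages */
                  return 1;
          }

          /* One write per hugepage: each first touch demand-faults a hugepage. */
          for (unsigned long off = 0; off < REGION_SIZE; off += HPAGE_SIZE)
                  p[off] = 1;

          munmap(p, REGION_SIZE);
          return 0;
  }

Each first touch faults in a zeroed hugepage, which is the path where
folio_zero_user() gets called to clear it.)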
Signed-off-by: Ankur Arora <ankur.a.arora@...cle.com>
---
arch/x86/mm/Makefile | 1 +
arch/x86/mm/memory.c | 97 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 98 insertions(+)
create mode 100644 arch/x86/mm/memory.c
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 5b9908f13dcf..9031faf21849 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_AMD_NUMA) += amdtopology.o
obj-$(CONFIG_ACPI_NUMA) += srat.o
+obj-$(CONFIG_PREEMPTION) += memory.o
obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
diff --git a/arch/x86/mm/memory.c b/arch/x86/mm/memory.c
new file mode 100644
index 000000000000..a799c0cc3c5f
--- /dev/null
+++ b/arch/x86/mm/memory.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/mm.h>
+#include <linux/range.h>
+#include <linux/minmax.h>
+
+/*
+ * Limit the optimized version of folio_zero_user() to !CONFIG_HIGHMEM.
+ * We do that because clear_pages() works on contiguous kernel pages,
+ * which might not be the case under HIGHMEM.
+ */
+#ifndef CONFIG_HIGHMEM
+/*
+ * For voluntary preemption models, operate with a max chunk-size of 8MB.
+ * (Worst case resched latency of ~1ms, with a clearing BW of ~10GBps.)
+ */
+#define PAGE_RESCHED_CHUNK (8 << (20 - PAGE_SHIFT))
+
+static void clear_pages_resched(void *addr, int npages)
+{
+	int i, remaining;
+
+	if (preempt_model_preemptible()) {
+		clear_pages(addr, npages);
+		goto out;
+	}
+
+	for (i = 0; i < npages/PAGE_RESCHED_CHUNK; i++) {
+		clear_pages(addr + i * PAGE_RESCHED_CHUNK * PAGE_SIZE, PAGE_RESCHED_CHUNK);
+		cond_resched();
+	}
+
+	remaining = npages % PAGE_RESCHED_CHUNK;
+
+	if (remaining)
+		clear_pages(addr + i * PAGE_RESCHED_CHUNK * PAGE_SIZE, remaining);
+out:
+	cond_resched();
+}
+
+/*
+ * folio_zero_user() - multi-page clearing.
+ *
+ * @folio: hugepage folio
+ * @addr_hint: faulting address (if any)
+ *
+ * Overrides common code folio_zero_user(). This version takes advantage of
+ * the fact that string instructions in clear_pages() are more performant
+ * on larger extents compared to the usual page-at-a-time clearing.
+ *
+ * Clearing of 2MB pages is split into three parts: pages in the immediate
+ * locality of the faulting page, and the regions to its left and right; with
+ * the local neighbourhood cleared last in order to keep cache lines of the
+ * target region hot.
+ *
+ * For GB pages, there is no expectation of cache locality so just do a
+ * straight zero.
+ *
+ * Note that the folio is fully allocated already so we don't do any exception
+ * handling.
+ */
+void folio_zero_user(struct folio *folio, unsigned long addr_hint)
+{
+	unsigned long base_addr = ALIGN_DOWN(addr_hint, folio_size(folio));
+	const long fault_idx = (addr_hint - base_addr) / PAGE_SIZE;
+	const struct range pg = DEFINE_RANGE(0, folio_nr_pages(folio) - 1);
+	const int width = 2; /* number of pages cleared last on either side */
+	struct range r[3];
+	int i;
+
+	if (folio_nr_pages(folio) > MAX_ORDER_NR_PAGES) {
+		clear_pages_resched(page_address(folio_page(folio, 0)), folio_nr_pages(folio));
+		return;
+	}
+
+	/*
+	 * Faulting page and its immediate neighbourhood. Cleared at the end to
+	 * ensure it sticks around in the cache.
+	 */
+	r[2] = DEFINE_RANGE(clamp_t(s64, fault_idx - width, pg.start, pg.end),
+			    clamp_t(s64, fault_idx + width, pg.start, pg.end));
+
+	/* Region to the left of the fault */
+	r[1] = DEFINE_RANGE(pg.start,
+			    clamp_t(s64, r[2].start-1, pg.start-1, r[2].start));
+
+	/* Region to the right of the fault: always valid for the common fault_idx=0 case. */
+	r[0] = DEFINE_RANGE(clamp_t(s64, r[2].end+1, r[2].end, pg.end+1),
+			    pg.end);
+
+	for (i = 0; i <= 2; i++) {
+		int npages = range_len(&r[i]);
+
+		if (npages > 0)
+			clear_pages_resched(page_address(folio_page(folio, r[i].start)), npages);
+	}
+}
+#endif /* CONFIG_HIGHMEM */
--
2.31.1