Message-Id: <20250220052027.58847-10-byungchul@sk.com>
Date: Thu, 20 Feb 2025 14:20:10 +0900
From: Byungchul Park <byungchul@...com>
To: linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Cc: kernel_team@...ynix.com,
akpm@...ux-foundation.org,
ying.huang@...el.com,
vernhao@...cent.com,
mgorman@...hsingularity.net,
hughd@...gle.com,
willy@...radead.org,
david@...hat.com,
peterz@...radead.org,
luto@...nel.org,
tglx@...utronix.de,
mingo@...hat.com,
bp@...en8.de,
dave.hansen@...ux.intel.com,
rjgolo@...il.com
Subject: [RFC PATCH v12 09/26] mm: introduce API to perform tlb shootdown on exit from page allocator
Functionally, no change. This is a preparation for the luf mechanism,
which performs the tlb shootdown required on exit from the page
allocator.

This patch introduces a new API rather than reusing the existing
try_to_unmap_flush(), in order to avoid repeated and redundant tlb
shootdowns caused by frequent page allocations during a session of
batched unmap flush.
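For illustration only (the real call sites are wired up by later patches
in this series), the intended usage on the allocator exit path looks
roughly like the sketch below, assuming the deferred flush state of the
outgoing pages has already been folded into current->tlb_ubc_takeoff via
fold_batch(). The helper name is hypothetical:

	/*
	 * Illustrative sketch, not part of this patch: later patches in
	 * the series add the actual call sites in the page allocator.
	 */
	static struct page *alloc_exit_example(struct page *page)
	{
		/*
		 * While the page was being selected, its deferred flush
		 * state was folded into current->tlb_ubc_takeoff, e.g.:
		 *
		 *	fold_batch(&current->tlb_ubc_takeoff, src, false);
		 *
		 * Flush only what this allocation requires before handing
		 * the page out; tlb_ubc itself is shrunk when it is fully
		 * covered by the takeoff flush.
		 */
		try_to_unmap_flush_takeoff();
		return page;
	}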
Signed-off-by: Byungchul Park <byungchul@...com>
---
include/linux/sched.h | 1 +
mm/internal.h | 4 ++++
mm/rmap.c | 20 ++++++++++++++++++++
3 files changed, 25 insertions(+)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bb343136ddd05..8e6e7a83332cf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1375,6 +1375,7 @@ struct task_struct {
#endif
struct tlbflush_unmap_batch tlb_ubc;
+ struct tlbflush_unmap_batch tlb_ubc_takeoff;
/* Cache last used pipe for splice(): */
struct pipe_inode_info *splice_pipe;
diff --git a/mm/internal.h b/mm/internal.h
index b38a9ae9d6993..cbdebf8a02437 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1239,6 +1239,7 @@ extern struct workqueue_struct *mm_percpu_wq;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
+void try_to_unmap_flush_takeoff(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
void fold_batch(struct tlbflush_unmap_batch *dst, struct tlbflush_unmap_batch *src, bool reset);
void fold_luf_batch(struct luf_batch *dst, struct luf_batch *src);
@@ -1249,6 +1250,9 @@ static inline void try_to_unmap_flush(void)
static inline void try_to_unmap_flush_dirty(void)
{
}
+static inline void try_to_unmap_flush_takeoff(void)
+{
+}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
diff --git a/mm/rmap.c b/mm/rmap.c
index 74fbf6c2fb3a7..72c5e665e59a4 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -772,6 +772,26 @@ void fold_luf_batch(struct luf_batch *dst, struct luf_batch *src)
read_unlock_irqrestore(&src->lock, flags);
}
+void try_to_unmap_flush_takeoff(void)
+{
+ struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+ struct tlbflush_unmap_batch *tlb_ubc_takeoff = &current->tlb_ubc_takeoff;
+
+ if (!tlb_ubc_takeoff->flush_required)
+ return;
+
+ arch_tlbbatch_flush(&tlb_ubc_takeoff->arch);
+
+ /*
+ * Now that the tlb shootdown for tlb_ubc_takeoff has been performed,
+ * it's a good opportunity to shrink tlb_ubc if possible.
+ */
+ if (arch_tlbbatch_done(&tlb_ubc->arch, &tlb_ubc_takeoff->arch))
+ reset_batch(tlb_ubc);
+
+ reset_batch(tlb_ubc_takeoff);
+}
+
/*
* Flush TLB entries for recently unmapped pages from remote CPUs. It is
* important if a PTE was dirty when it was unmapped that it's flushed
--
2.17.1