Message-Id: <20231109045908.54996-2-byungchul@sk.com>
Date: Thu, 9 Nov 2023 13:59:06 +0900
From: Byungchul Park <byungchul@...com>
To: linux-kernel@...r.kernel.org, linux-mm@...ck.org
Cc: kernel_team@...ynix.com, akpm@...ux-foundation.org,
ying.huang@...el.com, namit@...are.com, xhao@...ux.alibaba.com,
mgorman@...hsingularity.net, hughd@...gle.com, willy@...radead.org,
david@...hat.com, peterz@...radead.org, luto@...nel.org,
tglx@...utronix.de, mingo@...hat.com, bp@...en8.de,
dave.hansen@...ux.intel.com
Subject: [v4 1/3] mm/rmap: Recognize read-only TLB entries during batched TLB flush
Functionally, no change. This is a preparation for the migrc mechanism,
which needs to recognize read-only TLB entries and make use of them to
batch TLB flushes more aggressively.
Signed-off-by: Byungchul Park <byungchul@...com>
---
arch/x86/include/asm/tlbflush.h | 3 +++
arch/x86/mm/tlb.c | 11 +++++++++++
include/linux/sched.h | 1 +
mm/internal.h | 4 ++++
mm/rmap.c | 30 +++++++++++++++++++++++++++++-
5 files changed, 48 insertions(+), 1 deletion(-)
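Note for reviewers: below is a minimal user-space sketch of the two-batch
scheme this patch introduces. It is not kernel code; cpu_set_t stands in
for struct arch_tlbflush_unmap_batch, and all *_demo names are
illustrative assumptions, not part of the patch. Writable (or dirty)
entries keep accumulating in tlb_ubc, clean read-only entries go to
tlb_ubc_ro, and try_to_unmap_flush() folds the latter into the former
before flushing.

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct arch_tlbflush_unmap_batch (assumption). */
struct tlbflush_unmap_batch_demo {
	cpu_set_t cpumask;	/* CPUs holding stale TLB entries */
	bool flush_required;
};

/* Per-task state: the main batch plus the new read-only batch. */
static struct tlbflush_unmap_batch_demo tlb_ubc, tlb_ubc_ro;

/* Mirrors fold_ubc_ro(): merge the read-only batch into the main one. */
static void fold_ubc_ro_demo(void)
{
	if (!tlb_ubc_ro.flush_required)
		return;

	/* arch_tlbbatch_fold() is a cpumask OR on x86. */
	CPU_OR(&tlb_ubc.cpumask, &tlb_ubc.cpumask, &tlb_ubc_ro.cpumask);
	tlb_ubc.flush_required = true;

	/* arch_tlbbatch_clear() resets the source batch. */
	CPU_ZERO(&tlb_ubc_ro.cpumask);
	tlb_ubc_ro.flush_required = false;
}

/* Mirrors set_tlb_ubc_flush_pending(): route by writability. */
static void add_pending_demo(int cpu, bool writable_or_dirty)
{
	struct tlbflush_unmap_batch_demo *b =
		writable_or_dirty ? &tlb_ubc : &tlb_ubc_ro;

	CPU_SET(cpu, &b->cpumask);
	b->flush_required = true;
}

int main(void)
{
	CPU_ZERO(&tlb_ubc.cpumask);
	CPU_ZERO(&tlb_ubc_ro.cpumask);

	add_pending_demo(0, true);   /* writable PTE -> tlb_ubc */
	add_pending_demo(1, false);  /* clean read-only PTE -> tlb_ubc_ro */

	fold_ubc_ro_demo();          /* try_to_unmap_flush() folds first */
	printf("CPUs to flush: %d\n", CPU_COUNT(&tlb_ubc.cpumask)); /* 2 */
	return 0;
}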
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 25726893c6f4..5c618a8821de 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -292,6 +292,9 @@ static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
}
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+extern void arch_tlbbatch_clear(struct arch_tlbflush_unmap_batch *batch);
+extern void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
+ struct arch_tlbflush_unmap_batch *bsrc);
static inline bool pte_flags_need_flush(unsigned long oldflags,
unsigned long newflags,
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 453ea95b667d..d3c89a3d91eb 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1274,6 +1274,17 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
put_cpu();
}
+void arch_tlbbatch_clear(struct arch_tlbflush_unmap_batch *batch)
+{
+ cpumask_clear(&batch->cpumask);
+}
+
+void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
+ struct arch_tlbflush_unmap_batch *bsrc)
+{
+ cpumask_or(&bdst->cpumask, &bdst->cpumask, &bsrc->cpumask);
+}
+
/*
* Blindly accessing user memory from NMI context can be dangerous
* if we're in the middle of switching the current user task or
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f01ac385f7..8a31527d9ed8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1324,6 +1324,7 @@ struct task_struct {
#endif
struct tlbflush_unmap_batch tlb_ubc;
+ struct tlbflush_unmap_batch tlb_ubc_ro;
/* Cache last used pipe for splice(): */
struct pipe_inode_info *splice_pipe;
diff --git a/mm/internal.h b/mm/internal.h
index 30cf724ddbce..9764b240e259 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -861,6 +861,7 @@ extern struct workqueue_struct *mm_percpu_wq;
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
+void fold_ubc_ro(void);
#else
static inline void try_to_unmap_flush(void)
{
@@ -871,6 +872,9 @@ static inline void try_to_unmap_flush_dirty(void)
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
+static inline void fold_ubc_ro(void)
+{
+}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
extern const struct trace_print_flags pageflag_names[];
diff --git a/mm/rmap.c b/mm/rmap.c
index 9f795b93cf40..c787ae94b4c6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -605,6 +605,28 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
}
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+
+void fold_ubc_ro(void)
+{
+ struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+ struct tlbflush_unmap_batch *tlb_ubc_ro = &current->tlb_ubc_ro;
+
+ if (!tlb_ubc_ro->flush_required)
+ return;
+
+ /*
+ * Fold tlb_ubc_ro's data into tlb_ubc.
+ */
+ arch_tlbbatch_fold(&tlb_ubc->arch, &tlb_ubc_ro->arch);
+ tlb_ubc->flush_required = true;
+
+ /*
+ * Reset tlb_ubc_ro's data.
+ */
+ arch_tlbbatch_clear(&tlb_ubc_ro->arch);
+ tlb_ubc_ro->flush_required = false;
+}
+
/*
* Flush TLB entries for recently unmapped pages from remote CPUs. It is
* important if a PTE was dirty when it was unmapped that it's flushed
@@ -615,6 +637,7 @@ void try_to_unmap_flush(void)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+ fold_ubc_ro();
if (!tlb_ubc->flush_required)
return;
@@ -645,13 +668,18 @@ void try_to_unmap_flush_dirty(void)
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
unsigned long uaddr)
{
- struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+ struct tlbflush_unmap_batch *tlb_ubc;
int batch;
bool writable = pte_dirty(pteval);
if (!pte_accessible(mm, pteval))
return;
+ if (pte_write(pteval) || writable)
+ tlb_ubc = &current->tlb_ubc;
+ else
+ tlb_ubc = &current->tlb_ubc_ro;
+
arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
tlb_ubc->flush_required = true;
--
2.17.1
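A side note on the routing condition in set_tlb_ubc_flush_pending()
above, sketched here with simplified flags in place of a real pte_t (the
pte_demo names are assumptions for illustration): an entry is diverted to
the read-only batch only when the PTE is neither currently writable
(pte_write()) nor dirty (pte_dirty()). A dirty PTE means some CPU wrote
through the mapping at some point, so it stays in the main batch, whose
flush try_to_unmap_flush_dirty() can force out before the page is reused.

#include <stdbool.h>

/* Simplified stand-in for a pte_t; illustration only. */
struct pte_demo {
	bool write;	/* currently writable, as pte_write() reports */
	bool dirty;	/* written through at some point, per pte_dirty() */
};

/*
 * Inverse of the patch's check: only clean, read-only entries are
 * eligible for tlb_ubc_ro.
 */
static bool goes_to_ro_batch_demo(struct pte_demo pte)
{
	return !pte.write && !pte.dirty;
}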