Message-Id: <1429179766-26711-3-git-send-email-mgorman@suse.de>
Date: Thu, 16 Apr 2015 11:22:44 +0100
From: Mel Gorman <mgorman@...e.de>
To: Linux-MM <linux-mm@...ck.org>
Cc: Rik van Riel <riel@...hat.com>, Hugh Dickins <hughd@...gle.com>,
Minchan Kim <minchan@...nel.org>,
Dave Hansen <dave.hansen@...el.com>,
Andi Kleen <andi@...stfloor.org>,
LKML <linux-kernel@...r.kernel.org>, Mel Gorman <mgorman@...e.de>
Subject: [PATCH 2/4] mm: Send a single IPI to TLB flush multiple pages when unmapping

An IPI is sent to flush remote TLBs when a page is unmapped that was
recently accessed by other CPUs. There are many circumstances where this
happens but the obvious one is kswapd reclaiming pages belonging to a
running process as kswapd and the task are likely running on separate CPUs.
On small machines, this is not a significant problem but as machines
get larger with more cores and more memory, the cost of these IPIs can
be high. This patch uses a structure similar in principle to a pagevec
to collect a list of PFNs and CPUs that require flushing. It then sends
one IPI to flush the list of PFNs. A new TLB flush helper is required for
this and one is added for x86. Other architectures will need to decide if
batching like this is both safe and worth the memory overhead. Specifically
the requirement is:

	If a clean page is unmapped and not immediately flushed, the
	architecture must guarantee that a write to that page from a CPU
	with a cached TLB entry will trigger a page fault.

This is essentially what the kernel already depends on but the window is
much larger with this patch applied, so it is worth highlighting.
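
To make the mechanism concrete, the flow amounts to the sketch below. It is
illustrative only: the structure, BATCH_TLBFLUSH_SIZE and flush_local_tlb_addr()
match what the patch adds, but queue_tlb_flush(), flush_batch_local() and
flush_tlb_batch() are invented names standing in for the real functions added
to mm/rmap.c further down.

	/*
	 * Illustrative sketch only -- see the mm/rmap.c hunks below for
	 * the real implementation; these helper names are made up.
	 */
	struct tlbflush_unmap_batch {
		struct cpumask cpumask;		/* CPUs that may cache stale entries */
		unsigned long nr_pages;
		unsigned long pfns[BATCH_TLBFLUSH_SIZE];
	};

	/* Called once per unmapped page instead of flushing immediately */
	static void queue_tlb_flush(struct tlbflush_unmap_batch *batch,
				    struct mm_struct *mm, struct page *page)
	{
		cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
		batch->pfns[batch->nr_pages++] = page_to_pfn(page);
	}

	/* Runs on each CPU that received the IPI: flush every address locally */
	static void flush_batch_local(void *data)
	{
		struct tlbflush_unmap_batch *batch = data;
		unsigned long i;

		for (i = 0; i < batch->nr_pages; i++)
			flush_local_tlb_addr(batch->pfns[i] << PAGE_SHIFT);
	}

	/* One IPI for the whole batch instead of one per unmapped page */
	static void flush_tlb_batch(struct tlbflush_unmap_batch *batch)
	{
		smp_call_function_many(&batch->cpumask, flush_batch_local,
				       batch, true);
		cpumask_clear(&batch->cpumask);
		batch->nr_pages = 0;
	}
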
The impact of this patch depends on the workload as measuring any benefit
requires both mapped pages co-located on the LRU and memory pressure. The
case with the biggest impact, taken from the vm-scalability test suite, is
multiple processes reading mapped pages. The test case uses NR_CPU readers
of mapped files that consume 10*RAM.

vmscale on a 4-node machine with 64G RAM and 48 CPUs

                                           4.0.0            4.0.0
                                         vanilla    batchunmap-v2
lru-file-mmap-read-elapsed      159.76 (  0.00%)  119.26 ( 26.91%)

                  4.0.0          4.0.0
                vanilla  batchunmap-v2
User             567.53         613.38
System          5949.65        4120.03
Elapsed          161.08         120.60

This shows that the readers completed 26% faster with 30% less system
CPU time. From vmstats, it is known that the vanilla kernel was
interrupted roughly 900K times per second during the steady phase of the
test and the patched kernel was interrupted roughly 180K times per second.
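
For reference, a comparable per-second figure can be sampled from the TLB
flush counters; a rough userspace sketch (not part of this patch; it assumes
a kernel built with CONFIG_DEBUG_TLBFLUSH so that nr_tlb_remote_flush_received
appears in /proc/vmstat) is:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	/* Read one named counter from /proc/vmstat; returns 0 if not found. */
	static unsigned long long vmstat_counter(const char *name)
	{
		char line[128];
		unsigned long long val = 0;
		size_t len = strlen(name);
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f)
			return 0;
		while (fgets(line, sizeof(line), f)) {
			if (!strncmp(line, name, len) && line[len] == ' ') {
				val = strtoull(line + len, NULL, 10);
				break;
			}
		}
		fclose(f);
		return val;
	}

	int main(void)
	{
		/* Counter only exists with CONFIG_DEBUG_TLBFLUSH enabled. */
		const char *name = "nr_tlb_remote_flush_received";
		unsigned long long before = vmstat_counter(name);

		sleep(1);
		printf("%llu TLB flush IPIs received in the last second\n",
		       vmstat_counter(name) - before);
		return 0;
	}
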
The impact is much lower on a small machine.

vmscale on a 1-node machine with 8G RAM and 1 CPU

                                               4.0.0            4.0.0
                                             vanilla    batchunmap-v2
Ops lru-file-mmap-read-elapsed      22.50 (  0.00%)   19.91 ( 11.51%)

                  4.0.0          4.0.0
                vanilla  batchunmap-v1
User              33.64          33.22
System            36.22          33.63
Elapsed           24.11          21.46

It's still a noticeable improvement, with vmstat showing that interrupts
went from roughly 500K per second to 45K per second.
The patch will have no impact on workloads with no memory pressure or
that have relatively few mapped pages.

Signed-off-by: Mel Gorman <mgorman@...e.de>
---
 arch/x86/Kconfig                |  1 +
 arch/x86/include/asm/tlbflush.h |  2 +
 include/linux/init_task.h       |  8 ++++
 include/linux/rmap.h            |  3 ++
 include/linux/sched.h           | 14 ++++++
 init/Kconfig                    |  8 ++++
 kernel/fork.c                   |  5 +++
 kernel/sched/core.c             |  3 ++
 mm/internal.h                   | 11 +++++
 mm/rmap.c                       | 99 ++++++++++++++++++++++++++++++++++++++++-
 mm/vmscan.c                     | 32 ++++++++++++-
 11 files changed, 183 insertions(+), 3 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b7d31ca55187..290844263218 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -30,6 +30,7 @@ config X86
select ARCH_MIGHT_HAVE_PC_SERIO
select HAVE_AOUT if X86_32
select HAVE_UNSTABLE_SCHED_CLOCK
+ select ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_INT128 if X86_64
select HAVE_IDE
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index cd791948b286..96a27051a70a 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -152,6 +152,8 @@ static inline void __flush_tlb_one(unsigned long addr)
* and page-granular flushes are available only on i486 and up.
*/
+#define flush_local_tlb_addr(addr) __flush_tlb_one(addr)
+
#ifndef CONFIG_SMP
/* "_up" is for UniProcessor.
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 696d22312b31..0771937b47e1 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -175,6 +175,13 @@ extern struct task_group root_task_group;
# define INIT_NUMA_BALANCING(tsk)
#endif
+#ifdef CONFIG_ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH
+# define INIT_TLBFLUSH_UNMAP_BATCH_CONTROL(tsk) \
+ .tlb_ubc = NULL,
+#else
+# define INIT_TLBFLUSH_UNMAP_BATCH_CONTROL(tsk)
+#endif
+
#ifdef CONFIG_KASAN
# define INIT_KASAN(tsk) \
.kasan_depth = 1,
@@ -257,6 +264,7 @@ extern struct task_group root_task_group;
INIT_RT_MUTEXES(tsk) \
INIT_VTIME(tsk) \
INIT_NUMA_BALANCING(tsk) \
+ INIT_TLBFLUSH_UNMAP_BATCH_CONTROL(tsk) \
INIT_KASAN(tsk) \
}
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c4c559a45dc8..8d23914b219e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -89,6 +89,9 @@ enum ttu_flags {
TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+ TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible
+ * and caller guarantees they will
+ * do a final flush if necessary */
};
#ifdef CONFIG_MMU
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a419b65770d6..5c09db02fe78 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1275,6 +1275,16 @@ enum perf_event_task_context {
perf_nr_task_contexts,
};
+/* Matches SWAP_CLUSTER_MAX but refined to limit header dependencies */
+#define BATCH_TLBFLUSH_SIZE 32UL
+
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+ struct cpumask cpumask;
+ unsigned long nr_pages;
+ unsigned long pfns[BATCH_TLBFLUSH_SIZE];
+};
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
@@ -1634,6 +1644,10 @@ struct task_struct {
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH
+ struct tlbflush_unmap_batch *tlb_ubc;
+#endif
+
struct rcu_head rcu;
/*
diff --git a/init/Kconfig b/init/Kconfig
index f5dbc6d4261b..f519fbb6ac35 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -889,6 +889,14 @@ config ARCH_SUPPORTS_NUMA_BALANCING
bool
#
+# For architectures that have a local TLB flush for a PFN without knowledge
+# of the VMA. The architecture must provide guarantees on what happens if
+# a clean TLB cache entry is written after the unmap. Details are in mm/rmap.c
+# near the check for should_defer_flush.
+config ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH
+ bool
+
+#
# For architectures that know their GCC __int128 support is sound
#
config ARCH_SUPPORTS_INT128
diff --git a/kernel/fork.c b/kernel/fork.c
index cf65139615a0..86c872fec9fb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -246,6 +246,11 @@ void __put_task_struct(struct task_struct *tsk)
delayacct_tsk_free(tsk);
put_signal_struct(tsk->signal);
+#ifdef CONFIG_ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH
+ kfree(tsk->tlb_ubc);
+ tsk->tlb_ubc = NULL;
+#endif
+
if (!profile_handoff_task(tsk))
free_task(tsk);
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 62671f53202a..9836a28d001b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1823,6 +1823,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->numa_group = NULL;
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH
+ p->tlb_ubc = NULL;
+#endif /* CONFIG_ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH */
}
#ifdef CONFIG_NUMA_BALANCING
diff --git a/mm/internal.h b/mm/internal.h
index a96da5b0029d..35aba439c275 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -431,4 +431,15 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
#define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
#define ALLOC_FAIR 0x100 /* fair zone allocation */
+enum ttu_flags;
+struct tlbflush_unmap_batch;
+
+#ifdef CONFIG_ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH
+void try_to_unmap_flush(void);
+#else
+static inline void try_to_unmap_flush(void)
+{
+}
+
+#endif /* CONFIG_ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH */
#endif /* __MM_INTERNAL_H */
diff --git a/mm/rmap.c b/mm/rmap.c
index c161a14b6a8f..576267f64655 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -60,6 +60,8 @@
#include <asm/tlbflush.h>
+#include <trace/events/tlb.h>
+
#include "internal.h"
static struct kmem_cache *anon_vma_cachep;
@@ -581,6 +583,74 @@ vma_address(struct page *page, struct vm_area_struct *vma)
return address;
}
+#ifdef CONFIG_ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH
+static void percpu_flush_tlb_batch_pages(void *data)
+{
+ struct tlbflush_unmap_batch *tlb_ubc = data;
+ int i;
+
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ for (i = 0; i < tlb_ubc->nr_pages; i++)
+ flush_local_tlb_addr(tlb_ubc->pfns[i] << PAGE_SHIFT);
+}
+
+void try_to_unmap_flush(void)
+{
+ struct tlbflush_unmap_batch *tlb_ubc = current->tlb_ubc;
+
+ if (!tlb_ubc || !tlb_ubc->nr_pages)
+ return;
+
+ trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, tlb_ubc->nr_pages);
+ smp_call_function_many(&tlb_ubc->cpumask, percpu_flush_tlb_batch_pages,
+ (void *)tlb_ubc, true);
+ cpumask_clear(&tlb_ubc->cpumask);
+ tlb_ubc->nr_pages = 0;
+}
+
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
+ struct page *page)
+{
+ struct tlbflush_unmap_batch *tlb_ubc = current->tlb_ubc;
+
+ cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
+ tlb_ubc->pfns[tlb_ubc->nr_pages] = page_to_pfn(page);
+ tlb_ubc->nr_pages++;
+
+ if (tlb_ubc->nr_pages == BATCH_TLBFLUSH_SIZE)
+ try_to_unmap_flush();
+}
+
+/*
+ * Returns true if the TLB flush should be deferred to the end of a batch of
+ * unmap operations to reduce IPIs.
+ */
+static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
+{
+ bool should_defer = false;
+
+ if (!current->tlb_ubc || !(flags & TTU_BATCH_FLUSH))
+ return false;
+
+ /* If remote CPUs need to be flushed then defer batch the flush */
+ if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
+ should_defer = true;
+ put_cpu();
+
+ return should_defer;
+}
+#else
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
+ struct page *page)
+{
+}
+
+static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
+{
+ return false;
+}
+#endif /* CONFIG_ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH */
+
/*
* At what user virtual address is page expected in vma?
* Caller should check the page is actually part of the vma.
@@ -1186,6 +1256,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
pte_t pteval;
spinlock_t *ptl;
int ret = SWAP_AGAIN;
+ bool deferred;
enum ttu_flags flags = (enum ttu_flags)arg;
pte = page_check_address(page, mm, address, &ptl, 0);
@@ -1213,11 +1284,35 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/* Nuke the page table entry. */
flush_cache_page(vma, address, page_to_pfn(page));
- pteval = ptep_clear_flush(vma, address, pte);
+ deferred = should_defer_flush(mm, flags);
+ if (deferred) {
+ /*
+ * We clear the PTE but do not flush so potentially a remote
+ * CPU could still be writing to the page. If the entry was
+ * previously clean then the architecture must guarantee that
+ * a clear->dirty transition on a cached TLB entry is written
+ * through and traps if the PTE is unmapped. If the entry is
+ * already dirty then it's handled below by the pte_dirty check.
+ */
+ pteval = ptep_get_and_clear(mm, address, pte);
+ set_tlb_ubc_flush_pending(mm, page);
+ } else {
+ pteval = ptep_clear_flush(vma, address, pte);
+ }
/* Move the dirty bit to the physical page now the pte is gone. */
- if (pte_dirty(pteval))
+ if (pte_dirty(pteval)) {
+ /*
+ * If the PTE was dirty then the TLB must be flushed before
+ * the page is unlocked as IO can start in parallel. Without
+ * the flush, writes could still happen and data would be
+ * potentially lost.
+ */
+ if (deferred)
+ flush_tlb_page(vma, address);
+
set_page_dirty(page);
+ }
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5e8eadd71bac..f668c8fa78fd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1024,7 +1024,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* processes. Try to unmap it here.
*/
if (page_mapped(page) && mapping) {
- switch (try_to_unmap(page, ttu_flags)) {
+ switch (try_to_unmap(page,
+ ttu_flags|TTU_BATCH_FLUSH)) {
case SWAP_FAIL:
goto activate_locked;
case SWAP_AGAIN:
@@ -1211,6 +1212,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
ret = shrink_page_list(&clean_pages, zone, &sc,
TTU_UNMAP|TTU_IGNORE_ACCESS,
&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+ try_to_unmap_flush();
list_splice(&clean_pages, page_list);
mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
return ret;
@@ -2223,6 +2225,7 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
scan_adjusted = true;
}
blk_finish_plug(&plug);
+ try_to_unmap_flush();
sc->nr_reclaimed += nr_reclaimed;
/*
@@ -2762,6 +2765,30 @@ out:
return false;
}
+#ifdef CONFIG_ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH
+static inline void alloc_tlb_ubc(void)
+{
+ if (current->tlb_ubc)
+ return;
+
+ /*
+ * Allocate the control structure for batch TLB flushing. Harmless if
+ * the allocation fails as reclaimer will just send more IPIs.
+ */
+ current->tlb_ubc = kmalloc(sizeof(struct tlbflush_unmap_batch),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!current->tlb_ubc)
+ return;
+
+ cpumask_clear(&current->tlb_ubc->cpumask);
+ current->tlb_ubc->nr_pages = 0;
+}
+#else
+static inline void alloc_tlb_ubc(void)
+{
+}
+#endif /* CONFIG_ARCH_SUPPORTS_LOCAL_TLB_PFN_FLUSH */
+
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *nodemask)
{
@@ -2789,6 +2816,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
sc.may_writepage,
gfp_mask);
+ alloc_tlb_ubc();
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
@@ -3364,6 +3392,8 @@ static int kswapd(void *p)
lockdep_set_current_reclaim_state(GFP_KERNEL);
+ alloc_tlb_ubc();
+
if (!cpumask_empty(cpumask))
set_cpus_allowed_ptr(tsk, cpumask);
current->reclaim_state = &reclaim_state;
--
2.1.2