Message-Id: <1486579524-14901-52-git-send-email-mingo@kernel.org>
Date: Wed, 8 Feb 2017 19:45:22 +0100
From: Ingo Molnar <mingo@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Mike Galbraith <efault@....de>,
Oleg Nesterov <oleg@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH 51/53] sched/headers, mm: Move 'struct tlbflush_unmap_batch' from <linux/sched.h> to <linux/mm_types_task.h>
Unclutter <linux/sched.h> some more.

Also move the CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH condition inside the
structure definition, to remove a pair of #ifdefs from sched.h: with the
option disabled the structure body is empty, so task_struct can embed it
unconditionally.
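
For illustration, a minimal sketch of the pattern (CONFIG_FOO,
struct foo_batch and struct host_struct are hypothetical stand-ins,
not names from this patch); it relies on GCC's zero-size empty
struct extension, which the kernel uses:

  /* Body is empty, and the struct zero-sized, when CONFIG_FOO=n: */
  struct foo_batch {
  #ifdef CONFIG_FOO
  	int pending;		/* real members exist only when enabled */
  #endif
  };

  struct host_struct {
  	long other_state;
  	struct foo_batch batch;	/* no #ifdef needed at the use site */
  };
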
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Mike Galbraith <efault@....de>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: linux-kernel@...r.kernel.org
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 include/linux/mm_types_task.h | 22 ++++++++++++++++++++++
 include/linux/sched.h         | 21 ---------------------
 2 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 9526d8b9fe0e..136dfdf63ba1 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -10,6 +10,7 @@
 #include <linux/types.h>
 #include <linux/threads.h>
 #include <linux/atomic.h>
+#include <linux/cpumask.h>
 
 #include <asm/page.h>
 
@@ -62,4 +63,25 @@ struct page_frag {
 #endif
 };
 
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	/*
+	 * Each bit set is a CPU that potentially has a TLB entry for one of
+	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+	 */
+	struct cpumask cpumask;
+
+	/* True if any bit in cpumask is set */
+	bool flush_required;
+
+	/*
+	 * If true then the PTE was dirty when unmapped. The entry must be
+	 * flushed before IO is initiated or a stale TLB entry potentially
+	 * allows an update without redirtying the page.
+	 */
+	bool writable;
+#endif
+};
+
 #endif /* _LINUX_MM_TYPES_TASK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fbd3a2dc35bb..531732bf7b13 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -480,25 +480,6 @@ enum perf_event_task_context {
 	perf_nr_task_contexts,
 };
 
-/* Track pages that require TLB flushes */
-struct tlbflush_unmap_batch {
-	/*
-	 * Each bit set is a CPU that potentially has a TLB entry for one of
-	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
-	 */
-	struct cpumask cpumask;
-
-	/* True if any bit in cpumask is set */
-	bool flush_required;
-
-	/*
-	 * If true then the PTE was dirty when unmapped. The entry must be
-	 * flushed before IO is initiated or a stale TLB entry potentially
-	 * allows an update without redirtying the page.
-	 */
-	bool writable;
-};
-
 struct task_struct {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	/*
@@ -891,9 +872,7 @@ struct task_struct {
 	unsigned long numa_pages_migrated;
 #endif /* CONFIG_NUMA_BALANCING */
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	struct tlbflush_unmap_batch tlb_ubc;
-#endif
 
 	struct rcu_head rcu;
 
--
2.7.4