Message-ID: <Y3SWzbS4OJoz6ppv@li-4a3a4a4c-28e5-11b2-a85c-a8d192c6f089.ibm.com>
Date: Wed, 16 Nov 2022 08:52:45 +0100
From: Alexander Gordeev <agordeev@...ux.ibm.com>
To: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Hugh Dickins <hughd@...gle.com>,
Johannes Weiner <hannes@...xchg.org>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Nadav Amit <nadav.amit@...il.com>,
Will Deacon <will@...nel.org>,
Aneesh Kumar <aneesh.kumar@...ux.ibm.com>,
Nick Piggin <npiggin@...il.com>,
Heiko Carstens <hca@...ux.ibm.com>,
Vasily Gorbik <gor@...ux.ibm.com>,
Christian Borntraeger <borntraeger@...ux.ibm.com>,
Sven Schnelle <svens@...ux.ibm.com>,
Peter Zijlstra <peterz@...radead.org>,
Gerald Schaefer <gerald.schaefer@...ux.ibm.com>
Subject: [PATCH 3/4] mm: mmu_gather: turn delayed rmap macros into inlines
Make tlb_delay_rmap() and its companion macros inline functions
by using forward declarations, which allows defining them
after the 'struct mmu_gather' definition.
Signed-off-by: Alexander Gordeev <agordeev@...ux.ibm.com>
---
include/asm-generic/tlb.h | 56 ++++++++++++++++++++++++++++++---------
1 file changed, 44 insertions(+), 12 deletions(-)
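A minimal standalone sketch of the pattern the patch applies (hypothetical,
trimmed-down names, not the actual tree): forward-declare the static inlines
while 'struct mmu_gather' is still incomplete, define them once the struct is
visible, and keep the self-referencing #define so the header can later test
'#ifdef tlb_delay_rmap' to choose between the real and no-op variants.
CONFIG_SMP here stands in for the Kconfig symbol and is not defined in this
sketch.

	#include <stdbool.h>

	struct mmu_gather;			/* incomplete type is enough for a declaration */

	#ifdef CONFIG_SMP
	#define tlb_delay_rmap tlb_delay_rmap	/* marks the real implementation as present */
	static inline bool tlb_delay_rmap(struct mmu_gather *tlb);
	#endif

	struct mmu_gather {
		unsigned int delayed_rmap : 1;	/* stand-in for the real struct */
	};

	#ifdef tlb_delay_rmap
	/* The definition may legally follow the earlier static declaration. */
	static inline bool tlb_delay_rmap(struct mmu_gather *tlb)
	{
		tlb->delayed_rmap = 1;
		return true;
	}
	#else
	/* No-op fallback, e.g. for s390. */
	static inline bool tlb_delay_rmap(struct mmu_gather *tlb)
	{
		return false;
	}
	#endif
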
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 317bef9eee3c..33943a4de5a7 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -261,13 +261,10 @@ extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
int page_size);
#ifdef CONFIG_SMP
-/*
- * This both sets 'delayed_rmap', and returns true. It would be an inline
- * function, except we define it before the 'struct mmu_gather'.
- */
-#define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
-#define tlb_reset_delay_rmap(tlb) ((tlb)->delayed_rmap = 0)
-#define tlb_rmap_delayed(tlb) ((tlb)->delayed_rmap)
+#define tlb_delay_rmap tlb_delay_rmap
+static inline bool tlb_delay_rmap(struct mmu_gather *tlb);
+static inline void tlb_reset_delay_rmap(struct mmu_gather *tlb);
+static inline bool tlb_rmap_delayed(struct mmu_gather *tlb);
extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
#endif
@@ -338,6 +335,27 @@ struct mmu_gather {
#endif
};
+#ifdef tlb_delay_rmap
+
+static inline bool tlb_delay_rmap(struct mmu_gather *tlb)
+{
+ tlb->delayed_rmap = 1;
+
+ return true;
+}
+
+static inline void tlb_reset_delay_rmap(struct mmu_gather *tlb)
+{
+ tlb->delayed_rmap = 0;
+}
+
+static inline bool tlb_rmap_delayed(struct mmu_gather *tlb)
+{
+ return tlb->delayed_rmap;
+}
+
+#else
+
/*
* We have a no-op version of the rmap removal that doesn't
* delay anything. That is used on S390, which flushes remote
@@ -345,11 +363,25 @@ struct mmu_gather {
* remote TLBs to flush and is not preemptible due to this
* all happening under the page table lock.
*/
-#ifndef tlb_delay_rmap
-#define tlb_delay_rmap(tlb) (false)
-#define tlb_reset_delay_rmap(tlb) do { } while (0)
-#define tlb_rmap_delayed(tlb) (false)
-static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+#define tlb_delay_rmap tlb_delay_rmap
+static inline bool tlb_delay_rmap(struct mmu_gather *tlb)
+{
+ return false;
+}
+
+static inline void tlb_reset_delay_rmap(struct mmu_gather *tlb)
+{
+}
+
+static inline bool tlb_rmap_delayed(struct mmu_gather *tlb)
+{
+ return false;
+}
+
+static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+}
+
#endif
void tlb_flush_mmu(struct mmu_gather *tlb);
--
2.31.1