[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260211153608.78083-1-qq570070308@gmail.com>
Date: Wed, 11 Feb 2026 23:36:08 +0800
From: Xie Yuanbin <qq570070308@...il.com>
To: dave.hansen@...el.com,
peterz@...radead.org,
tglx@...nel.org,
riel@...riel.com,
segher@...nel.crashing.org,
david@...nel.org,
hpa@...or.com,
arnd@...db.de,
anna-maria@...utronix.de,
mingo@...hat.com,
juri.lelli@...hat.com,
vincent.guittot@...aro.org,
dietmar.eggemann@....com,
rostedt@...dmis.org,
bsegall@...gle.com,
mgorman@...e.de,
vschneid@...hat.com,
bp@...en8.de,
dave.hansen@...ux.intel.com,
luto@...nel.org,
houwenlong.hwl@...group.com,
frederic@...nel.org,
akpm@...ux-foundation.org,
lorenzo.stoakes@...cle.com,
bhe@...hat.com,
ryan.roberts@....com,
urezki@...il.com,
nysal@...ux.ibm.com,
max.kellermann@...os.com
Cc: linux-kernel@...r.kernel.org,
x86@...nel.org,
Xie Yuanbin <qq570070308@...il.com>
Subject: Re: [PATCH v6 RESEND 1/3] x86/mm/tlb: Make enter_lazy_tlb() always inline on x86
On Tue, 10 Feb 2026 08:07:38 -0800, Dave Hansen wrote:
> On 2/10/26 07:32, Xie Yuanbin wrote:
>> This was just to fix a compilation error:
>> 'cpu_tlbstate' is defined in 'include/asm/tlbflush.h' inside of
>> '#ifndef MODULE'. So if the '#ifndef MODULE' is not added here,
>> the compilation error will be triggered:
>
> Rather than sprinkling #ifdefs and 'cpu_tlbstate' references around,
> could we try to keep them confined to asm/tlbflush.h as much as
> possible, please?
>
> I mean, mmu_context.h _does_ include asm/tlbflush.h, so even if you
> stuck enter_lazy_tlb() in asm/tlbflush.h, everything should keep
> working, no?
>
> That would both prevent adding a random #ifdef and keep 'cpu_tlbstate'
> references limited.
I apologize for misunderstanding your meaning before. I tried your
suggestion and there were no compilation warnings or errors:
```patch
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 1acafb1c6a93..ef5b507de34e 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -136,9 +136,6 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
}
#endif
-#define enter_lazy_tlb enter_lazy_tlb
-extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
-
extern void mm_init_global_asid(struct mm_struct *mm);
extern void mm_free_global_asid(struct mm_struct *mm);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 5a3cdc439e38..4bc5eb84e135 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -172,6 +172,15 @@ struct tlb_state_shared {
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
+static __always_inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
+ return;
+
+ this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
+}
+#define enter_lazy_tlb enter_lazy_tlb
+
bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 621e09d049cb..55143cbd6cd2 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -984,13 +984,6 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
* in a row. It will notify us that we're going back to a real mm by
* calling switch_mm_irqs_off().
*/
-void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
- if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
- return;
-
- this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
-}
/*
* Using a temporary mm allows to set temporary mappings that are not accessible
```
However, I'm a little worried that moving
'#define enter_lazy_tlb enter_lazy_tlb' into '#ifndef MODULE' might cause
other issues, so I placed it outside '#ifndef MODULE' in my
first patch.
What about this (compilation tested):
```patch
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 1acafb1c6a93..14e776876c69 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -136,8 +136,12 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
}
#endif
-#define enter_lazy_tlb enter_lazy_tlb
-extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
+#define enter_lazy_tlb(mm, tsk) \
+ do { \
+ if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm) \
+ break; \
+ this_cpu_write(cpu_tlbstate_shared.is_lazy, true); \
+ } while (0)
extern void mm_init_global_asid(struct mm_struct *mm);
extern void mm_free_global_asid(struct mm_struct *mm);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 621e09d049cb..55143cbd6cd2 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -984,13 +984,6 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
* in a row. It will notify us that we're going back to a real mm by
* calling switch_mm_irqs_off().
*/
-void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
- if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
- return;
-
- this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
-}
/*
* Using a temporary mm allows to set temporary mappings that are not accessible
```
Powered by blists - more mailing lists