Message-ID: <20241001225207.2215639-3-andrii@kernel.org>
Date: Tue, 1 Oct 2024 15:52:04 -0700
From: Andrii Nakryiko <andrii@...nel.org>
To: linux-trace-kernel@...r.kernel.org,
peterz@...radead.org,
oleg@...hat.com
Cc: rostedt@...dmis.org,
mhiramat@...nel.org,
bpf@...r.kernel.org,
linux-kernel@...r.kernel.org,
jolsa@...nel.org,
paulmck@...nel.org,
willy@...radead.org,
surenb@...gle.com,
akpm@...ux-foundation.org,
linux-mm@...ck.org,
mjguzik@...il.com,
brauner@...nel.org,
jannh@...gle.com,
mhocko@...nel.org,
vbabka@...e.cz,
mingo@...nel.org,
Andrii Nakryiko <andrii@...nel.org>
Subject: [PATCH v2 tip/perf/core 2/5] mm: switch to 64-bit mm_lock_seq/vm_lock_seq on 64-bit architectures
To increase mm->mm_lock_seq robustness, switch it from int to long, so
that it's a 64-bit counter on 64-bit systems and we can stop worrying
about it wrapping around in just ~4 billion iterations. Same goes for
VMA's matching vm_lock_seq, which is derived from mm_lock_seq.
I didn't use __u64 outright to keep 32-bit architectures unaffected, but
if it seems important enough, I have nothing against using __u64.
Suggested-by: Jann Horn <jannh@...gle.com>
Signed-off-by: Andrii Nakryiko <andrii@...nel.org>
---
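For reference, below is a minimal userspace sketch (not kernel code) of the even/odd
sequence-count speculation pattern that mm_lock_seq drives, with the counter declared
long so that wraparound stops being a practical concern on 64-bit hosts. It uses C11
atomics instead of the kernel's smp_load_acquire()/smp_store_release() helpers, and the
struct and function names (fake_mm, writer_lock, speculation_start, ...) are made up
for illustration only:

	/*
	 * Userspace illustration of the even/odd sequence-count speculation
	 * pattern.  All names here are hypothetical; the kernel's versions are
	 * inc_mm_lock_seq() and mmap_lock_speculation_start()/_end() in
	 * include/linux/mmap_lock.h, built on smp_load_acquire() and friends.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_mm {
		/* long: 64 bits on 64-bit hosts, so ~4 billion wraps no longer matter */
		_Atomic long lock_seq;
	};

	/* Writer: bump seq to an odd value while the write side is held... */
	static void writer_lock(struct fake_mm *mm)
	{
		atomic_fetch_add(&mm->lock_seq, 1);
	}

	/* ...and back to an even value once the write side is done. */
	static void writer_unlock(struct fake_mm *mm)
	{
		atomic_fetch_add(&mm->lock_seq, 1);
	}

	/* Reader: start speculation only if no writer is active (seq is even). */
	static bool speculation_start(struct fake_mm *mm, long *seq)
	{
		*seq = atomic_load_explicit(&mm->lock_seq, memory_order_acquire);
		return (*seq & 1) == 0;
	}

	/* Reader: speculation was valid only if the counter never moved. */
	static bool speculation_end(struct fake_mm *mm, long seq)
	{
		return atomic_load_explicit(&mm->lock_seq, memory_order_acquire) == seq;
	}

	int main(void)
	{
		struct fake_mm mm = { .lock_seq = 0 };
		long seq;

		if (speculation_start(&mm, &seq)) {
			/* ... lockless read-side work would go here ... */
			printf("speculation %s\n",
			       speculation_end(&mm, seq) ? "valid" : "retry");
		}

		/* A writer critical section flips the counter twice. */
		writer_lock(&mm);
		writer_unlock(&mm);
		return 0;
	}

The single-threaded main() only exercises the API shape; in real use the reader and
writer run concurrently and a failed speculation_end() means the reader must retry or
fall back to taking the lock.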
include/linux/mm.h | 6 +++---
include/linux/mm_types.h | 4 ++--
include/linux/mmap_lock.h | 4 ++--
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6549d0979b28..f8e75d0642a8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -716,7 +716,7 @@ static inline void vma_end_read(struct vm_area_struct *vma)
}
/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
-static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
+static bool __is_vma_write_locked(struct vm_area_struct *vma, long *mm_lock_seq)
{
mmap_assert_write_locked(vma->vm_mm);
@@ -735,7 +735,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
*/
static inline void vma_start_write(struct vm_area_struct *vma)
{
- int mm_lock_seq;
+ long mm_lock_seq;
if (__is_vma_write_locked(vma, &mm_lock_seq))
return;
@@ -753,7 +753,7 @@ static inline void vma_start_write(struct vm_area_struct *vma)
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
- int mm_lock_seq;
+ long mm_lock_seq;
VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
}
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d5e3f907eea4..c045543f43d9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -705,7 +705,7 @@ struct vm_area_struct {
* counter reuse can only lead to occasional unnecessary use of the
* slowpath.
*/
- int vm_lock_seq;
+ long vm_lock_seq;
struct vma_lock *vm_lock;
#endif
@@ -887,7 +887,7 @@ struct mm_struct {
* Can be read with ACQUIRE semantics if not holding write
* mmap_lock.
*/
- int mm_lock_seq;
+ long mm_lock_seq;
#endif
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 9d23635bc701..fca527dece63 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -105,7 +105,7 @@ static inline void inc_mm_lock_seq(struct mm_struct *mm, bool acquire)
}
}
-static inline bool mmap_lock_speculation_start(struct mm_struct *mm, int *seq)
+static inline bool mmap_lock_speculation_start(struct mm_struct *mm, long *seq)
{
/* Pairs with RELEASE semantics in inc_mm_lock_seq(). */
*seq = smp_load_acquire(&mm->mm_lock_seq);
@@ -113,7 +113,7 @@ static inline bool mmap_lock_speculation_start(struct mm_struct *mm, int *seq)
return (*seq & 1) == 0;
}
-static inline bool mmap_lock_speculation_end(struct mm_struct *mm, int seq)
+static inline bool mmap_lock_speculation_end(struct mm_struct *mm, long seq)
{
/* Pairs with ACQUIRE semantics in inc_mm_lock_seq(). */
smp_rmb();
--
2.43.5