Message-Id: <20220429133552.33768-11-zhengqi.arch@bytedance.com>
Date: Fri, 29 Apr 2022 21:35:44 +0800
From: Qi Zheng <zhengqi.arch@...edance.com>
To: akpm@...ux-foundation.org, tglx@...utronix.de,
kirill.shutemov@...ux.intel.com, mika.penttila@...tfour.com,
david@...hat.com, jgg@...dia.com, tj@...nel.org, dennis@...nel.org,
ming.lei@...hat.com
Cc: linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, songmuchun@...edance.com,
zhouchengming@...edance.com, Qi Zheng <zhengqi.arch@...edance.com>
Subject: [RFC PATCH 10/18] mm: add pte_tryget_map{_lock}() helper

Currently we use pte_offset_map{_lock}() to get the pte_t pointer
before accessing the PTE page table page. After FREE_USER_PTE is
introduced, we also need to call pte_tryget() before calling
pte_offset_map{_lock}(), in order to take a reference on the PTE page
table page and prevent it from being freed while it is being
accessed.
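Without helpers, a caller would open-code that sequence roughly as
follows (a sketch only, based on the pte_tryget()/pte_put() interfaces
from earlier patches in this series; 'addr' stands for some user
address covered by 'pmd'):

        pte_t *pte;

        if (!pte_tryget(mm, pmd, addr))
                return 0;       /* freed or being freed: like pte_none() */
        pte = pte_offset_map(pmd, addr);
        /* ... access the PTE entries ... */
        pte_unmap(pte);         /* also drops the reference via pte_put() */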
This patch adds the pte_tryget_map{_lock}() helpers to do that. A
NULL return value indicates that we failed to get the percpu_ref
because a concurrent thread is releasing (or has already released)
this PTE page table page; callers should treat that case like
pte_none().
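With the helper, a typical locked user then looks roughly like this
(again only a sketch, with 'addr' as a placeholder):

        pte_t *pte;
        spinlock_t *ptl;

        pte = pte_tryget_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return 0;       /* freed or being freed: like pte_none() */
        /* ... access the PTE entries under the lock ... */
        pte_unmap_unlock(pte, ptl);

Note that pte_unmap_unlock() keeps its existing form but now also
drops the reference, since pte_unmap() calls pte_put() internally.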
Signed-off-by: Qi Zheng <zhengqi.arch@...edance.com>
---
include/linux/pgtable.h | 37 +++++++++++++++++++++++++++++++++++--
1 file changed, 35 insertions(+), 2 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index d1218cb1013e..6f205fee6348 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -228,6 +228,8 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
return ptl;
}

+#include <linux/pte_ref.h>
+
#ifndef pte_offset_kernel
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
@@ -240,12 +242,38 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
#define pte_offset_map(dir, address) \
((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte))
+#define __pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) ((void)(pte)) /* NOP */
+#define __pte_unmap(pte) ((void)(pte)) /* NOP */
#endif

+#define pte_tryget_map(mm, pmd, address) \
+({ \
+ pte_t *__pte = NULL; \
+ if (pte_tryget(mm, pmd, address)) \
+ __pte = pte_offset_map(pmd, address); \
+ __pte; \
+})
+
+#define pte_unmap(pte) do { \
+ pte_put(pte); \
+ __pte_unmap(pte); \
+} while (0)
+
+#define pte_tryget_map_lock(mm, pmd, address, ptlp) \
+({ \
+ spinlock_t *__ptl = NULL; \
+ pte_t *__pte = NULL; \
+ if (pte_tryget(mm, pmd, address)) { \
+ __ptl = pte_lockptr(mm, pmd); \
+ __pte = pte_offset_map(pmd, address); \
+ *(ptlp) = __ptl; \
+ spin_lock(__ptl); \
+ } \
+ __pte; \
+})
+
#define pte_offset_map_lock(mm, pmd, address, ptlp) \
({ \
spinlock_t *__ptl = pte_lockptr(mm, pmd); \
@@ -260,6 +288,11 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
pte_unmap(pte); \
} while (0)

+#define __pte_unmap_unlock(pte, ptl) do { \
+ spin_unlock(ptl); \
+ __pte_unmap(pte); \
+} while (0)
+
/* Find an entry in the second-level page table.. */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
--
2.20.1