Message-Id: <20221003040427.1082050-1-ira.weiny@intel.com>
Date: Sun, 2 Oct 2022 21:04:27 -0700
From: ira.weiny@...el.com
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Ira Weiny <ira.weiny@...el.com>,
"Fabio M. De Francesco" <fmdefrancesco@...il.com>,
Thomas Gleixner <tglx@...utronix.de>,
Christoph Hellwig <hch@....de>,
Al Viro <viro@...iv.linux.org.uk>,
Linus Walleij <linus.walleij@...aro.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH] highmem: Fix kmap_to_page() for kmap_local_page() addresses
From: Ira Weiny <ira.weiny@...el.com>
kmap_to_page() is used to get the page for a virtual address which may
be kmap'ed.  Unfortunately, kmap_local_page() stores its mappings in a
thread-local array separate from kmap(), so those mappings were never
checked by kmap_to_page().

Check the kmap_local_page() mappings as well and return the page if
found.

Because kmap_to_page() is intended to be removed, add a WARN_ON_ONCE()
to the kmap checks to flag potential issues early.
Cc: "Fabio M. De Francesco" <fmdefrancesco@...il.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Christoph Hellwig <hch@....de>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Reported-by: Al Viro <viro@...iv.linux.org.uk>
Signed-off-by: Ira Weiny <ira.weiny@...el.com>
---
I'm still working toward getting rid of kmap_to_page().[1]  But until
then, this fix should be applied.
[1] https://lore.kernel.org/linux-mm/20221002002326.946620-1-ira.weiny@intel.com/
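
For reference, here is a minimal, hypothetical sketch (not part of this
patch; the function name is made up) of the kind of caller that trips
over the old behavior on a CONFIG_HIGHMEM system:

	/* Hypothetical illustration only -- not from this patch. */
	static void kmap_to_page_demo(struct page *page)
	{
		void *vaddr = kmap_local_page(page);

		/*
		 * For a highmem page, 'vaddr' lives in the per-thread
		 * kmap_local fixmap range.  Before this fix,
		 * kmap_to_page() did not check that range and fell
		 * through to virt_to_page(vaddr), returning the wrong
		 * struct page.
		 */
		WARN_ON(kmap_to_page(vaddr) != page);

		kunmap_local(vaddr);
	}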
---
mm/highmem.c | 40 ++++++++++++++++++++++++++++++----------
1 file changed, 30 insertions(+), 10 deletions(-)
diff --git a/mm/highmem.c b/mm/highmem.c
index c707d7202d5f..29423c1afb3e 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -140,16 +140,45 @@ pte_t *pkmap_page_table;
 		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
 #endif
 
+static inline int kmap_local_calc_idx(int idx)
+{
+	return idx + KM_MAX_IDX * smp_processor_id();
+}
+
+#ifndef arch_kmap_local_map_idx
+#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
+#endif
+
 struct page *__kmap_to_page(void *vaddr)
 {
+	unsigned long base = (unsigned long) vaddr & PAGE_MASK;
+	struct kmap_ctrl *kctrl = &current->kmap_ctrl;
 	unsigned long addr = (unsigned long)vaddr;
+	int i;
 
-	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
+	/* kmap() mappings */
+	if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) &&
+			 addr < PKMAP_ADDR(LAST_PKMAP))) {
 		int i = PKMAP_NR(addr);
 
 		return pte_page(pkmap_page_table[i]);
 	}
 
+	/* kmap_local_page() mappings */
+	if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) &&
+			 base < __fix_to_virt(FIX_KMAP_BEGIN))) {
+		for (i = 0; i < kctrl->idx; i++) {
+			unsigned long base_addr;
+			int idx;
+
+			idx = arch_kmap_local_map_idx(i, pte_pfn(kctrl->pteval[i]));
+			base_addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+
+			if (base_addr == base)
+				return pte_page(kctrl->pteval[i]);
+		}
+	}
+
 	return virt_to_page(vaddr);
 }
 EXPORT_SYMBOL(__kmap_to_page);
@@ -462,10 +491,6 @@ static inline void kmap_local_idx_pop(void)
 # define arch_kmap_local_post_unmap(vaddr)	do { } while (0)
 #endif
 
-#ifndef arch_kmap_local_map_idx
-#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
-#endif
-
 #ifndef arch_kmap_local_unmap_idx
 #define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
 #endif
@@ -494,11 +519,6 @@ static inline bool kmap_high_unmap_local(unsigned long vaddr)
 	return false;
 }
 
-static inline int kmap_local_calc_idx(int idx)
-{
-	return idx + KM_MAX_IDX * smp_processor_id();
-}
-
 static pte_t *__kmap_pte;
 
 static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
base-commit: 274d7803837da78dfc911bcda0d593412676fc20
--
2.37.2