Message-ID: <20210412080611.635125063@infradead.org>
Date: Mon, 12 Apr 2021 10:00:14 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: akpm@...ux-foundation.org
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org,
boris.ostrovsky@...cle.com, jgross@...e.com,
sstabellini@...nel.org, x86@...nel.org,
jani.nikula@...ux.intel.com, joonas.lahtinen@...ux.intel.com,
rodrigo.vivi@...el.com, chris@...is-wilson.co.uk,
intel-gfx@...ts.freedesktop.org, linux-mm@...ck.org,
keescook@...omium.org, hch@....de
Subject: [PATCH 2/7] xen/gntdev,x86: Remove apply_to_page_range() use from module

Instead of relying on apply_to_page_range() being available to
modules, move its use into core kernel code and export its
application.

NOTE: ideally we would do: use_ptemod = !auto_translate_physmap &&
gnttab_map_avail_bits, and remove this hack.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
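To illustrate the boundary this patch creates, here is a standalone
userspace sketch (not kernel code; every name in it is made up for the
example): the range walker and per-entry callback play the roles of
apply_to_page_range() and set_grant_ptes_as_special(), and the single
exposed wrapper plays the role of the exported
xen_set_grant_as_special(), which is all the gntdev module ends up
calling.

#include <stdio.h>

/* Per-entry callback type, analogous to the pte_fn_t callback that
 * apply_to_page_range() invokes on each PTE. */
typedef int (*entry_fn_t)(unsigned long *entry, unsigned long addr, void *data);

/* Core-side walker, analogous to apply_to_page_range(): visit every
 * entry in [start, end) and hand it to the callback. */
static int apply_to_range(unsigned long *table, unsigned long start,
			  unsigned long end, entry_fn_t fn, void *data)
{
	unsigned long addr;
	int err;

	for (addr = start; addr < end; addr++) {
		err = fn(&table[addr], addr, data);
		if (err)
			return err;
	}
	return 0;
}

/* Core-side callback, analogous to set_grant_ptes_as_special():
 * mark one entry (here: set a pretend "special" bit). */
static int mark_entry_special(unsigned long *entry, unsigned long addr, void *data)
{
	*entry |= 1UL;
	return 0;
}

/* The one symbol a "module" would see, analogous to the exported
 * xen_set_grant_as_special(): callers never touch the walker itself. */
void set_range_as_special(unsigned long *table, unsigned long start,
			  unsigned long end)
{
	apply_to_range(table, start, end, mark_entry_special, NULL);
}

int main(void)
{
	unsigned long table[8] = { 0 };
	int i;

	/* "Module" side: a single call, no knowledge of the walker. */
	set_range_as_special(table, 2, 6);

	for (i = 0; i < 8; i++)
		printf("entry %d: %lu\n", i, table[i]);
	return 0;
}

The shape is the point: only the wrapper needs exporting, so
apply_to_page_range() itself can stop being available to modules.
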
arch/x86/include/asm/xen/page.h | 2 ++
arch/x86/xen/mmu.c | 26 ++++++++++++++++++++++++++
drivers/xen/gntdev.c | 23 +----------------------
3 files changed, 29 insertions(+), 22 deletions(-)
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -370,4 +370,6 @@ static inline unsigned long xen_get_swio
return __get_free_pages(__GFP_NOWARN, order);
}
+extern void xen_set_grant_as_special(struct vm_area_struct *vma);
+
#endif /* _ASM_X86_XEN_PAGE_H */
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -51,3 +51,29 @@ int xen_unmap_domain_gfn_range(struct vm
return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
+
+static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
+{
+	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
+	return 0;
+}
+
+void xen_set_grant_as_special(struct vm_area_struct *vma)
+{
+	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
+		return;
+
+	/*
+	 * If the PTEs were not made special by the grant map
+	 * hypercall, do so here.
+	 *
+	 * This is racy since the mapping is already visible
+	 * to userspace but userspace should be well-behaved
+	 * enough to not touch it until the mmap() call
+	 * returns.
+	 */
+	apply_to_page_range(vma->vm_mm, vma->vm_start,
+			    vma->vm_end - vma->vm_start,
+			    set_grant_ptes_as_special, NULL);
+}
+EXPORT_SYMBOL_GPL(xen_set_grant_as_special);
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -278,14 +278,6 @@ static int find_grant_ptes(pte_t *pte, u
return 0;
}
-#ifdef CONFIG_X86
-static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
-{
-	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
-	return 0;
-}
-#endif
-
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
int i, err = 0;
@@ -1040,20 +1032,7 @@ static int gntdev_mmap(struct file *flip
goto out_put_map;
} else {
#ifdef CONFIG_X86
-		/*
-		 * If the PTEs were not made special by the grant map
-		 * hypercall, do so here.
-		 *
-		 * This is racy since the mapping is already visible
-		 * to userspace but userspace should be well-behaved
-		 * enough to not touch it until the mmap() call
-		 * returns.
-		 */
-		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
-			apply_to_page_range(vma->vm_mm, vma->vm_start,
-					    vma->vm_end - vma->vm_start,
-					    set_grant_ptes_as_special, NULL);
-		}
+		xen_set_grant_as_special(vma);
#endif
}