Message-Id: <20180412192800.15708-11-mathieu.desnoyers@efficios.com>
Date: Thu, 12 Apr 2018 15:27:47 -0400
From: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
To: Peter Zijlstra <peterz@...radead.org>,
"Paul E . McKenney" <paulmck@...ux.vnet.ibm.com>,
Boqun Feng <boqun.feng@...il.com>,
Andy Lutomirski <luto@...capital.net>,
Dave Watson <davejwatson@...com>
Cc: linux-kernel@...r.kernel.org, linux-api@...r.kernel.org,
Paul Turner <pjt@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Russell King <linux@....linux.org.uk>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H . Peter Anvin" <hpa@...or.com>, Andrew Hunter <ahh@...gle.com>,
Andi Kleen <andi@...stfloor.org>, Chris Lameter <cl@...ux.com>,
Ben Maurer <bmaurer@...com>,
Steven Rostedt <rostedt@...dmis.org>,
Josh Triplett <josh@...htriplett.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will.deacon@....com>,
Michael Kerrisk <mtk.manpages@...il.com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
Subject: [RFC PATCH for 4.18 10/23] mm: Introduce vm_map_user_ram, vm_unmap_user_ram
Create and destroy mappings aliased to a user-space mapping with the same
cache coloring as the userspace mapping. Allow the kernel to load from
and store to pages shared with user-space through its own mapping in
kernel virtual addresses while ensuring cache coherency between kernel
and userspace mappings for virtually aliased architectures.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
Reviewed-by: Matthew Wilcox <mawilcox@...rosoft.com>
CC: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
CC: Peter Zijlstra <peterz@...radead.org>
CC: Paul Turner <pjt@...gle.com>
CC: Thomas Gleixner <tglx@...utronix.de>
CC: Andrew Hunter <ahh@...gle.com>
CC: Andy Lutomirski <luto@...capital.net>
CC: Andi Kleen <andi@...stfloor.org>
CC: Dave Watson <davejwatson@...com>
CC: Chris Lameter <cl@...ux.com>
CC: Ingo Molnar <mingo@...hat.com>
CC: "H. Peter Anvin" <hpa@...or.com>
CC: Ben Maurer <bmaurer@...com>
CC: Steven Rostedt <rostedt@...dmis.org>
CC: Josh Triplett <josh@...htriplett.org>
CC: Linus Torvalds <torvalds@...ux-foundation.org>
CC: Andrew Morton <akpm@...ux-foundation.org>
CC: Russell King <linux@....linux.org.uk>
CC: Catalin Marinas <catalin.marinas@....com>
CC: Will Deacon <will.deacon@....com>
CC: Michael Kerrisk <mtk.manpages@...il.com>
CC: Boqun Feng <boqun.feng@...il.com>
---
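Note for reviewers (not for the changelog): below is a minimal sketch of
how a caller might use the new helpers, assuming the user pages have
already been pinned (e.g. with get_user_pages_fast()). The function and
variable names in the sketch are hypothetical and not part of this patch:

	#include <linux/mm.h>
	#include <linux/string.h>
	#include <linux/vmalloc.h>

	/*
	 * Map 'npages' pinned user pages starting at 'uaddr' into a
	 * kernel alias with matching cache color, store to them through
	 * that alias, then tear the alias down.
	 */
	static int example_touch_user_pages(struct page **pages,
					    unsigned int npages,
					    unsigned long uaddr)
	{
		void *kaddr;

		kaddr = vm_map_user_ram(pages, npages, uaddr,
					NUMA_NO_NODE, PAGE_KERNEL);
		if (!kaddr)
			return -ENOMEM;

		/*
		 * Stores through this kernel alias are coherent with the
		 * user mapping, even on virtually aliased caches.
		 */
		memset(kaddr, 0, (size_t)npages * PAGE_SIZE);

		vm_unmap_user_ram(kaddr, npages);
		return 0;
	}

vm_unmap_user_ram() must be given the pointer and count from the matching
vm_map_user_ram() call; partial unmap is not supported.
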
 include/linux/vmalloc.h |  4 +++
 mm/vmalloc.c            | 66 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+)
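
For reference, the cache coloring arithmetic in the vm_map_user_ram()
hunk below works out as follows. Assuming, for illustration only,
PAGE_SIZE = 4 KiB and SHMLBA = 0x4000 (four page colors, as on some
32-bit ARM configurations), mapping count = 2 pages at
uaddr = 0x10003000 gives:

	va_offset  = ALIGN_DOWN(0x10003000, PAGE_SIZE) & (SHMLBA - 1)
	           = 0x10003000 & 0x3fff
	           = 0x3000
	alloc_size = ALIGN(0x3000 + 2 * PAGE_SIZE, SHMLBA)
	           = ALIGN(0x5000, 0x4000)
	           = 0x8000

Because alloc_vmap_area() returns an SHMLBA-aligned va_start, the
returned address va_start + va_offset keeps the same low color bits
(0x3000) as the user address, so the kernel and user aliases land on
the same cache color.
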
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 1e5d8c392f15..d5e5c11ba947 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -58,6 +58,10 @@ struct vmap_area {
 extern void vm_unmap_ram(const void *mem, unsigned int count);
 extern void *vm_map_ram(struct page **pages, unsigned int count,
 				int node, pgprot_t prot);
+extern void vm_unmap_user_ram(const void *mem, unsigned int count);
+extern void *vm_map_user_ram(struct page **pages, unsigned int count,
+			unsigned long uaddr, int node, pgprot_t prot);
+
 extern void vm_unmap_aliases(void);
 
 #ifdef CONFIG_MMU
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ebff729cc956..ae033b825e45 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1199,6 +1199,72 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 }
 EXPORT_SYMBOL(vm_map_ram);
 
+/**
+ * vm_unmap_user_ram - unmap linear kernel address space set up by vm_map_user_ram
+ * @mem: the pointer returned by vm_map_user_ram
+ * @count: the count passed to that vm_map_user_ram call (cannot unmap partial)
+ */
+void vm_unmap_user_ram(const void *mem, unsigned int count)
+{
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
+	unsigned long addr = (unsigned long)mem;
+	struct vmap_area *va;
+
+	might_sleep();
+	BUG_ON(!addr);
+	BUG_ON(addr < VMALLOC_START);
+	BUG_ON(addr > VMALLOC_END);
+	BUG_ON(!PAGE_ALIGNED(addr));
+
+	debug_check_no_locks_freed(mem, size);
+	vmap_debug_free_range(addr, addr+size);
+
+	va = find_vmap_area(addr);
+	BUG_ON(!va);
+	free_unmap_vmap_area(va);
+}
+EXPORT_SYMBOL(vm_unmap_user_ram);
+
+/**
+ * vm_map_user_ram - map user space pages linearly into kernel virtual address
+ * @pages: an array of pointers to the virtually contiguous pages to be mapped
+ * @count: number of pages
+ * @uaddr: address within the first page in the userspace mapping
+ * @node: prefer to allocate data structures on this node
+ * @prot: memory protection to use. PAGE_KERNEL for regular RAM
+ *
+ * Create a mapping aliased to a user-space mapping with the same cache
+ * coloring as the userspace mapping. Allow the kernel to load from and
+ * store to pages shared with user-space through its own mapping in kernel
+ * virtual addresses while ensuring cache coherency between kernel and
+ * userspace mappings for virtually aliased architectures.
+ *
+ * Returns: a pointer to the address that has been mapped, or %NULL on failure
+ */
+void *vm_map_user_ram(struct page **pages, unsigned int count,
+		unsigned long uaddr, int node, pgprot_t prot)
+{
+	unsigned long size = (unsigned long)count << PAGE_SHIFT;
+	unsigned long va_offset = ALIGN_DOWN(uaddr, PAGE_SIZE) & (SHMLBA - 1);
+	unsigned long alloc_size = ALIGN(va_offset + size, SHMLBA);
+	struct vmap_area *va;
+	unsigned long addr;
+	void *mem;
+
+	va = alloc_vmap_area(alloc_size, SHMLBA, VMALLOC_START, VMALLOC_END,
+			node, GFP_KERNEL);
+	if (IS_ERR(va))
+		return NULL;
+	addr = va->va_start + va_offset;
+	mem = (void *)addr;
+	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
+		vm_unmap_user_ram(mem, count);
+		return NULL;
+	}
+	return mem;
+}
+EXPORT_SYMBOL(vm_map_user_ram);
+
 static struct vm_struct *vmlist __initdata;
 /**
  * vm_area_add_early - add vmap area early during boot
--
2.11.0