Message-Id: <20240222131230.635-5-petrtesarik@huaweicloud.com>
Date: Thu, 22 Feb 2024 14:12:29 +0100
From: Petr Tesarik <petrtesarik@...weicloud.com>
To: Dave Hansen <dave.hansen@...el.com>
Cc: Petr Tesařík <petr@...arici.cz>,
Petr Tesarik <petrtesarik@...weicloud.com>,
Jonathan Corbet <corbet@....net>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT)" <x86@...nel.org>,
"H. Peter Anvin" <hpa@...or.com>,
Andy Lutomirski <luto@...nel.org>,
Oleg Nesterov <oleg@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Xin Li <xin3.li@...el.com>,
Arnd Bergmann <arnd@...db.de>,
Andrew Morton <akpm@...ux-foundation.org>,
Rick Edgecombe <rick.p.edgecombe@...el.com>,
Kees Cook <keescook@...omium.org>,
"Masami Hiramatsu (Google)" <mhiramat@...nel.org>,
Pengfei Xu <pengfei.xu@...el.com>,
Josh Poimboeuf <jpoimboe@...nel.org>,
Ze Gao <zegao2021@...il.com>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Kai Huang <kai.huang@...el.com>,
David Woodhouse <dwmw@...zon.co.uk>,
Brian Gerst <brgerst@...il.com>,
Jason Gunthorpe <jgg@...pe.ca>,
Joerg Roedel <jroedel@...e.de>,
"Mike Rapoport (IBM)" <rppt@...nel.org>,
Tina Zhang <tina.zhang@...el.com>,
Jacob Pan <jacob.jun.pan@...ux.intel.com>,
"open list:DOCUMENTATION" <linux-doc@...r.kernel.org>,
open list <linux-kernel@...r.kernel.org>,
Roberto Sassu <roberto.sassu@...weicloud.com>,
John Johansen <john.johansen@...onical.com>,
Paul Moore <paul@...l-moore.com>,
James Morris <jmorris@...ei.org>,
"Serge E. Hallyn" <serge@...lyn.com>,
apparmor@...ts.ubuntu.com,
linux-security-module@...r.kernel.org,
Petr Tesarik <petr.tesarik1@...wei-partners.com>
Subject: [RFC 4/5] sbm: fix up calls to dynamic memory allocators
From: Petr Tesarik <petr.tesarik1@...wei-partners.com>
Add fixup functions to call kmalloc(), vmalloc() and friends on behalf
of the sandbox code. Each allocator gets a proxy handler which runs
the real allocator in kernel mode and then maps the returned object
into the sandbox, so the sandboxed code can use the memory directly.
The proxy_alloc1/2/3 variants differ only in which register holds the
allocation size: regs->di, regs->si and regs->dx carry the first,
second and third argument in the x86-64 calling convention. The
allocator entry points themselves are annotated __nosbm. kfree() and
vfree() are proxied as well; unmapping the freed range from the
sandbox is still a TODO.
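
With these fixups in place, sandboxed code can allocate and free
memory transparently. A minimal sketch of the intended use, assuming
the sbm_init()/sbm_call() interface from <linux/sbm.h> introduced
earlier in this series (names and error handling are illustrative):

	/* Runs in sandbox mode. The call to kmalloc() faults, the
	 * fixup proxies it to kernel mode, and post_alloc() maps the
	 * returned object into the sandbox, so the buffer is directly
	 * usable here. kfree() is proxied the same way.
	 */
	static int demo_func(void *data)
	{
		char *buf;

		buf = kmalloc(64, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		memset(buf, 0, 64);
		kfree(buf);
		return 0;
	}

	static int run_demo(void)
	{
		struct sbm sbm;
		int err;

		sbm_init(&sbm);
		err = sbm_call(&sbm, demo_func, NULL);
		sbm_destroy(&sbm);
		return err;
	}
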
Signed-off-by: Petr Tesarik <petr.tesarik1@...wei-partners.com>
---
 arch/x86/kernel/sbm/core.c | 81 ++++++++++++++++++++++++++++++++++++++
 mm/slab_common.c           |  3 +-
 mm/slub.c                  | 17 ++++----
 mm/vmalloc.c               | 11 +++---
 4 files changed, 98 insertions(+), 14 deletions(-)
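
Note (illustration only, not part of the patch): the proxy handlers
added in core.c below are invoked from the sandbox fault handler by
matching the faulting target address against the fixups[] table.
Assuming the first member of struct sbm_fixup is the target address,
as the initializers suggest, the lookup is conceptually:

	/* Hypothetical helper; the real dispatch lives in the fault
	 * handling code added earlier in this series.
	 */
	static unsigned long dispatch_fixup(struct x86_sbm_state *state,
					    unsigned long func,
					    struct pt_regs *regs)
	{
		const struct sbm_fixup *fixup;

		for (fixup = fixups; fixup->target; fixup++)
			if (func == (unsigned long)fixup->target)
				return fixup->proxy(state, func, regs);

		/* No fixup registered for this address. */
		return 0;
	}
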
diff --git a/arch/x86/kernel/sbm/core.c b/arch/x86/kernel/sbm/core.c
index c8ac7ecb08cc..3cf3842292b9 100644
--- a/arch/x86/kernel/sbm/core.c
+++ b/arch/x86/kernel/sbm/core.c
@@ -20,6 +20,12 @@
 #include <linux/sbm.h>
 #include <linux/sched/task_stack.h>
 
+/*
+ * FIXME: Remove these includes when there is proper API for defining
+ * which functions can be called from sandbox mode.
+ */
+#include <linux/vmalloc.h>
+
 #define GFP_SBM_PGTABLE	(GFP_KERNEL | __GFP_ZERO)
 #define PGD_ORDER	get_order(sizeof(pgd_t) * PTRS_PER_PGD)
 
@@ -52,8 +58,83 @@ struct sbm_fixup {
 	sbm_proxy_call_fn proxy;
 };
 
+static int map_range(struct x86_sbm_state *state, unsigned long start,
+		     unsigned long end, pgprot_t prot);
+
+/* Map the newly allocated dynamic memory region. */
+static unsigned long post_alloc(struct x86_sbm_state *state,
+				unsigned long objp, size_t size)
+{
+	int err;
+
+	if (!objp)
+		return objp;
+
+	err = map_range(state, objp, objp + size, PAGE_SHARED);
+	if (err) {
+		kfree((void *)objp);
+		return 0UL;
+	}
+	return objp;
+}
+
+/* Allocation proxy handler if size is the 1st parameter. */
+static unsigned long proxy_alloc1(struct x86_sbm_state *state,
+				  unsigned long func, struct pt_regs *regs)
+{
+	unsigned long objp;
+
+	objp = x86_sbm_proxy_call(state, func, regs);
+	return post_alloc(state, objp, regs->di);
+}
+
+/* Allocation proxy handler if size is the 2nd parameter. */
+static unsigned long proxy_alloc2(struct x86_sbm_state *state,
+				  unsigned long func, struct pt_regs *regs)
+{
+	unsigned long objp;
+
+	objp = x86_sbm_proxy_call(state, func, regs);
+	return post_alloc(state, objp, regs->si);
+}
+
+/* Allocation proxy handler if size is the 3rd parameter. */
+static unsigned long proxy_alloc3(struct x86_sbm_state *state,
+				  unsigned long func, struct pt_regs *regs)
+{
+	unsigned long objp;
+
+	objp = x86_sbm_proxy_call(state, func, regs);
+	return post_alloc(state, objp, regs->dx);
+}
+
+/* Proxy handler to free previously allocated memory. */
+static unsigned long proxy_free(struct x86_sbm_state *state,
+				unsigned long func, struct pt_regs *regs)
+{
+	/* TODO: unmap allocated addresses from sandbox! */
+	return x86_sbm_proxy_call(state, func, regs);
+}
+
 static const struct sbm_fixup fixups[] =
 {
+	/* kmalloc() and friends */
+	{ kmalloc_trace, proxy_alloc3 },
+	{ __kmalloc, proxy_alloc1 },
+	{ __kmalloc_node, proxy_alloc1 },
+	{ __kmalloc_node_track_caller, proxy_alloc1 },
+	{ kmalloc_large, proxy_alloc1 },
+	{ kmalloc_large_node, proxy_alloc1 },
+	{ krealloc, proxy_alloc2 },
+	{ kfree, proxy_free },
+
+	/* vmalloc() and friends */
+	{ vmalloc, proxy_alloc1 },
+	{ __vmalloc, proxy_alloc1 },
+	{ __vmalloc_node, proxy_alloc1 },
+	{ vzalloc, proxy_alloc1 },
+	{ vfree, proxy_free },
+
 	{ }
 };
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 238293b1dbe1..2b72118d9bfa 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -28,6 +28,7 @@
 #include <asm/page.h>
 #include <linux/memcontrol.h>
 #include <linux/stackdepot.h>
+#include <linux/sbm.h>
 
 #include "internal.h"
 #include "slab.h"
@@ -1208,7 +1209,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
  *
  * Return: pointer to the allocated memory or %NULL in case of error
  */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
+void * __nosbm krealloc(const void *p, size_t new_size, gfp_t flags)
 {
 	void *ret;
 
diff --git a/mm/slub.c b/mm/slub.c
index 2ef88bbf56a3..5f2290fe4df0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -42,6 +42,7 @@
 #include <kunit/test.h>
 #include <kunit/test-bug.h>
 #include <linux/sort.h>
+#include <linux/sbm.h>
 
 #include <linux/debugfs.h>
 #include <trace/events/kmem.h>
@@ -3913,7 +3914,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
+static void * __nosbm __kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct folio *folio;
 	void *ptr = NULL;
@@ -3938,7 +3939,7 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
-void *kmalloc_large(size_t size, gfp_t flags)
+void * __nosbm kmalloc_large(size_t size, gfp_t flags)
 {
 	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
 
@@ -3983,26 +3984,26 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
 	return ret;
 }
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+void * __nosbm __kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
-void *__kmalloc(size_t size, gfp_t flags)
+void * __nosbm __kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
-void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-				  int node, unsigned long caller)
+void * __nosbm __kmalloc_node_track_caller(size_t size, gfp_t flags,
+					   int node, unsigned long caller)
 {
 	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 
-void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+void * __nosbm kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
 				    _RET_IP_, size);
@@ -4386,7 +4387,7 @@ static void free_large_kmalloc(struct folio *folio, void *object)
  *
  * If @object is NULL, no operation is performed.
  */
-void kfree(const void *object)
+void __nosbm kfree(const void *object)
 {
 	struct folio *folio;
 	struct slab *slab;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d12a17fc0c17..d7a5b715ac03 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -40,6 +40,7 @@
 #include <linux/pgtable.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/mm.h>
+#include <linux/sbm.h>
 
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
@@ -2804,7 +2805,7 @@ void vfree_atomic(const void *addr)
  * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
  * conventions for vfree() arch-dependent would be a really bad idea).
  */
-void vfree(const void *addr)
+void __nosbm vfree(const void *addr)
 {
 	struct vm_struct *vm;
 	int i;
@@ -3379,7 +3380,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *__vmalloc_node(unsigned long size, unsigned long align,
+void * __nosbm __vmalloc_node(unsigned long size, unsigned long align,
 			gfp_t gfp_mask, int node, const void *caller)
 {
 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
@@ -3394,7 +3395,7 @@ void *__vmalloc_node(unsigned long size, unsigned long align,
 EXPORT_SYMBOL_GPL(__vmalloc_node);
 #endif
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+void * __nosbm __vmalloc(unsigned long size, gfp_t gfp_mask)
 {
 	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
 				__builtin_return_address(0));
@@ -3413,7 +3414,7 @@ EXPORT_SYMBOL(__vmalloc);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc(unsigned long size)
+void * __nosbm vmalloc(unsigned long size)
 {
 	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
 				__builtin_return_address(0));
@@ -3453,7 +3454,7 @@ EXPORT_SYMBOL_GPL(vmalloc_huge);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vzalloc(unsigned long size)
+void * __nosbm vzalloc(unsigned long size)
 {
 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
 				__builtin_return_address(0));
--
2.34.1