Message-Id: <20200515172756.27185-3-will@kernel.org>
Date: Fri, 15 May 2020 18:27:52 +0100
From: Will Deacon <will@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: linux-arm-kernel@...ts.infradead.org,
Will Deacon <will@...nel.org>,
Sami Tolvanen <samitolvanen@...gle.com>,
Kees Cook <keescook@...omium.org>,
Catalin Marinas <catalin.marinas@....com>,
Mark Rutland <mark.rutland@...com>,
Jann Horn <jannh@...gle.com>, Ard Biesheuvel <ardb@...nel.org>,
Peter Zijlstra <peterz@...radead.org>, kernel-team@...roid.com
Subject: [PATCH 2/6] scs: Move accounting into alloc/free functions

There's no need to perform the shadow stack page accounting independently
of the lifetime of the underlying allocation, so call the accounting code
from the {alloc,free}() functions and simplify the code in the process.

Signed-off-by: Will Deacon <will@...nel.org>
---
kernel/scs.c | 45 +++++++++++++++++++++------------------------
1 file changed, 21 insertions(+), 24 deletions(-)
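
For illustration, here is a minimal, self-contained user-space sketch of
the pattern this patch applies: the accounting is tied to the lifetime of
the allocation by doing it inside the alloc/free pair, so callers never
have to remember it. All names below (demo_alloc, demo_free, demo_account,
accounted_kb, DEMO_SIZE) are made up for this sketch only; the real code
uses the scs_cache kmem_cache and mod_zone_page_state(), as in the diff
that follows.

#include <stdio.h>
#include <stdlib.h>

#define DEMO_SIZE 1024UL	/* stand-in for SCS_SIZE */

static long accounted_kb;	/* stand-in for NR_KERNEL_SCS_KB */

/* Accounting helper, called only from the alloc/free pair below. */
static void demo_account(void *s, int account)
{
	(void)s;	/* the real helper derives the page/zone from s */
	accounted_kb += account * (long)(DEMO_SIZE / 1024);
}

static void *demo_alloc(void)
{
	void *s = malloc(DEMO_SIZE);

	if (!s)
		return NULL;

	demo_account(s, 1);	/* account at allocation time */
	return s;
}

static void demo_free(void *s)
{
	demo_account(s, -1);	/* un-account before freeing */
	free(s);
}

int main(void)
{
	/* Callers just pair alloc/free; the accounting follows the object. */
	void *s = demo_alloc();

	printf("after alloc: %ld KB accounted\n", accounted_kb);
	demo_free(s);
	printf("after free:  %ld KB accounted\n", accounted_kb);
	return 0;
}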

diff --git a/kernel/scs.c b/kernel/scs.c
index 5ff8663e4a67..aea841cd7586 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -14,25 +14,35 @@
static struct kmem_cache *scs_cache;

+static void __scs_account(void *s, int account)
+{
+ struct page *scs_page = virt_to_page(s);
+
+ mod_zone_page_state(page_zone(scs_page), NR_KERNEL_SCS_KB,
+ account * (SCS_SIZE / SZ_1K));
+}
+
static void *scs_alloc(int node)
{
- void *s;
-
- s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
- if (s) {
- *__scs_magic(s) = SCS_END_MAGIC;
- /*
- * Poison the allocation to catch unintentional accesses to
- * the shadow stack when KASAN is enabled.
- */
- kasan_poison_object_data(scs_cache, s);
- }
+ void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
+
+ if (!s)
+ return NULL;
+ *__scs_magic(s) = SCS_END_MAGIC;
+
+ /*
+ * Poison the allocation to catch unintentional accesses to
+ * the shadow stack when KASAN is enabled.
+ */
+ kasan_poison_object_data(scs_cache, s);
+ __scs_account(s, 1);
return s;
}

static void scs_free(void *s)
{
+ __scs_account(s, -1);
kasan_unpoison_object_data(scs_cache, s);
kmem_cache_free(scs_cache, s);
}
@@ -42,17 +52,6 @@ void __init scs_init(void)
scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
}

-static struct page *__scs_page(struct task_struct *tsk)
-{
- return virt_to_page(task_scs(tsk));
-}
-
-static void scs_account(struct task_struct *tsk, int account)
-{
- mod_zone_page_state(page_zone(__scs_page(tsk)), NR_KERNEL_SCS_KB,
- account * (SCS_SIZE / 1024));
-}
-
int scs_prepare(struct task_struct *tsk, int node)
{
void *s = scs_alloc(node);
@@ -61,7 +60,6 @@ int scs_prepare(struct task_struct *tsk, int node)
return -ENOMEM;

task_scs(tsk) = task_scs_sp(tsk) = s;
- scs_account(tsk, 1);
return 0;
}
@@ -102,6 +100,5 @@ void scs_release(struct task_struct *tsk)
WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n");
scs_check_usage(tsk);
- scs_account(tsk, -1);
scs_free(s);
}
--
2.26.2.761.g0e0b3e54be-goog