Message-ID: <20241105200258.2380168-6-cmllamas@google.com>
Date: Tue, 5 Nov 2024 20:02:47 +0000
From: Carlos Llamas <cmllamas@...gle.com>
To: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
	"Arve Hjønnevåg" <arve@...roid.com>, Todd Kjos <tkjos@...roid.com>,
	Martijn Coenen <maco@...roid.com>, Joel Fernandes <joel@...lfernandes.org>,
	Christian Brauner <brauner@...nel.org>, Carlos Llamas <cmllamas@...gle.com>,
	Suren Baghdasaryan <surenb@...gle.com>
Cc: linux-kernel@...r.kernel.org, kernel-team@...roid.com,
	Minchan Kim <minchan@...nel.org>, "Liam R. Howlett" <Liam.Howlett@...cle.com>,
	Matthew Wilcox <willy@...radead.org>
Subject: [PATCH 5/8] binder: use alloc->mapped to save the vma state

It is unsafe to use alloc->vma outside of the mmap_lock. Instead, add a
new boolean alloc->mapped to save the vma state (mapped or unmapped) and
use it to validate the paths that currently rely on the vma pointer.
This change is in preparation for removing the alloc->vma pointer in a
subsequent patch.
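
The flag is published with a store-release and checked with a
load-acquire, so a reader that observes alloc->mapped == true is also
guaranteed to observe the fully initialized binder_alloc state. As a
minimal standalone C11 sketch of the same release/acquire publish
pattern (illustrative only, all names are made up; the kernel code
uses smp_store_release()/smp_load_acquire(), not C11 atomics):

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  struct fake_alloc {
  	size_t buffer_size;	/* initialized before publishing */
  	atomic_bool mapped;	/* written last, checked first */
  };

  static void fake_mmap(struct fake_alloc *a)
  {
  	a->buffer_size = 4096;
  	/* pairs with the load-acquire in fake_new_buf() */
  	atomic_store_explicit(&a->mapped, true, memory_order_release);
  }

  static void fake_new_buf(struct fake_alloc *a)
  {
  	/* pairs with the store-release in fake_mmap() */
  	if (!atomic_load_explicit(&a->mapped, memory_order_acquire)) {
  		puts("no vma");
  		return;
  	}
  	/* safe: buffer_size is visible once mapped reads true */
  	printf("buffer_size=%zu\n", a->buffer_size);
  }

  int main(void)
  {
  	struct fake_alloc a = { .buffer_size = 0, .mapped = false };

  	fake_mmap(&a);
  	fake_new_buf(&a);
  	return 0;
  }
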
Cc: Minchan Kim <minchan@...nel.org>
Cc: Liam R. Howlett <Liam.Howlett@...cle.com>
Cc: Matthew Wilcox <willy@...radead.org>
Cc: Suren Baghdasaryan <surenb@...gle.com>
Signed-off-by: Carlos Llamas <cmllamas@...gle.com>
---
 drivers/android/binder_alloc.c          | 29 ++++++++++++++++++-------
 drivers/android/binder_alloc.h          |  2 ++
 drivers/android/binder_alloc_selftest.c |  2 +-
 3 files changed, 24 insertions(+), 9 deletions(-)

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 98606e1ba687..aa299cbdab92 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -221,6 +221,19 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
 	}
 }
 
+static inline
+void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
+{
+	/* pairs with smp_load_acquire in binder_alloc_is_mapped() */
+	smp_store_release(&alloc->mapped, state);
+}
+
+static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
+{
+	/* pairs with smp_store_release in binder_alloc_set_mapped() */
+	return smp_load_acquire(&alloc->mapped);
+}
+
 static int binder_install_single_page(struct binder_alloc *alloc,
 				      unsigned long index,
 				      unsigned long addr)
@@ -246,7 +259,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
 
 	mmap_read_lock(alloc->mm);
 	vma = vma_lookup(alloc->mm, addr);
-	if (!vma || vma != alloc->vma) {
+	if (!vma || !binder_alloc_is_mapped(alloc)) {
 		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
 		ret = -ESRCH;
 		goto unlock;
@@ -603,7 +616,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	int ret;
 
 	/* Check binder_alloc is fully initialized */
-	if (!binder_alloc_get_vma(alloc)) {
+	if (!binder_alloc_is_mapped(alloc)) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
@@ -885,7 +898,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	alloc->free_async_space = alloc->buffer_size / 2;
 
 	/* Signal binder_alloc is fully initialized */
-	binder_alloc_set_vma(alloc, vma);
+	binder_alloc_set_mapped(alloc, true);
 
 	return 0;
 
@@ -915,7 +928,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
-	BUG_ON(alloc->vma);
+	BUG_ON(alloc->mapped);
 
 	while ((n = rb_first(&alloc->allocated_buffers))) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -1021,7 +1034,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 	 * Make sure the binder_alloc is fully initialized, otherwise we might
 	 * read inconsistent state.
 	 */
-	if (binder_alloc_get_vma(alloc) != NULL) {
+	if (binder_alloc_is_mapped(alloc)) {
 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 			page = binder_get_installed_page(alloc, i);
 			if (!page)
@@ -1061,12 +1074,12 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  * @alloc: binder_alloc for this proc
  *
  * Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
+ * Clears alloc->mapped to prevent new incoming transactions from
  * allocating more buffers.
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	binder_alloc_set_vma(alloc, NULL);
+	binder_alloc_set_mapped(alloc, false);
 }
 
 /**
@@ -1103,7 +1116,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	page_addr = alloc->buffer + index * PAGE_SIZE;
 	vma = vma_lookup(mm, page_addr);
-	if (vma && vma != binder_alloc_get_vma(alloc))
+	if (vma && !binder_alloc_is_mapped(alloc))
 		goto err_invalid_vma;
 
 	trace_binder_unmap_kernel_start(alloc, index);
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 5c2473e95494..89f6fa25c670 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -75,6 +75,7 @@ struct binder_buffer {
  * @buffer_size:        size of address space specified via mmap
  * @pid:                pid for associated binder_proc (invariant after init)
  * @pages_high:         high watermark of offset in @pages
+ * @mapped:             whether the vm area is mapped
  * @oneway_spam_detected: %true if oneway spam detection fired, clear that
  * flag once the async buffer has returned to a healthy state
  *
@@ -96,6 +97,7 @@ struct binder_alloc {
 	size_t buffer_size;
 	int pid;
 	size_t pages_high;
+	bool mapped;
 	bool oneway_spam_detected;
 };
 
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index c6941b9abad9..2dda82d0d5e8 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -291,7 +291,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
 	if (!binder_selftest_run)
 		return;
 	mutex_lock(&binder_selftest_lock);
-	if (!binder_selftest_run || !alloc->vma)
+	if (!binder_selftest_run || !alloc->mapped)
 		goto done;
 	pr_info("STARTED\n");
 	binder_selftest_alloc_offset(alloc, end_offset, 0);
--
2.47.0.199.ga7371fff76-goog