Message-ID: <20231102185934.773885-10-cmllamas@google.com>
Date:   Thu,  2 Nov 2023 18:59:10 +0000
From:   Carlos Llamas <cmllamas@...gle.com>
To:     Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        "Arve Hjønnevåg" <arve@...roid.com>,
        Todd Kjos <tkjos@...roid.com>,
        Martijn Coenen <maco@...roid.com>,
        Joel Fernandes <joel@...lfernandes.org>,
        Christian Brauner <brauner@...nel.org>,
        Carlos Llamas <cmllamas@...gle.com>,
        Suren Baghdasaryan <surenb@...gle.com>
Cc:     linux-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: [PATCH 09/21] binder: split up binder_update_page_range()

The binder_update_page_range() function performs both allocation and
freeing of binder pages. However, these two operations are unrelated
and share no common logic. In fact, when a free operation is requested,
the allocation logic is skipped entirely. This conflation makes the
error path unnecessarily complex. To improve readability, split the
allocation and freeing operations into separate functions:
binder_allocate_page_range() and binder_free_page_range().

No functional changes are introduced by this patch.

Signed-off-by: Carlos Llamas <cmllamas@...gle.com>
---
 drivers/android/binder_alloc.c | 83 ++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 39 deletions(-)
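
A quick sketch of how the call sites change (alloc, start and end here
are placeholders, not the exact expressions used in the diff):

	/* Before: one function, a flag selects allocate vs. free */
	ret = binder_update_page_range(alloc, 1, start, end); /* allocate */
	binder_update_page_range(alloc, 0, start, end);       /* free */

	/* After: two single-purpose functions; only allocation can fail */
	ret = binder_allocate_page_range(alloc, start, end);
	binder_free_page_range(alloc, start, end);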

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index b99d9845d69a..27c7163761c4 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -175,8 +175,36 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 	return buffer;
 }
 
-static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
-				    unsigned long start, unsigned long end)
+static void binder_free_page_range(struct binder_alloc *alloc,
+				   unsigned long start, unsigned long end)
+{
+	struct binder_lru_page *page;
+	unsigned long page_addr;
+
+	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+			   "%d: free pages %lx-%lx\n",
+			   alloc->pid, start, end);
+
+	trace_binder_update_page_range(alloc, false, start, end);
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		size_t index;
+		int ret;
+
+		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		page = &alloc->pages[index];
+
+		trace_binder_free_lru_start(alloc, index);
+
+		ret = list_lru_add(&binder_alloc_lru, &page->lru);
+		WARN_ON(!ret);
+
+		trace_binder_free_lru_end(alloc, index);
+	}
+}
+
+static int binder_allocate_page_range(struct binder_alloc *alloc,
+				      unsigned long start, unsigned long end)
 {
 	struct vm_area_struct *vma = NULL;
 	struct binder_lru_page *page;
@@ -185,16 +213,13 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	bool need_mm = false;
 
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			   "%d: %s allocate pages %lx-%lx\n", alloc->pid,
-			   allocate ? "allocate" : "free", start, end);
+			   "%d: allocate pages %lx-%lx\n",
+			   alloc->pid, start, end);
 
 	if (end <= start)
 		return 0;
 
-	trace_binder_update_page_range(alloc, allocate, start, end);
-
-	if (allocate == 0)
-		goto free_range;
+	trace_binder_update_page_range(alloc, true, start, end);
 
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
@@ -270,32 +295,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	}
 	return 0;
 
-free_range:
-	for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
-		bool ret;
-		size_t index;
-
-		index = (page_addr - alloc->buffer) / PAGE_SIZE;
-		page = &alloc->pages[index];
-
-		trace_binder_free_lru_start(alloc, index);
-
-		ret = list_lru_add(&binder_alloc_lru, &page->lru);
-		WARN_ON(!ret);
-
-		trace_binder_free_lru_end(alloc, index);
-		if (page_addr == start)
-			break;
-		continue;
-
 err_vm_insert_page_failed:
-		__free_page(page->page_ptr);
-		page->page_ptr = NULL;
+	__free_page(page->page_ptr);
+	page->page_ptr = NULL;
 err_alloc_page_failed:
 err_page_ptr_cleared:
-		if (page_addr == start)
-			break;
-	}
+	binder_free_page_range(alloc, start, page_addr);
 err_no_vma:
 	if (mm) {
 		mmap_write_unlock(mm);
@@ -479,8 +484,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	end_page_addr = PAGE_ALIGN(buffer->user_data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
-	ret = binder_update_page_range(alloc, 1, PAGE_ALIGN(buffer->user_data),
-				       end_page_addr);
+	ret = binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
+					 end_page_addr);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -531,8 +536,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	return buffer;
 
 err_alloc_buf_struct_failed:
-	binder_update_page_range(alloc, 0, PAGE_ALIGN(buffer->user_data),
-				 end_page_addr);
+	binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
+			       end_page_addr);
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -620,8 +625,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 				   alloc->pid, buffer->user_data,
 				   prev->user_data,
 				   next ? next->user_data : 0);
-		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
-					 buffer_start_page(buffer) + PAGE_SIZE);
+		binder_free_page_range(alloc, buffer_start_page(buffer),
+				       buffer_start_page(buffer) + PAGE_SIZE);
 	}
 	list_del(&buffer->entry);
 	kfree(buffer);
@@ -656,8 +661,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 			      alloc->pid, size, alloc->free_async_space);
 	}
 
-	binder_update_page_range(alloc, 0, PAGE_ALIGN(buffer->user_data),
-				 (buffer->user_data + buffer_size) & PAGE_MASK);
+	binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
+			       (buffer->user_data + buffer_size) & PAGE_MASK);
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
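
A note on the error path in binder_allocate_page_range(): if
vm_insert_page() fails part-way through the loop, the current page is
freed on the spot and the pages already inserted in [start, page_addr)
are handed back to the LRU by reusing binder_free_page_range(), which
replaces the old backward-walking free loop. A simplified sketch of the
pattern, where alloc_and_insert_one_page() is a hypothetical stand-in
for the per-page work (the real loop also takes the mmap lock and emits
tracepoints):

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		if (alloc_and_insert_one_page(alloc, page_addr))
			goto err; /* current page already cleaned up */
	}
	return 0;
err:
	/* pages in [start, page_addr) are live; put them on the LRU */
	binder_free_page_range(alloc, start, page_addr);
	return -ENOMEM;
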
-- 
2.42.0.869.gea05f2083d-goog
