lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Fri,  1 Dec 2023 17:21:45 +0000
From:   Carlos Llamas <cmllamas@...gle.com>
To:     Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        "Arve Hjønnevåg" <arve@...roid.com>,
        Todd Kjos <tkjos@...roid.com>,
        Martijn Coenen <maco@...roid.com>,
        Joel Fernandes <joel@...lfernandes.org>,
        Christian Brauner <brauner@...nel.org>,
        Carlos Llamas <cmllamas@...gle.com>,
        Suren Baghdasaryan <surenb@...gle.com>
Cc:     linux-kernel@...r.kernel.org, kernel-team@...roid.com,
        Alice Ryhl <aliceryhl@...gle.com>
Subject: [PATCH v2 16/28] binder: refactor page range allocation

Instead of looping through the page range twice — first to determine
whether the mmap lock is required — take the lock per-page as needed.
Split out all this logic into a separate binder_install_single_page()
function.

Reviewed-by: Alice Ryhl <aliceryhl@...gle.com>
Signed-off-by: Carlos Llamas <cmllamas@...gle.com>
---
 drivers/android/binder_alloc.c | 107 +++++++++++++++------------------
 1 file changed, 47 insertions(+), 60 deletions(-)

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 99eacd8782b8..1caf0e3d3451 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -199,14 +199,51 @@ static void binder_free_page_range(struct binder_alloc *alloc,
 	}
 }
 
+static int binder_install_single_page(struct binder_alloc *alloc,
+				      struct binder_lru_page *lru_page,
+				      unsigned long addr)
+{
+	struct page *page;
+	int ret = 0;
+
+	if (!mmget_not_zero(alloc->mm))
+		return -ESRCH;
+
+	mmap_write_lock(alloc->mm);
+	if (!alloc->vma) {
+		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
+		ret = -ESRCH;
+		goto out;
+	}
+
+	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+	if (!page) {
+		pr_err("%d: failed to allocate page\n", alloc->pid);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = vm_insert_page(alloc->vma, addr, page);
+	if (ret) {
+		pr_err("%d: %s failed to insert page at %lx with %d\n",
+		       alloc->pid, __func__, addr, ret);
+		__free_page(page);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	lru_page->page_ptr = page;
+out:
+	mmap_write_unlock(alloc->mm);
+	mmput_async(alloc->mm);
+	return ret;
+}
+
 static int binder_allocate_page_range(struct binder_alloc *alloc,
 				      unsigned long start, unsigned long end)
 {
-	struct vm_area_struct *vma = NULL;
 	struct binder_lru_page *page;
-	struct mm_struct *mm = NULL;
 	unsigned long page_addr;
-	bool need_mm = false;
 
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 			   "%d: allocate pages %lx-%lx\n",
@@ -218,32 +255,9 @@ static int binder_allocate_page_range(struct binder_alloc *alloc,
 	trace_binder_update_page_range(alloc, true, start, end);
 
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
-		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
-		if (!page->page_ptr) {
-			need_mm = true;
-			break;
-		}
-	}
-
-	if (need_mm && mmget_not_zero(alloc->mm))
-		mm = alloc->mm;
-
-	if (mm) {
-		mmap_write_lock(mm);
-		vma = alloc->vma;
-	}
-
-	if (!vma && need_mm) {
-		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
-				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
-				   alloc->pid);
-		goto err_no_vma;
-	}
-
-	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
-		int ret;
+		unsigned long index;
 		bool on_lru;
-		size_t index;
+		int ret;
 
 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
@@ -258,26 +272,15 @@ static int binder_allocate_page_range(struct binder_alloc *alloc,
 			continue;
 		}
 
-		if (WARN_ON(!vma))
-			goto err_page_ptr_cleared;
-
 		trace_binder_alloc_page_start(alloc, index);
-		page->page_ptr = alloc_page(GFP_KERNEL |
-					    __GFP_HIGHMEM |
-					    __GFP_ZERO);
-		if (!page->page_ptr) {
-			pr_err("%d: binder_alloc_buf failed for page at %lx\n",
-			       alloc->pid, page_addr);
-			goto err_alloc_page_failed;
-		}
+
 		page->alloc = alloc;
 		INIT_LIST_HEAD(&page->lru);
 
-		ret = vm_insert_page(vma, page_addr, page->page_ptr);
+		ret = binder_install_single_page(alloc, page, page_addr);
 		if (ret) {
-			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
-			       alloc->pid, page_addr);
-			goto err_vm_insert_page_failed;
+			binder_free_page_range(alloc, start, page_addr);
+			return ret;
 		}
 
 		if (index + 1 > alloc->pages_high)
@@ -285,24 +288,8 @@ static int binder_allocate_page_range(struct binder_alloc *alloc,
 
 		trace_binder_alloc_page_end(alloc, index);
 	}
-	if (mm) {
-		mmap_write_unlock(mm);
-		mmput_async(mm);
-	}
-	return 0;
 
-err_vm_insert_page_failed:
-	__free_page(page->page_ptr);
-	page->page_ptr = NULL;
-err_alloc_page_failed:
-err_page_ptr_cleared:
-	binder_free_page_range(alloc, start, page_addr);
-err_no_vma:
-	if (mm) {
-		mmap_write_unlock(mm);
-		mmput_async(mm);
-	}
-	return vma ? -ENOMEM : -ESRCH;
+	return 0;
 }
 
 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
-- 
2.43.0.rc2.451.g8631bc7472-goog

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ