Message-ID: <20231201172212.1813387-23-cmllamas@google.com>
Date:   Fri,  1 Dec 2023 17:21:51 +0000
From:   Carlos Llamas <cmllamas@...gle.com>
To:     Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        "Arve Hjønnevåg" <arve@...roid.com>,
        Todd Kjos <tkjos@...roid.com>,
        Martijn Coenen <maco@...roid.com>,
        Joel Fernandes <joel@...lfernandes.org>,
        Christian Brauner <brauner@...nel.org>,
        Carlos Llamas <cmllamas@...gle.com>,
        Suren Baghdasaryan <surenb@...gle.com>
Cc:     linux-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: [PATCH v2 22/28] binder: rename lru shrinker utilities

Now that the page allocation step is done separately, we should rename
the binder_free_page_range() and binder_allocate_page_range() functions
to more accurately describe what they do: add pages to, or remove pages
from, the shrinker's LRU list. Let's borrow the freelist concept used in
other parts of the kernel for this.

No functional change here.
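
For context, a minimal sketch of the freelist pattern the new names
describe, using the same list_lru calls as binder_alloc.c. The demo_*
names below are hypothetical and exist only for illustration; they are
not part of this patch or the tree:

/*
 * Illustrative sketch only: a list_lru acting as a freelist of
 * reclaimable pages. The demo_* identifiers are hypothetical.
 */
#include <linux/bug.h>
#include <linux/list_lru.h>
#include <linux/mm.h>
#include <linux/shrinker.h>

static struct list_lru demo_freelist;

struct demo_page {
	struct page *page_ptr;
	struct list_head lru;		/* entry in demo_freelist */
};

/*
 * Buffer released: the page stays installed but may now be reclaimed,
 * so it is *added* to the freelist (previously "free_page_range").
 */
static void demo_freelist_add(struct demo_page *page)
{
	WARN_ON(!list_lru_add(&demo_freelist, &page->lru));
}

/*
 * Buffer reused: the page must no longer be reclaimed, so it is
 * *removed* from the freelist (previously "allocate_page_range").
 */
static void demo_freelist_del(struct demo_page *page)
{
	if (page->page_ptr)
		WARN_ON(!list_lru_del(&demo_freelist, &page->lru));
}

/* The shrinker reports and reclaims only what sits on the freelist. */
static unsigned long demo_shrink_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	return list_lru_count(&demo_freelist);
}

The rename follows from this pattern: freeing a buffer adds pages to the
list and reusing one removes them, which the old allocate/free names
described backwards.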

Signed-off-by: Carlos Llamas <cmllamas@...gle.com>
---
 drivers/android/binder_alloc.c          | 40 ++++++++++++-------------
 drivers/android/binder_alloc.h          |  4 +--
 drivers/android/binder_alloc_selftest.c |  6 ++--
 3 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index ad9b73c6ddb7..052a8c3b0ce1 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -26,7 +26,7 @@
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
-struct list_lru binder_alloc_lru;
+struct list_lru binder_freelist;
 
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
@@ -190,8 +190,8 @@ binder_get_installed_page(struct binder_lru_page *lru_page)
 	return smp_load_acquire(&lru_page->page_ptr);
 }
 
-static void binder_free_page_range(struct binder_alloc *alloc,
-				   unsigned long start, unsigned long end)
+static void binder_lru_freelist_add(struct binder_alloc *alloc,
+				    unsigned long start, unsigned long end)
 {
 	struct binder_lru_page *page;
 	unsigned long page_addr;
@@ -210,7 +210,7 @@ static void binder_free_page_range(struct binder_alloc *alloc,
 
 		trace_binder_free_lru_start(alloc, index);
 
-		ret = list_lru_add(&binder_alloc_lru, &page->lru);
+		ret = list_lru_add(&binder_freelist, &page->lru);
 		WARN_ON(!ret);
 
 		trace_binder_free_lru_end(alloc, index);
@@ -299,14 +299,14 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
 }
 
 /* The range of pages should exclude those shared with other buffers */
-static void binder_allocate_page_range(struct binder_alloc *alloc,
-				       unsigned long start, unsigned long end)
+static void binder_lru_freelist_del(struct binder_alloc *alloc,
+				    unsigned long start, unsigned long end)
 {
 	struct binder_lru_page *page;
 	unsigned long page_addr;
 
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			   "%d: allocate pages %lx-%lx\n",
+			   "%d: pages %lx-%lx\n",
 			   alloc->pid, start, end);
 
 	trace_binder_update_page_range(alloc, true, start, end);
@@ -321,7 +321,7 @@ static void binder_allocate_page_range(struct binder_alloc *alloc,
 		if (page->page_ptr) {
 			trace_binder_alloc_lru_start(alloc, index);
 
-			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+			on_lru = list_lru_del(&binder_freelist, &page->lru);
 			WARN_ON(!on_lru);
 
 			trace_binder_alloc_lru_end(alloc, index);
@@ -504,8 +504,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	end_page_addr = PAGE_ALIGN(buffer->user_data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
-	binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
-				   end_page_addr);
+	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
+				end_page_addr);
 
 	rb_erase(&buffer->rb_node, &alloc->free_buffers);
 	buffer->free = 0;
@@ -671,8 +671,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
 				   alloc->pid, buffer->user_data,
 				   prev->user_data,
 				   next ? next->user_data : 0);
-		binder_free_page_range(alloc, buffer_start_page(buffer),
-				       buffer_start_page(buffer) + PAGE_SIZE);
+		binder_lru_freelist_add(alloc, buffer_start_page(buffer),
+					buffer_start_page(buffer) + PAGE_SIZE);
 	}
 	list_del(&buffer->entry);
 	kfree(buffer);
@@ -706,8 +706,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 			      alloc->pid, size, alloc->free_async_space);
 	}
 
-	binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
-			       (buffer->user_data + buffer_size) & PAGE_MASK);
+	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
+				(buffer->user_data + buffer_size) & PAGE_MASK);
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
@@ -953,7 +953,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 			if (!alloc->pages[i].page_ptr)
 				continue;
 
-			on_lru = list_lru_del(&binder_alloc_lru,
+			on_lru = list_lru_del(&binder_freelist,
 					      &alloc->pages[i].lru);
 			page_addr = alloc->buffer + i * PAGE_SIZE;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -1152,13 +1152,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 static unsigned long
 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-	return list_lru_count(&binder_alloc_lru);
+	return list_lru_count(&binder_freelist);
 }
 
 static unsigned long
 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
 			    NULL, sc->nr_to_scan);
 }
 
@@ -1184,13 +1184,13 @@ int binder_alloc_shrinker_init(void)
 {
 	int ret;
 
-	ret = list_lru_init(&binder_alloc_lru);
+	ret = list_lru_init(&binder_freelist);
 	if (ret)
 		return ret;
 
 	binder_shrinker = shrinker_alloc(0, "android-binder");
 	if (!binder_shrinker) {
-		list_lru_destroy(&binder_alloc_lru);
+		list_lru_destroy(&binder_freelist);
 		return -ENOMEM;
 	}
 
@@ -1205,7 +1205,7 @@ int binder_alloc_shrinker_init(void)
 void binder_alloc_shrinker_exit(void)
 {
 	shrinker_free(binder_shrinker);
-	list_lru_destroy(&binder_alloc_lru);
+	list_lru_destroy(&binder_freelist);
 }
 
 /**
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index bbc16bc6d5ac..a5181916942e 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -15,7 +15,7 @@
 #include <linux/list_lru.h>
 #include <uapi/linux/android/binder.h>
 
-extern struct list_lru binder_alloc_lru;
+extern struct list_lru binder_freelist;
 struct binder_transaction;
 
 /**
@@ -61,7 +61,7 @@ struct binder_buffer {
 /**
  * struct binder_lru_page - page object used for binder shrinker
  * @page_ptr: pointer to physical page in mmap'd space
- * @lru:      entry in binder_alloc_lru
+ * @lru:      entry in binder_freelist
  * @alloc:    binder_alloc for a proc
  */
 struct binder_lru_page {
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index ed753747e54c..fba7ab6ca451 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -158,8 +158,8 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
 	int i;
 	unsigned long count;
 
-	while ((count = list_lru_count(&binder_alloc_lru))) {
-		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+	while ((count = list_lru_count(&binder_freelist))) {
+		list_lru_walk(&binder_freelist, binder_alloc_free_page,
 			      NULL, count);
 	}
 
@@ -183,7 +183,7 @@ static void binder_selftest_alloc_free(struct binder_alloc *alloc,
 
 	/* Allocate from lru. */
 	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
-	if (list_lru_count(&binder_alloc_lru))
+	if (list_lru_count(&binder_freelist))
 		pr_err("lru list should be empty but is not\n");
 
 	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
-- 
2.43.0.rc2.451.g8631bc7472-goog
