Message-Id: <20211008161922.942459-2-nsaenzju@redhat.com>
Date:   Fri,  8 Oct 2021 18:19:20 +0200
From:   Nicolas Saenz Julienne <nsaenzju@...hat.com>
To:     akpm@...ux-foundation.org
Cc:     linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        frederic@...nel.org, tglx@...utronix.de, peterz@...radead.org,
        mtosatti@...hat.com, nilal@...hat.com, mgorman@...e.de,
        linux-rt-users@...r.kernel.org, vbabka@...e.cz, cl@...ux.com,
        paulmck@...nel.org, ppandit@...hat.com,
        Nicolas Saenz Julienne <nsaenzju@...hat.com>
Subject: [RFC 1/3] mm/page_alloc: Simplify __rmqueue_pcplist()'s arguments

Both callers of __rmqueue_pcplist() extract the right list from their
per-cpu lists in the same way: by computing an index from the page's
migratetype and order. Both values are already passed to
__rmqueue_pcplist(), so centralize the list lookup inside the function
and drop the extra argument.
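
For illustration only, and not part of the patch itself: a minimal,
self-contained sketch of the refactoring pattern, using simplified
stand-in types and a toy index function in place of the real
per_cpu_pages and order_to_pindex() definitions. The point is the
same: the callee already receives migratetype and order, so it can
derive the list itself instead of every caller duplicating the lookup.

#include <stdio.h>

#define NR_LISTS 8

struct pcp_lists {
	int lists[NR_LISTS];	/* stand-in for pcp->lists[] */
};

/* toy stand-in for order_to_pindex() */
static int list_index(int migratetype, unsigned int order)
{
	return (migratetype + (int)order) % NR_LISTS;
}

/*
 * After the change: the callee derives the list from data it
 * already has, so callers pass one argument fewer.
 */
static int *get_list(struct pcp_lists *pcp, int migratetype,
		     unsigned int order)
{
	return &pcp->lists[list_index(migratetype, order)];
}

int main(void)
{
	struct pcp_lists pcp = { .lists = { 0 } };

	/* Callers no longer compute the index themselves. */
	*get_list(&pcp, 1, 0) += 1;
	*get_list(&pcp, 1, 0) += 1;

	printf("list[%d] = %d\n",
	       list_index(1, 0), pcp.lists[list_index(1, 0)]);
	return 0;
}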

Signed-off-by: Nicolas Saenz Julienne <nsaenzju@...hat.com>
---
 mm/page_alloc.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b37435c274cf..dd89933503b4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3600,11 +3600,13 @@ static inline
 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
 			int migratetype,
 			unsigned int alloc_flags,
-			struct per_cpu_pages *pcp,
-			struct list_head *list)
+			struct per_cpu_pages *pcp)
 {
+	struct list_head *list;
 	struct page *page;
 
+	list = &pcp->lists[order_to_pindex(migratetype, order)];
+
 	do {
 		if (list_empty(list)) {
 			int batch = READ_ONCE(pcp->batch);
@@ -3643,7 +3645,6 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 			unsigned int alloc_flags)
 {
 	struct per_cpu_pages *pcp;
-	struct list_head *list;
 	struct page *page;
 	unsigned long flags;
 
@@ -3656,8 +3657,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	 */
 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
 	pcp->free_factor >>= 1;
-	list = &pcp->lists[order_to_pindex(migratetype, order)];
-	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
+	page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp);
 	local_unlock_irqrestore(&pagesets.lock, flags);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
@@ -5202,7 +5202,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	struct zone *zone;
 	struct zoneref *z;
 	struct per_cpu_pages *pcp;
-	struct list_head *pcp_list;
 	struct alloc_context ac;
 	gfp_t alloc_gfp;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -5278,7 +5277,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	/* Attempt the batch allocation */
 	local_lock_irqsave(&pagesets.lock, flags);
 	pcp = this_cpu_ptr(zone->per_cpu_pageset);
-	pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
 
 	while (nr_populated < nr_pages) {
 
@@ -5288,8 +5286,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 			continue;
 		}
 
-		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
-								pcp, pcp_list);
+		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, pcp);
 		if (unlikely(!page)) {
 			/* Try and get at least one page */
 			if (!nr_populated)
-- 
2.31.1
