Message-Id: <20230817-free_pcppages_bulk-v1-2-c14574a9f80c@kernel.org>
Date: Thu, 17 Aug 2023 23:05:24 -0700
From: Chris Li <chrisl@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>,
Kemeng Shi <shikemeng@...weicloud.com>
Cc: akpm@...ux-foundation.org, baolin.wang@...ux.alibaba.com,
mgorman@...hsingularity.net, Michal Hocko <mhocko@...e.com>,
david@...hat.com, willy@...radead.org, linux-mm@...ck.org,
Namhyung Kim <namhyung@...gle.com>,
Greg Thelen <gthelen@...gle.com>, linux-kernel@...r.kernel.org,
Chris Li <chrisl@...nel.org>
Subject: [PATCH RFC 2/2] mm/page_alloc: free_pcppages_bulk clean up

This patch has no functional change; it is a pure clean up.

It removes min_pindex and max_pindex and replaces the round-robin
scan with a simpler loop that visits each of the NR_PCP_LISTS lists
at most once, exiting early via "goto out" once enough pages have
been freed.

It uses list_for_each_entry_safe_reverse() to replace the loop over
list_last_entry(). It produces slightly better machine code.
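
For illustration, here is a minimal self-contained sketch of the
wrap-around single pass with early exit. The names are hypothetical
stand-ins: NR_LISTS for NR_PCP_LISTS, counts[] for the per-list page
counts, budget for the "count" argument; locking, migratetype
handling and the tracepoint are omitted.

	#include <stdio.h>

	#define NR_LISTS 4	/* hypothetical stand-in for NR_PCP_LISTS */

	int main(void)
	{
		/* Hypothetical per-list page counts and free budget. */
		int counts[NR_LISTS] = { 0, 3, 0, 2 };
		int budget = 4;		/* stand-in for the "count" argument */
		int pindex = 1;		/* caller-requested start list */
		int i;

		pindex = pindex - 1;	/* ensure requested list is drained first */

		/* Visit each list at most once, wrapping around the array. */
		for (i = 0; i < NR_LISTS; i++) {
			if (++pindex >= NR_LISTS)
				pindex = 0;
			while (counts[pindex] > 0) {
				counts[pindex]--;
				printf("freed one page from list %d\n", pindex);
				if (--budget <= 0)
					goto out;	/* early exit, as in the patch */
			}
		}
	out:
		return 0;
	}
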
Signed-off-by: Chris Li <chrisl@...nel.org>
---
 mm/page_alloc.c | 38 +++++++++++++-------------------------
 1 file changed, 13 insertions(+), 25 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 347cb93081a02..d64d0f5ec70b4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1209,11 +1209,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 					int pindex)
 {
 	unsigned long flags;
-	int min_pindex = 0;
-	int max_pindex = NR_PCP_LISTS - 1;
 	unsigned int order;
 	bool isolated_pageblocks;
-	struct page *page;
+	int i;
 
 	/* Ensure requested pindex is drained first. */
 	pindex = pindex - 1;
@@ -1221,31 +1219,18 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 	spin_lock_irqsave(&zone->lock, flags);
 	isolated_pageblocks = has_isolate_pageblock(zone);
 
-	while (count > 0) {
+	for (i = 0; i < NR_PCP_LISTS; i++) {
 		struct list_head *list;
 		int nr_pages;
+		struct page *page, *next;
 
-		/* Remove pages from lists in a round-robin fashion. */
-		do {
-			if (++pindex > max_pindex)
-				pindex = min_pindex;
-			list = &pcp->lists[pindex];
-			if (!list_empty(list))
-				break;
-
-			if (pindex == max_pindex)
-				max_pindex--;
-			if (pindex == min_pindex)
-				min_pindex++;
-		} while (1);
-
+		if (++pindex >= NR_PCP_LISTS)
+			pindex = 0;
+		list = pcp->lists + pindex;
 		order = pindex_to_order(pindex);
 		nr_pages = 1 << order;
-		do {
-			int mt;
-
-			page = list_last_entry(list, struct page, pcp_list);
-			mt = get_pcppage_migratetype(page);
+		list_for_each_entry_safe_reverse(page, next, list, pcp_list) {
+			int mt = get_pcppage_migratetype(page);
 
 			/* must delete to avoid corrupting pcp list */
 			list_del(&page->pcp_list);
@@ -1260,9 +1245,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 
 			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
 			trace_mm_page_pcpu_drain(page, order, mt);
-		} while (count > 0 && pcp->count > 0 && !list_empty(list));
-	}
+			if (count <= 0 || pcp->count <= 0)
+				goto out;
+		}
+	}
 
+out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
--
2.42.0.rc1.204.g551eb34607-goog