Message-ID: <20190530215426.13974.82813.stgit@localhost.localdomain>
Date: Thu, 30 May 2019 14:54:26 -0700
From: Alexander Duyck <alexander.duyck@...il.com>
To: nitesh@...hat.com, kvm@...r.kernel.org, david@...hat.com,
mst@...hat.com, dave.hansen@...el.com,
linux-kernel@...r.kernel.org, linux-mm@...ck.org
Cc: yang.zhang.wz@...il.com, pagupta@...hat.com, riel@...riel.com,
konrad.wilk@...cle.com, lcapitulino@...hat.com,
wei.w.wang@...el.com, aarcange@...hat.com, pbonzini@...hat.com,
dan.j.williams@...el.com, alexander.h.duyck@...ux.intel.com
Subject: [RFC PATCH 07/11] mm: Add support for acquiring first free "raw" or
"untreated" page in zone
From: Alexander Duyck <alexander.h.duyck@...ux.intel.com>
In order to be able to "treat" memory in an asynchronous fashion, we need a
way to acquire a block of memory that isn't already treated, and then flush
it back in such a way that we will not pick it back up again.
To achieve that, this patch adds a pair of functions: one to fill a list
with pages to be treated, and another to flush the list back out to the
buddy allocator.
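For illustration only (not part of this patch), a caller such as the
aerator introduced later in this series might drive these two helpers
roughly as sketched below. The example_treat_page() stub is purely
hypothetical, and holding zone->lock around get_raw_pages() is an
assumption based on the free-list manipulation it performs; the real
caller is added in a later patch.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>

/* Hypothetical treatment step, e.g. reporting the page to the hypervisor */
static void example_treat_page(struct page *page, unsigned int order)
{
}

static void example_aerate(struct zone *zone, unsigned int order,
			   int migratetype)
{
	struct page *page;

	for (;;) {
		/* Pull one "raw" page off the zone's free lists */
		spin_lock_irq(&zone->lock);
		page = get_raw_pages(zone, order, migratetype);
		spin_unlock_irq(&zone->lock);

		/* No untreated pages left for this order/migratetype */
		if (!page)
			break;

		example_treat_page(page, order);

		/* Return the now-treated page to the buddy allocator */
		free_treated_page(page);
	}
}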
Signed-off-by: Alexander Duyck <alexander.h.duyck@...ux.intel.com>
---
include/linux/gfp.h | 6 +++
mm/page_alloc.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 113 insertions(+)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fb07b503dc45..407a089d861f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -559,6 +559,12 @@ extern void *page_frag_alloc(struct page_frag_cache *nc,
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
+#ifdef CONFIG_AERATION
+struct page *get_raw_pages(struct zone *zone, unsigned int order,
+ int migratetype);
+void free_treated_page(struct page *page);
+#endif
+
void page_alloc_init_late(void);
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f4a629b6af96..e79c65413dc9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2155,6 +2155,113 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
return NULL;
}
+#ifdef CONFIG_AERATION
+static struct page *get_raw_page_from_free_area(struct free_area *area,
+ int migratetype)
+{
+ struct list_head *head = &area->free_list[migratetype];
+ struct page *page;
+
+ /* If we have not worked in this free_list before, reset the membrane */
+ if (area->treatment_mt != migratetype) {
+ area->treatment_mt = migratetype;
+ area->membrane = head;
+ }
+
+ /* Try to pull in any untreated pages above the membrane */
+ page = list_last_entry(area->membrane, struct page, lru);
+ list_for_each_entry_from_reverse(page, head, lru) {
+ /*
+ * If the page in front of the membrane is treated then try
+ * skimming the top to see if we have any untreated pages
+ * up there.
+ */
+ if (PageTreated(page)) {
+ page = list_first_entry(head, struct page, lru);
+ if (PageTreated(page))
+ break;
+ }
+
+ /* update state of treatment */
+ area->treatment_state = TREATMENT_AERATING;
+
+ return page;
+ }
+
+ /*
+ * At this point there are no longer any untreated pages between
+ * the membrane and the first entry of the list. So we can safely
+ * set the membrane to the top of the treated region and mark
+ * the current migratetype as complete for now.
+ */
+ area->membrane = &page->lru;
+ area->treatment_state = TREATMENT_SETTLING;
+
+ return NULL;
+}
+
+/**
+ * get_raw_pages - Provide a "raw" page for treatment by the aerator
+ * @zone: Zone to draw pages from
+ * @order: Order to draw pages from
+ * @migratetype: Migratetype to draw pages from
+ *
+ * This function will obtain a page that does not have the Treated value
+ * set in the page type field. It will attempt to fetch a "raw" page from
+ * just above the "membrane" and if that is not available it will attempt
+ * to pull a "raw" page from the head of the free list.
+ *
+ * The page will have the migrate type and order stored in the page
+ * metadata.
+ *
+ * Return: page pointer if raw page found, otherwise NULL
+ */
+struct page *get_raw_pages(struct zone *zone, unsigned int order,
+ int migratetype)
+{
+ struct free_area *area = &(zone->free_area[order]);
+ struct page *page;
+
+ /* Find a page of the appropriate size in the preferred list */
+ page = get_raw_page_from_free_area(area, migratetype);
+ if (page) {
+ del_page_from_free_area(page, area);
+
+ /* record migratetype and order within page */
+ set_pcppage_migratetype(page, migratetype);
+ set_page_private(page, order);
+ __mod_zone_freepage_state(zone, -(1 << order), migratetype);
+ }
+
+ return page;
+}
+EXPORT_SYMBOL_GPL(get_raw_pages);
+
+/**
+ * free_treated_page - Return a now-treated "raw" page back where we got it
+ * @page: Previously "raw" page that can now be returned after treatment
+ *
+ * This function will pull the zone, migratetype, and order information out
+ * of the page and attempt to return it where it found it. We default to
+ * using free_one_page to return the page as it is possible that the
+ * pageblock might have been switched to an isolate migratetype during
+ * treatment.
+ */
+void free_treated_page(struct page *page)
+{
+ unsigned int order, mt;
+ struct zone *zone;
+
+ zone = page_zone(page);
+ mt = get_pcppage_migratetype(page);
+ order = page_private(page);
+
+ set_page_private(page, 0);
+
+ free_one_page(zone, page, page_to_pfn(page), order, mt);
+}
+EXPORT_SYMBOL_GPL(free_treated_page);
+#endif /* CONFIG_AERATION */
/*
* This array describes the order lists are fallen back to when