Message-Id: <1598273705-69124-31-git-send-email-alex.shi@linux.alibaba.com>
Date: Mon, 24 Aug 2020 20:55:03 +0800
From: Alex Shi <alex.shi@...ux.alibaba.com>
To: akpm@...ux-foundation.org, mgorman@...hsingularity.net,
tj@...nel.org, hughd@...gle.com, khlebnikov@...dex-team.ru,
daniel.m.jordan@...cle.com, willy@...radead.org,
hannes@...xchg.org, lkp@...el.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, cgroups@...r.kernel.org,
shakeelb@...gle.com, iamjoonsoo.kim@....com,
richard.weiyang@...il.com, kirill@...temov.name,
alexander.duyck@...il.com, rong.a.chen@...el.com, mhocko@...e.com,
vdavydov.dev@...il.com, shy828301@...il.com
Cc: Alexander Duyck <alexander.h.duyck@...ux.intel.com>,
Stephen Rothwell <sfr@...b.auug.org.au>
Subject: [PATCH v18 30/32] mm: Drop use of test_and_set_skip in favor of just setting skip
From: Alexander Duyck <alexander.h.duyck@...ux.intel.com>
The only user of test_and_set_skip() was isolate_migratepages_block(), and it
was calling it after a call that had already tested and cleared the LRU flag.
As such it no longer needed to be performed under the LRU lock and was no
longer fulfilling its purpose.
Since the skip flag can only be tested and set once we have obtained the LRU
flag for the first page in the pageblock, test_and_set_skip() becomes
redundant: the LRU flag is now what limits the operation to a single thread,
so there is no need for an atomic test-and-set.
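For illustration only (this snippet is not part of the diff below; the label
name is assumed from the earlier TestClearPageLRU patches in this series),
the ordering this relies on is roughly:

	/* Only the thread that clears the LRU flag on the first page of
	 * the pageblock gets this far, so marking the pageblock skipped
	 * needs no atomic test-and-set.
	 */
	if (!TestClearPageLRU(page))
		goto isolate_fail_put;
	...
	if (page == valid_page)
		set_pageblock_skip(page);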
With that being the case we can simply drop test_and_set_skip() and instead
call set_pageblock_skip() directly when the page we are working on is the
valid_page at the start of the pageblock. Any other thread entering this
pageblock should then see the skip bit set on the first valid page in the
pageblock.
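In other words, the skip hint is only written by the thread that owns
valid_page; the check added by the second hunk below boils down to:

	if (page == valid_page) {
		if (!cc->ignore_skip_hint && !cc->no_set_skip_hint)
			set_pageblock_skip(page);
		skip_updated = true;
	}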
Since we have dropped the late abort case we can also drop the code that was
restoring the LRU flag and calling put_page(), as the abort path no longer
holds a reference to a page.
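With that removed, the abort path (last hunk below) is reduced to just
releasing the lruvec lock:

	isolate_abort:
		if (lruvec)
			unlock_page_lruvec_irqrestore(lruvec, flags);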
Signed-off-by: Alexander Duyck <alexander.h.duyck@...ux.intel.com>
Signed-off-by: Alex Shi <alex.shi@...ux.alibaba.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Stephen Rothwell <sfr@...b.auug.org.au>
Cc: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org
---
mm/compaction.c | 53 +++++++++++++----------------------------------------
1 file changed, 13 insertions(+), 40 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index a0e48d079124..9443bc4d763d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -399,29 +399,6 @@ void reset_isolation_suitable(pg_data_t *pgdat)
}
}
-/*
- * Sets the pageblock skip bit if it was clear. Note that this is a hint as
- * locks are not required for read/writers. Returns true if it was already set.
- */
-static bool test_and_set_skip(struct compact_control *cc, struct page *page,
- unsigned long pfn)
-{
- bool skip;
-
- /* Do no update if skip hint is being ignored */
- if (cc->ignore_skip_hint)
- return false;
-
- if (!IS_ALIGNED(pfn, pageblock_nr_pages))
- return false;
-
- skip = get_pageblock_skip(page);
- if (!skip && !cc->no_set_skip_hint)
- set_pageblock_skip(page);
-
- return skip;
-}
-
static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
struct zone *zone = cc->zone;
@@ -480,12 +457,6 @@ static inline void update_pageblock_skip(struct compact_control *cc,
static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}
-
-static bool test_and_set_skip(struct compact_control *cc, struct page *page,
- unsigned long pfn)
-{
- return false;
-}
#endif /* CONFIG_COMPACTION */
/*
@@ -895,7 +866,6 @@ static bool too_many_isolated(pg_data_t *pgdat)
if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
low_pfn = end_pfn;
- page = NULL;
goto isolate_abort;
}
valid_page = page;
@@ -1021,11 +991,20 @@ static bool too_many_isolated(pg_data_t *pgdat)
lruvec_memcg_debug(lruvec, page);
- /* Try get exclusive access under lock */
- if (!skip_updated) {
+ /*
+ * Indicate that we want exclusive access to the
+ * rest of the pageblock.
+ *
+ * The LRU flag prevents simultaneous access to the
+ * first PFN, and the LRU lock helps to prevent
+ * simultaneous update of multiple pageblocks shared
+ * in the same bitmap.
+ */
+ if (page == valid_page) {
+ if (!cc->ignore_skip_hint &&
+ !cc->no_set_skip_hint)
+ set_pageblock_skip(page);
skip_updated = true;
- if (test_and_set_skip(cc, page, low_pfn))
- goto isolate_abort;
}
}
@@ -1098,15 +1077,9 @@ static bool too_many_isolated(pg_data_t *pgdat)
if (unlikely(low_pfn > end_pfn))
low_pfn = end_pfn;
- page = NULL;
-
isolate_abort:
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
- if (page) {
- SetPageLRU(page);
- put_page(page);
- }
/*
* Updated the cached scanner pfn once the pageblock has been scanned
--
1.8.3.1