Message-Id: <20230719113001.2023703-2-shikemeng@huaweicloud.com>
Date: Wed, 19 Jul 2023 19:29:58 +0800
From: Kemeng Shi <shikemeng@...weicloud.com>
To: akpm@...ux-foundation.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: shikemeng@...weicloud.com
Subject: [PATCH 1/4] mm/compaction: use "spinlock_t *" to record held lock in compact [un]lock functions

Make compact_lock_irqsave and compact_unlock_should_abort use
"spinlock_t *" to record the held lock.

This is a preparation for using compact_unlock_should_abort in
isolate_migratepages_block to remove repeated code.
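
To illustrate the idea outside the kernel tree (not part of this
patch), here is a minimal userspace sketch of the pattern, with
pthread_spinlock_t standing in for the kernel's spinlock_t: recording
the held lock as a pointer lets the unlock helper find the lock on its
own, where a bool would force callers to pass the lock separately.

/*
 * Minimal userspace sketch (illustrative only): pthread_spinlock_t
 * stands in for the kernel's spinlock_t.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_spinlock_t zone_lock;

/* Drop whatever lock *locked records, then clear the record. */
static void unlock_if_held(pthread_spinlock_t **locked)
{
	if (*locked) {
		pthread_spin_unlock(*locked);
		*locked = NULL;		/* NULL means "no lock held" */
	}
}

int main(void)
{
	/* Plays the role of the old "bool locked = false". */
	pthread_spinlock_t *locked = NULL;

	pthread_spin_init(&zone_lock, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&zone_lock);
	locked = &zone_lock;		/* record which lock is held */

	/* The helper only needs the record, not the lock itself. */
	unlock_if_held(&locked);
	printf("lock held after helper: %s\n", locked ? "yes" : "no");

	pthread_spin_destroy(&zone_lock);
	return 0;
}

Built with "cc -pthread", this prints "no": after unlock_if_held()
the NULL pointer itself tells the caller that no lock is held.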
Signed-off-by: Kemeng Shi <shikemeng@...weicloud.com>
---
mm/compaction.c | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 9641e2131901..dfef14d3ef78 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -523,22 +523,22 @@ static bool test_and_set_skip(struct compact_control *cc, struct page *page)
  * abort when the current block is finished regardless of success rate.
  * Sync compaction acquires the lock.
  *
- * Always returns true which makes it easier to track lock state in callers.
+ * Always returns lock which makes it easier to track lock state in callers.
  */
-static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
+static spinlock_t *compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 						struct compact_control *cc)
 	__acquires(lock)
 {
 	/* Track if the lock is contended in async mode */
 	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
 		if (spin_trylock_irqsave(lock, *flags))
-			return true;
+			return lock;
 
 		cc->contended = true;
 	}
 
 	spin_lock_irqsave(lock, *flags);
-	return true;
+	return lock;
 }
 
 /*
@@ -553,12 +553,12 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
  * Returns true if compaction should abort due to fatal signal pending.
  * Returns false when compaction can continue.
  */
-static bool compact_unlock_should_abort(spinlock_t *lock,
-		unsigned long flags, bool *locked, struct compact_control *cc)
+static bool compact_unlock_should_abort(spinlock_t **locked,
+		unsigned long flags, struct compact_control *cc)
 {
 	if (*locked) {
-		spin_unlock_irqrestore(lock, flags);
-		*locked = false;
+		spin_unlock_irqrestore(*locked, flags);
+		*locked = NULL;
 	}
 
 	if (fatal_signal_pending(current)) {
@@ -586,7 +586,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor;
 	unsigned long flags = 0;
-	bool locked = false;
+	spinlock_t *locked = NULL;
 	unsigned long blockpfn = *start_pfn;
 	unsigned int order;
 
@@ -607,8 +607,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		 * pending.
 		 */
 		if (!(blockpfn % COMPACT_CLUSTER_MAX)
-		    && compact_unlock_should_abort(&cc->zone->lock, flags,
-								&locked, cc))
+		    && compact_unlock_should_abort(&locked, flags, cc))
 			break;
 
 		nr_scanned++;
@@ -673,7 +672,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	}
 
 	if (locked)
-		spin_unlock_irqrestore(&cc->zone->lock, flags);
+		spin_unlock_irqrestore(locked, flags);
 
 	/*
 	 * There is a tiny chance that we have read bogus compound_order(),
--
2.30.0