Message-ID: <aWiy0mDN0Ed9mLiG@MiWiFi-R3L-srv>
Date: Thu, 15 Jan 2026 17:26:42 +0800
From: Baoquan He <bhe@...hat.com>
To: Qi Zheng <qi.zheng@...ux.dev>
Cc: hannes@...xchg.org, hughd@...gle.com, mhocko@...e.com,
roman.gushchin@...ux.dev, shakeel.butt@...ux.dev,
muchun.song@...ux.dev, david@...nel.org, lorenzo.stoakes@...cle.com,
ziy@...dia.com, harry.yoo@...cle.com, yosry.ahmed@...ux.dev,
imran.f.khan@...cle.com, kamalesh.babulal@...cle.com,
axelrasmussen@...gle.com, yuanchu@...gle.com, weixugc@...gle.com,
chenridong@...weicloud.com, mkoutny@...e.com,
akpm@...ux-foundation.org, hamzamahfooz@...ux.microsoft.com,
apais@...ux.microsoft.com, lance.yang@...ux.dev, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, cgroups@...r.kernel.org,
Qi Zheng <zhengqi.arch@...edance.com>
Subject: Re: [PATCH v3 23/30] mm: do not open-code lruvec lock
On 01/14/26 at 07:32pm, Qi Zheng wrote:
> From: Qi Zheng <zhengqi.arch@...edance.com>
>
> Now we have lruvec_unlock(), lruvec_unlock_irq() and
> lruvec_unlock_irqrestore(), but not the paired lruvec_lock(),
~~ typo, s/not/no/ ?
> lruvec_lock_irq() and lruvec_lock_irqsave().
>
> There is currently no use case for lruvec_lock_irqsave(), so only
> introduce lruvec_lock() and lruvec_lock_irq(), and change all open-coded
I didn't see lruvec_lock() being introduced in this patch, did I miss
anything?
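
FWIW, if lruvec_lock() were added, I would expect it to simply mirror
the existing lruvec_unlock(), something like this (sketch only, not
taken from this patch):

static inline void lruvec_lock(struct lruvec *lruvec)
{
	spin_lock(&lruvec->lru_lock);
}

So either the helper should be added here, or the changelog should drop
the mention of it.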
> places to use these helper functions. This looks cleaner and prepares for
> reparenting LRU pages, preventing users from missing RCU lock calls due to
> open-coded lruvec locking.
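
Also, just to check my understanding of the reparenting point: I guess
the benefit of the helpers is that the RCU read lock can later be folded
into them so callers cannot forget it, roughly along these lines
(illustrative only, not what this patch does; the real series may well
structure this differently):

static inline void lruvec_lock_irq(struct lruvec *lruvec)
{
	/* keep the memcg/lruvec association stable against reparenting */
	rcu_read_lock();
	spin_lock_irq(&lruvec->lru_lock);
}

static inline void lruvec_unlock_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
	rcu_read_unlock();
}

If so, the conversion below makes sense to me.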
>
> Signed-off-by: Qi Zheng <zhengqi.arch@...edance.com>
> ---
> include/linux/memcontrol.h | 5 +++++
> mm/vmscan.c | 38 +++++++++++++++++++-------------------
> 2 files changed, 24 insertions(+), 19 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index f1556759d0d3f..4b6f20dc694ba 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -1499,6 +1499,11 @@ static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
> return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
> }
>
> +static inline void lruvec_lock_irq(struct lruvec *lruvec)
> +{
> + spin_lock_irq(&lruvec->lru_lock);
> +}
> +
> static inline void lruvec_unlock(struct lruvec *lruvec)
> {
> spin_unlock(&lruvec->lru_lock);
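
Nit: when lruvec_lock_irqsave() does become needed, I assume it would
follow the same pattern as the existing lruvec_unlock_irqrestore(),
e.g. (sketch only, flags passed by pointer since spin_lock_irqsave()
needs an lvalue to store into):

static inline void lruvec_lock_irqsave(struct lruvec *lruvec,
				       unsigned long *flags)
{
	spin_lock_irqsave(&lruvec->lru_lock, *flags);
}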
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index f206d4dac9e77..c48ff6e05e004 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2020,7 +2020,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
>
> lru_add_drain();
>
> - spin_lock_irq(&lruvec->lru_lock);
> + lruvec_lock_irq(lruvec);
>
> nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
> &nr_scanned, sc, lru);
> @@ -2032,7 +2032,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
> count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
> __count_vm_events(PGSCAN_ANON + file, nr_scanned);
>
> - spin_unlock_irq(&lruvec->lru_lock);
> + lruvec_unlock_irq(lruvec);
>
> if (nr_taken == 0)
> return 0;
> @@ -2051,7 +2051,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
> count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
> count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
>
> - spin_lock_irq(&lruvec->lru_lock);
> + lruvec_lock_irq(lruvec);
> lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
> nr_scanned - nr_reclaimed);
>
> @@ -2130,7 +2130,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
>
> lru_add_drain();
>
> - spin_lock_irq(&lruvec->lru_lock);
> + lruvec_lock_irq(lruvec);
>
> nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
> &nr_scanned, sc, lru);
> @@ -2141,7 +2141,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
> __count_vm_events(PGREFILL, nr_scanned);
> count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
>
> - spin_unlock_irq(&lruvec->lru_lock);
> + lruvec_unlock_irq(lruvec);
>
> while (!list_empty(&l_hold)) {
> struct folio *folio;
> @@ -2197,7 +2197,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
> count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
> mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
>
> - spin_lock_irq(&lruvec->lru_lock);
> + lruvec_lock_irq(lruvec);
> lru_note_cost_unlock_irq(lruvec, file, 0, nr_rotated);
> trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
> nr_deactivate, nr_rotated, sc->priority, file);
> @@ -3832,9 +3832,9 @@ static void walk_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
> }
>
> if (walk->batched) {
> - spin_lock_irq(&lruvec->lru_lock);
> + lruvec_lock_irq(lruvec);
> reset_batch_size(walk);
> - spin_unlock_irq(&lruvec->lru_lock);
> + lruvec_unlock_irq(lruvec);
> }
>
> cond_resched();
> @@ -3993,7 +3993,7 @@ static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness
> if (seq < READ_ONCE(lrugen->max_seq))
> return false;
>
> - spin_lock_irq(&lruvec->lru_lock);
> + lruvec_lock_irq(lruvec);
>
> VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
>
> @@ -4008,7 +4008,7 @@ static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness
> if (inc_min_seq(lruvec, type, swappiness))
> continue;
>
> - spin_unlock_irq(&lruvec->lru_lock);
> + lruvec_unlock_irq(lruvec);
> cond_resched();
> goto restart;
> }
> @@ -4043,7 +4043,7 @@ static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness
> /* make sure preceding modifications appear */
> smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
> unlock:
> - spin_unlock_irq(&lruvec->lru_lock);
> + lruvec_unlock_irq(lruvec);
>
> return success;
> }
> @@ -4739,7 +4739,7 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> struct pglist_data *pgdat = lruvec_pgdat(lruvec);
>
> - spin_lock_irq(&lruvec->lru_lock);
> + lruvec_lock_irq(lruvec);
>
> scanned = isolate_folios(nr_to_scan, lruvec, sc, swappiness, &type, &list);
>
> @@ -4748,7 +4748,7 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> if (evictable_min_seq(lrugen->min_seq, swappiness) + MIN_NR_GENS > lrugen->max_seq)
> scanned = 0;
>
> - spin_unlock_irq(&lruvec->lru_lock);
> + lruvec_unlock_irq(lruvec);
>
> if (list_empty(&list))
> return scanned;
> @@ -4786,9 +4786,9 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> walk = current->reclaim_state->mm_walk;
> if (walk && walk->batched) {
> walk->lruvec = lruvec;
> - spin_lock_irq(&lruvec->lru_lock);
> + lruvec_lock_irq(lruvec);
> reset_batch_size(walk);
> - spin_unlock_irq(&lruvec->lru_lock);
> + lruvec_unlock_irq(lruvec);
> }
>
> mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
> @@ -5226,7 +5226,7 @@ static void lru_gen_change_state(bool enabled)
> for_each_node(nid) {
> struct lruvec *lruvec = get_lruvec(memcg, nid);
>
> - spin_lock_irq(&lruvec->lru_lock);
> + lruvec_lock_irq(lruvec);
>
> VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
> VM_WARN_ON_ONCE(!state_is_valid(lruvec));
> @@ -5234,12 +5234,12 @@ static void lru_gen_change_state(bool enabled)
> lruvec->lrugen.enabled = enabled;
>
> while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
> - spin_unlock_irq(&lruvec->lru_lock);
> + lruvec_unlock_irq(lruvec);
> cond_resched();
> - spin_lock_irq(&lruvec->lru_lock);
> + lruvec_lock_irq(lruvec);
> }
>
> - spin_unlock_irq(&lruvec->lru_lock);
> + lruvec_unlock_irq(lruvec);
> }
>
> cond_resched();
> --
> 2.20.1
>
>