Message-ID: <CAJD7tkb=5kOiuYZxYQVCpjZZriCf2wrx9sgenrpP_Bct=GWfcw@mail.gmail.com>
Date: Tue, 20 Jun 2023 13:19:38 -0700
From: Yosry Ahmed <yosryahmed@...gle.com>
To: Yu Zhao <yuzhao@...gle.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
syzbot+87c490fd2be656269b6a@...kaller.appspotmail.com
Subject: Re: [PATCH mm-unstable v1] mm/mglru: make memcg_lru->lock irq safe
On Mon, Jun 19, 2023 at 12:38 PM Yu Zhao <yuzhao@...gle.com> wrote:
>
> lru_gen_rotate_memcg() can be called from softirq context if
> memory.soft_limit_in_bytes is set. This requires memcg_lru->lock to be
> irq safe.
>
> This problem only affects memcg v1.
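
For anyone following along: the hazard here is the classic
lock-from-softirq deadlock. A minimal sketch of the pattern, using a
hypothetical demo_lock rather than the actual memcg code:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* process context: lock taken with IRQs (and thus softirqs) enabled */
	static void task_path(void)
	{
		spin_lock(&demo_lock);
		/* ... a softirq can interrupt us right here ... */
		spin_unlock(&demo_lock);
	}

	/* softirq context: runs on the same CPU and wants the same lock */
	static void softirq_path(void)
	{
		spin_lock(&demo_lock);	/* spins on a lock this CPU already holds */
		spin_unlock(&demo_lock);
	}

Disabling IRQs while the lock is held, as the patch does below with
spin_lock_irqsave()/spin_lock_irq(), rules this interleaving out.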
>
> Reported-by: syzbot+87c490fd2be656269b6a@...kaller.appspotmail.com
> Closes: https://syzkaller.appspot.com/bug?extid=87c490fd2be656269b6a
> Fixes: e4dde56cd208 ("mm: multi-gen LRU: per-node lru_gen_folio lists")
> Signed-off-by: Yu Zhao <yuzhao@...gle.com>
> ---
> mm/vmscan.c | 13 +++++++------
> 1 file changed, 7 insertions(+), 6 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 45d17c7cc555..27f90896f789 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -4759,10 +4759,11 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
> {
> int seg;
> int old, new;
> + unsigned long flags;
> int bin = get_random_u32_below(MEMCG_NR_BINS);
> struct pglist_data *pgdat = lruvec_pgdat(lruvec);
>
> - spin_lock(&pgdat->memcg_lru.lock);
> + spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);
Nit: I think it might be useful to add a comment here noting that this
is needed because of the call path memcg_check_events() ->
mem_cgroup_update_tree(), so that if that call path ever changes we can
come back and remove the IRQ disabling.
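
Something along these lines, as a rough sketch (the exact wording and
placement are just a suggestion):

	/*
	 * Taken from softirq context via memcg_check_events() ->
	 * mem_cgroup_update_tree() when memory.soft_limit_in_bytes is
	 * set (memcg v1 only), hence the IRQ-safe variants. If that
	 * call path ever goes away, plain spin_lock() would do again.
	 */
	spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);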
FWIW:
Reviewed-by: Yosry Ahmed <yosryahmed@...gle.com>
>
> VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
>
> @@ -4797,7 +4798,7 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
> if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
> WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
>
> - spin_unlock(&pgdat->memcg_lru.lock);
> + spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
> }
>
> void lru_gen_online_memcg(struct mem_cgroup *memcg)
> @@ -4810,7 +4811,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
> struct pglist_data *pgdat = NODE_DATA(nid);
> struct lruvec *lruvec = get_lruvec(memcg, nid);
>
> - spin_lock(&pgdat->memcg_lru.lock);
> + spin_lock_irq(&pgdat->memcg_lru.lock);
>
> VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
>
> @@ -4821,7 +4822,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
>
> lruvec->lrugen.gen = gen;
>
> - spin_unlock(&pgdat->memcg_lru.lock);
> + spin_unlock_irq(&pgdat->memcg_lru.lock);
> }
> }
>
> @@ -4845,7 +4846,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
> struct pglist_data *pgdat = NODE_DATA(nid);
> struct lruvec *lruvec = get_lruvec(memcg, nid);
>
> - spin_lock(&pgdat->memcg_lru.lock);
> + spin_lock_irq(&pgdat->memcg_lru.lock);
>
> VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
>
> @@ -4857,7 +4858,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
> if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
> WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
>
> - spin_unlock(&pgdat->memcg_lru.lock);
> + spin_unlock_irq(&pgdat->memcg_lru.lock);
> }
> }
>
> --
> 2.41.0.185.g7c58973941-goog