Message-ID: <ZqNHHMiHn-9vy_II@google.com>
Date: Fri, 26 Jul 2024 00:50:04 -0600
From: Yu Zhao <yuzhao@...gle.com>
To: Barry Song <21cnbao@...il.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH mm-unstable v1 5/5] mm/swap: remove boilerplate
On Fri, Jul 26, 2024 at 05:56:10PM +1200, Barry Song wrote:
> On Fri, Jul 26, 2024 at 5:48 PM Barry Song <21cnbao@...il.com> wrote:
> >
> > On Thu, Jul 11, 2024 at 2:15 PM Yu Zhao <yuzhao@...gle.com> wrote:
> > >
> > > Remove boilerplate by using a macro to choose the corresponding lock
> > > and handler for each folio_batch in cpu_fbatches.
> > >
> > > Signed-off-by: Yu Zhao <yuzhao@...gle.com>
> > > ---
> > > mm/swap.c | 107 +++++++++++++++++++-----------------------------------
> > > 1 file changed, 37 insertions(+), 70 deletions(-)
> > >
> > > diff --git a/mm/swap.c b/mm/swap.c
> > > index 4a66d2f87f26..342ff4e39ba4 100644
> > > --- a/mm/swap.c
> > > +++ b/mm/swap.c
> > > @@ -220,16 +220,45 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
> > >  	folios_put(fbatch);
> > >  }
> > >
> > > -static void folio_batch_add_and_move(struct folio_batch *fbatch,
> > > -		struct folio *folio, move_fn_t move_fn)
> > > +static void __folio_batch_add_and_move(struct folio_batch *fbatch,
> > > +		struct folio *folio, move_fn_t move_fn,
> > > +		bool on_lru, bool disable_irq)
> > >  {
> > > +	unsigned long flags;
> > > +
> > > +	folio_get(folio);
> > > +
> > > +	if (on_lru && !folio_test_clear_lru(folio)) {
> > > +		folio_put(folio);
> > > +		return;
> > > +	}
> > > +
> > >  	if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
> > >  	    !lru_cache_disabled())
> > >  		return;
> > >
> > > +	if (disable_irq)
> > > +		local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
> > > +	else
> > > +		local_lock(&cpu_fbatches.lock);
> > > +
> > >  	folio_batch_move_lru(fbatch, move_fn);
> > > +
> > > +	if (disable_irq)
> > > +		local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
> > > +	else
> > > +		local_unlock(&cpu_fbatches.lock);
> > >  }
> > >
> > > +#define folio_batch_add_and_move(folio, op, on_lru)				\
> > > +	__folio_batch_add_and_move(						\
> > > +		this_cpu_ptr(&cpu_fbatches.op),					\
> > > +		folio,								\
> > > +		op,								\
> > > +		on_lru,								\
> > > +		offsetof(struct cpu_fbatches, op) > offsetof(struct cpu_fbatches, lock_irq) \
> > > +	)
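The macro above picks the lock flavor at compile time by comparing field offsets within struct cpu_fbatches: a folio_batch member declared after lock_irq is one that has to be flushed with interrupts disabled, so the offsetof() comparison collapses to a constant true/false per call site. Below is a minimal userspace analogue of that offsetof() dispatch; the struct and function names (struct fbatches, demo_add, DEMO_ADD) are invented for illustration and are not the kernel code.

/*
 * Userspace analogue of the offsetof() dispatch used by the macro above.
 * All names are invented for illustration.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fbatches {
	int lock;		/* stands in for cpu_fbatches.lock */
	int lru_add;		/* members before lock_irq: plain local_lock() */
	int lru_activate;
	int lock_irq;		/* stands in for cpu_fbatches.lock_irq */
	int lru_move_tail;	/* members after lock_irq: local_lock_irqsave() */
};

static void demo_add(const char *name, bool disable_irq)
{
	printf("%s -> %s\n", name,
	       disable_irq ? "local_lock_irqsave()" : "local_lock()");
}

/* Members declared after lock_irq get the IRQ-disabling variant. */
#define DEMO_ADD(op)						\
	demo_add(#op, offsetof(struct fbatches, op) >		\
		      offsetof(struct fbatches, lock_irq))

int main(void)
{
	DEMO_ADD(lru_add);		/* prints: lru_add -> local_lock() */
	DEMO_ADD(lru_move_tail);	/* prints: lru_move_tail -> local_lock_irqsave() */
	return 0;
}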
> >
> > I am running into this BUG; is it relevant?
Sorry for the trouble.
> > / # [ 64.908801] check_preemption_disabled: 1804 callbacks suppressed
> > [   64.908915] BUG: using smp_processor_id() in preemptible [00000000] code: jbd2/vda-8/96
> > [ 64.909912] caller is debug_smp_processor_id+0x20/0x30
> > [   64.911743] CPU: 0 UID: 0 PID: 96 Comm: jbd2/vda-8 Not tainted 6.10.0-gef32eccacce2 #59
> > [ 64.912373] Hardware name: linux,dummy-virt (DT)
> > [ 64.912741] Call trace:
> > [ 64.913048] dump_backtrace+0x9c/0x100
> > [ 64.913414] show_stack+0x20/0x38
> > [ 64.913761] dump_stack_lvl+0xc4/0x150
> > [ 64.914197] dump_stack+0x18/0x28
> > [ 64.914557] check_preemption_disabled+0xd8/0x120
> > [ 64.914944] debug_smp_processor_id+0x20/0x30
> > [ 64.915321] folio_add_lru+0x30/0xa8
> > [ 64.915680] filemap_add_folio+0xe4/0x118
> > [ 64.916082] __filemap_get_folio+0x178/0x450
> > [ 64.916455] __getblk_slow+0xb0/0x310
> > [ 64.916816] bdev_getblk+0x94/0xc0
> > [ 64.917169] jbd2_journal_get_descriptor_buffer+0x6c/0x1b0
> > [ 64.917590] jbd2_journal_commit_transaction+0x7f0/0x1c88
> > [ 64.917994] kjournald2+0xd4/0x278
> > [ 64.918344] kthread+0x11c/0x128
> > [ 64.918693] ret_from_fork+0x10/0x20
>
> This removes the BUG complaint, but I'm unsure if it's the correct fix:
Below is the proper fix. Will post v2.
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -221,7 +221,7 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 	folios_put(fbatch);
 }
 
-static void __folio_batch_add_and_move(struct folio_batch *fbatch,
+static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
 		struct folio *folio, move_fn_t move_fn,
 		bool on_lru, bool disable_irq)
 {
@@ -234,16 +234,14 @@ static void __folio_batch_add_and_move(struct folio_batch *fbatch,
 		return;
 	}
 
-	if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
-	    !lru_cache_disabled())
-		return;
-
 	if (disable_irq)
 		local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
 	else
 		local_lock(&cpu_fbatches.lock);
 
-	folio_batch_move_lru(fbatch, move_fn);
+	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
+	    lru_cache_disabled())
+		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
 
 	if (disable_irq)
 		local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
@@ -253,7 +251,7 @@ static void __folio_batch_add_and_move(struct folio_batch *fbatch,
 
 #define folio_batch_add_and_move(folio, op, on_lru)				\
 	__folio_batch_add_and_move(						\
-		this_cpu_ptr(&cpu_fbatches.op),					\
+		&cpu_fbatches.op,						\
 		folio,								\
 		op,								\
 		on_lru,								\
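The warning presumably comes from the v1 macro expanding this_cpu_ptr(&cpu_fbatches.op) at the call site, before local_lock()/local_lock_irqsave() has been taken, so with CONFIG_DEBUG_PREEMPT the smp_processor_id() check inside this_cpu_ptr() fires from preemptible paths such as folio_add_lru(). The v2 diff instead passes the per-CPU address and resolves it with this_cpu_ptr() only once the local lock is held. Below is a contrived userspace sketch of that ordering rule; all names are invented, and a plain flag stands in for the preemption check.

/*
 * Contrived userspace model of the ordering bug: resolving the per-CPU
 * slot (get_slot) is only legal while "preemption" is disabled, modeled
 * here as a flag toggled by lock()/unlock(). All names are invented.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool preempt_disabled;
static int slot;

static void lock(void)   { preempt_disabled = true; }	/* ~ local_lock() */
static void unlock(void) { preempt_disabled = false; }	/* ~ local_unlock() */

static int *get_slot(void)	/* ~ this_cpu_ptr() with its debug check */
{
	assert(preempt_disabled && "using smp_processor_id() in preemptible");
	return &slot;
}

/* v1 shape: the caller resolves the pointer before the lock is taken. */
static void add_v1(int *batch, int folio)
{
	lock();
	*batch += folio;
	unlock();
}

/* v2 shape: pass the address around, resolve it only under the lock. */
static void add_v2(int folio)
{
	lock();
	*get_slot() += folio;
	unlock();
}

int main(void)
{
	add_v2(1);		/* fine: get_slot() runs with the lock held */
	add_v1(get_slot(), 1);	/* asserts: get_slot() runs before lock()   */
	return 0;
}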