Message-ID: <CAGsJ_4zn46WWmhjsTGES1hH9Un65BiNn+KLUfvE_Espnf0tw9Q@mail.gmail.com>
Date: Fri, 26 Jul 2024 17:56:10 +1200
From: Barry Song <21cnbao@...il.com>
To: Yu Zhao <yuzhao@...gle.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH mm-unstable v1 5/5] mm/swap: remove boilerplate
On Fri, Jul 26, 2024 at 5:48 PM Barry Song <21cnbao@...il.com> wrote:
>
> On Thu, Jul 11, 2024 at 2:15 PM Yu Zhao <yuzhao@...gle.com> wrote:
> >
> > Remove boilerplate by using a macro to choose the corresponding lock
> > and handler for each folio_batch in cpu_fbatches.
> >
> > Signed-off-by: Yu Zhao <yuzhao@...gle.com>
> > ---
> > mm/swap.c | 107 +++++++++++++++++++-----------------------------------
> > 1 file changed, 37 insertions(+), 70 deletions(-)
> >
> > diff --git a/mm/swap.c b/mm/swap.c
> > index 4a66d2f87f26..342ff4e39ba4 100644
> > --- a/mm/swap.c
> > +++ b/mm/swap.c
> > @@ -220,16 +220,45 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
> > folios_put(fbatch);
> > }
> >
> > -static void folio_batch_add_and_move(struct folio_batch *fbatch,
> > - struct folio *folio, move_fn_t move_fn)
> > +static void __folio_batch_add_and_move(struct folio_batch *fbatch,
> > + struct folio *folio, move_fn_t move_fn,
> > + bool on_lru, bool disable_irq)
> > {
> > + unsigned long flags;
> > +
> > + folio_get(folio);
> > +
> > + if (on_lru && !folio_test_clear_lru(folio)) {
> > + folio_put(folio);
> > + return;
> > + }
> > +
> > if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
> > !lru_cache_disabled())
> > return;
> >
> > + if (disable_irq)
> > + local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
> > + else
> > + local_lock(&cpu_fbatches.lock);
> > +
> > folio_batch_move_lru(fbatch, move_fn);
> > +
> > + if (disable_irq)
> > + local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
> > + else
> > + local_unlock(&cpu_fbatches.lock);
> > }
> >
> > +#define folio_batch_add_and_move(folio, op, on_lru) \
> > + __folio_batch_add_and_move( \
> > + this_cpu_ptr(&cpu_fbatches.op), \
> > + folio, \
> > + op, \
> > + on_lru, \
> > + offsetof(struct cpu_fbatches, op) > offsetof(struct cpu_fbatches, lock_irq) \
> > + )
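
If I am reading the macro right, a call such as folio_batch_add_and_move(folio, lru_add, false)
hand-expands to roughly the following (sketch only; it assumes lru_add sits before lock_irq in
struct cpu_fbatches and lru_move_tail after it, so the offsetof comparison is a compile-time
constant that selects the IRQ-safe lock only for lru_move_tail):

	__folio_batch_add_and_move(
		this_cpu_ptr(&cpu_fbatches.lru_add),	/* this CPU's batch for the op */
		folio,
		lru_add,				/* move_fn handler of the same name */
		false,					/* on_lru */
		offsetof(struct cpu_fbatches, lru_add) >
			offsetof(struct cpu_fbatches, lock_irq));	/* false: plain local_lock */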
>
> I am running into this BUG, is it relevant?
>
> / # [ 64.908801] check_preemption_disabled: 1804 callbacks suppressed
> [ 64.908915] BUG: using smp_processor_id() in preemptible [00000000] code: jbd2/vda-8/96
> [ 64.909912] caller is debug_smp_processor_id+0x20/0x30
> [ 64.911743] CPU: 0 UID: 0 PID: 96 Comm: jbd2/vda-8 Not tainted 6.10.0-gef32eccacce2 #59
> [ 64.912373] Hardware name: linux,dummy-virt (DT)
> [ 64.912741] Call trace:
> [ 64.913048] dump_backtrace+0x9c/0x100
> [ 64.913414] show_stack+0x20/0x38
> [ 64.913761] dump_stack_lvl+0xc4/0x150
> [ 64.914197] dump_stack+0x18/0x28
> [ 64.914557] check_preemption_disabled+0xd8/0x120
> [ 64.914944] debug_smp_processor_id+0x20/0x30
> [ 64.915321] folio_add_lru+0x30/0xa8
> [ 64.915680] filemap_add_folio+0xe4/0x118
> [ 64.916082] __filemap_get_folio+0x178/0x450
> [ 64.916455] __getblk_slow+0xb0/0x310
> [ 64.916816] bdev_getblk+0x94/0xc0
> [ 64.917169] jbd2_journal_get_descriptor_buffer+0x6c/0x1b0
> [ 64.917590] jbd2_journal_commit_transaction+0x7f0/0x1c88
> [ 64.917994] kjournald2+0xd4/0x278
> [ 64.918344] kthread+0x11c/0x128
> [ 64.918693] ret_from_fork+0x10/0x20
> [ 64.928277] BUG: using smp_processor_id() in preemptible [00000000] code: jbd2/vda-8/96
> [ 64.928878] caller is debug_smp_processor_id+0x20/0x30
> [ 64.929381] CPU: 0 UID: 0 PID: 96 Comm: jbd2/vda-8 Not tainted 6.10.0-gef32eccacce2 #59
> [ 64.929886] Hardware name: linux,dummy-virt (DT)
> [ 64.930252] Call trace:
> [ 64.930544] dump_backtrace+0x9c/0x100
> [ 64.930907] show_stack+0x20/0x38
> [ 64.931255] dump_stack_lvl+0xc4/0x150
> [ 64.931616] dump_stack+0x18/0x28
> [ 64.932022] check_preemption_disabled+0xd8/0x120
> [ 64.932486] debug_smp_processor_id+0x20/0x30
> [ 64.933023] folio_add_lru+0x30/0xa8
> [ 64.933523] filemap_add_folio+0xe4/0x118
> [ 64.933892] __filemap_get_folio+0x178/0x450
> [ 64.934265] __getblk_slow+0xb0/0x310
> [ 64.934626] bdev_getblk+0x94/0xc0
> [ 64.934977] jbd2_journal_get_descriptor_buffer+0x6c/0x1b0
> [ 64.935418] journal_submit_commit_record.part.0.constprop.0+0x48/0x288
> [ 64.935919] jbd2_journal_commit_transaction+0x1590/0x1c88
> [ 64.936519] kjournald2+0xd4/0x278
> [ 64.936908] kthread+0x11c/0x128
> [ 64.937323] ret_from_fork+0x10/0x20
This removes the BUG complaint, but I'm unsure if it's the correct fix:
diff --git a/mm/swap.c b/mm/swap.c
index 342ff4e39ba4..a2781edeceef 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -252,7 +252,7 @@ static void __folio_batch_add_and_move(struct folio_batch *fbatch,
 #define folio_batch_add_and_move(folio, op, on_lru)				\
 	__folio_batch_add_and_move(						\
-		this_cpu_ptr(&cpu_fbatches.op),					\
+		raw_cpu_ptr(&cpu_fbatches.op),					\
 		folio,								\
 		op,								\
 		on_lru,								\
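
If I understand the splat, the problem is that this_cpu_ptr() is evaluated by the macro while the
caller is still preemptible, before the local lock in __folio_batch_add_and_move() is taken.
raw_cpu_ptr() only silences the debug check; the batch is still looked up (and appended to by
folio_batch_add()) with preemption enabled, so in principle the task could migrate and touch
another CPU's batch outside its lock. An alternative, untested sketch (not what this patch does
as-is) would be to pass the __percpu address and resolve it only after the lock is held:

static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
		struct folio *folio, move_fn_t move_fn,
		bool on_lru, bool disable_irq)
{
	unsigned long flags;
	struct folio_batch *this;

	folio_get(folio);

	if (on_lru && !folio_test_clear_lru(folio)) {
		folio_put(folio);
		return;
	}

	if (disable_irq)
		local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
	else
		local_lock(&cpu_fbatches.lock);

	/* under the local lock the task can no longer migrate, so this is stable */
	this = this_cpu_ptr(fbatch);
	if (!folio_batch_add(this, folio) || folio_test_large(folio) ||
	    lru_cache_disabled())
		folio_batch_move_lru(this, move_fn);

	if (disable_irq)
		local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
	else
		local_unlock(&cpu_fbatches.lock);
}

The macro would then pass &cpu_fbatches.op rather than an already-resolved pointer.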
>
> > +
> > static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
> > {
> > if (folio_test_unevictable(folio))
> > @@ -250,23 +279,11 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
> > */
> > void folio_rotate_reclaimable(struct folio *folio)
> > {
> > - struct folio_batch *fbatch;
> > - unsigned long flags;
> > -
> > if (folio_test_locked(folio) || folio_test_dirty(folio) ||
> > folio_test_unevictable(folio))
> > return;
> >
> > - folio_get(folio);
> > - if (!folio_test_clear_lru(folio)) {
> > - folio_put(folio);
> > - return;
> > - }
> > -
> > - local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
> > - fbatch = this_cpu_ptr(&cpu_fbatches.lru_move_tail);
> > - folio_batch_add_and_move(fbatch, folio, lru_move_tail);
> > - local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
> > + folio_batch_add_and_move(folio, lru_move_tail, true);
> > }
> >
> > void lru_note_cost(struct lruvec *lruvec, bool file,
> > @@ -355,21 +372,10 @@ static void folio_activate_drain(int cpu)
> >
> > void folio_activate(struct folio *folio)
> > {
> > - struct folio_batch *fbatch;
> > -
> > if (folio_test_active(folio) || folio_test_unevictable(folio))
> > return;
> >
> > - folio_get(folio);
> > - if (!folio_test_clear_lru(folio)) {
> > - folio_put(folio);
> > - return;
> > - }
> > -
> > - local_lock(&cpu_fbatches.lock);
> > - fbatch = this_cpu_ptr(&cpu_fbatches.lru_activate);
> > - folio_batch_add_and_move(fbatch, folio, lru_activate);
> > - local_unlock(&cpu_fbatches.lock);
> > + folio_batch_add_and_move(folio, lru_activate, true);
> > }
> >
> > #else
> > @@ -513,8 +519,6 @@ EXPORT_SYMBOL(folio_mark_accessed);
> > */
> > void folio_add_lru(struct folio *folio)
> > {
> > - struct folio_batch *fbatch;
> > -
> > VM_BUG_ON_FOLIO(folio_test_active(folio) &&
> > folio_test_unevictable(folio), folio);
> > VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
> > @@ -524,11 +528,7 @@ void folio_add_lru(struct folio *folio)
> > lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
> > folio_set_active(folio);
> >
> > - folio_get(folio);
> > - local_lock(&cpu_fbatches.lock);
> > - fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
> > - folio_batch_add_and_move(fbatch, folio, lru_add);
> > - local_unlock(&cpu_fbatches.lock);
> > + folio_batch_add_and_move(folio, lru_add, false);
> > }
> > EXPORT_SYMBOL(folio_add_lru);
> >
> > @@ -702,22 +702,11 @@ void lru_add_drain_cpu(int cpu)
> > */
> > void deactivate_file_folio(struct folio *folio)
> > {
> > - struct folio_batch *fbatch;
> > -
> > /* Deactivating an unevictable folio will not accelerate reclaim */
> > if (folio_test_unevictable(folio))
> > return;
> >
> > - folio_get(folio);
> > - if (!folio_test_clear_lru(folio)) {
> > - folio_put(folio);
> > - return;
> > - }
> > -
> > - local_lock(&cpu_fbatches.lock);
> > - fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
> > - folio_batch_add_and_move(fbatch, folio, lru_deactivate_file);
> > - local_unlock(&cpu_fbatches.lock);
> > + folio_batch_add_and_move(folio, lru_deactivate_file, true);
> > }
> >
> > /*
> > @@ -730,21 +719,10 @@ void deactivate_file_folio(struct folio *folio)
> > */
> > void folio_deactivate(struct folio *folio)
> > {
> > - struct folio_batch *fbatch;
> > -
> > if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
> > return;
> >
> > - folio_get(folio);
> > - if (!folio_test_clear_lru(folio)) {
> > - folio_put(folio);
> > - return;
> > - }
> > -
> > - local_lock(&cpu_fbatches.lock);
> > - fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
> > - folio_batch_add_and_move(fbatch, folio, lru_deactivate);
> > - local_unlock(&cpu_fbatches.lock);
> > + folio_batch_add_and_move(folio, lru_deactivate, true);
> > }
> >
> > /**
> > @@ -756,22 +734,11 @@ void folio_deactivate(struct folio *folio)
> > */
> > void folio_mark_lazyfree(struct folio *folio)
> > {
> > - struct folio_batch *fbatch;
> > -
> > if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
> > folio_test_swapcache(folio) || folio_test_unevictable(folio))
> > return;
> >
> > - folio_get(folio);
> > - if (!folio_test_clear_lru(folio)) {
> > - folio_put(folio);
> > - return;
> > - }
> > -
> > - local_lock(&cpu_fbatches.lock);
> > - fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
> > - folio_batch_add_and_move(fbatch, folio, lru_lazyfree);
> > - local_unlock(&cpu_fbatches.lock);
> > + folio_batch_add_and_move(folio, lru_lazyfree, true);
> > }
> >
> > void lru_add_drain(void)
> > --
> > 2.45.2.803.g4e1b14247a-goog
> >
> >
>
> Thanks
> Barry