[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240405192457.67068-1-sj@kernel.org>
Date: Fri, 5 Apr 2024 12:24:57 -0700
From: SeongJae Park <sj@...nel.org>
To: Hyeongtak Ji <hyeongtak.ji@...il.com>
Cc: SeongJae Park <sj@...nel.org>,
Honggyu Kim <honggyu.kim@...com>,
akpm@...ux-foundation.org,
apopple@...dia.com,
baolin.wang@...ux.alibaba.com,
dave.jiang@...el.com,
hyeongtak.ji@...com,
kernel_team@...ynix.com,
linmiaohe@...wei.com,
linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org,
mathieu.desnoyers@...icios.com,
mhiramat@...nel.org,
rakie.kim@...com,
rostedt@...dmis.org,
surenb@...gle.com,
yangx.jy@...itsu.com,
ying.huang@...el.com,
ziy@...dia.com,
42.hyeyoo@...il.com,
art.jeongseob@...il.com
Subject: Re: [RFC PATCH v3 5/7] mm/damon/paddr: introduce DAMOS_MIGRATE_COLD action for demotion
On Fri, 5 Apr 2024 16:55:57 +0900 Hyeongtak Ji <hyeongtak.ji@...il.com> wrote:
> On Fri, 5 Apr 2024 15:08:54 +0900 Honggyu Kim <honggyu.kim@...com> wrote:
>
> ...snip...
>
> > +static unsigned long damon_pa_migrate_pages(struct list_head *folio_list,
> > + enum migration_mode mm,
> > + int target_nid)
> > +{
> > + int nid;
> > + unsigned int nr_migrated = 0;
> > + LIST_HEAD(node_folio_list);
> > + unsigned int noreclaim_flag;
> > +
> > + if (list_empty(folio_list))
> > + return nr_migrated;
>
> How about checking if `target_nid` is `NUMA_NO_NODE` or not earlier,
>
> > +
> > + noreclaim_flag = memalloc_noreclaim_save();
> > +
> > + nid = folio_nid(lru_to_folio(folio_list));
> > + do {
> > + struct folio *folio = lru_to_folio(folio_list);
> > +
> > + if (nid == folio_nid(folio)) {
> > + folio_clear_active(folio);
> > + list_move(&folio->lru, &node_folio_list);
> > + continue;
> > + }
> > +
> > + nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
> > + NODE_DATA(nid), mm,
> > + target_nid);
> > + nid = folio_nid(lru_to_folio(folio_list));
> > + } while (!list_empty(folio_list));
> > +
> > + nr_migrated += damon_pa_migrate_folio_list(&node_folio_list,
> > + NODE_DATA(nid), mm,
> > + target_nid);
> > +
> > + memalloc_noreclaim_restore(noreclaim_flag);
> > +
> > + return nr_migrated;
> > +}
> > +
>
> ...snip...
>
> > +static unsigned int migrate_folio_list(struct list_head *migrate_folios,
> > + struct pglist_data *pgdat,
> > + int target_nid)
> > +{
> > + unsigned int nr_succeeded;
> > + nodemask_t allowed_mask = NODE_MASK_NONE;
> > +
> > + struct migration_target_control mtc = {
> > + /*
> > + * Allocate from 'node', or fail quickly and quietly.
> > + * When this happens, 'page' will likely just be discarded
> > + * instead of migrated.
> > + */
> > + .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
> > + __GFP_NOMEMALLOC | GFP_NOWAIT,
> > + .nid = target_nid,
> > + .nmask = &allowed_mask
> > + };
> > +
> > + if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)
> > + return 0;
>
> instead of here.
Agree. As I mentioned in my previous reply, I think this check can be done from
the caller (or the caller of the caller) of this function.
>
> > +
> > + if (list_empty(migrate_folios))
> > + return 0;
Same for this.
> > +
> > + /* Migration ignores all cpuset and mempolicy settings */
> > + migrate_pages(migrate_folios, alloc_migrate_folio, NULL,
> > + (unsigned long)&mtc, MIGRATE_ASYNC, MR_DAMON,
> > + &nr_succeeded);
> > +
> > + return nr_succeeded;
> > +}
> > +
>
> ...snip...
>
> Kind regards,
> Hyeongtak
>
Thanks,
SJ
Powered by blists - more mailing lists