Message-ID: <YRdGaKsLkAwpadW5@casper.infradead.org>
Date: Sat, 14 Aug 2021 05:28:24 +0100
From: Matthew Wilcox <willy@...radead.org>
To: linux-kernel@...r.kernel.org
Cc: Johannes Weiner <hannes@...xchg.org>, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org, Christoph Hellwig <hch@....de>
Subject: Re: [PATCH v14 080/138] mm/workingset: Convert workingset_refault()
to take a folio
On Thu, Jul 15, 2021 at 04:36:06AM +0100, Matthew Wilcox (Oracle) wrote:
> /**
> - * workingset_refault - evaluate the refault of a previously evicted page
> - * @page: the freshly allocated replacement page
> - * @shadow: shadow entry of the evicted page
> + * workingset_refault - evaluate the refault of a previously evicted folio
> + * @page: the freshly allocated replacement folio
Randy pointed out this doc mistake, which got me looking at this whole
patch again, and I noticed that we're counting an entire folio as a
single page.
So I'm going to apply the fix below on top of that patch:
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 241bd0f53fb9..bfe38869498d 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -597,12 +597,6 @@ static inline void mod_lruvec_page_state(struct page *page,
#endif /* CONFIG_MEMCG */
-static inline void inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- mod_lruvec_state(lruvec, idx, 1);
-}
-
static inline void __inc_lruvec_page_state(struct page *page,
enum node_stat_item idx)
{
diff --git a/mm/workingset.c b/mm/workingset.c
index 10830211a187..9f91c28cc0ce 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -273,9 +273,9 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
}
/**
- * workingset_refault - evaluate the refault of a previously evicted folio
- * @page: the freshly allocated replacement folio
- * @shadow: shadow entry of the evicted folio
+ * workingset_refault - Evaluate the refault of a previously evicted folio.
+ * @folio: The freshly allocated replacement folio.
+ * @shadow: Shadow entry of the evicted folio.
*
* Calculates and evaluates the refault distance of the previously
* evicted folio in the context of the node and the memcg whose memory
@@ -295,6 +295,7 @@ void workingset_refault(struct folio *folio, void *shadow)
unsigned long refault;
bool workingset;
int memcgid;
+ long nr;
unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
@@ -347,10 +348,11 @@ void workingset_refault(struct folio *folio, void *shadow)
* However, the cgroup that will own the folio is the one that
* is actually experiencing the refault event.
*/
+ nr = folio_nr_pages(folio);
memcg = folio_memcg(folio);
lruvec = mem_cgroup_lruvec(memcg, pgdat);
- inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
+ mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
/*
* Compare the distance to the existing workingset size. We
@@ -376,15 +378,15 @@ void workingset_refault(struct folio *folio, void *shadow)
goto out;
folio_set_active(folio);
- workingset_age_nonresident(lruvec, folio_nr_pages(folio));
- inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);
+ workingset_age_nonresident(lruvec, nr);
+ mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);
/* Folio was active prior to eviction */
if (workingset) {
folio_set_workingset(folio);
/* XXX: Move to lru_cache_add() when it supports new vs putback */
lru_note_cost_folio(folio);
- inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
+ mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
}
out:
rcu_read_unlock();
> + * @shadow: shadow entry of the evicted folio
> *
> * Calculates and evaluates the refault distance of the previously
> - * evicted page in the context of the node and the memcg whose memory
> + * evicted folio in the context of the node and the memcg whose memory
> * pressure caused the eviction.
> */
> -void workingset_refault(struct page *page, void *shadow)
> +void workingset_refault(struct folio *folio, void *shadow)
> {
> - bool file = page_is_file_lru(page);
> + bool file = folio_is_file_lru(folio);
> struct mem_cgroup *eviction_memcg;
> struct lruvec *eviction_lruvec;
> unsigned long refault_distance;
> @@ -301,10 +301,10 @@ void workingset_refault(struct page *page, void *shadow)
> rcu_read_lock();
> /*
> * Look up the memcg associated with the stored ID. It might
> - * have been deleted since the page's eviction.
> + * have been deleted since the folio's eviction.
> *
> * Note that in rare events the ID could have been recycled
> - * for a new cgroup that refaults a shared page. This is
> + * for a new cgroup that refaults a shared folio. This is
> * impossible to tell from the available data. However, this
> * should be a rare and limited disturbance, and activations
> * are always speculative anyway. Ultimately, it's the aging
> @@ -340,14 +340,14 @@ void workingset_refault(struct page *page, void *shadow)
> refault_distance = (refault - eviction) & EVICTION_MASK;
>
> /*
> - * The activation decision for this page is made at the level
> + * The activation decision for this folio is made at the level
> * where the eviction occurred, as that is where the LRU order
> - * during page reclaim is being determined.
> + * during folio reclaim is being determined.
> *
> - * However, the cgroup that will own the page is the one that
> + * However, the cgroup that will own the folio is the one that
> * is actually experiencing the refault event.
> */
> - memcg = page_memcg(page);
> + memcg = folio_memcg(folio);
> lruvec = mem_cgroup_lruvec(memcg, pgdat);
>
> inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
> @@ -375,15 +375,15 @@ void workingset_refault(struct page *page, void *shadow)
> if (refault_distance > workingset_size)
> goto out;
>
> - SetPageActive(page);
> - workingset_age_nonresident(lruvec, thp_nr_pages(page));
> + folio_set_active(folio);
> + workingset_age_nonresident(lruvec, folio_nr_pages(folio));
> inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);
>
> - /* Page was active prior to eviction */
> + /* Folio was active prior to eviction */
> if (workingset) {
> - SetPageWorkingset(page);
> + folio_set_workingset(folio);
> /* XXX: Move to lru_cache_add() when it supports new vs putback */
> - lru_note_cost_page(page);
> + lru_note_cost_folio(folio);
> inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
> }
> out:
> --
> 2.30.2
>