Message-ID: <20240226205534.1603748-3-zi.yan@sent.com>
Date: Mon, 26 Feb 2024 15:55:28 -0500
From: Zi Yan <zi.yan@...t.com>
To: "Pankaj Raghav (Samsung)" <kernel@...kajraghav.com>,
linux-mm@...ck.org
Cc: Zi Yan <ziy@...dia.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
David Hildenbrand <david@...hat.com>,
Yang Shi <shy828301@...il.com>,
Yu Zhao <yuzhao@...gle.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Ryan Roberts <ryan.roberts@....com>,
Michal Koutný <mkoutny@...e.com>,
Roman Gushchin <roman.gushchin@...ux.dev>,
"Zach O'Keefe" <zokeefe@...gle.com>,
Hugh Dickins <hughd@...gle.com>,
Luis Chamberlain <mcgrof@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org,
cgroups@...r.kernel.org,
linux-fsdevel@...r.kernel.org,
linux-kselftest@...r.kernel.org
Subject: [PATCH v5 2/8] mm: Support order-1 folios in the page cache
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Folios of order 1 have no space to store the deferred list. This is
not a problem for the page cache as file-backed folios are never
placed on the deferred list. All we need to do is prevent the core
MM from touching the deferred list for order 1 folios and remove the
code which prevented us from allocating order 1 folios.
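For reference: _deferred_list lives in the part of struct folio that
overlays the folio's second tail page (__page_2), so only folios of
order >= 2 have any memory backing the field. An abbreviated sketch of
the layout as of this series (field list elided; see
include/linux/mm_types.h for the real thing):

	struct folio {
		...			/* overlays page 0 (the head page) */
		struct page __page_1;	/* overlays page 1 */
		union {
			struct {
				unsigned long _flags_2a;
				unsigned long _head_2a;
				struct list_head _deferred_list;
			};
			struct page __page_2;	/* only exists for order >= 2 */
		};
	};

An order-1 folio consists of pages 0 and 1 only, so dereferencing
_deferred_list there would scribble on the struct page of whatever
happens to follow the folio in the memmap.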
Link: https://lore.kernel.org/linux-mm/90344ea7-4eec-47ee-5996-0c22f42d6a6a@google.com/
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Signed-off-by: Zi Yan <ziy@...dia.com>
---
mm/filemap.c | 2 --
mm/huge_memory.c | 19 +++++++++++++++----
mm/internal.h | 3 +--
mm/readahead.c | 3 ---
4 files changed, 16 insertions(+), 11 deletions(-)
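
(Not part of the patch, just an illustration for reviewers of what this
enables. Assuming a hypothetical "mapping" with large folio support and
a "index" into it, on a 4K base page size an 8K page cache request may
now be satisfied by a single order-1 folio rather than being rounded
down to order 0:

	struct folio *folio;

	folio = __filemap_get_folio(mapping, index,
			FGP_CREAT | fgf_set_order(SZ_8K), GFP_KERNEL);

fgf_set_order() encodes the preferred allocation order in the fgf_t
flags; with this patch, __filemap_get_folio() no longer clamps order 1
to order 0.)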
diff --git a/mm/filemap.c b/mm/filemap.c
index b7a21551fbc7..b4858d89f1b1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1912,8 +1912,6 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 			gfp_t alloc_gfp = gfp;
 
 			err = -ENOMEM;
-			if (order == 1)
-				order = 0;
 			if (order > 0)
 				alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
 			folio = filemap_alloc_folio(alloc_gfp, order);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b20e535e874c..9840f312c08f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -790,8 +790,10 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
 
 void folio_prep_large_rmappable(struct folio *folio)
 {
-	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
-	INIT_LIST_HEAD(&folio->_deferred_list);
+	if (!folio || !folio_test_large(folio))
+		return;
+	if (folio_order(folio) > 1)
+		INIT_LIST_HEAD(&folio->_deferred_list);
 	folio_set_large_rmappable(folio);
 }
 
@@ -3114,7 +3116,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	/* Prevent deferred_split_scan() touching ->_refcount */
 	spin_lock(&ds_queue->split_queue_lock);
 	if (folio_ref_freeze(folio, 1 + extra_pins)) {
-		if (!list_empty(&folio->_deferred_list)) {
+		if (folio_order(folio) > 1 &&
+		    !list_empty(&folio->_deferred_list)) {
 			ds_queue->split_queue_len--;
 			list_del(&folio->_deferred_list);
 		}
@@ -3165,6 +3168,9 @@ void folio_undo_large_rmappable(struct folio *folio)
 	struct deferred_split *ds_queue;
 	unsigned long flags;
 
+	if (folio_order(folio) <= 1)
+		return;
+
 	/*
 	 * At this point, there is no one trying to add the folio to
 	 * deferred_list. If folio is not in deferred_list, it's safe
@@ -3190,7 +3196,12 @@ void deferred_split_folio(struct folio *folio)
 #endif
 	unsigned long flags;
 
-	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
+	/*
+	 * Order 1 folios have no space for a deferred list, but we also
+	 * won't waste much memory by not adding them to the deferred list.
+	 */
+	if (folio_order(folio) <= 1)
+		return;
 
 	/*
 	 * The try_to_unmap() in page reclaim path might reach here too,
diff --git a/mm/internal.h b/mm/internal.h
index 2b7efffbe4d7..c4853ebfa030 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -420,8 +420,7 @@ static inline struct folio *page_rmappable_folio(struct page *page)
 {
 	struct folio *folio = (struct folio *)page;
 
-	if (folio && folio_order(folio) > 1)
-		folio_prep_large_rmappable(folio);
+	folio_prep_large_rmappable(folio);
 	return folio;
 }
 
diff --git a/mm/readahead.c b/mm/readahead.c
index 1e74455f908e..130c0e7df99f 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -514,9 +514,6 @@ void page_cache_ra_order(struct readahead_control *ractl,
 		/* Don't allocate pages past EOF */
 		while (index + (1UL << order) - 1 > limit)
 			order--;
-		/* THP machinery does not support order-1 */
-		if (order == 1)
-			order = 0;
 		err = ra_alloc_folio(ractl, index, mark, order, gfp);
 		if (err)
 			break;
--
2.43.0