Message-Id: <2014bf7370d78bc1f5600731af5bf8f569e5868b.1713755580.git.baolin.wang@linux.alibaba.com>
Date: Mon, 22 Apr 2024 15:02:40 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: akpm@...ux-foundation.org,
hughd@...gle.com
Cc: willy@...radead.org,
david@...hat.com,
wangkefeng.wang@...wei.com,
21cnbao@...il.com,
ryan.roberts@....com,
ying.huang@...el.com,
shy828301@...il.com,
ziy@...dia.com,
baolin.wang@...ux.alibaba.com,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH 2/5] mm: shmem: add an 'order' parameter for shmem_alloc_hugefolio()
Add a new parameter to specify the huge page order for shmem_alloc_hugefolio(),
in preparation for supporting mTHP.
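As background (not part of this patch), here is a minimal userspace sketch of
the order/pages arithmetic the new parameter enables: once a caller can pass an
arbitrary order instead of HPAGE_PMD_ORDER, the folio's page count and the
index alignment follow directly from that order. The constants below are
hard-coded assumptions for x86-64 with 4KB pages (PAGE_SHIFT = 12,
PMD_SHIFT = 21), and the order-4 (64KB) folio is just a hypothetical mTHP size.

	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define PMD_SHIFT		21
	#define HPAGE_PMD_ORDER		(PMD_SHIFT - PAGE_SHIFT)	/* 9 on this config */
	#define HPAGE_PMD_NR		(1UL << HPAGE_PMD_ORDER)	/* 512 pages */

	/* Power-of-two variant of the kernel's round_down() helper. */
	#define round_down(x, y)	((x) & ~((y) - 1))

	int main(void)
	{
		/* PMD-sized folio plus a hypothetical order-4 (64KB) mTHP folio. */
		int orders[] = { HPAGE_PMD_ORDER, 4 };
		unsigned long index = 1000;		/* example page cache index */

		for (int i = 0; i < 2; i++) {
			int order = orders[i];
			unsigned long pages = 1UL << order;	/* what 'pages' becomes for this order */

			printf("order %d: %lu pages, index %lu rounds down to %lu\n",
			       order, pages, index, round_down(index, pages));
		}
		return 0;
	}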
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
mm/shmem.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index fa2a0ed97507..893c88efc45f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1604,14 +1604,14 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
}
static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
- struct shmem_inode_info *info, pgoff_t index)
+ struct shmem_inode_info *info, pgoff_t index, int order)
{
struct mempolicy *mpol;
pgoff_t ilx;
struct page *page;
- mpol = shmem_get_pgoff_policy(info, index, HPAGE_PMD_ORDER, &ilx);
- page = alloc_pages_mpol(gfp, HPAGE_PMD_ORDER, mpol, ilx, numa_node_id());
+ mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
+ page = alloc_pages_mpol(gfp, order, mpol, ilx, numa_node_id());
mpol_cond_put(mpol);
return page_rmappable_folio(page);
@@ -1639,13 +1639,14 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
struct shmem_inode_info *info = SHMEM_I(inode);
struct folio *folio;
long pages;
- int error;
+ int error, order;
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
huge = false;
if (huge) {
pages = HPAGE_PMD_NR;
+ order = HPAGE_PMD_ORDER;
index = round_down(index, HPAGE_PMD_NR);
/*
@@ -1660,7 +1661,7 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
index + HPAGE_PMD_NR - 1, XA_PRESENT))
return ERR_PTR(-E2BIG);
- folio = shmem_alloc_hugefolio(gfp, info, index);
+ folio = shmem_alloc_hugefolio(gfp, info, index, order);
if (!folio)
count_vm_event(THP_FILE_FALLBACK);
} else {
--
2.39.3