Message-ID: <20230915095042.1320180-5-da.gomez@samsung.com>
Date: Fri, 15 Sep 2023 09:51:28 +0000
From: Daniel Gomez <da.gomez@...sung.com>
To: "minchan@...nel.org" <minchan@...nel.org>,
"senozhatsky@...omium.org" <senozhatsky@...omium.org>,
"axboe@...nel.dk" <axboe@...nel.dk>,
"djwong@...nel.org" <djwong@...nel.org>,
"willy@...radead.org" <willy@...radead.org>,
"hughd@...gle.com" <hughd@...gle.com>,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
"mcgrof@...nel.org" <mcgrof@...nel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-block@...r.kernel.org" <linux-block@...r.kernel.org>,
"linux-xfs@...r.kernel.org" <linux-xfs@...r.kernel.org>,
"linux-fsdevel@...r.kernel.org" <linux-fsdevel@...r.kernel.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>
CC: "gost.dev@...sung.com" <gost.dev@...sung.com>,
Pankaj Raghav <p.raghav@...sung.com>,
Daniel Gomez <da.gomez@...sung.com>
Subject: [PATCH 4/6] shmem: add order parameter support to shmem_alloc_folio
In preparation for high order folio support on the write path, add an
order parameter when allocating a folio. The order is used on the write
path when huge page support is not enabled, and also by the fallback
allocation when the huge page allocation fails.
Use order 0 on the non-write paths, such as reads or swap-in, as these
do not yet support high order folios.
Signed-off-by: Daniel Gomez <da.gomez@...sung.com>
---
mm/shmem.c | 22 +++++++++++++---------
1 file changed, 13 insertions(+), 9 deletions(-)
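
Note (illustration only, not part of this patch): a rough sketch of how a
write-path caller could derive the order from the write length once high
order folios are supported there. shmem_write_order() is a hypothetical
helper named here purely for illustration; only get_order(), min_t() and
HPAGE_PMD_ORDER (with CONFIG_TRANSPARENT_HUGEPAGE) are existing kernel
symbols.

static inline unsigned int shmem_write_order(size_t len)
{
	/*
	 * get_order() gives the smallest order whose folio covers @len;
	 * cap it at the PMD order so anything larger keeps going through
	 * the existing huge page path and its accounting.
	 */
	return min_t(unsigned int, get_order(len), HPAGE_PMD_ORDER);
}

A caller like that would pass the computed order down through
shmem_alloc_and_acct_folio(), while the other callers keep passing 0 as
the diff below does.
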
diff --git a/mm/shmem.c b/mm/shmem.c
index 836d44584796..ee297d8874d3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1669,20 +1669,21 @@ static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
}
static struct folio *shmem_alloc_folio(gfp_t gfp,
- struct shmem_inode_info *info, pgoff_t index)
+ struct shmem_inode_info *info, pgoff_t index,
+ unsigned int order)
{
struct vm_area_struct pvma;
struct folio *folio;
shmem_pseudo_vma_init(&pvma, info, index);
- folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
+ folio = vma_alloc_folio(gfp, order, &pvma, 0, false);
shmem_pseudo_vma_destroy(&pvma);
return folio;
}
static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
- pgoff_t index, bool huge)
+ pgoff_t index, bool huge, unsigned int *order)
{
struct shmem_inode_info *info = SHMEM_I(inode);
struct folio *folio;
@@ -1691,7 +1692,7 @@ static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
huge = false;
- nr = huge ? HPAGE_PMD_NR : 1;
+ nr = huge ? HPAGE_PMD_NR : 1U << *order;
err = shmem_inode_acct_block(inode, nr);
if (err)
@@ -1700,7 +1701,7 @@ static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
if (huge)
folio = shmem_alloc_hugefolio(gfp, info, index);
else
- folio = shmem_alloc_folio(gfp, info, index);
+ folio = shmem_alloc_folio(gfp, info, index, *order);
if (folio) {
__folio_set_locked(folio);
__folio_set_swapbacked(folio);
@@ -1750,7 +1751,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
*/
gfp &= ~GFP_CONSTRAINT_MASK;
VM_BUG_ON_FOLIO(folio_test_large(old), old);
- new = shmem_alloc_folio(gfp, info, index);
+ new = shmem_alloc_folio(gfp, info, index, 0);
if (!new)
return -ENOMEM;
@@ -1961,6 +1962,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
int error;
int once = 0;
int alloced = 0;
+ unsigned int order = 0;
if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
return -EFBIG;
@@ -2036,10 +2038,12 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
huge_gfp = vma_thp_gfp_mask(vma);
huge_gfp = limit_gfp_mask(huge_gfp, gfp);
- folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
+ folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true,
+ &order);
if (IS_ERR(folio)) {
alloc_nohuge:
- folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
+ folio = shmem_alloc_and_acct_folio(gfp, inode, index, false,
+ &order);
}
if (IS_ERR(folio)) {
int retry = 5;
@@ -2602,7 +2606,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
if (!*foliop) {
ret = -ENOMEM;
- folio = shmem_alloc_folio(gfp, info, pgoff);
+ folio = shmem_alloc_folio(gfp, info, pgoff, 0);
if (!folio)
goto out_unacct_blocks;
--
2.39.2