Message-ID: <20231028211518.3424020-8-da.gomez@samsung.com>
Date: Sat, 28 Oct 2023 21:15:45 +0000
From: Daniel Gomez <da.gomez@...sung.com>
To: "minchan@...nel.org" <minchan@...nel.org>,
"senozhatsky@...omium.org" <senozhatsky@...omium.org>,
"axboe@...nel.dk" <axboe@...nel.dk>,
"djwong@...nel.org" <djwong@...nel.org>,
"willy@...radead.org" <willy@...radead.org>,
"hughd@...gle.com" <hughd@...gle.com>,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
"mcgrof@...nel.org" <mcgrof@...nel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-block@...r.kernel.org" <linux-block@...r.kernel.org>,
"linux-xfs@...r.kernel.org" <linux-xfs@...r.kernel.org>,
"linux-fsdevel@...r.kernel.org" <linux-fsdevel@...r.kernel.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>
CC: "gost.dev@...sung.com" <gost.dev@...sung.com>,
Pankaj Raghav <p.raghav@...sung.com>,
Daniel Gomez <da.gomez@...sung.com>
Subject: [RFC PATCH 07/11] shmem: remove huge arg from
shmem_alloc_and_add_folio()
The huge flag is already part of the memory allocation flags (gfp_t).
Make use of the VM_HUGEPAGE bit set by vma_thp_gfp_mask() to know
whether the allocation must be a huge page.

Drop the CONFIG_TRANSPARENT_HUGEPAGE check in
shmem_alloc_and_add_folio(), as VM_HUGEPAGE won't be set unless THP
support is enabled.
Signed-off-by: Daniel Gomez <da.gomez@...sung.com>
---
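Note: the mechanical shape of this change is folding a boolean
parameter into the flags word the callee already receives, then
testing the bit at the use sites. A minimal, non-kernel sketch of
that pattern follows; ALLOC_HUGE and the function names are
illustrative stand-ins, not the shmem API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag bit standing in for the huge-page hint. */
#define ALLOC_HUGE 0x1u

/* Before: the hint travels as an extra argument. */
static void alloc_folio_old(unsigned int flags, bool huge)
{
	printf("old: huge=%d\n", huge);
}

/*
 * After: the hint is carried by the flags the caller already builds,
 * so both call sites collapse onto a single signature.
 */
static void alloc_folio_new(unsigned int flags)
{
	bool huge = flags & ALLOC_HUGE;

	printf("new: huge=%d\n", huge);
}

int main(void)
{
	alloc_folio_old(0, true);	/* old huge call site */
	alloc_folio_new(ALLOC_HUGE);	/* new huge call site */
	alloc_folio_new(0);		/* new order-0 call site */
	return 0;
}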
mm/shmem.c | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
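For reference, the huge path is unchanged apart from how it is
selected: it still rounds the index down to a PMD boundary and
allocates HPAGE_PMD_NR pages. A standalone sketch of that arithmetic,
assuming 4 KiB base pages and a 2 MiB PMD (so HPAGE_PMD_NR = 512):

#include <stdio.h>

/* Assumed geometry: 4 KiB pages, 2 MiB PMD => 512 pages per folio. */
#define HPAGE_PMD_NR 512UL

/* Power-of-two round_down(), matching the kernel helper's semantics. */
#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
	unsigned long index = 1000;	/* arbitrary page cache index */
	unsigned long start = round_down(index, HPAGE_PMD_NR);

	/* The huge folio covers [start, start + HPAGE_PMD_NR) in the mapping. */
	printf("start=%lu pages=%lu\n", start, HPAGE_PMD_NR);
	return 0;
}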
diff --git a/mm/shmem.c b/mm/shmem.c
index e2893cf2287f..9d68211373c4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1644,7 +1644,7 @@ static struct folio *shmem_alloc_folio(gfp_t gfp,
static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
struct inode *inode, pgoff_t index,
- struct mm_struct *fault_mm, bool huge)
+ struct mm_struct *fault_mm)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
@@ -1652,10 +1652,7 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
long pages;
int error;
- if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
- huge = false;
-
- if (huge) {
+ if (gfp & VM_HUGEPAGE) {
pages = HPAGE_PMD_NR;
index = round_down(index, HPAGE_PMD_NR);
@@ -1690,7 +1687,7 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
if (xa_find(&mapping->i_pages, &index,
index + pages - 1, XA_PRESENT)) {
error = -EEXIST;
- } else if (huge) {
+ } else if (gfp & VM_HUGEPAGE) {
count_vm_event(THP_FILE_FALLBACK);
count_vm_event(THP_FILE_FALLBACK_CHARGE);
}
@@ -2054,7 +2051,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
huge_gfp = vma_thp_gfp_mask(vma);
huge_gfp = limit_gfp_mask(huge_gfp, gfp);
folio = shmem_alloc_and_add_folio(huge_gfp,
- inode, index, fault_mm, true);
+ inode, index, fault_mm);
if (!IS_ERR(folio)) {
count_vm_event(THP_FILE_ALLOC);
goto alloced;
@@ -2063,7 +2060,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
goto repeat;
}
- folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm, false);
+ folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm);
if (IS_ERR(folio)) {
error = PTR_ERR(folio);
if (error == -EEXIST)
--
2.39.2