Message-Id: <20230823131350.114942-23-alexandru.elisei@arm.com>
Date: Wed, 23 Aug 2023 14:13:35 +0100
From: Alexandru Elisei <alexandru.elisei@....com>
To: catalin.marinas@....com, will@...nel.org, oliver.upton@...ux.dev,
maz@...nel.org, james.morse@....com, suzuki.poulose@....com,
yuzenghui@...wei.com, arnd@...db.de, akpm@...ux-foundation.org,
mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, vschneid@...hat.com, mhiramat@...nel.org,
rppt@...nel.org, hughd@...gle.com
Cc: pcc@...gle.com, steven.price@....com, anshuman.khandual@....com,
vincenzo.frascino@....com, david@...hat.com, eugenis@...gle.com,
kcc@...gle.com, hyesoo.yu@...sung.com,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
kvmarm@...ts.linux.dev, linux-fsdevel@...r.kernel.org,
linux-arch@...r.kernel.org, linux-mm@...ck.org,
linux-trace-kernel@...r.kernel.org
Subject: [PATCH RFC 22/37] mm: shmem: Allocate metadata storage for in-memory filesystems

Set __GFP_TAGGED when a new page is faulted in, so the page allocator
reserves the corresponding metadata storage.

Signed-off-by: Alexandru Elisei <alexandru.elisei@....com>
---
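Not part of the patch itself: a minimal userspace sketch of one way this
path can be exercised on an MTE-capable arm64 system. It assumes that
PROT_MTE (0x20 on arm64) is accepted for shmem-backed mappings, as it is
for anonymous memory, and that such a mapping is what vma_has_metadata()
recognises elsewhere in this series; both are assumptions here, not
something this patch defines. The first write faults a page in through
shmem_fault(), which with this patch ORs __GFP_TAGGED into the gfp mask
so the allocator can reserve the corresponding tag storage.

/* Hypothetical test, not part of this patch. */
#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef PROT_MTE
#define PROT_MTE	0x20		/* arm64 mmap() protection flag */
#endif

int main(void)
{
	size_t len = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("mte-shmem", 0);

	if (fd < 0 || ftruncate(fd, len))
		return 1;

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_MTE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* First write faults the page in; shmem allocates it tagged. */
	memset(p, 0x55, len);

	munmap(p, len);
	close(fd);
	return 0;
}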
 mm/shmem.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 2f2e0e618072..0b772ec34caa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -81,6 +81,8 @@ static struct vfsmount *shm_mnt;
 
 #include <linux/uaccess.h>
 
+#include <asm/memory_metadata.h>
+
 #include "internal.h"
 
 #define BLOCKS_PER_PAGE (PAGE_SIZE/512)
@@ -1530,7 +1532,7 @@ static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp,
  */
 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
 {
-	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
+	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM | __GFP_TAGGED;
 	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
 	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
 	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
@@ -1941,6 +1943,8 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 		goto alloc_nohuge;
 
 	huge_gfp = vma_thp_gfp_mask(vma);
+	if (vma_has_metadata(vma))
+		huge_gfp |= __GFP_TAGGED;
 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
 	folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
 	if (IS_ERR(folio)) {
@@ -2101,6 +2105,10 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 	int err;
 	vm_fault_t ret = VM_FAULT_LOCKED;
 
+	/* Fixup gfp flags for metadata enabled VMAs. */
+	if (vma_has_metadata(vma))
+		gfp |= __GFP_TAGGED;
+
 	/*
 	 * Trinity finds that probing a hole which tmpfs is punching can
 	 * prevent the hole-punch from ever completing: which in turn
--
2.41.0