Message-Id: <20230308094106.227365-6-rppt@kernel.org>
Date: Wed, 8 Mar 2023 11:41:06 +0200
From: Mike Rapoport <rppt@...nel.org>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Mike Rapoport <rppt@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Rick Edgecombe <rick.p.edgecombe@...el.com>,
Song Liu <song@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Vlastimil Babka <vbabka@...e.cz>, linux-kernel@...r.kernel.org,
x86@...nel.org
Subject: [RFC PATCH 5/5] EXPERIMENTAL: mm/secretmem: use __GFP_UNMAPPED
From: "Mike Rapoport (IBM)" <rppt@...nel.org>
Signed-off-by: Mike Rapoport (IBM) <rppt@...nel.org>
---
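Note: below is a minimal illustrative sketch, not part of the patch, of
the allocation pattern secretmem_fault() switches to. It assumes the
__GFP_UNMAPPED semantics introduced earlier in this series, i.e. that
the flag makes the page allocator return pages that are already removed
from the kernel direct map:

	/* Sketch only; error handling trimmed to the essentials. */
	static int example_alloc_unmapped(void)
	{
		struct page *page;

		/* Returned page is zeroed and absent from the direct map. */
		page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_UNMAPPED);
		if (!page)
			return -ENOMEM;

		/*
		 * No set_direct_map_invalid_noflush() and no
		 * flush_tlb_kernel_range() calls are needed here;
		 * the allocator has already taken care of the mapping.
		 */

		__free_page(page);	/* allocator manages the direct map state */
		return 0;
	}
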
 mm/secretmem.c | 26 +-------------------------
 1 file changed, 1 insertion(+), 25 deletions(-)

diff --git a/mm/secretmem.c b/mm/secretmem.c
index 0b502625cd30..f66dfd16a0c3 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -53,7 +53,6 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
struct inode *inode = file_inode(vmf->vma->vm_file);
pgoff_t offset = vmf->pgoff;
gfp_t gfp = vmf->gfp_mask;
- unsigned long addr;
struct page *page;
vm_fault_t ret;
int err;
@@ -66,38 +65,22 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
retry:
page = find_lock_page(mapping, offset);
if (!page) {
- page = alloc_page(gfp | __GFP_ZERO);
+ page = alloc_page(gfp | __GFP_ZERO | __GFP_UNMAPPED);
if (!page) {
ret = VM_FAULT_OOM;
goto out;
}
- err = set_direct_map_invalid_noflush(page);
- if (err) {
- put_page(page);
- ret = vmf_error(err);
- goto out;
- }
-
__SetPageUptodate(page);
err = add_to_page_cache_lru(page, mapping, offset, gfp);
if (unlikely(err)) {
put_page(page);
- /*
- * If a split of large page was required, it
- * already happened when we marked the page invalid
- * which guarantees that this call won't fail
- */
- set_direct_map_default_noflush(page);
if (err == -EEXIST)
goto retry;
ret = vmf_error(err);
goto out;
}
-
- addr = (unsigned long)page_address(page);
- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
vmf->page = page;
@@ -150,15 +133,8 @@ static int secretmem_migrate_folio(struct address_space *mapping,
return -EBUSY;
}
-static void secretmem_free_folio(struct folio *folio)
-{
- set_direct_map_default_noflush(&folio->page);
- folio_zero_segment(folio, 0, folio_size(folio));
-}
-
const struct address_space_operations secretmem_aops = {
.dirty_folio = noop_dirty_folio,
- .free_folio = secretmem_free_folio,
.migrate_folio = secretmem_migrate_folio,
};
--
2.35.1