Message-Id: <20220519153713.819591-3-chao.p.peng@linux.intel.com>
Date: Thu, 19 May 2022 23:37:07 +0800
From: Chao Peng <chao.p.peng@...ux.intel.com>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
linux-api@...r.kernel.org, linux-doc@...r.kernel.org,
qemu-devel@...gnu.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Jonathan Corbet <corbet@....net>,
Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H . Peter Anvin" <hpa@...or.com>,
Hugh Dickins <hughd@...gle.com>,
Jeff Layton <jlayton@...nel.org>,
"J . Bruce Fields" <bfields@...ldses.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Mike Rapoport <rppt@...nel.org>,
Steven Price <steven.price@....com>,
"Maciej S . Szmigiero" <mail@...iej.szmigiero.name>,
Vlastimil Babka <vbabka@...e.cz>,
Vishal Annapurve <vannapurve@...gle.com>,
Yu Zhang <yu.c.zhang@...ux.intel.com>,
Chao Peng <chao.p.peng@...ux.intel.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
luto@...nel.org, jun.nakajima@...el.com, dave.hansen@...el.com,
ak@...ux.intel.com, david@...hat.com, aarcange@...hat.com,
ddutile@...hat.com, dhildenb@...hat.com,
Quentin Perret <qperret@...gle.com>,
Michael Roth <michael.roth@....com>, mhocko@...e.com
Subject: [PATCH v6 2/8] mm/shmem: Support memfile_notifier
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Implement shmem as a memfile_notifier backing store. Essentially it
interacts with the memfile_notifier feature flags for userspace
access/page migration/page reclaiming and implements the necessary
memfile_backing_store callbacks.
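
To illustrate the intended flow, a minimal consumer sketch follows. It
is illustrative only: consumer_map_page() is a made-up helper, how the
registered backing store ('bs') is located is defined by the
memfile_notifier patch earlier in this series, and a real consumer
would normally have the MEMFILE_F_* flags set at notifier registration
time rather than poking them directly as done here:

	static int consumer_map_page(struct memfile_backing_store *bs,
				     struct file *file, pgoff_t index)
	{
		struct memfile_node *node;
		pfn_t pfn;
		int order;
		int ret;

		/* NULL means the file is not backed by this store. */
		node = bs->lookup_memfile_node(file);
		if (!node)
			return -EINVAL;

		/* Keep userspace away from the pages behind this inode. */
		node->flags |= MEMFILE_F_USER_INACCESSIBLE;

		/* Returns with the page locked and a reference held. */
		ret = bs->get_lock_pfn(file, index, &pfn, &order);
		if (ret)
			return ret;

		/* ... hand pfn/order to the consumer's own mapping code ... */

		/* Dirties the page, then unlocks and releases it. */
		bs->put_unlock_pfn(pfn);
		return 0;
	}

get_lock_pfn()/put_unlock_pfn() deliberately mirror shmem_getpage()'s
locked-page semantics, as implemented below.
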
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Signed-off-by: Chao Peng <chao.p.peng@...ux.intel.com>
---
 include/linux/shmem_fs.h |   2 +
 mm/shmem.c               | 119 +++++++++++++++++++++++++++++++++++++-
 2 files changed, 120 insertions(+), 1 deletion(-)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index ab51d3cd39bd..a8e98bdd121e 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -9,6 +9,7 @@
 #include <linux/percpu_counter.h>
 #include <linux/xattr.h>
 #include <linux/fs_parser.h>
+#include <linux/memfile_notifier.h>
 
 /* inode in-kernel data */
 
@@ -25,6 +26,7 @@ struct shmem_inode_info {
 	struct simple_xattrs	xattrs;		/* list of xattrs */
 	atomic_t		stop_eviction;	/* hold when working on inode */
 	struct timespec64	i_crtime;	/* file creation time */
+	struct memfile_node	memfile_node;	/* memfile node */
 	struct inode		vfs_inode;
 };
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 529c9ad3e926..f97ae328c87a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -905,6 +905,24 @@ static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
 	return page ? page_folio(page) : NULL;
 }
 
+static void notify_populate(struct inode *inode, pgoff_t start, pgoff_t end)
+{
+	struct shmem_inode_info *info = SHMEM_I(inode);
+
+	memfile_notifier_populate(&info->memfile_node, start, end);
+}
+
+static void notify_invalidate(struct inode *inode, struct folio *folio,
+			      pgoff_t start, pgoff_t end)
+{
+	struct shmem_inode_info *info = SHMEM_I(inode);
+
+	start = max(start, folio->index);
+	end = min(end, folio->index + folio_nr_pages(folio));
+
+	memfile_notifier_invalidate(&info->memfile_node, start, end);
+}
+
 /*
  * Remove range of pages and swap entries from page cache, and free them.
  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
@@ -948,6 +966,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			}
 			index += folio_nr_pages(folio) - 1;
 
+			notify_invalidate(inode, folio, start, end);
+
 			if (!unfalloc || !folio_test_uptodate(folio))
 				truncate_inode_folio(mapping, folio);
 			folio_unlock(folio);
@@ -1021,6 +1041,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 					index--;
 					break;
 				}
+
+				notify_invalidate(inode, folio, start, end);
+
 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
 						folio);
 				truncate_inode_folio(mapping, folio);
@@ -1092,6 +1115,13 @@ static int shmem_setattr(struct user_namespace *mnt_userns,
 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
 			return -EPERM;
 
+		if (info->memfile_node.flags & MEMFILE_F_USER_INACCESSIBLE) {
+			if (oldsize)
+				return -EPERM;
+			if (!PAGE_ALIGNED(newsize))
+				return -EINVAL;
+		}
+
 		if (newsize != oldsize) {
 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
 						  oldsize, newsize);
@@ -1340,6 +1370,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 		goto redirty;
 	if (!total_swap_pages)
 		goto redirty;
+	if (info->memfile_node.flags & MEMFILE_F_UNRECLAIMABLE)
+		goto redirty;
 
 	/*
 	 * Our capabilities prevent regular writeback or sync from ever calling
@@ -2234,6 +2266,9 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 	if (ret)
 		return ret;
 
+	if (info->memfile_node.flags & MEMFILE_F_USER_INACCESSIBLE)
+		return -EPERM;
+
 	/* arm64 - allow memory tagging on RAM-based files */
 	vma->vm_flags |= VM_MTE_ALLOWED;
 
@@ -2274,6 +2309,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 		info->i_crtime = inode->i_mtime;
 		INIT_LIST_HEAD(&info->shrinklist);
 		INIT_LIST_HEAD(&info->swaplist);
+		memfile_node_init(&info->memfile_node);
 		simple_xattrs_init(&info->xattrs);
 		cache_no_acl(inode);
 		mapping_set_large_folios(inode->i_mapping);
@@ -2442,6 +2478,8 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
 			return -EPERM;
 	}
+	if (unlikely(info->memfile_node.flags & MEMFILE_F_USER_INACCESSIBLE))
+		return -EPERM;
 
 	ret = shmem_getpage(inode, index, pagep, SGP_WRITE);
 
@@ -2518,6 +2556,13 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		end_index = i_size >> PAGE_SHIFT;
 		if (index > end_index)
 			break;
+
+		if (SHMEM_I(inode)->memfile_node.flags &
+				MEMFILE_F_USER_INACCESSIBLE) {
+			error = -EPERM;
+			break;
+		}
+
 		if (index == end_index) {
 			nr = i_size & ~PAGE_MASK;
 			if (nr <= offset)
@@ -2649,6 +2694,12 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 			goto out;
 		}
 
+		if ((info->memfile_node.flags & MEMFILE_F_USER_INACCESSIBLE) &&
+		    (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))) {
+			error = -EINVAL;
+			goto out;
+		}
+
 		shmem_falloc.waitq = &shmem_falloc_waitq;
 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
@@ -2768,6 +2819,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
 		i_size_write(inode, offset + len);
 	inode->i_ctime = current_time(inode);
+	notify_populate(inode, start, end);
 undone:
 	spin_lock(&inode->i_lock);
 	inode->i_private = NULL;
@@ -3754,6 +3806,20 @@ static int shmem_error_remove_page(struct address_space *mapping,
 	return 0;
 }
 
+#ifdef CONFIG_MIGRATION
+static int shmem_migrate_page(struct address_space *mapping,
+			      struct page *newpage, struct page *page,
+			      enum migrate_mode mode)
+{
+	struct inode *inode = mapping->host;
+	struct shmem_inode_info *info = SHMEM_I(inode);
+
+	if (info->memfile_node.flags & MEMFILE_F_UNMOVABLE)
+		return -EOPNOTSUPP;
+	return migrate_page(mapping, newpage, page, mode);
+}
+#endif
+
 const struct address_space_operations shmem_aops = {
 	.writepage	= shmem_writepage,
 	.dirty_folio	= noop_dirty_folio,
@@ -3762,7 +3828,7 @@ const struct address_space_operations shmem_aops = {
 	.write_end	= shmem_write_end,
 #endif
 #ifdef CONFIG_MIGRATION
-	.migratepage	= migrate_page,
+	.migratepage	= shmem_migrate_page,
 #endif
 	.error_remove_page = shmem_error_remove_page,
 };
@@ -3879,6 +3945,53 @@ static struct file_system_type shmem_fs_type = {
 	.fs_flags	= FS_USERNS_MOUNT,
 };
 
+#ifdef CONFIG_MEMFILE_NOTIFIER
+static struct memfile_node *shmem_lookup_memfile_node(struct file *file)
+{
+	struct inode *inode = file_inode(file);
+
+	if (!shmem_mapping(inode->i_mapping))
+		return NULL;
+
+	return &SHMEM_I(inode)->memfile_node;
+}
+
+static int shmem_get_lock_pfn(struct file *file, pgoff_t offset, pfn_t *pfn,
+			      int *order)
+{
+	struct page *page;
+	int ret;
+
+	ret = shmem_getpage(file_inode(file), offset, &page, SGP_NOALLOC);
+	if (ret)
+		return ret;
+
+	*pfn = page_to_pfn_t(page);
+	*order = thp_order(compound_head(page));
+	return 0;
+}
+
+static void shmem_put_unlock_pfn(pfn_t pfn)
+{
+	struct page *page = pfn_t_to_page(pfn);
+
+	if (!page)
+		return;
+
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+
+	set_page_dirty(page);
+	unlock_page(page);
+	put_page(page);
+}
+
+static struct memfile_backing_store shmem_backing_store = {
+	.lookup_memfile_node	= shmem_lookup_memfile_node,
+	.get_lock_pfn		= shmem_get_lock_pfn,
+	.put_unlock_pfn		= shmem_put_unlock_pfn,
+};
+#endif /* CONFIG_MEMFILE_NOTIFIER */
+
 int __init shmem_init(void)
 {
 	int error;
@@ -3904,6 +4017,10 @@ int __init shmem_init(void)
 	else
 		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
 #endif
+
+#ifdef CONFIG_MEMFILE_NOTIFIER
+	memfile_register_backing_store(&shmem_backing_store);
+#endif
 	return 0;
 
 out1:
--
2.25.1