Message-ID: <CAGtprH9X-v-R+UiAvdvKgqAqoc4MBJAWTnoEtP+Y2nip_y8Heg@mail.gmail.com>
Date: Tue, 19 Apr 2022 15:40:09 -0700
From: Vishal Annapurve <vannapurve@...gle.com>
To: Chao Peng <chao.p.peng@...ux.intel.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
linux-api@...r.kernel.org, qemu-devel@...gnu.org,
Paolo Bonzini <pbonzini@...hat.com>,
Jonathan Corbet <corbet@....net>,
Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H . Peter Anvin" <hpa@...or.com>,
Hugh Dickins <hughd@...gle.com>,
Jeff Layton <jlayton@...nel.org>,
"J . Bruce Fields" <bfields@...ldses.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Mike Rapoport <rppt@...nel.org>,
Steven Price <steven.price@....com>,
"Maciej S . Szmigiero" <mail@...iej.szmigiero.name>,
Vlastimil Babka <vbabka@...e.cz>,
Yu Zhang <yu.c.zhang@...ux.intel.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Andy Lutomirski <luto@...nel.org>,
Jun Nakajima <jun.nakajima@...el.com>, dave.hansen@...el.com,
ak@...ux.intel.com, david@...hat.com
Subject: Re: [PATCH v5 03/13] mm/shmem: Support memfile_notifier
On Thu, Mar 10, 2022 at 6:10 AM Chao Peng <chao.p.peng@...ux.intel.com> wrote:
>
> From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
>
> It maintains a memfile_notifier list in the shmem_inode_info structure
> and implements the memfile_pfn_ops callbacks defined by memfile_notifier.
> It then exposes them to memfile_notifier via
> shmem_get_memfile_notifier_info.
>
> We use SGP_NOALLOC in shmem_get_lock_pfn since the pages should be
> allocated by userspace for private memory. If no page is allocated at
> the offset, an error is returned so KVM knows that the memory is not
> private memory.
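Just to check my understanding of the intended KVM usage: a hypothetical
consumer of the pfn_ops added below would look roughly like this
(lookup_private_pfn is a made-up name, and the memfile_pfn_ops type is
assumed from the earlier patches in this series):

/*
 * Hypothetical caller sketch, not part of this series: a negative
 * return from get_lock_pfn means no page is allocated at the offset,
 * i.e. the memory is not private memory.
 */
static long lookup_private_pfn(struct inode *inode, pgoff_t offset,
			       const struct memfile_pfn_ops *ops)
{
	int order;
	long pfn = ops->get_lock_pfn(inode, offset, &order);

	if (pfn < 0)
		return pfn;	/* not private memory */

	/* ... map pfn (with order) into the guest ... */

	ops->put_unlock_pfn(pfn);
	return pfn;
}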
>
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
> Signed-off-by: Chao Peng <chao.p.peng@...ux.intel.com>
> ---
> include/linux/shmem_fs.h | 4 +++
> mm/shmem.c | 76 ++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 80 insertions(+)
>
> diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
> index 2dde843f28ef..7bb16f2d2825 100644
> --- a/include/linux/shmem_fs.h
> +++ b/include/linux/shmem_fs.h
> @@ -9,6 +9,7 @@
> #include <linux/percpu_counter.h>
> #include <linux/xattr.h>
> #include <linux/fs_parser.h>
> +#include <linux/memfile_notifier.h>
>
> /* inode in-kernel data */
>
> @@ -28,6 +29,9 @@ struct shmem_inode_info {
> struct simple_xattrs xattrs; /* list of xattrs */
> atomic_t stop_eviction; /* hold when working on inode */
> unsigned int xflags; /* shmem extended flags */
> +#ifdef CONFIG_MEMFILE_NOTIFIER
> + struct memfile_notifier_list memfile_notifiers;
> +#endif
> struct inode vfs_inode;
> };
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 9b31a7056009..7b43e274c9a2 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -903,6 +903,28 @@ static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
> return page ? page_folio(page) : NULL;
> }
>
> +static void notify_fallocate(struct inode *inode, pgoff_t start, pgoff_t end)
> +{
> +#ifdef CONFIG_MEMFILE_NOTIFIER
> + struct shmem_inode_info *info = SHMEM_I(inode);
> +
> + memfile_notifier_fallocate(&info->memfile_notifiers, start, end);
> +#endif
> +}
> +
> +static void notify_invalidate_page(struct inode *inode, struct folio *folio,
> + pgoff_t start, pgoff_t end)
> +{
> +#ifdef CONFIG_MEMFILE_NOTIFIER
> + struct shmem_inode_info *info = SHMEM_I(inode);
> +
> + start = max(start, folio->index);
> + end = min(end, folio->index + folio_nr_pages(folio));
> +
> + memfile_notifier_invalidate(&info->memfile_notifiers, start, end);
> +#endif
> +}
> +
> /*
> * Remove range of pages and swap entries from page cache, and free them.
> * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
> @@ -946,6 +968,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
> }
> index += folio_nr_pages(folio) - 1;
>
> + notify_invalidate_page(inode, folio, start, end);
> +
> if (!unfalloc || !folio_test_uptodate(folio))
> truncate_inode_folio(mapping, folio);
> folio_unlock(folio);
> @@ -1019,6 +1043,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
> index--;
> break;
> }
> +
> + notify_invalidate_page(inode, folio, start, end);
> +
Should this be done in batches, or just once for the whole range [start, end)?
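Something like the below (an untested sketch; it reuses
memfile_notifier_invalidate from this series, and the helper name is made
up) would notify once for the whole punched range rather than per folio:

static void notify_invalidate_range(struct inode *inode, pgoff_t start,
				    pgoff_t end)
{
#ifdef CONFIG_MEMFILE_NOTIFIER
	struct shmem_inode_info *info = SHMEM_I(inode);

	/* single notification covering [start, end) */
	memfile_notifier_invalidate(&info->memfile_notifiers, start, end);
#endif
}

shmem_undo_range() could then call this once after both truncation loops.
The trade-off is that it would also invalidate offsets in the range that
were never populated, whereas the per-folio calls only cover resident
folios.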
> VM_BUG_ON_FOLIO(folio_test_writeback(folio),
> folio);
> truncate_inode_folio(mapping, folio);
> @@ -2279,6 +2306,9 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
> info->flags = flags & VM_NORESERVE;
> INIT_LIST_HEAD(&info->shrinklist);
> INIT_LIST_HEAD(&info->swaplist);
> +#ifdef CONFIG_MEMFILE_NOTIFIER
> + memfile_notifier_list_init(&info->memfile_notifiers);
> +#endif
> simple_xattrs_init(&info->xattrs);
> cache_no_acl(inode);
> mapping_set_large_folios(inode->i_mapping);
> @@ -2802,6 +2832,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
> if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
> i_size_write(inode, offset + len);
> inode->i_ctime = current_time(inode);
> + notify_fallocate(inode, start, end);
> undone:
> spin_lock(&inode->i_lock);
> inode->i_private = NULL;
> @@ -3909,6 +3940,47 @@ static struct file_system_type shmem_fs_type = {
> .fs_flags = FS_USERNS_MOUNT,
> };
>
> +#ifdef CONFIG_MEMFILE_NOTIFIER
> +static long shmem_get_lock_pfn(struct inode *inode, pgoff_t offset, int *order)
> +{
> + struct page *page;
> + int ret;
> +
> + ret = shmem_getpage(inode, offset, &page, SGP_NOALLOC);
> + if (ret)
> + return ret;
> +
> + *order = thp_order(compound_head(page));
> +
> + return page_to_pfn(page);
> +}
> +
> +static void shmem_put_unlock_pfn(unsigned long pfn)
> +{
> + struct page *page = pfn_to_page(pfn);
> +
> + VM_BUG_ON_PAGE(!PageLocked(page), page);
> +
> + set_page_dirty(page);
> + unlock_page(page);
> + put_page(page);
> +}
> +
> +static struct memfile_notifier_list* shmem_get_notifier_list(struct inode *inode)
> +{
> + if (!shmem_mapping(inode->i_mapping))
> + return NULL;
> +
> + return &SHMEM_I(inode)->memfile_notifiers;
> +}
> +
> +static struct memfile_backing_store shmem_backing_store = {
> + .pfn_ops.get_lock_pfn = shmem_get_lock_pfn,
> + .pfn_ops.put_unlock_pfn = shmem_put_unlock_pfn,
> + .get_notifier_list = shmem_get_notifier_list,
> +};
> +#endif /* CONFIG_MEMFILE_NOTIFIER */
> +
> int __init shmem_init(void)
> {
> int error;
> @@ -3934,6 +4006,10 @@ int __init shmem_init(void)
> else
> shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
> #endif
> +
> +#ifdef CONFIG_MEMFILE_NOTIFIER
> + memfile_register_backing_store(&shmem_backing_store);
> +#endif
> return 0;
>
> out1:
> --
> 2.17.1
>