Message-ID: <CA+EHjTxgO4LmdYY83a+uzBshvFf8EcJzY58Rovvz=pZgyO2yow@mail.gmail.com>
Date: Fri, 30 May 2025 09:53:19 +0100
From: Fuad Tabba <tabba@...gle.com>
To: Yan Zhao <yan.y.zhao@...el.com>
Cc: Ackerley Tng <ackerleytng@...gle.com>, kvm@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, x86@...nel.org, linux-fsdevel@...r.kernel.org,
aik@....com, ajones@...tanamicro.com, akpm@...ux-foundation.org,
amoorthy@...gle.com, anthony.yznaga@...cle.com, anup@...infault.org,
aou@...s.berkeley.edu, bfoster@...hat.com, binbin.wu@...ux.intel.com,
brauner@...nel.org, catalin.marinas@....com, chao.p.peng@...el.com,
chenhuacai@...nel.org, dave.hansen@...el.com, david@...hat.com,
dmatlack@...gle.com, dwmw@...zon.co.uk, erdemaktas@...gle.com,
fan.du@...el.com, fvdl@...gle.com, graf@...zon.com, haibo1.xu@...el.com,
hch@...radead.org, hughd@...gle.com, ira.weiny@...el.com,
isaku.yamahata@...el.com, jack@...e.cz, james.morse@....com,
jarkko@...nel.org, jgg@...pe.ca, jgowans@...zon.com, jhubbard@...dia.com,
jroedel@...e.de, jthoughton@...gle.com, jun.miao@...el.com,
kai.huang@...el.com, keirf@...gle.com, kent.overstreet@...ux.dev,
kirill.shutemov@...el.com, liam.merwick@...cle.com,
maciej.wieczor-retman@...el.com, mail@...iej.szmigiero.name, maz@...nel.org,
mic@...ikod.net, michael.roth@....com, mpe@...erman.id.au,
muchun.song@...ux.dev, nikunj@....com, nsaenz@...zon.es,
oliver.upton@...ux.dev, palmer@...belt.com, pankaj.gupta@....com,
paul.walmsley@...ive.com, pbonzini@...hat.com, pdurrant@...zon.co.uk,
peterx@...hat.com, pgonda@...gle.com, pvorel@...e.cz, qperret@...gle.com,
quic_cvanscha@...cinc.com, quic_eberman@...cinc.com,
quic_mnalajal@...cinc.com, quic_pderrin@...cinc.com, quic_pheragu@...cinc.com,
quic_svaddagi@...cinc.com, quic_tsoni@...cinc.com, richard.weiyang@...il.com,
rick.p.edgecombe@...el.com, rientjes@...gle.com, roypat@...zon.co.uk,
rppt@...nel.org, seanjc@...gle.com, shuah@...nel.org, steven.price@....com,
steven.sistare@...cle.com, suzuki.poulose@....com, thomas.lendacky@....com,
usama.arif@...edance.com, vannapurve@...gle.com, vbabka@...e.cz,
viro@...iv.linux.org.uk, vkuznets@...hat.com, wei.w.wang@...el.com,
will@...nel.org, willy@...radead.org, xiaoyao.li@...el.com,
yilun.xu@...el.com, yuzenghui@...wei.com, zhiquan1.li@...el.com
Subject: Re: [RFC PATCH v2 02/51] KVM: guest_memfd: Introduce and use
shareability to guard faulting

Hi,

.. snip..

> I noticed that in [1], the kvm_gmem_mmap() does not check the range.
> So, the WARN() here can be hit when userspace mmap()s an area larger than
> the inode size and accesses an out-of-bounds HVA.
>
> Maybe limit the mmap() range?
>
> @@ -1609,6 +1620,10 @@ static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
> if (!kvm_gmem_supports_shared(file_inode(file)))
> return -ENODEV;
>
> + if (vma->vm_end - vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT) > i_size_read(file_inode(file)))
> + return -EINVAL;
> +
> if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) !=
> (VM_SHARED | VM_MAYSHARE)) {
> return -EINVAL;
>
> [1] https://lore.kernel.org/all/20250513163438.3942405-8-tabba@google.com/
I don't think we want to do that, for a couple of reasons. We already
catch such invalid accesses at fault time, and, by analogy, afaict
neither secretmem nor memfd performs a similar check on mmap() (nor do
memory-mapped files in general).

There are also valid reasons why a user might deliberately mmap() more
memory than the backing store provides, knowing that only the pages it
actually touches will be faulted in, e.g., for alignment.
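
To illustrate the analogy, here's a quick userspace sketch (mine, not
from the series; error handling trimmed) showing that with plain memfd
the mapping itself can exceed EOF, and the invalid access is only
rejected when the page is actually touched:

#define _GNU_SOURCE
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf env;

static void on_sigbus(int sig)
{
	(void)sig;
	siglongjmp(env, 1);
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("demo", 0);
	char *p;

	if (fd < 0 || ftruncate(fd, page))	/* backing store: one page */
		return 1;

	/* Mapping twice the file size succeeds. */
	p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	signal(SIGBUS, on_sigbus);

	p[0] = 1;			/* within EOF: faults in fine */
	printf("first page: ok\n");

	if (sigsetjmp(env, 1) == 0)
		p[page] = 1;		/* past EOF: SIGBUS at fault time */
	else
		printf("second page: SIGBUS, caught at fault time\n");

	return 0;
}

guest_memfd with shared mappings would behave the same way: reject at
fault time rather than at mmap() time.
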
Cheers,
/fuad
> > + return xa_to_value(entry);
> > +}
> > +
> > +static struct folio *kvm_gmem_get_shared_folio(struct inode *inode, pgoff_t index)
> > +{
> > + if (kvm_gmem_shareability_get(inode, index) != SHAREABILITY_ALL)
> > + return ERR_PTR(-EACCES);
> > +
> > + return kvm_gmem_get_folio(inode, index);
> > +}
> > +
> > +#else
> > +
> > +static int kvm_gmem_shareability_setup(struct kvm_gmem_inode_private *private, loff_t size, u64 flags)
> > +{
> > + return 0;
> > +}
> > +
> > +static inline struct folio *kvm_gmem_get_shared_folio(struct inode *inode, pgoff_t index)
> > +{
> > +	WARN_ONCE(1, "Unexpected call to get shared folio.");
> > + return NULL;
> > +}
> > +
> > +#endif /* CONFIG_KVM_GMEM_SHARED_MEM */
> > +
> > static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
> > pgoff_t index, struct folio *folio)
> > {
> > @@ -333,7 +404,7 @@ static vm_fault_t kvm_gmem_fault_shared(struct vm_fault *vmf)
> >
> > filemap_invalidate_lock_shared(inode->i_mapping);
> >
> > - folio = kvm_gmem_get_folio(inode, vmf->pgoff);
> > + folio = kvm_gmem_get_shared_folio(inode, vmf->pgoff);
> > if (IS_ERR(folio)) {
> > int err = PTR_ERR(folio);
> >
> > @@ -420,8 +491,33 @@ static struct file_operations kvm_gmem_fops = {
> > .fallocate = kvm_gmem_fallocate,
> > };
> >
> > +static void kvm_gmem_free_inode(struct inode *inode)
> > +{
> > + struct kvm_gmem_inode_private *private = kvm_gmem_private(inode);
> > +
> > + kfree(private);
> > +
> > + free_inode_nonrcu(inode);
> > +}
> > +
> > +static void kvm_gmem_destroy_inode(struct inode *inode)
> > +{
> > + struct kvm_gmem_inode_private *private = kvm_gmem_private(inode);
> > +
> > +#ifdef CONFIG_KVM_GMEM_SHARED_MEM
> > + /*
> > + * mtree_destroy() can't be used within rcu callback, hence can't be
> > + * done in ->free_inode().
> > + */
> > + if (private)
> > + mtree_destroy(&private->shareability);
> > +#endif
> > +}
> > +
> > static const struct super_operations kvm_gmem_super_operations = {
> > .statfs = simple_statfs,
> > + .destroy_inode = kvm_gmem_destroy_inode,
> > + .free_inode = kvm_gmem_free_inode,
> > };
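
(Side note on the destroy/free split above: the VFS calls
->destroy_inode() synchronously from the final iput() and defers
->free_inode() to an RCU callback, which is why mtree_destroy() has to
go in the former. Roughly, paraphrasing the dispatch in fs/inode.c:)

static void destroy_inode(struct inode *inode)
{
	const struct super_operations *ops = inode->i_sb->s_op;

	if (ops->destroy_inode) {
		ops->destroy_inode(inode);	/* synchronous context */
		if (!ops->free_inode)
			return;
	}
	inode->free_inode = ops->free_inode;
	call_rcu(&inode->i_rcu, i_callback);	/* ->free_inode() runs from RCU */
}
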
> >
> > static int kvm_gmem_init_fs_context(struct fs_context *fc)
> > @@ -549,12 +645,26 @@ static const struct inode_operations kvm_gmem_iops = {
> > static struct inode *kvm_gmem_inode_make_secure_inode(const char *name,
> > loff_t size, u64 flags)
> > {
> > + struct kvm_gmem_inode_private *private;
> > struct inode *inode;
> > + int err;
> >
> > inode = alloc_anon_secure_inode(kvm_gmem_mnt->mnt_sb, name);
> > if (IS_ERR(inode))
> > return inode;
> >
> > + err = -ENOMEM;
> > + private = kzalloc(sizeof(*private), GFP_KERNEL);
> > + if (!private)
> > + goto out;
> > +
> > + mt_init(&private->shareability);
> Wrap the mt_init() inside "#ifdef CONFIG_KVM_GMEM_SHARED_MEM" ?
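
For illustration, the suggested wrapping would look something like this
(a sketch only; it assumes the shareability member itself is compiled
out when CONFIG_KVM_GMEM_SHARED_MEM is off):

	err = -ENOMEM;
	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private)
		goto out;

#ifdef CONFIG_KVM_GMEM_SHARED_MEM
	mt_init(&private->shareability);
#endif
	inode->i_mapping->i_private_data = private;
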
>
> > + inode->i_mapping->i_private_data = private;
> > +
> > + err = kvm_gmem_shareability_setup(private, size, flags);
> > + if (err)
> > + goto out;
> > +
> > inode->i_private = (void *)(unsigned long)flags;
> > inode->i_op = &kvm_gmem_iops;
> > inode->i_mapping->a_ops = &kvm_gmem_aops;
> > @@ -566,6 +676,11 @@ static struct inode *kvm_gmem_inode_make_secure_inode(const char *name,
> > WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));
> >
> > return inode;
> > +
> > +out:
> > + iput(inode);
> > +
> > + return ERR_PTR(err);
> > }
> >
> > static struct file *kvm_gmem_inode_create_getfile(void *priv, loff_t size,
> > @@ -654,6 +769,9 @@ int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
> > if (kvm_arch_vm_supports_gmem_shared_mem(kvm))
> > valid_flags |= GUEST_MEMFD_FLAG_SUPPORT_SHARED;
> >
> > + if (flags & GUEST_MEMFD_FLAG_SUPPORT_SHARED)
> > + valid_flags |= GUEST_MEMFD_FLAG_INIT_PRIVATE;
> > +
> > if (flags & ~valid_flags)
> > return -EINVAL;
> >
> > @@ -842,6 +960,8 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
> > if (!file)
> > return -EFAULT;
> >
> > + filemap_invalidate_lock_shared(file_inode(file)->i_mapping);
> > +
> > folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
> > if (IS_ERR(folio)) {
> > r = PTR_ERR(folio);
> > @@ -857,8 +977,8 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
> > *page = folio_file_page(folio, index);
> > else
> > folio_put(folio);
> > -
> > out:
> > + filemap_invalidate_unlock_shared(file_inode(file)->i_mapping);
> > fput(file);
> > return r;
> > }
> > --
> > 2.49.0.1045.g170613ef41-goog
> >
> >