Message-ID: <20250408112402.181574-7-shivankg@amd.com>
Date: Tue, 8 Apr 2025 11:24:00 +0000
From: Shivank Garg <shivankg@....com>
To: <seanjc@...gle.com>, <david@...hat.com>, <vbabka@...e.cz>,
<willy@...radead.org>, <akpm@...ux-foundation.org>, <shuah@...nel.org>,
<pbonzini@...hat.com>
CC: <ackerleytng@...gle.com>, <paul@...l-moore.com>, <jmorris@...ei.org>,
<serge@...lyn.com>, <pvorel@...e.cz>, <bfoster@...hat.com>,
<tabba@...gle.com>, <vannapurve@...gle.com>, <chao.gao@...el.com>,
<bharata@....com>, <nikunj@....com>, <michael.day@....com>,
<yan.y.zhao@...el.com>, <Neeraj.Upadhyay@....com>, <thomas.lendacky@....com>,
<michael.roth@....com>, <aik@....com>, <jgg@...dia.com>,
<kalyazin@...zon.com>, <peterx@...hat.com>, <shivankg@....com>,
<linux-fsdevel@...r.kernel.org>, <linux-mm@...ck.org>,
<linux-kernel@...r.kernel.org>, <linux-security-module@...r.kernel.org>,
<kvm@...r.kernel.org>, <linux-kselftest@...r.kernel.org>,
<linux-coco@...ts.linux.dev>
Subject: [PATCH RFC v7 6/8] KVM: guest_memfd: Add slab-allocated inode cache

Add a dedicated inode structure (kvm_gmem_inode_info) and a
slab-allocated inode cache for guest memory backing, similar to how
shmem handles its inodes. This adds the necessary allocation and
destruction callbacks and prepares for the upcoming guest_memfd NUMA
policy support.
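
For readers less familiar with the VFS side of this pattern:
->alloc_inode and ->free_inode form the allocation pair (the VFS defers
->free_inode to an RCU callback once the inode is dead), while
->destroy_inode runs synchronously beforehand and is the natural place
for teardown that should not wait out the grace period. As a minimal
sketch (not part of this patch) of how the currently-empty destroy
callback might later be used; the 'policy' field below is purely an
assumption for illustration:

	/*
	 * Sketch only: hypothetical per-inode state for the upcoming
	 * NUMA policy support; no such field exists in this patch.
	 */
	struct kvm_gmem_inode_info {
		struct mempolicy *policy;
		struct inode vfs_inode;
	};

	static void kvm_gmem_destroy_inode(struct inode *inode)
	{
		/*
		 * Synchronous teardown: drop state that need not
		 * survive until the RCU-deferred ->free_inode.
		 * mpol_put() is NULL-safe.
		 */
		mpol_put(KVM_GMEM_I(inode)->policy);
	}

Note also that the constructor passed to kmem_cache_create() runs once
per slab object when its backing page is populated, not on every
allocation; this is why inode_init_once() only initializes fields that
must stay valid across object reuse, while per-instance setup is done
separately by the VFS (inode_init_always()).
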
Signed-off-by: Shivank Garg <shivankg@....com>
---
virt/kvm/guest_memfd.c | 52 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 002328569c9e..0ccbb152483a 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -18,6 +18,15 @@ struct kvm_gmem {
struct list_head entry;
};
+struct kvm_gmem_inode_info {
+ struct inode vfs_inode;
+};
+
+static inline struct kvm_gmem_inode_info *KVM_GMEM_I(struct inode *inode)
+{
+ return container_of(inode, struct kvm_gmem_inode_info, vfs_inode);
+}
+
/**
* folio_file_pfn - like folio_file_page, but return a pfn.
* @folio: The folio which contains this index.
@@ -317,8 +326,34 @@ static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
return gfn - slot->base_gfn + slot->gmem.pgoff;
}
+static struct kmem_cache *kvm_gmem_inode_cachep;
+
+static struct inode *kvm_gmem_alloc_inode(struct super_block *sb)
+{
+ struct kvm_gmem_inode_info *info;
+
+ info = alloc_inode_sb(sb, kvm_gmem_inode_cachep, GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ return &info->vfs_inode;
+}
+
+static void kvm_gmem_destroy_inode(struct inode *inode)
+{
+
+}
+
+static void kvm_gmem_free_inode(struct inode *inode)
+{
+ kmem_cache_free(kvm_gmem_inode_cachep, KVM_GMEM_I(inode));
+}
+
static const struct super_operations kvm_gmem_super_operations = {
.statfs = simple_statfs,
+ .alloc_inode = kvm_gmem_alloc_inode,
+ .destroy_inode = kvm_gmem_destroy_inode,
+ .free_inode = kvm_gmem_free_inode,
};
static int kvm_gmem_init_fs_context(struct fs_context *fc)
@@ -355,16 +390,33 @@ static struct file_operations kvm_gmem_fops = {
.fallocate = kvm_gmem_fallocate,
};
+static void kvm_gmem_init_inode(void *foo)
+{
+ struct kvm_gmem_inode_info *info = foo;
+
+ inode_init_once(&info->vfs_inode);
+}
+
+static void kvm_gmem_init_inodecache(void)
+{
+ kvm_gmem_inode_cachep = kmem_cache_create("kvm_gmem_inode_cache",
+ sizeof(struct kvm_gmem_inode_info),
+ 0, SLAB_ACCOUNT,
+ kvm_gmem_init_inode);
+}
+
void kvm_gmem_init(struct module *module)
{
kvm_gmem_fops.owner = module;
+ kvm_gmem_init_inodecache();
kvm_gmem_init_mount();
}
void kvm_gmem_exit(void)
{
kern_unmount(kvm_gmem_mnt);
+ kmem_cache_destroy(kvm_gmem_inode_cachep);
}
static int kvm_gmem_migrate_folio(struct address_space *mapping,
--
2.34.1
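
One ordering note on the exit path above: because the VFS invokes
->free_inode from an RCU callback, filesystems whose module can be
unloaded conventionally place an rcu_barrier() before
kmem_cache_destroy(), so that no deferred free can touch a destroyed
cache (ext4, for instance, orders its module exit this way). A sketch
of that conventional ordering, assuming kvm_gmem_exit() is reachable
via module unload:

	void kvm_gmem_exit(void)
	{
		kern_unmount(kvm_gmem_mnt);
		/*
		 * Wait for any RCU-deferred ->free_inode callbacks to
		 * finish before destroying the cache they free into.
		 */
		rcu_barrier();
		kmem_cache_destroy(kvm_gmem_inode_cachep);
	}

Relatedly, kvm_gmem_init_inodecache() does not check the
kmem_cache_create() return value; shmem's equivalent sidesteps the
failure case by passing SLAB_PANIC.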