[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20241105165515.154941-2-shivankg@amd.com>
Date: Tue, 5 Nov 2024 16:55:14 +0000
From: Shivank Garg <shivankg@....com>
To: <x86@...nel.org>, <viro@...iv.linux.org.uk>, <brauner@...nel.org>,
<jack@...e.cz>, <akpm@...ux-foundation.org>, <linux-kernel@...r.kernel.org>,
<linux-fsdevel@...r.kernel.org>, <linux-mm@...ck.org>,
<linux-api@...r.kernel.org>, <linux-arch@...r.kernel.org>,
<kvm@...r.kernel.org>
CC: <chao.gao@...el.com>, <pgonda@...gle.com>, <thomas.lendacky@....com>,
<seanjc@...gle.com>, <luto@...nel.org>, <tglx@...utronix.de>,
<mingo@...hat.com>, <bp@...en8.de>, <dave.hansen@...ux.intel.com>,
<willy@...radead.org>, <arnd@...db.de>, <pbonzini@...hat.com>,
<kees@...nel.org>, <shivankg@....com>, <bharata@....com>, <nikunj@....com>,
<michael.day@....com>, <Neeraj.Upadhyay@....com>
Subject: [RFC PATCH 3/4] KVM: guest_memfd: Pass file pointer instead of inode in guest_memfd APIs
Change the KVM guest_memfd APIs to pass file pointers instead of
inodes in the folio allocation functions. This is a preparatory patch
for adding NUMA support to guest memory allocations.
The functional behavior remains unchanged.
Signed-off-by: Shivank Garg <shivankg@....com>
---
virt/kvm/guest_memfd.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index e930014b4bdc..2c6fcf7c3ec9 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -91,7 +91,7 @@ static struct folio *kvm_gmem_get_huge_folio(struct inode *inode, pgoff_t index,
{
pgoff_t npages = 1UL << order;
pgoff_t huge_index = round_down(index, npages);
- struct address_space *mapping = inode->i_mapping;
+ struct address_space *mapping = inode->i_mapping;
gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NOWARN;
loff_t size = i_size_read(inode);
struct folio *folio;
@@ -125,16 +125,16 @@ static struct folio *kvm_gmem_get_huge_folio(struct inode *inode, pgoff_t index,
* Ignore accessed, referenced, and dirty flags. The memory is
* unevictable and there is no storage to write back to.
*/
-static struct folio *__kvm_gmem_get_folio(struct inode *inode, pgoff_t index,
+static struct folio *__kvm_gmem_get_folio(struct file *file, pgoff_t index,
bool allow_huge)
{
struct folio *folio = NULL;
if (gmem_2m_enabled && allow_huge)
- folio = kvm_gmem_get_huge_folio(inode, index, PMD_ORDER);
+ folio = kvm_gmem_get_huge_folio(file_inode(file), index, PMD_ORDER);
if (!folio)
- folio = filemap_grab_folio(inode->i_mapping, index);
+ folio = filemap_grab_folio(file_inode(file)->i_mapping, index);
pr_debug("%s: allocate folio with PFN %lx order %d\n",
__func__, folio_pfn(folio), folio_order(folio));
@@ -150,9 +150,9 @@ static struct folio *__kvm_gmem_get_folio(struct inode *inode, pgoff_t index,
* Ignore accessed, referenced, and dirty flags. The memory is
* unevictable and there is no storage to write back to.
*/
-static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
+static struct folio *kvm_gmem_get_folio(struct file *file, pgoff_t index)
{
- return __kvm_gmem_get_folio(inode, index, true);
+ return __kvm_gmem_get_folio(file, index, true);
}
static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
@@ -228,8 +228,9 @@ static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
return 0;
}
-static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
+static long kvm_gmem_allocate(struct file *file, loff_t offset, loff_t len)
{
+ struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
pgoff_t start, index, end;
int r;
@@ -252,7 +253,7 @@ static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
break;
}
- folio = kvm_gmem_get_folio(inode, index);
+ folio = kvm_gmem_get_folio(file, index);
if (IS_ERR(folio)) {
r = PTR_ERR(folio);
break;
@@ -292,7 +293,7 @@ static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
if (mode & FALLOC_FL_PUNCH_HOLE)
ret = kvm_gmem_punch_hole(file_inode(file), offset, len);
else
- ret = kvm_gmem_allocate(file_inode(file), offset, len);
+ ret = kvm_gmem_allocate(file, offset, len);
if (!ret)
file_modified(file);
@@ -626,7 +627,7 @@ __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
return ERR_PTR(-EIO);
}
- folio = __kvm_gmem_get_folio(file_inode(file), index, allow_huge);
+ folio = __kvm_gmem_get_folio(file, index, allow_huge);
if (IS_ERR(folio))
return folio;
--
2.34.1
Powered by blists - more mailing lists