Message-ID: <20220118110621.62462-6-nikunj@amd.com>
Date: Tue, 18 Jan 2022 16:36:20 +0530
From: Nikunj A Dadhania <nikunj@....com>
To: Paolo Bonzini <pbonzini@...hat.com>
CC: Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Brijesh Singh <brijesh.singh@....com>,
Tom Lendacky <thomas.lendacky@....com>,
Peter Gonda <pgonda@...gle.com>, <kvm@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, Nikunj A Dadhania <nikunj@....com>
Subject: [RFC PATCH 5/6] KVM: SEV: Carve out routine for allocation of pages

Create a separate routine, sev_alloc_pages(), for allocating SEV pages.
This will be used by the MMU-based pinning that follows.

While at it, validate the number of pages before the RLIMIT_MEMLOCK
check and use kzalloc() instead of kmalloc(), so that the smaller
allocations are zero-initialized just like the __vmalloc() path.
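
For illustration only, a minimal sketch of how a caller other than
sev_pin_memory() might consume the new helper; the function name
example_prepare_region() and its body are assumptions made for this
sketch, not code from this series:

/*
 * Hypothetical caller: allocate the page array with sev_alloc_pages()
 * and release it with kvfree(), which copes with both the kzalloc()
 * and the __vmalloc() allocation paths used by the helper.
 */
static int example_prepare_region(struct kvm *kvm, unsigned long uaddr,
				  unsigned long ulen)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages;
	struct page **pages;

	/*
	 * Range validation, the INT_MAX check and the RLIMIT_MEMLOCK
	 * check all happen inside the helper; it returns an ERR_PTR on
	 * failure and the number of pages via @npages on success.
	 */
	pages = sev_alloc_pages(sev, uaddr, ulen, &npages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... pin or map the region using pages[0..npages-1] ... */

	kvfree(pages);
	return 0;
}
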
Signed-off-by: Nikunj A Dadhania <nikunj@....com>
---
 arch/x86/kvm/svm/sev.c | 44 +++++++++++++++++++++++++++---------------
 1 file changed, 28 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a962bed97a0b..14aeccfc500b 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -368,19 +368,13 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return ret;
 }
 
-static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
-				    unsigned long ulen, unsigned long *n,
-				    int write)
+static void *sev_alloc_pages(struct kvm_sev_info *sev, unsigned long uaddr,
+			     unsigned long ulen, unsigned long *n)
 {
-	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-	unsigned long npages, size;
-	int npinned;
 	unsigned long locked, lock_limit;
-	struct page **pages;
+	unsigned long npages, size;
 	unsigned long first, last;
-	int ret;
-
-	lockdep_assert_held(&kvm->lock);
+	struct page **pages;
 
 	if (ulen == 0 || uaddr + ulen < uaddr)
 		return ERR_PTR(-EINVAL);
@@ -390,6 +384,9 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
 	npages = (last - first + 1);
 
+	if (WARN_ON_ONCE(npages > INT_MAX))
+		return ERR_PTR(-EINVAL);
+
 	locked = sev->pages_locked + npages;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
@@ -397,19 +394,34 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	if (WARN_ON_ONCE(npages > INT_MAX))
-		return ERR_PTR(-EINVAL);
-
 	/* Avoid using vmalloc for smaller buffers. */
 	size = npages * sizeof(struct page *);
 	if (size > PAGE_SIZE)
 		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 	else
-		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
+		pages = kzalloc(size, GFP_KERNEL_ACCOUNT);
 
-	if (!pages)
-		return ERR_PTR(-ENOMEM);
+	*n = pages ? npages : 0;
+	return pages;
+}
+static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+				    unsigned long ulen, unsigned long *n,
+				    int write)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	unsigned long npages, locked;
+	struct page **pages;
+	int npinned;
+	int ret;
+
+	lockdep_assert_held(&kvm->lock);
+
+	pages = sev_alloc_pages(sev, uaddr, ulen, &npages);
+	if (IS_ERR(pages))
+		return pages;
+
+	locked = sev->pages_locked + npages;
 
 	/* Pin the user virtual address. */
 	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
 	if (npinned != npages) {
--
2.32.0