Message-Id: <20240518150457.1033295-1-michael.roth@amd.com>
Date: Sat, 18 May 2024 10:04:57 -0500
From: Michael Roth <mdroth@...xas.edu>
To: pbonzini@...hat.com
Cc: kvm@...r.kernel.org,
linux-kernel@...r.kernel.org,
ashish.kalra@....com,
thomas.lendacky@....com,
seanjc@...gle.com,
rick.p.edgecombe@...el.com
Subject: [PATCH] KVM: SEV: Fix guest memory leak when handling guest requests

Before forwarding guest requests to firmware, KVM takes a reference on
each of the two pages the guest uses for its request/response buffers.
Make sure to release these references when cleaning up after the request
has been handled; otherwise the pages are leaked.

Signed-off-by: Michael Roth <michael.roth@....com>
---
Hi Paolo,

Sorry for another late fix, but I finally spotted this while looking over
the code again today. I've re-tested attestation guest requests with this
applied (after applying the other pending fix) and everything looks good.

-Mike
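
As a side note for reviewers, the rule this patch enforces is simply that
every pfn reference taken in snp_setup_guest_buf() must be dropped on every
exit path, including when the firmware call fails. Below is a rough,
standalone model of that control flow; acquire_page(), release_page() and
issue_fw_cmd() are made-up stand-ins for gfn_to_pfn(),
kvm_release_pfn_*() and sev_issue_cmd(), not actual kernel APIs:

/*
 * Standalone sketch of the reference lifecycle the patch enforces.
 * The point is the control flow: every reference taken in setup is
 * dropped on every exit path, cleanup runs even when the "firmware"
 * call fails, and the firmware result is propagated only afterwards.
 */
#include <stdio.h>

static int acquire_page(const char *name)  { printf("get %s\n", name); return 0; }
static void release_page(const char *name) { printf("put %s\n", name); }
static int issue_fw_cmd(void)              { return -1; /* simulate failure */ }

static int setup_bufs(void)
{
	if (acquire_page("req"))
		return -1;

	if (acquire_page("resp")) {
		/* Error path: drop the reference already held. */
		release_page("req");
		return -1;
	}

	return 0;
}

static void cleanup_bufs(void)
{
	release_page("req");
	release_page("resp");
}

static int handle_req(void)
{
	int ret;

	if (setup_bufs())
		return -1;

	ret = issue_fw_cmd();

	/* Cleanup must run regardless of the firmware result... */
	cleanup_bufs();

	/* ...and only then is the firmware status propagated. */
	return ret;
}

int main(void)
{
	return handle_req() ? 1 : 0;
}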
 arch/x86/kvm/svm/sev.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 41e383e30797..e57faf7d04d1 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3933,11 +3933,16 @@ static int snp_setup_guest_buf(struct kvm *kvm, struct sev_data_snp_guest_reques
 		return -EINVAL;
 
 	resp_pfn = gfn_to_pfn(kvm, gpa_to_gfn(resp_gpa));
-	if (is_error_noslot_pfn(resp_pfn))
+	if (is_error_noslot_pfn(resp_pfn)) {
+		kvm_release_pfn_clean(req_pfn);
 		return -EINVAL;
+	}
 
-	if (rmp_make_private(resp_pfn, 0, PG_LEVEL_4K, 0, true))
+	if (rmp_make_private(resp_pfn, 0, PG_LEVEL_4K, 0, true)) {
+		kvm_release_pfn_clean(req_pfn);
+		kvm_release_pfn_clean(resp_pfn);
 		return -EINVAL;
+	}
 
 	data->gctx_paddr = __psp_pa(sev->snp_context);
 	data->req_paddr = __sme_set(req_pfn << PAGE_SHIFT);
@@ -3948,11 +3953,16 @@ static int snp_setup_guest_buf(struct kvm *kvm, struct sev_data_snp_guest_reques
 
 static int snp_cleanup_guest_buf(struct sev_data_snp_guest_request *data)
 {
-	u64 pfn = __sme_clr(data->res_paddr) >> PAGE_SHIFT;
+	u64 req_pfn = __sme_clr(data->req_paddr) >> PAGE_SHIFT;
+	u64 resp_pfn = __sme_clr(data->res_paddr) >> PAGE_SHIFT;
+
+	kvm_release_pfn_clean(req_pfn);
 
-	if (snp_page_reclaim(pfn) || rmp_make_shared(pfn, PG_LEVEL_4K))
+	if (snp_page_reclaim(resp_pfn) || rmp_make_shared(resp_pfn, PG_LEVEL_4K))
 		return -EINVAL;
 
+	kvm_release_pfn_dirty(resp_pfn);
+
 	return 0;
 }
 
@@ -3970,14 +3980,11 @@ static int __snp_handle_guest_req(struct kvm *kvm, gpa_t req_gpa, gpa_t resp_gpa
 		return ret;
 
 	ret = sev_issue_cmd(kvm, SEV_CMD_SNP_GUEST_REQUEST, &data, fw_err);
-	if (ret)
-		return ret;
 
-	ret = snp_cleanup_guest_buf(&data);
-	if (ret)
-		return ret;
+	if (snp_cleanup_guest_buf(&data))
+		return -EINVAL;
 
-	return 0;
+	return ret;
 }
 
 static void snp_handle_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
--
2.25.1