Message-Id: <20210728142631.41860-5-imbrenda@linux.ibm.com>
Date: Wed, 28 Jul 2021 16:26:22 +0200
From: Claudio Imbrenda <imbrenda@...ux.ibm.com>
To: kvm@...r.kernel.org
Cc: cohuck@...hat.com, borntraeger@...ibm.com, frankja@...ux.ibm.com,
thuth@...hat.com, pasic@...ux.ibm.com, david@...hat.com,
linux-s390@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH v2 04/13] KVM: s390: pv: handle secure storage violations for protected guests

With upcoming patches, protected guests will be able to trigger secure
storage violations in normal operation.

This patch adds handling of secure storage violations for protected
guests: pages that trigger the exception will be made non-secure before
any further attempt to use them.

Signed-off-by: Claudio Imbrenda <imbrenda@...ux.ibm.com>
Acked-by: Janosch Frank <frankja@...ux.ibm.com>
---
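
A note on the logic (illustration only, not part of the patch): the
sketch below models the "destroy first, export if a racing CPU already
re-imported the page" fallback that gmap_destroy_page() implements. The
uv_destroy()/uv_export() helpers are hypothetical stand-ins for the real
Ultravisor calls, so the sketch compiles as plain userspace C.

#include <stdio.h>

/* Hypothetical stand-in for the UV "destroy secure page" operation. */
static int uv_destroy(unsigned long paddr)
{
	/* Model a racing CPU that already destroyed and re-imported 0x2000. */
	return paddr == 0x2000 ? -1 : 0;
}

/* Hypothetical stand-in for an export (convert-from-secure) operation. */
static int uv_export(unsigned long paddr)
{
	(void)paddr;
	return 0;
}

static int handle_violation(unsigned long paddr)
{
	int rc = uv_destroy(paddr);

	/*
	 * As in gmap_destroy_page(): if another CPU won the race and
	 * re-imported the page, destroying fails; fall back to exporting
	 * the page instead of terminating the process.
	 */
	if (rc)
		rc = uv_export(paddr);
	return rc;
}

int main(void)
{
	printf("0x1000: rc=%d\n", handle_violation(0x1000UL));
	printf("0x2000 (raced): rc=%d\n", handle_violation(0x2000UL));
	return 0;
}

Both calls end with rc=0: the first via the destroy path, the second via
the export fallback, which is why the fault handler below can simply
return instead of killing the task.
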
 arch/s390/include/asm/uv.h |  1 +
 arch/s390/kernel/uv.c      | 43 ++++++++++++++++++++++++++++++++++++++
 arch/s390/mm/fault.c       | 10 +++++++++
 3 files changed, 54 insertions(+)

diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index bbd51aa94d05..7722cde51912 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -351,6 +351,7 @@ static inline int is_prot_virt_host(void)
 }
 
 int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
+int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
 int uv_destroy_owned_page(unsigned long paddr);
 int uv_convert_from_secure(unsigned long paddr);
 int uv_convert_owned_from_secure(unsigned long paddr);
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 5a6ac965f379..9cfd7979648c 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -337,6 +337,49 @@ int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
 }
 EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
 
+int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
+{
+	struct vm_area_struct *vma;
+	unsigned long uaddr;
+	struct page *page;
+	int rc;
+
+	rc = -EFAULT;
+	mmap_read_lock(gmap->mm);
+
+	uaddr = __gmap_translate(gmap, gaddr);
+	if (IS_ERR_VALUE(uaddr))
+		goto out;
+	vma = find_vma(gmap->mm, uaddr);
+	if (!vma)
+		goto out;
+	/*
+	 * Huge pages should not be able to become secure
+	 */
+	if (is_vm_hugetlb_page(vma))
+		goto out;
+
+	rc = 0;
+	/* we take an extra reference here */
+	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
+	if (IS_ERR_OR_NULL(page))
+		goto out;
+	rc = uv_destroy_owned_page(page_to_phys(page));
+	/*
+	 * Fault handlers can race; it is possible that one CPU will destroy
+	 * and import the page, at which point the second CPU handling the
+	 * same fault will not be able to destroy. In that case we do not
+	 * want to terminate the process, we instead try to export the page.
+	 */
+	if (rc)
+		rc = uv_convert_owned_from_secure(page_to_phys(page));
+	put_page(page);
+out:
+	mmap_read_unlock(gmap->mm);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_destroy_page);
+
 /*
  * To be called with the page locked or with an extra reference! This will
  * prevent gmap_make_secure from touching the page concurrently. Having 2
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index e33c43b38afe..eb68b4f36927 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -850,6 +850,16 @@ NOKPROBE_SYMBOL(do_non_secure_storage_access);
 
 void do_secure_storage_violation(struct pt_regs *regs)
 {
+	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
+	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+
+	/*
+	 * If the VM has been rebooted, its address space might still contain
+	 * secure pages from the previous boot.
+	 * Clear the page so it can be reused.
+	 */
+	if (!gmap_destroy_page(gmap, gaddr))
+		return;
 	/*
 	 * Either KVM messed up the secure guest mapping or the same
 	 * page is mapped into multiple secure guests.
--
2.31.1