Message-ID: <20171101113136.1844-1-paul.durrant@citrix.com>
Date: Wed, 1 Nov 2017 11:31:36 +0000
From: Paul Durrant <paul.durrant@...rix.com>
To: <x86@...nel.org>, <xen-devel@...ts.xenproject.org>,
<linux-kernel@...r.kernel.org>
CC: Paul Durrant <paul.durrant@...rix.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Juergen Gross <jgross@...e.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [PATCH v2] xen: support priv-mapping in an HVM tools domain

If the domain has XENFEAT_auto_translated_physmap then use of the PV-
specific HYPERVISOR_mmu_update hypercall is clearly incorrect.

This patch adds checks in xen_remap_domain_gfn_array() and
xen_unmap_domain_gfn_range() which call through to the appropriate
xlate_mmu function if the feature is present.

This patch also moves xen_remap_domain_gfn_range() into the PV-only MMU
code and #ifdefs the (only) calling code in privcmd accordingly.
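
For illustration, the remap entry point ends up dispatching as follows
(condensed from the mmu.c hunk below, with the existing comment
shortened; the unmap path gains an equivalent check):

    int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   xen_pfn_t *gfn, int nr,
                                   int *err_ptr, pgprot_t prot,
                                   unsigned domid, struct page **pages)
    {
            /* Auto-translated (HVM/PVH) domains cannot use the PV-only
             * HYPERVISOR_mmu_update path; defer to xlate_mmu instead.
             */
            if (xen_feature(XENFEAT_auto_translated_physmap))
                    return xen_xlate_remap_gfn_array(vma, addr, gfn, nr,
                                                     err_ptr, prot, domid,
                                                     pages);

            /* PV domains keep the mmu_update-based implementation. */
            BUG_ON(err_ptr == NULL);
            return xen_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid,
                                 pages);
    }
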
Signed-off-by: Paul Durrant <paul.durrant@...rix.com>
---
Cc: Boris Ostrovsky <boris.ostrovsky@...cle.com>
Cc: Juergen Gross <jgross@...e.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
---
arch/x86/xen/mmu.c | 36 +++++++++++++++++-------------------
arch/x86/xen/mmu_pv.c | 11 +++++++++++
drivers/xen/privcmd.c | 17 +++++++++++++----
include/xen/xen-ops.h | 7 +++++++
4 files changed, 48 insertions(+), 23 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3e15345abfe7..01837c36e293 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -91,12 +91,12 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
return 0;
}
-static int do_remap_gfn(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *gfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned domid,
- struct page **pages)
+int xen_remap_gfn(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid,
+ struct page **pages)
{
int err = 0;
struct remap_data rmd;
@@ -166,36 +166,34 @@ static int do_remap_gfn(struct vm_area_struct *vma,
return err < 0 ? err : mapped;
}
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t gfn, int nr,
- pgprot_t prot, unsigned domid,
- struct page **pages)
-{
- return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
-
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot,
unsigned domid, struct page **pages)
{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
+ prot, domid, pages);
+
/* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
* and the consequences later is quite hard to detect what the actual
* cause of "wrong memory was mapped in".
*/
BUG_ON(err_ptr == NULL);
- return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
+ return xen_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid,
+ pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
- int numpgs, struct page **pages)
+ int nr, struct page **pages)
{
- if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return xen_xlate_unmap_gfn_range(vma, nr, pages);
+
+ if (!pages)
return 0;
-	return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
+	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 71495f1a86d7..4974d8a6c2b4 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2670,3 +2670,14 @@ phys_addr_t paddr_vmcoreinfo_note(void)
return __pa(vmcoreinfo_note);
}
#endif /* CONFIG_KEXEC_CORE */
+
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t gfn, int nr,
+ pgprot_t prot, unsigned int domid,
+ struct page **pages)
+{
+ return xen_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid,
+ pages);
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index feca75b07fdd..b58a1719b606 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -215,6 +215,8 @@ static int traverse_pages_block(unsigned nelem, size_t size,
return ret;
}
+#ifdef CONFIG_XEN_PV
+
struct mmap_gfn_state {
unsigned long va;
struct vm_area_struct *vma;
@@ -261,10 +263,6 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
LIST_HEAD(pagelist);
struct mmap_gfn_state state;
- /* We only support privcmd_ioctl_mmap_batch for auto translated. */
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return -ENOSYS;
-
if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
return -EFAULT;
@@ -312,6 +310,17 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
return rc;
}
+#else
+
+static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
+{
+ /* We only support privcmd_ioctl_mmap for PV. */
+ return -ENOSYS;
+}
+
+
+#endif /* CONFIG_XEN_PV */
+
struct mmap_batch_state {
domid_t domain;
unsigned long va;
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 218e6aae5433..663a9a06b762 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -60,6 +60,10 @@ static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
struct vm_area_struct;
+int xen_remap_gfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *gfn, int nr, int *err_ptr, pgprot_t prot,
+ unsigned int domid, struct page **pages);
+
/*
* xen_remap_domain_gfn_array() - map an array of foreign frames
* @vma: VMA to map the pages into
@@ -84,6 +88,7 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned domid,
struct page **pages);
+#ifdef CONFIG_XEN_PV
/* xen_remap_domain_gfn_range() - map a range of foreign frames
* @vma: VMA to map the pages into
* @addr: Address at which to map the pages
@@ -101,6 +106,8 @@ int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
xen_pfn_t gfn, int nr,
pgprot_t prot, unsigned domid,
struct page **pages);
+#endif
+
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages);
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
--
2.11.0