Message-ID: <ed0f048c-bb40-c6c6-887c-ef68c9e411a2@ozlabs.ru>
Date: Tue, 10 Dec 2019 14:07:36 +1100
From: Alexey Kardashevskiy <aik@...abs.ru>
To: Ram Pai <linuxram@...ibm.com>, mpe@...erman.id.au
Cc: linuxppc-dev@...ts.ozlabs.org, benh@...nel.crashing.org,
david@...son.dropbear.id.au, paulus@...abs.org,
mdroth@...ux.vnet.ibm.com, hch@....de, andmike@...ibm.com,
sukadev@...ux.vnet.ibm.com, mst@...hat.com, ram.n.pai@...il.com,
cai@....pw, tglx@...utronix.de, bauerman@...ux.ibm.com,
linux-kernel@...r.kernel.org, leonardo@...ux.ibm.com
Subject: Re: [PATCH v5 1/2] powerpc/pseries/iommu: Share the per-cpu TCE page
with the hypervisor.
On 07/12/2019 12:12, Ram Pai wrote:
> The H_PUT_TCE_INDIRECT hcall takes a page filled with TCE entries as one
> of its parameters. On secure VMs, the hypervisor cannot access the
> contents of this page since it is encrypted. Hence share the page with
> the hypervisor, and unshare it when done.
I thought the idea was to use H_PUT_TCE and avoid sharing any extra
pages. There is a small problem that when DDW is enabled,
FW_FEATURE_MULTITCE is ignored (easy to fix); I also noticed complaints
about the performance on Slack, but those are caused by the initial
cleanup of the default TCE window (which we do not use anyway), and to
counter that we can simply reduce its size by adding

-global spapr-pci-host-bridge.dma_win_size=0x4000000

to the qemu command line. And the huge DMA window uses 16MB or 1GB
TCEs, so even mapping a 32GB guest is barely noticeable. What am I
missing?
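For reference, here is a rough, untested sketch (the helper name is made
up, this is not from the patch) of what an H_PUT_TCE-only path could look
like for a secure guest, using the existing plpar_tce_put() wrapper so
the guest never has to share a page of TCEs with the hypervisor:

static int tce_build_secure_sketch(struct iommu_table *tbl, long tcenum,
				   long npages, unsigned long uaddr,
				   enum dma_data_direction direction)
{
	u64 proto_tce = TCE_PCI_READ;
	u64 rpn = __pa(uaddr) >> TCE_SHIFT;
	u64 rc = 0;

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/*
	 * One hcall per TCE: slower than H_PUT_TCE_INDIRECT, but no
	 * page ever needs to be shared/unshared with the hypervisor.
	 */
	while (npages-- && !rc) {
		rc = plpar_tce_put((u64)tbl->it_index,
				   (u64)tcenum << 12,
				   proto_tce | (rpn << TCE_RPN_SHIFT));
		rpn++;
		tcenum++;
	}

	return rc ? -EIO : 0;
}

With DDW and 16MB/1GB TCEs the loop count stays small, so the per-hcall
overhead should be barely noticeable.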
>
> Signed-off-by: Ram Pai <linuxram@...ibm.com>
> ---
> arch/powerpc/platforms/pseries/iommu.c | 32 +++++++++++++++++++++++++++++---
> 1 file changed, 29 insertions(+), 3 deletions(-)
>
> diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
> index 6ba081d..67b5009 100644
> --- a/arch/powerpc/platforms/pseries/iommu.c
> +++ b/arch/powerpc/platforms/pseries/iommu.c
> @@ -37,6 +37,7 @@
> #include <asm/mmzone.h>
> #include <asm/plpar_wrappers.h>
> #include <asm/svm.h>
> +#include <asm/ultravisor.h>
>
> #include "pseries.h"
>
> @@ -179,6 +180,18 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
>
> static DEFINE_PER_CPU(__be64 *, tce_page);
>
> +static void pre_process_tce_page(__be64 *tcep)
> +{
> + if (tcep && is_secure_guest())
> + uv_share_page(PHYS_PFN(__pa(tcep)), 1);
> +}
> +
> +static void post_process_tce_page(__be64 *tcep)
> +{
> + if (tcep && is_secure_guest())
> + uv_unshare_page(PHYS_PFN(__pa(tcep)), 1);
> +}
> +
> static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
> long npages, unsigned long uaddr,
> enum dma_data_direction direction,
> @@ -187,7 +200,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
> u64 rc = 0;
> u64 proto_tce;
> __be64 *tcep;
> - u64 rpn;
> + u64 rpn, tcep0;
> long l, limit;
> long tcenum_start = tcenum, npages_start = npages;
> int ret = 0;
> @@ -216,6 +229,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
> __this_cpu_write(tce_page, tcep);
> }
>
> + pre_process_tce_page(tcep);
> +
> rpn = __pa(uaddr) >> TCE_SHIFT;
> proto_tce = TCE_PCI_READ;
> if (direction != DMA_TO_DEVICE)
> @@ -243,6 +258,14 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
> tcenum += limit;
> } while (npages > 0 && !rc);
>
> + /*
> + * if "tcep" is shared, post_process_tce_page() will unshare the page,
> + * which will zero the page. Grab any interesting content, before it
> + * disappears.
> + */
> + tcep0 = tcep[0];
> + post_process_tce_page(tcep);
> +
> local_irq_restore(flags);
>
> if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
> @@ -256,7 +279,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
> printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
> printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
> printk("\tnpages = 0x%llx\n", (u64)npages);
> - printk("\ttce[0] val = 0x%llx\n", tcep[0]);
> + printk("\ttce[0] val = 0x%llx\n", tcep0);
> dump_stack();
> }
> return ret;
> @@ -280,7 +303,6 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages
> }
> }
>
> -
> static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
> {
> u64 rc;
> @@ -413,6 +435,8 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
> __this_cpu_write(tce_page, tcep);
> }
>
> + pre_process_tce_page(tcep);
> +
> proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
>
> liobn = (u64)be32_to_cpu(maprange->liobn);
> @@ -451,6 +475,8 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
> num_tce -= limit;
> } while (num_tce > 0 && !rc);
>
> + post_process_tce_page(tcep);
> +
> /* error cleanup: caller will clear whole range */
>
> local_irq_enable();
>
--
Alexey