Message-ID: <9b1815bd-9019-360f-f648-5c99211a3474@gmail.com>
Date: Fri, 6 Aug 2021 00:01:15 +0800
From: Tianyu Lan <ltykernel@...il.com>
To: Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
Cc: iommu@...ts.linux-foundation.org, linux-arch@...r.kernel.org,
linux-hyperv@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-scsi@...r.kernel.org, netdev@...r.kernel.org,
vkuznets@...hat.com, parri.andrea@...il.com, kys@...rosoft.com,
haiyangz@...rosoft.com, sthemmin@...rosoft.com, wei.liu@...nel.org,
decui@...rosoft.com, tglx@...utronix.de, mingo@...hat.com,
bp@...en8.de, x86@...nel.org, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
boris.ostrovsky@...cle.com, jgross@...e.com,
sstabellini@...nel.org, joro@...tes.org, will@...nel.org,
davem@...emloft.net, kuba@...nel.org, jejb@...ux.ibm.com,
martin.petersen@...cle.com, arnd@...db.de, hch@....de,
m.szyprowski@...sung.com, robin.murphy@....com,
Tianyu.Lan@...rosoft.com, rppt@...nel.org,
kirill.shutemov@...ux.intel.com, akpm@...ux-foundation.org,
brijesh.singh@....com, thomas.lendacky@....com, pgonda@...gle.com,
david@...hat.com, krish.sadhukhan@...cle.com, saravanand@...com,
aneesh.kumar@...ux.ibm.com, xen-devel@...ts.xenproject.org,
martin.b.radev@...il.com, ardb@...nel.org, rientjes@...gle.com,
tj@...nel.org, keescook@...omium.org,
michael.h.kelley@...rosoft.com
Subject: Re: [PATCH V2 11/14] x86/Swiotlb: Add Swiotlb bounce buffer remap
function for HV IVM

Hi Konrad:
Could you have a look at this new version? The change since v1 is to
make swiotlb_init_io_tlb_mem() return an error code when
dma_map_decrypted() fails, per your previous comment. If this change
is OK, could you give your ack? This series needs to be merged via
the Hyper-V next tree.

Thanks.

On 8/5/2021 2:45 AM, Tianyu Lan wrote:
> From: Tianyu Lan <Tianyu.Lan@...rosoft.com>
>
> In an Isolation VM with AMD SEV, the bounce buffer needs to be
> accessed via an extra address space above shared_gpa_boundary
> (e.g., a 39-bit address line), reported by the Hyper-V CPUID
> ISOLATION_CONFIG leaf. The accessed physical address is the original
> physical address plus shared_gpa_boundary. In the AMD SEV-SNP spec,
> shared_gpa_boundary is called the virtual top of memory (vTOM).
> Memory addresses below vTOM are automatically treated as private,
> while memory above vTOM is treated as shared.
>
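> As a minimal sketch of the address math above (the helper name and
> parameter here are illustrative only, not part of this patch):
>
>	#include <linux/types.h>
>
>	/*
>	 * Hypothetical example: in an SEV-SNP Isolation VM, the shared
>	 * (unencrypted) alias of a page lives above vTOM, at the
>	 * original physical address plus shared_gpa_boundary.
>	 */
>	static inline phys_addr_t example_shared_alias(phys_addr_t pa,
>						       u64 shared_gpa_boundary)
>	{
>		/* E.g. with a 39-bit boundary (1UL << 39), pa 0x1000
>		 * is accessed as shared at 0x8000001000.
>		 */
>		return pa + shared_gpa_boundary;
>	}
>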
> Use dma_map_decrypted() in the swiotlb code, store the returned remap
> address, and use it to copy data from/to the swiotlb bounce buffer.
>
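> Conceptually, the bounce path then derives its virtual address from
> the stored remap address instead of the direct map; restating the
> swiotlb_bounce() change from the diff below:
>
>	/* Apply the slot's offset within the pool to the remapped
>	 * base address rather than to phys_to_virt(tlb_addr).
>	 */
>	unsigned char *vaddr = mem->vaddr + (tlb_addr - mem->start);
>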
> Signed-off-by: Tianyu Lan <Tianyu.Lan@...rosoft.com>
> ---
> Change since v1:
> * Make swiotlb_init_io_tlb_mem() return error code and return
> error when dma_map_decrypted() fails.
>
> Signed-off-by: Tianyu Lan <Tianyu.Lan@...rosoft.com>
> ---
> include/linux/swiotlb.h | 4 ++++
> kernel/dma/swiotlb.c | 32 ++++++++++++++++++++++++--------
> 2 files changed, 28 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index f507e3eacbea..584560ecaa8e 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -72,6 +72,9 @@ extern enum swiotlb_force swiotlb_force;
> * @end: The end address of the swiotlb memory pool. Used to do a quick
> * range check to see if the memory was in fact allocated by this
> * API.
> + * @vaddr:	The virtual address of the swiotlb memory pool. The pool
> + *		may be remapped in the memory-encryption case, and this field
> + *		stores the virtual address used for bounce buffer operations.
> * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
> * @end. For default swiotlb, this is command line adjustable via
> * setup_io_tlb_npages.
> @@ -89,6 +92,7 @@ extern enum swiotlb_force swiotlb_force;
> struct io_tlb_mem {
> phys_addr_t start;
> phys_addr_t end;
> + void *vaddr;
> unsigned long nslabs;
> unsigned long used;
> unsigned int index;
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index 1fa81c096c1d..29b6d888ef3b 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -176,7 +176,7 @@ void __init swiotlb_update_mem_attributes(void)
> memset(vaddr, 0, bytes);
> }
>
> -static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
> +static int swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
> unsigned long nslabs, bool late_alloc)
> {
> void *vaddr = phys_to_virt(start);
> @@ -194,14 +194,21 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
> mem->slots[i].alloc_size = 0;
> }
>
> - set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
> - memset(vaddr, 0, bytes);
> + mem->vaddr = dma_map_decrypted(vaddr, bytes);
> + if (!mem->vaddr) {
> + pr_err("Failed to decrypt memory.\n");
> + return -ENOMEM;
> + }
> +
> + memset(mem->vaddr, 0, bytes);
> + return 0;
> }
>
> int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
> {
> struct io_tlb_mem *mem;
> size_t alloc_size;
> + int ret;
>
> if (swiotlb_force == SWIOTLB_NO_FORCE)
> return 0;
> @@ -216,7 +223,11 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
> panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
> __func__, alloc_size, PAGE_SIZE);
>
> - swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
> + ret = swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
> + if (ret) {
> + memblock_free(__pa(mem), alloc_size);
> + return ret;
> + }
>
> io_tlb_default_mem = mem;
> if (verbose)
> @@ -304,6 +315,8 @@ int
> swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
> {
> struct io_tlb_mem *mem;
> + int size = get_order(struct_size(mem, slots, nslabs));
> + int ret;
>
> if (swiotlb_force == SWIOTLB_NO_FORCE)
> return 0;
> @@ -312,12 +325,15 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
> if (WARN_ON_ONCE(io_tlb_default_mem))
> return -ENOMEM;
>
> - mem = (void *)__get_free_pages(GFP_KERNEL,
> - get_order(struct_size(mem, slots, nslabs)));
> + mem = (void *)__get_free_pages(GFP_KERNEL, size);
> if (!mem)
> return -ENOMEM;
>
> - swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
> + ret = swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
> + if (ret) {
> + free_pages((unsigned long)mem, size);
> + return ret;
> + }
>
> io_tlb_default_mem = mem;
> swiotlb_print_info();
> @@ -360,7 +376,7 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
> phys_addr_t orig_addr = mem->slots[index].orig_addr;
> size_t alloc_size = mem->slots[index].alloc_size;
> unsigned long pfn = PFN_DOWN(orig_addr);
> - unsigned char *vaddr = phys_to_virt(tlb_addr);
> + unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
> unsigned int tlb_offset;
>
> if (orig_addr == INVALID_PHYS_ADDR)
>