Message-ID: <20170308105649.x6qcwpiwyxzp4nvb@pd.tnic>
Date: Wed, 8 Mar 2017 11:56:49 +0100
From: Borislav Petkov <bp@...e.de>
To: Brijesh Singh <brijesh.singh@....com>
Cc: simon.guinot@...uanux.org, linux-efi@...r.kernel.org,
kvm@...r.kernel.org, rkrcmar@...hat.com, matt@...eblueprint.co.uk,
linux-pci@...r.kernel.org, linus.walleij@...aro.org,
gary.hook@....com, linux-mm@...ck.org,
paul.gortmaker@...driver.com, hpa@...or.com, cl@...ux.com,
dan.j.williams@...el.com, aarcange@...hat.com,
sfr@...b.auug.org.au, andriy.shevchenko@...ux.intel.com,
herbert@...dor.apana.org.au, bhe@...hat.com, xemul@...allels.com,
joro@...tes.org, x86@...nel.org, peterz@...radead.org,
piotr.luc@...el.com, mingo@...hat.com, msalter@...hat.com,
ross.zwisler@...ux.intel.com, dyoung@...hat.com,
thomas.lendacky@....com, jroedel@...e.de, keescook@...omium.org,
arnd@...db.de, toshi.kani@....com, mathieu.desnoyers@...icios.com,
luto@...nel.org, devel@...uxdriverproject.org, bhelgaas@...gle.com,
tglx@...utronix.de, mchehab@...nel.org, iamjoonsoo.kim@....com,
labbott@...oraproject.org, tony.luck@...el.com,
alexandre.bounine@....com, kuleshovmail@...il.com,
linux-kernel@...r.kernel.org, mcgrof@...nel.org, mst@...hat.com,
linux-crypto@...r.kernel.org, tj@...nel.org, pbonzini@...hat.com,
akpm@...ux-foundation.org, davem@...emloft.net
Subject: Re: [RFC PATCH v2 10/32] x86: DMA support for SEV memory encryption

On Thu, Mar 02, 2017 at 10:14:25AM -0500, Brijesh Singh wrote:
> From: Tom Lendacky <thomas.lendacky@....com>
>
> While SEV is active, memory is mapped encrypted and a device performing
> DMA can neither encrypt the data it writes nor decrypt the data it
> reads. In order for DMA to work properly when SEV is active, the swiotlb
> bounce buffers must be used.
>
> Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
> ---
> arch/x86/mm/mem_encrypt.c | 77 +++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 77 insertions(+)
>
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 090419b..7df5f4c 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -197,8 +197,81 @@ void __init sme_early_init(void)
> /* Update the protection map with memory encryption mask */
> for (i = 0; i < ARRAY_SIZE(protection_map); i++)
> protection_map[i] = pgprot_encrypted(protection_map[i]);
> +
> + if (sev_active())
> + swiotlb_force = SWIOTLB_FORCE;
> +}
> +
> +static void *sme_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
> + gfp_t gfp, unsigned long attrs)
> +{
> + unsigned long dma_mask;
> + unsigned int order;
> + struct page *page;
> + void *vaddr = NULL;
> +
> + dma_mask = dma_alloc_coherent_mask(dev, gfp);
> + order = get_order(size);
> +
> + gfp &= ~__GFP_ZERO;
Please add a comment around here that swiotlb_alloc_coherent() will
memset(, 0, ) the memory. It took me a while to figure out what the
situation is.
Also, Joerg says the __GFP_ZERO is not absolutely necessary but it has
not been fixed in the other DMA alloc* functions because of fears that
something would break. That bit could also be part of the comment.
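For example, something like this above it (just a sketch, exact wording
is up to you):

	/*
	 * Clear __GFP_ZERO: swiotlb_alloc_coherent() will memset() the
	 * buffer to zero anyway, so there is no point in zeroing it
	 * twice. (__GFP_ZERO is reportedly not strictly necessary here,
	 * but the other DMA alloc* functions have not been changed for
	 * fear of breaking something.)
	 */
	gfp &= ~__GFP_ZERO;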
> +
> + page = alloc_pages_node(dev_to_node(dev), gfp, order);
> + if (page) {
> + dma_addr_t addr;
> +
> + /*
> + * Since we will be clearing the encryption bit, check the
> + * mask with it already cleared.
> + */
> + addr = phys_to_dma(dev, page_to_phys(page)) & ~sme_me_mask;
> + if ((addr + size) > dma_mask) {
> + __free_pages(page, get_order(size));
> + } else {
> + vaddr = page_address(page);
> + *dma_handle = addr;
> + }
> + }
> +
> + if (!vaddr)
> + vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
> +
> + if (!vaddr)
> + return NULL;
> +
> + /* Clear the SME encryption bit for DMA use if not swiotlb area */
> + if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
> + set_memory_decrypted((unsigned long)vaddr, 1 << order);
> + *dma_handle &= ~sme_me_mask;
> + }
> +
> + return vaddr;
> }
>
> +static void sme_free(struct device *dev, size_t size, void *vaddr,
> + dma_addr_t dma_handle, unsigned long attrs)
> +{
> + /* Set the SME encryption bit for re-use if not swiotlb area */
> + if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
> + set_memory_encrypted((unsigned long)vaddr,
> + 1 << get_order(size));
> +
> + swiotlb_free_coherent(dev, size, vaddr, dma_handle);
> +}
> +
> +static struct dma_map_ops sme_dma_ops = {
WARNING: struct dma_map_ops should normally be const
#112: FILE: arch/x86/mm/mem_encrypt.c:261:
+static struct dma_map_ops sme_dma_ops = {
Please integrate scripts/checkpatch.pl into your patch creation workflow.
Some of the warnings/errors *actually* make sense.
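I.e., something like this (assuming the dma_ops pointer this gets
assigned to is already a const struct dma_map_ops * in the tree you're
based on):

	static const struct dma_map_ops sme_dma_ops = {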
> + .alloc = sme_alloc,
> + .free = sme_free,
> + .map_page = swiotlb_map_page,
> + .unmap_page = swiotlb_unmap_page,
> + .map_sg = swiotlb_map_sg_attrs,
> + .unmap_sg = swiotlb_unmap_sg_attrs,
> + .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
> + .sync_single_for_device = swiotlb_sync_single_for_device,
> + .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
> + .sync_sg_for_device = swiotlb_sync_sg_for_device,
> + .mapping_error = swiotlb_dma_mapping_error,
> +};
> +
> /* Architecture __weak replacement functions */
> void __init mem_encrypt_init(void)
> {
> @@ -208,6 +281,10 @@ void __init mem_encrypt_init(void)
> /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
> swiotlb_update_mem_attributes();
>
> + /* Use SEV DMA operations if SEV is active */
That's obvious. The WHY is not.
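Something along these lines maybe (only a sketch, adjust as you see fit):

	/*
	 * With SEV, devices cannot access encrypted memory directly, so
	 * all DMA must go through the unencrypted swiotlb bounce buffers:
	 * use DMA ops which route streaming mappings through swiotlb and
	 * clear the encryption bit on coherent allocations shared with
	 * the device.
	 */
	if (sev_active())
		dma_ops = &sme_dma_ops;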
> + if (sev_active())
> + dma_ops = &sme_dma_ops;
> +
> pr_info("AMD Secure Memory Encryption (SME) active\n");
> }
>
>
--
Regards/Gruss,
Boris.
SUSE Linux GmbH, GF: Felix Imendörffer, Jane Smithard, Graham Norton, HRB 21284 (AG Nürnberg)