Date:   Wed, 20 Oct 2021 12:22:41 -0500
From:   Tom Lendacky <thomas.lendacky@....com>
To:     Sathyanarayanan Kuppuswamy 
        <sathyanarayanan.kuppuswamy@...ux.intel.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        Peter Zijlstra <peterz@...radead.org>,
        Andy Lutomirski <luto@...nel.org>,
        Bjorn Helgaas <bhelgaas@...gle.com>,
        Richard Henderson <rth@...ddle.net>,
        Thomas Bogendoerfer <tsbogend@...ha.franken.de>,
        James E J Bottomley <James.Bottomley@...senPartnership.com>,
        Helge Deller <deller@....de>,
        "David S . Miller" <davem@...emloft.net>,
        Arnd Bergmann <arnd@...db.de>,
        Jonathan Corbet <corbet@....net>,
        "Michael S . Tsirkin" <mst@...hat.com>,
        Paolo Bonzini <pbonzini@...hat.com>,
        David Hildenbrand <david@...hat.com>,
        Andrea Arcangeli <aarcange@...hat.com>,
        Josh Poimboeuf <jpoimboe@...hat.com>
Cc:     Peter H Anvin <hpa@...or.com>, Dave Hansen <dave.hansen@...el.com>,
        Tony Luck <tony.luck@...el.com>,
        Dan Williams <dan.j.williams@...el.com>,
        Andi Kleen <ak@...ux.intel.com>,
        Kirill Shutemov <kirill.shutemov@...ux.intel.com>,
        Sean Christopherson <seanjc@...gle.com>,
        Kuppuswamy Sathyanarayanan <knsathya@...nel.org>,
        x86@...nel.org, linux-kernel@...r.kernel.org,
        linux-pci@...r.kernel.org, linux-alpha@...r.kernel.org,
        linux-mips@...r.kernel.org, linux-parisc@...r.kernel.org,
        sparclinux@...r.kernel.org, linux-arch@...r.kernel.org,
        linux-doc@...r.kernel.org,
        virtualization@...ts.linux-foundation.org
Subject: Re: [PATCH v5 06/16] x86/tdx: Make DMA pages shared

On 10/20/21 11:45 AM, Sathyanarayanan Kuppuswamy wrote:
> On 10/20/21 9:33 AM, Tom Lendacky wrote:
>> On 10/8/21 7:37 PM, Kuppuswamy Sathyanarayanan wrote:

...

>>>   bool force_dma_unencrypted(struct device *dev)
>>>   {
>>> -    return amd_force_dma_unencrypted(dev);
>>> +    if (cc_platform_has(CC_ATTR_GUEST_TDX) &&
>>> +        cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
>>> +        return true;
>>> +
>>> +    if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) ||
>>> +        cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
>>> +        return amd_force_dma_unencrypted(dev);
>>> +
>>> +    return false;
>>
>> Assuming the original force_dma_unencrypted() function is moved here or to 
>> cc_platform.c, then you shouldn't need any changes. Both SEV and TDX 
>> require that true be returned when CC_ATTR_GUEST_MEM_ENCRYPT is set, and 
>> TDX should never return true for CC_ATTR_HOST_MEM_ENCRYPT.
> 
> 
> For the non-TDX case, when CC_ATTR_HOST_MEM_ENCRYPT is set, we should still
> call amd_force_dma_unencrypted(), right?

What I'm saying is that you wouldn't have amd_force_dma_unencrypted(). I 
think the whole force_dma_unencrypted() can exist as-is in a different 
file, whether that's cc_platform.c or mem_encrypt_common.c.

It will return true for an SEV or TDX guest, true for an SME host based on 
the DMA mask, and false otherwise. That should work just fine for TDX.
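
For reference, a rough sketch of what that combined helper could look like 
once it lives in a common file; the SME DMA-mask check is lifted from the 
current x86 mem_encrypt.c, so treat the details (sme_me_mask, bus_dma_limit, 
etc.) as illustrative rather than final:

bool force_dma_unencrypted(struct device *dev)
{
	/* SEV and TDX guests: all DMA must target unencrypted (shared) pages. */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return true;

	/*
	 * SME host: force unencrypted DMA when the device cannot address
	 * memory that includes the encryption bit.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);

		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}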

Thanks,
Tom

> 
>>
>>>   }
>>> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
>>> index 527957586f3c..6c531d5cb5fd 100644
>>> --- a/arch/x86/mm/pat/set_memory.c
>>> +++ b/arch/x86/mm/pat/set_memory.c
>>> @@ -30,6 +30,7 @@
>>>   #include <asm/proto.h>
>>>   #include <asm/memtype.h>
>>>   #include <asm/set_memory.h>
>>> +#include <asm/tdx.h>
>>>   #include "../mm_internal.h"
>>> @@ -1981,8 +1982,10 @@ int set_memory_global(unsigned long addr, int numpages)
>>>                       __pgprot(_PAGE_GLOBAL), 0);
>>>   }
>>> -static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
>>> +static int __set_memory_protect(unsigned long addr, int numpages, bool protect)
>>>   {
>>> +    pgprot_t mem_protected_bits, mem_plain_bits;
>>> +    enum tdx_map_type map_type;
>>>       struct cpa_data cpa;
>>>       int ret;
>>> @@ -1997,8 +2000,25 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
>>>       memset(&cpa, 0, sizeof(cpa));
>>>       cpa.vaddr = &addr;
>>>       cpa.numpages = numpages;
>>> -    cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
>>> -    cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
>>> +
>>> +    if (cc_platform_has(CC_ATTR_GUEST_SHARED_MAPPING_INIT)) {
>>> +        mem_protected_bits = __pgprot(0);
>>> +        mem_plain_bits = pgprot_cc_shared_mask();
>>
>> How about having generic versions for both shared and private that 
>> return the proper value for SEV or TDX? Then this remains looking much 
>> as it does now, just replacing the __pgprot() calls with the appropriate 
>> pgprot_cc_{shared,private}_mask() (see the sketch at the end of this 
>> message).
> 
> Makes sense.
> 
>>
>> Thanks,
>> Tom
>>
>>> +    } else {
>>> +        mem_protected_bits = __pgprot(_PAGE_ENC);
>>> +        mem_plain_bits = __pgprot(0);
>>> +    }
>>> +
>>> +    if (protect) {
>>> +        cpa.mask_set = mem_protected_bits;
>>> +        cpa.mask_clr = mem_plain_bits;
>>> +        map_type = TDX_MAP_PRIVATE;
>>> +    } else {
>>> +        cpa.mask_set = mem_plain_bits;
>>> +        cpa.mask_clr = mem_protected_bits;
>>> +        map_type = TDX_MAP_SHARED;
>>> +    }
>>> +
>>>       cpa.pgd = init_mm.pgd;
>>>       /* Must avoid aliasing mappings in the highmem code */
>>> @@ -2006,9 +2026,17 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
>>>       vm_unmap_aliases();
>>>       /*
>>> -     * Before changing the encryption attribute, we need to flush caches.
>>> +     * Before changing the encryption attribute, flush caches.
>>> +     *
>>> +     * For TDX, guest is responsible for flushing caches on private->shared
>>> +     * transition. VMM is responsible for flushing on shared->private.
>>>        */
>>> -    cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
>>> +    if (cc_platform_has(CC_ATTR_GUEST_TDX)) {
>>> +        if (map_type == TDX_MAP_SHARED)
>>> +            cpa_flush(&cpa, 1);
>>> +    } else {
>>> +        cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
>>> +    }
>>>       ret = __change_page_attr_set_clr(&cpa, 1);
>>> @@ -2021,18 +2049,21 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
>>>        */
>>>       cpa_flush(&cpa, 0);
>>> +    if (!ret && cc_platform_has(CC_ATTR_GUEST_SHARED_MAPPING_INIT))
>>> +        ret = tdx_hcall_gpa_intent(__pa(addr), numpages, map_type);
>>> +
>>>       return ret;
>>>   }
>>>   int set_memory_encrypted(unsigned long addr, int numpages)
>>>   {
>>> -    return __set_memory_enc_dec(addr, numpages, true);
>>> +    return __set_memory_protect(addr, numpages, true);
>>>   }
>>>   EXPORT_SYMBOL_GPL(set_memory_encrypted);
>>>   int set_memory_decrypted(unsigned long addr, int numpages)
>>>   {
>>> -    return __set_memory_enc_dec(addr, numpages, false);
>>> +    return __set_memory_protect(addr, numpages, false);
>>>   }
>>>   EXPORT_SYMBOL_GPL(set_memory_decrypted);
>>>
> 
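
As a follow-up to the pgprot_cc_{shared,private}_mask() suggestion quoted 
above, here is a rough sketch of how such generic helpers might look. The 
tdx_shared_mask() helper is an assumption (whatever the TDX code exposes for 
the shared GPA bit); __pgprot(), _PAGE_ENC, cc_platform_has() and 
CC_ATTR_GUEST_TDX are the ones already used in this series:

static inline pgprot_t pgprot_cc_shared_mask(void)
{
	/* TDX: a shared mapping sets the guest's shared bit in the PTE. */
	if (cc_platform_has(CC_ATTR_GUEST_TDX))
		return __pgprot(tdx_shared_mask());

	/* SEV/SME: a shared (decrypted) mapping carries no extra bits. */
	return __pgprot(0);
}

static inline pgprot_t pgprot_cc_private_mask(void)
{
	/* TDX: a private mapping only needs the shared bit cleared. */
	if (cc_platform_has(CC_ATTR_GUEST_TDX))
		return __pgprot(0);

	/* SEV/SME: a private (encrypted) mapping sets _PAGE_ENC. */
	return __pgprot(_PAGE_ENC);
}

With those in place, __set_memory_protect() keeps roughly its current shape:

	if (protect) {
		cpa.mask_set = pgprot_cc_private_mask();
		cpa.mask_clr = pgprot_cc_shared_mask();
		map_type = TDX_MAP_PRIVATE;
	} else {
		cpa.mask_set = pgprot_cc_shared_mask();
		cpa.mask_clr = pgprot_cc_private_mask();
		map_type = TDX_MAP_SHARED;
	}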
