Message-ID: <942e6fcb-3bdf-9294-d3db-ca311db440d3@gmail.com>
Date: Thu, 5 Aug 2021 22:05:17 +0800
From: Tianyu Lan <ltykernel@...il.com>
To: Dave Hansen <dave.hansen@...el.com>, kys@...rosoft.com,
haiyangz@...rosoft.com, sthemmin@...rosoft.com, wei.liu@...nel.org,
decui@...rosoft.com, tglx@...utronix.de, mingo@...hat.com,
bp@...en8.de, x86@...nel.org, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
konrad.wilk@...cle.com, boris.ostrovsky@...cle.com,
jgross@...e.com, sstabellini@...nel.org, joro@...tes.org,
will@...nel.org, davem@...emloft.net, kuba@...nel.org,
jejb@...ux.ibm.com, martin.petersen@...cle.com, arnd@...db.de,
hch@....de, m.szyprowski@...sung.com, robin.murphy@....com,
Tianyu.Lan@...rosoft.com, rppt@...nel.org,
kirill.shutemov@...ux.intel.com, akpm@...ux-foundation.org,
brijesh.singh@....com, thomas.lendacky@....com, pgonda@...gle.com,
david@...hat.com, krish.sadhukhan@...cle.com, saravanand@...com,
aneesh.kumar@...ux.ibm.com, xen-devel@...ts.xenproject.org,
martin.b.radev@...il.com, ardb@...nel.org, rientjes@...gle.com,
tj@...nel.org, keescook@...omium.org,
michael.h.kelley@...rosoft.com, Joerg Roedel <joro@...tes.org>,
Tom Lendacky <thomas.lendacky@....com>
Cc: iommu@...ts.linux-foundation.org, linux-arch@...r.kernel.org,
linux-hyperv@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-scsi@...r.kernel.org, netdev@...r.kernel.org,
vkuznets@...hat.com, parri.andrea@...il.com
Subject: Re: [PATCH V2 03/14] x86/set_memory: Add x86_set_memory_enc static
call support
Hi Dave:
Thanks for the review.
On 8/5/2021 3:27 AM, Dave Hansen wrote:
> On 8/4/21 11:44 AM, Tianyu Lan wrote:
>> +static int default_set_memory_enc(unsigned long addr, int numpages, bool enc);
>> +DEFINE_STATIC_CALL(x86_set_memory_enc, default_set_memory_enc);
>> +
>> #define CPA_FLUSHTLB 1
>> #define CPA_ARRAY 2
>> #define CPA_PAGES_ARRAY 4
>> @@ -1981,6 +1985,11 @@ int set_memory_global(unsigned long addr, int numpages)
>> }
>>
>> static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
>> +{
>> + return static_call(x86_set_memory_enc)(addr, numpages, enc);
>> +}
>> +
>> +static int default_set_memory_enc(unsigned long addr, int numpages, bool enc)
>> {
>> struct cpa_data cpa;
>> int ret;
>
> It doesn't make a lot of difference to add this infrastructure and then
> ignore it for the existing in-tree user:
>
>> static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
>> {
>> struct cpa_data cpa;
>> int ret;
>>
>> /* Nothing to do if memory encryption is not active */
>> if (!mem_encrypt_active())
>> return 0;
>
> Shouldn't the default be to just "return 0"? Then on
> mem_encrypt_active() systems, do the bulk of what is in
> __set_memory_enc_dec() today.
>
OK. I have tried moving the code from __set_memory_enc_dec() into the SEV file
mem_encrypt.c, which requires exposing the cpa functions and struct cpa_data.
Please have a look.
Tom, Joerg and Brijesh, could you review the SEV code changes?
Thanks.
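
In short, the idea is the pattern below -- a condensed sketch of the change
in the diff that follows, not the patch itself. The default implementation
is a no-op returning 0, and the SME/SEV init path swaps in the real
implementation once at boot via static_call_update():

	/* Sketch only -- see the diff below for the real change. */
	static int default_set_memory_enc(unsigned long addr, int numpages, bool enc)
	{
		/* No memory encryption active: nothing to do. */
		return 0;
	}
	DEFINE_STATIC_CALL(x86_set_memory_enc, default_set_memory_enc);

	/* Boot-time override from sme_early_init() when sme_me_mask is set: */
	static_call_update(x86_set_memory_enc, sev_set_memory_enc);

	/* All callers dispatch through the patchable static call: */
	static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
	{
		return static_call(x86_set_memory_enc)(addr, numpages, enc);
	}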
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 43fa081a1adb..991366612deb 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -4,6 +4,25 @@
#include <asm/page.h>
#include <asm-generic/set_memory.h>
+#include <linux/static_call.h>
+
+/*
+ * The current flushing context - we pass it instead of 5 arguments:
+ */
+struct cpa_data {
+ unsigned long *vaddr;
+ pgd_t *pgd;
+ pgprot_t mask_set;
+ pgprot_t mask_clr;
+ unsigned long numpages;
+ unsigned long curpage;
+ unsigned long pfn;
+ unsigned int flags;
+ unsigned int force_split : 1,
+ force_static_prot : 1,
+ force_flush_all : 1;
+ struct page **pages;
+};
/*
* The set_memory_* API can be used to change various attributes of a virtual
@@ -83,6 +102,11 @@ int set_pages_rw(struct page *page, int numpages);
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);
+int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
+void cpa_flush(struct cpa_data *data, int cache);
+
+int dummy_set_memory_enc(unsigned long addr, int numpages, bool enc);
+DECLARE_STATIC_CALL(x86_set_memory_enc, dummy_set_memory_enc);
extern int kernel_set_to_readonly;
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index ff08dc463634..49e957c4191f 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -20,6 +20,8 @@
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/virtio_config.h>
+#include <linux/highmem.h>
+#include <linux/static_call.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
@@ -178,6 +180,45 @@ void __init sme_map_bootdata(char *real_mode_data)
__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}
+static int sev_set_memory_enc(unsigned long addr, int numpages, bool enc)
+{
+ struct cpa_data cpa;
+ int ret;
+
+ /* Should not be working on unaligned addresses */
+ if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
+ addr &= PAGE_MASK;
+
+ memset(&cpa, 0, sizeof(cpa));
+ cpa.vaddr = &addr;
+ cpa.numpages = numpages;
+ cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
+ cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
+ cpa.pgd = init_mm.pgd;
+
+ /* Must avoid aliasing mappings in the highmem code */
+ kmap_flush_unused();
+ vm_unmap_aliases();
+
+ /*
+ * Before changing the encryption attribute, we need to flush caches.
+ */
+ cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
+
+ ret = __change_page_attr_set_clr(&cpa, 1);
+
+ /*
+ * After changing the encryption attribute, we need to flush TLBs again
+ * in case any speculative TLB caching occurred (but no need to flush
+ * caches again). We could just use cpa_flush_all(), but in case TLB
+ * flushing gets optimized in the cpa_flush() path use the same logic
+ * as above.
+ */
+ cpa_flush(&cpa, 0);
+
+ return ret;
+}
+
void __init sme_early_init(void)
{
unsigned int i;
@@ -185,6 +226,8 @@ void __init sme_early_init(void)
if (!sme_me_mask)
return;
+ static_call_update(x86_set_memory_enc, sev_set_memory_enc);
+
early_pmd_flags = __sme_set(early_pmd_flags);
__supported_pte_mask = __sme_set(__supported_pte_mask);
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index ad8a5c586a35..4f15f7c89dbc 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -18,6 +18,7 @@
#include <linux/libnvdimm.h>
#include <linux/vmstat.h>
#include <linux/kernel.h>
+#include <linux/static_call.h>
#include <asm/e820/api.h>
#include <asm/processor.h>
@@ -32,24 +33,6 @@
#include "../mm_internal.h"
-/*
- * The current flushing context - we pass it instead of 5 arguments:
- */
-struct cpa_data {
- unsigned long *vaddr;
- pgd_t *pgd;
- pgprot_t mask_set;
- pgprot_t mask_clr;
- unsigned long numpages;
- unsigned long curpage;
- unsigned long pfn;
- unsigned int flags;
- unsigned int force_split : 1,
- force_static_prot : 1,
- force_flush_all : 1;
- struct page **pages;
-};
-
enum cpa_warn {
CPA_CONFLICT,
CPA_PROTECT,
@@ -66,6 +49,13 @@ static const int cpa_warn_level = CPA_PROTECT;
*/
static DEFINE_SPINLOCK(cpa_lock);
+static int default_set_memory_enc(unsigned long addr, int numpages, bool enc)
+{
+ return 0;
+}
+
+DEFINE_STATIC_CALL(x86_set_memory_enc, default_set_memory_enc);
+
#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
@@ -357,7 +347,7 @@ static void __cpa_flush_tlb(void *data)
flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}
-static void cpa_flush(struct cpa_data *data, int cache)
+void cpa_flush(struct cpa_data *data, int cache)
{
struct cpa_data *cpa = data;
unsigned int i;
@@ -1587,8 +1577,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
return err;
}
-static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
-
static int cpa_process_alias(struct cpa_data *cpa)
{
struct cpa_data alias_cpa;
@@ -1646,7 +1634,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
return 0;
}
-static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
+int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
unsigned long numpages = cpa->numpages;
unsigned long rempages = numpages;
@@ -1982,45 +1970,7 @@ int set_memory_global(unsigned long addr, int numpages)
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
- struct cpa_data cpa;
- int ret;
-
- /* Nothing to do if memory encryption is not active */
- if (!mem_encrypt_active())
- return 0;
-
- /* Should not be working on unaligned addresses */
- if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
- addr &= PAGE_MASK;
-
- memset(&cpa, 0, sizeof(cpa));
- cpa.vaddr = &addr;
- cpa.numpages = numpages;
- cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
- cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
- cpa.pgd = init_mm.pgd;
-
- /* Must avoid aliasing mappings in the highmem code */
- kmap_flush_unused();
- vm_unmap_aliases();
-
- /*
- * Before changing the encryption attribute, we need to flush caches.
- */
- cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
-
- ret = __change_page_attr_set_clr(&cpa, 1);
-
- /*
- * After changing the encryption attribute, we need to flush TLBs again
- * in case any speculative TLB caching occurred (but no need to flush
- * caches again). We could just use cpa_flush_all(), but in case TLB
- * flushing gets optimized in the cpa_flush() path use the same logic
- * as above.
- */
- cpa_flush(&cpa, 0);
-
- return ret;
+ return static_call(x86_set_memory_enc)(addr, numpages, enc);
}
int set_memory_encrypted(unsigned long addr, int numpages)
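
For reference, the resulting call flow for an exported helper such as
set_memory_encrypted() would look like this (a sketch, not part of the patch):

	set_memory_encrypted(addr, numpages)
	  -> __set_memory_enc_dec(addr, numpages, true)
	     -> static_call(x86_set_memory_enc)(addr, numpages, true)
	        -> default_set_memory_enc()	/* returns 0, no encryption   */
	        or sev_set_memory_enc()		/* after static_call_update() */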