Message-ID: <5804d4a8-8c5f-5352-8eed-4962e0adca1c@linux.intel.com>
Date: Mon, 18 Jul 2022 07:22:52 -0700
From: Sathyanarayanan Kuppuswamy
<sathyanarayanan.kuppuswamy@...ux.intel.com>
To: "Kirill A. Shutemov" <kirill@...temov.name>,
Kai Huang <kai.huang@...el.com>
Cc: Dave Hansen <dave.hansen@...el.com>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
"H . Peter Anvin" <hpa@...or.com>, Tony Luck <tony.luck@...el.com>,
Andi Kleen <ak@...ux.intel.com>,
Wander Lairson Costa <wander@...hat.com>,
Isaku Yamahata <isaku.yamahata@...il.com>,
marcelo.cerri@...onical.com, tim.gardner@...onical.com,
khalid.elmously@...onical.com, philip.cox@...onical.com,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v8 4/5] x86/mm: Add noalias variants of
set_memory_*crypted() functions
Hi Dave,
On 7/5/22 8:29 AM, Kirill A. Shutemov wrote:
>>> I still don't like the idea of using the DMA API itself. But, maybe we
>>> need some common infrastructure that the DMA API and this code use which
>>> says, "get me some pages that I can safely make shared".
>> Right. For instance, any KVM PV feature would require shared memory, and the DMA API
>> normally doesn't fit (taking 'struct kvm_steal_time' as an example).
>>
>> Maybe we can reserve a CMA for this purpose.
> CMA for a couple of low-traffic users sounds like overkill. It would create
> a separate pool just for them.
>
> I think the best way is to add a dummy device and a couple of helpers
> around the DMA API that would allow tapping into swiotlb.
>
> Maybe hide it inside CC infrastructure. Like cc_decrypted_alloc() and
> cc_decrypted_free().
I also think creating a generic device in the CC layer and using it to allocate
memory via the DMA API is the better approach for this issue. Below is a sample
implementation to give you an idea of how it would look. Please let me know
your comments.

The cc_shmem_alloc()/cc_shmem_free() APIs can be used by CC users to allocate
and free shared memory.

Other vendors can plug in their own shared memory allocation and free logic
via the struct shmem_priv init/alloc/free hooks (a hypothetical sketch of such
a vendor hook follows the patch).
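
For context, here is a rough sketch of how a caller of these APIs might look;
the function names, quote_buf and the hypercall step are made up purely for
illustration:

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/coco.h>

static void *quote_buf;

static int __init quote_example_init(void)
{
	/* One page of memory shared with the host/VMM. */
	quote_buf = cc_shmem_alloc(PAGE_SIZE, GFP_KERNEL);
	if (!quote_buf)
		return -ENOMEM;

	/* ... hand the buffer to the VMM, e.g. via a hypercall ... */

	return 0;
}

static void __exit quote_example_exit(void)
{
	cc_shmem_free(quote_buf, PAGE_SIZE);
}

And here is the sample patch: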
--- a/arch/x86/coco/Makefile
+++ b/arch/x86/coco/Makefile
@@ -3,6 +3,6 @@ CFLAGS_REMOVE_core.o = -pg
KASAN_SANITIZE_core.o := n
CFLAGS_core.o += -fno-stack-protector
-obj-y += core.o
+obj-y += core.o shmem.o
obj-$(CONFIG_INTEL_TDX_GUEST) += tdx/
diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
index 49b44f881484..62fe68d1f60a 100644
--- a/arch/x86/coco/core.c
+++ b/arch/x86/coco/core.c
@@ -13,7 +13,7 @@
#include <asm/coco.h>
#include <asm/processor.h>
-static enum cc_vendor vendor __ro_after_init;
+enum cc_vendor vendor __ro_after_init;
static u64 cc_mask __ro_after_init;
static bool intel_cc_platform_has(enum cc_attr attr)
@@ -23,6 +23,7 @@ static bool intel_cc_platform_has(enum cc_attr attr)
case CC_ATTR_HOTPLUG_DISABLED:
case CC_ATTR_GUEST_MEM_ENCRYPT:
case CC_ATTR_MEM_ENCRYPT:
+ case CC_ATTR_SHMEM:
return true;
default:
return false;
@@ -134,6 +135,11 @@ __init void cc_set_vendor(enum cc_vendor v)
vendor = v;
}
+enum cc_vendor cc_get_vendor(void)
+{
+ return vendor;
+}
+
__init void cc_set_mask(u64 mask)
{
cc_mask = mask;
--- /dev/null
+++ b/arch/x86/coco/shmem.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Confidential Computing Shared Memory Allocator
+ *
+ * Copyright (C) 2022 Intel Corporation, Inc.
+ *
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) "CC: " fmt
+
+#include <linux/export.h>
+#include <linux/cma.h>
+#include <linux/mm.h>
+#include <linux/cc_platform.h>
+#include <linux/set_memory.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/coco.h>
+#include <asm/processor.h>
+
+#define CC_SHMEM_DRIVER "cc-shmem"
+
+static struct platform_device *shmem_pdev;
+
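+/*
+ * Per-vendor hooks: @init prepares the backing platform device, @alloc and
+ * @free allocate and release memory shared with the host/VMM.
+ */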
+struct shmem_priv {
+ int (*init)(struct platform_device *dev);
+ void *(*alloc)(size_t size, gfp_t gfp, struct shmem_priv *priv);
+ void (*free)(void *addr, size_t size, struct shmem_priv *priv);
+ struct platform_device *pdev;
+ void *data;
+};
+
+void *cc_shmem_alloc(size_t size, gfp_t gfp)
+{
+ struct shmem_priv *priv;
+
+ if (!shmem_pdev)
+ return NULL;
+
+ priv = platform_get_drvdata(shmem_pdev);
+
+ return priv->alloc(size, gfp, priv);
+}
+
+void cc_shmem_free(void *addr, size_t size)
+{
+ struct shmem_priv *priv;
+
+ if (!shmem_pdev)
+ return;
+
+ priv = platform_get_drvdata(shmem_pdev);
+
+ priv->free(addr, size, priv);
+}
+
+static int intel_shmem_init(struct platform_device *pdev)
+{
+ struct shmem_priv *priv;
+ dma_addr_t *handle;
+
+ handle = devm_kmalloc(&pdev->dev, sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ priv = platform_get_drvdata(pdev);
+
+ priv->data = handle;
+
+ return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+}
+
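+/*
+ * The DMA handle returned by the most recent dma_alloc_coherent() call is
+ * stashed in priv->data and passed back to dma_free_coherent() by
+ * intel_shmem_free().
+ */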
+static void *intel_shmem_alloc(size_t size, gfp_t gfp, struct shmem_priv *priv)
+{
+ dma_addr_t *handle = (dma_addr_t *) priv->data;
+
+ return dma_alloc_coherent(&priv->pdev->dev, size, handle, gfp);
+}
+
+static void intel_shmem_free(void *addr, size_t size, struct shmem_priv *priv)
+{
+ dma_addr_t *handle = (dma_addr_t *) priv->data;
+
+ dma_free_coherent(&priv->pdev->dev, size, addr, *handle);
+}
+
+static struct shmem_priv intel_shmem = {
+ .init = intel_shmem_init,
+ .alloc = intel_shmem_alloc,
+ .free = intel_shmem_free,
+};
+
+static int cc_set_drv_data(struct platform_device *pdev)
+{
+ enum cc_vendor vendor = cc_get_vendor();
+
+ switch (vendor) {
+ case CC_VENDOR_INTEL:
+ platform_set_drvdata(pdev, &intel_shmem);
+ return 0;
+ default:
+ break;
+ }
+
+ return -ENODEV;
+}
+
+static int cc_shmem_probe(struct platform_device *pdev)
+{
+ struct shmem_priv *priv;
+ void *data;
+
+ if (cc_set_drv_data(pdev))
+ return -ENODEV;
+
+ priv = platform_get_drvdata(pdev);
+
+ shmem_pdev = priv->pdev = pdev;
+
+ if (priv->init(pdev))
+ return -EIO;
+
+ return 0;
+}
+
+static struct platform_driver cc_shmem_driver = {
+ .probe = cc_shmem_probe,
+ .driver = {
+ .name = CC_SHMEM_DRIVER,
+ },
+};
+
+static int __init cc_shmem_init(void)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ if (!cc_platform_has(CC_ATTR_SHMEM))
+ return -ENODEV;
+
+ ret = platform_driver_register(&cc_shmem_driver);
+ if (ret)
+ return ret;
+
+ pdev = platform_device_register_simple(CC_SHMEM_DRIVER, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&cc_shmem_driver);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+device_initcall(cc_shmem_init);
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index 3d98c3a60d34..5c259135c8fc 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -12,11 +12,14 @@ enum cc_vendor {
};
void cc_set_vendor(enum cc_vendor v);
+enum cc_vendor cc_get_vendor(void);
void cc_set_mask(u64 mask);
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
u64 cc_mkenc(u64 val);
u64 cc_mkdec(u64 val);
+void *cc_shmem_alloc(size_t size, gfp_t gfp);
+void cc_shmem_free(void *addr, size_t size);
#else
static inline u64 cc_mkenc(u64 val)
{
@@ -27,6 +30,10 @@ static inline u64 cc_mkdec(u64 val)
{
return val;
}
+
+static inline void *cc_shmem_alloc(size_t size, gfp_t gfp) { return NULL; }
+static inline void cc_shmem_free(void *addr, size_t size) { }
+
#endif
#endif /* _ASM_X86_COCO_H */
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
index cb0d6cd1c12f..50e0db22056f 100644
--- a/include/linux/cc_platform.h
+++ b/include/linux/cc_platform.h
@@ -90,6 +90,17 @@ enum cc_attr {
* Examples include TDX Guest.
*/
CC_ATTR_HOTPLUG_DISABLED,
+
+ /**
+ * @CC_ATTR_SHMEM: CC shared memory is supported.
+ *
+ * The platform/OS is running as a guest/virtual machine and can
+ * allocate shared memory using the cc_shmem_alloc() and
+ * cc_shmem_free() APIs.
+ */
+ CC_ATTR_SHMEM,
+
};
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
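
Purely as an illustration of the vendor hooks (the AMD wiring below is
hypothetical and not part of the patch; for the sketch it just reuses the
DMA-based hooks), another vendor would only need to provide its own
struct shmem_priv and an extra case in cc_set_drv_data():

static struct shmem_priv amd_shmem = {
	.init	= intel_shmem_init,	/* hypothetical: reuse the DMA-based hooks */
	.alloc	= intel_shmem_alloc,
	.free	= intel_shmem_free,
};

static int cc_set_drv_data(struct platform_device *pdev)
{
	switch (cc_get_vendor()) {
	case CC_VENDOR_INTEL:
		platform_set_drvdata(pdev, &intel_shmem);
		return 0;
	case CC_VENDOR_AMD:
		platform_set_drvdata(pdev, &amd_shmem);
		return 0;
	default:
		return -ENODEV;
	}
}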
--
Sathyanarayanan Kuppuswamy
Linux Kernel Developer