Message-ID: <20240112055251.36101-2-vannapurve@google.com>
Date: Fri, 12 Jan 2024 05:52:47 +0000
From: Vishal Annapurve <vannapurve@...gle.com>
To: x86@...nel.org, linux-kernel@...r.kernel.org
Cc: pbonzini@...hat.com, rientjes@...gle.com, bgardon@...gle.com,
seanjc@...gle.com, erdemaktas@...gle.com, ackerleytng@...gle.com,
jxgao@...gle.com, sagis@...gle.com, oupton@...gle.com, peterx@...hat.com,
vkuznets@...hat.com, dmatlack@...gle.com, pgonda@...gle.com,
michael.roth@....com, kirill@...temov.name, thomas.lendacky@....com,
dave.hansen@...ux.intel.com, linux-coco@...ts.linux.dev,
chao.p.peng@...ux.intel.com, isaku.yamahata@...il.com, andrew.jones@...ux.dev,
corbet@....net, hch@....de, m.szyprowski@...sung.com, bp@...e.de,
rostedt@...dmis.org, iommu@...ts.linux.dev,
Vishal Annapurve <vannapurve@...gle.com>
Subject: [RFC V1 1/5] swiotlb: Support allocating DMA memory from SWIOTLB

Modify the SWIOTLB framework to always allocate DMA memory from SWIOTLB.

CVMs use SWIOTLB buffers to bounce memory when setting up IO operations
with the dma_map_* APIs. SWIOTLB buffers are marked as shared once during
early boot.
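
For illustration only (not part of this patch), the driver-side view of
that bounce path is roughly the sketch below; the device, buffer and
length names are hypothetical:

  #include <linux/dma-mapping.h>

  /* On a CVM with bounce buffering forced, the streaming mapping below
   * is backed by the already-shared SWIOTLB pool, so the original kernel
   * buffer never has to be converted to shared.
   */
  static int example_map_for_io(struct device *dev, void *buf, size_t len)
  {
          dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

          if (dma_mapping_error(dev, handle))
                  return -ENOMEM;
          /* ... start the IO and wait for it to complete ... */
          dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
          return 0;
  }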

Buffers allocated with the dma_alloc_* APIs, on the other hand, are taken
from regular kernel memory and converted to shared on every API invocation.
This patch ensures that such buffers are also allocated from the
already-shared SWIOTLB regions, which allows enforcing alignment
requirements on the regions marked as shared.
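
As a rough sketch of the resulting flow (simplified, not the verbatim
dma-direct code; the early-boot caller shown is hypothetical and only
illustrates how an architecture could opt in via the new SWIOTLB_ALLOC
flag):

  #include <linux/init.h>
  #include <linux/swiotlb.h>

  /* Hypothetical opt-in: request bounce buffering and route dma_alloc_*
   * allocations through the shared SWIOTLB pool (sets for_alloc on the
   * default io_tlb_mem).
   */
  static void __init example_cvm_swiotlb_init(void)
  {
          swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_ALLOC);
  }

  /* Simplified view of the allocation decision once for_alloc is set:
   * the pages come straight from the already-shared pool, so no
   * per-allocation set_memory_decrypted() call is needed.
   */
  static struct page *example_alloc_pages(struct device *dev, size_t size)
  {
          if (is_swiotlb_for_alloc(dev))
                  return swiotlb_alloc(dev, size);
          return NULL;    /* regular path: alloc_pages() + convert to shared */
  }
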
Signed-off-by: Vishal Annapurve <vannapurve@...gle.com>
---
include/linux/swiotlb.h | 17 +----------------
kernel/dma/direct.c | 4 ++--
kernel/dma/swiotlb.c | 5 +++--
3 files changed, 6 insertions(+), 20 deletions(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index ecde0312dd52..058901313405 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -17,6 +17,7 @@ struct scatterlist;
#define SWIOTLB_VERBOSE (1 << 0) /* verbose initialization */
#define SWIOTLB_FORCE (1 << 1) /* force bounce buffering */
#define SWIOTLB_ANY (1 << 2) /* allow any memory for the buffer */
+#define SWIOTLB_ALLOC (1 << 4) /* force dma allocation through swiotlb */
/*
* Maximum allowable number of contiguous slabs to map,
@@ -259,7 +260,6 @@ static inline phys_addr_t default_swiotlb_limit(void)
extern void swiotlb_print_info(void);
-#ifdef CONFIG_DMA_RESTRICTED_POOL
struct page *swiotlb_alloc(struct device *dev, size_t size);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);
@@ -267,20 +267,5 @@ static inline bool is_swiotlb_for_alloc(struct device *dev)
{
return dev->dma_io_tlb_mem->for_alloc;
}
-#else
-static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
-{
- return NULL;
-}
-static inline bool swiotlb_free(struct device *dev, struct page *page,
- size_t size)
-{
- return false;
-}
-static inline bool is_swiotlb_for_alloc(struct device *dev)
-{
- return false;
-}
-#endif /* CONFIG_DMA_RESTRICTED_POOL */
#endif /* __LINUX_SWIOTLB_H */
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 73c95815789a..a7d3266d3d83 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -78,7 +78,7 @@ bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
- if (!force_dma_unencrypted(dev))
+ if (!force_dma_unencrypted(dev) || is_swiotlb_for_alloc(dev))
return 0;
return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}
@@ -87,7 +87,7 @@ static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
int ret;
- if (!force_dma_unencrypted(dev))
+ if (!force_dma_unencrypted(dev) || is_swiotlb_for_alloc(dev))
return 0;
ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
if (ret)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 33d942615be5..a056d2f8b9ee 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -363,6 +363,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
io_tlb_default_mem.force_bounce =
swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
+ io_tlb_default_mem.for_alloc = (flags & SWIOTLB_ALLOC);
#ifdef CONFIG_SWIOTLB_DYNAMIC
if (!remap)
@@ -1601,8 +1602,6 @@ static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
#endif /* CONFIG_DEBUG_FS */
-#ifdef CONFIG_DMA_RESTRICTED_POOL
-
struct page *swiotlb_alloc(struct device *dev, size_t size)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
@@ -1634,6 +1633,8 @@ bool swiotlb_free(struct device *dev, struct page *page, size_t size)
return true;
}
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+
static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
struct device *dev)
{
--
2.43.0.275.g3460e3d667-goog