[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240112055251.36101-3-vannapurve@google.com>
Date: Fri, 12 Jan 2024 05:52:48 +0000
From: Vishal Annapurve <vannapurve@...gle.com>
To: x86@...nel.org, linux-kernel@...r.kernel.org
Cc: pbonzini@...hat.com, rientjes@...gle.com, bgardon@...gle.com,
seanjc@...gle.com, erdemaktas@...gle.com, ackerleytng@...gle.com,
jxgao@...gle.com, sagis@...gle.com, oupton@...gle.com, peterx@...hat.com,
vkuznets@...hat.com, dmatlack@...gle.com, pgonda@...gle.com,
michael.roth@....com, kirill@...temov.name, thomas.lendacky@....com,
dave.hansen@...ux.intel.com, linux-coco@...ts.linux.dev,
chao.p.peng@...ux.intel.com, isaku.yamahata@...il.com, andrew.jones@...ux.dev,
corbet@....net, hch@....de, m.szyprowski@...sung.com, bp@...e.de,
rostedt@...dmis.org, iommu@...ts.linux.dev,
Vishal Annapurve <vannapurve@...gle.com>
Subject: [RFC V1 2/5] swiotlb: Allow setting up default alignment of SWIOTLB region
Allow adjusting the alignment of the SWIOTLB memory region at boot time.
Confidential VMs (CVMs) can use this hook to ensure the shared SWIOTLB
region is aligned to the granularity they require (e.g. for page
conversion between private and shared).
Signed-off-by: Vishal Annapurve <vannapurve@...gle.com>
---
include/linux/swiotlb.h | 5 +++++
kernel/dma/swiotlb.c | 12 +++++++++---
2 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 058901313405..450bd82cdb9f 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -206,6 +206,7 @@ size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_allocated(void);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
+void __init swiotlb_adjust_alignment(unsigned long alignment);
phys_addr_t default_swiotlb_base(void);
phys_addr_t default_swiotlb_limit(void);
#else
@@ -247,6 +248,10 @@ static inline void swiotlb_adjust_size(unsigned long size)
{
}
+/*
+ * Stub for !CONFIG_SWIOTLB builds. Must be 'static inline' (matching the
+ * swiotlb_adjust_size() stub above): a plain definition in this header would
+ * be emitted in every translation unit that includes it, causing multiple
+ * definition link errors. '__init' is likewise dropped — it is meaningless
+ * on an inline header stub.
+ */
+static inline void swiotlb_adjust_alignment(unsigned long alignment)
+{
+}
+
static inline phys_addr_t default_swiotlb_base(void)
{
return 0;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index a056d2f8b9ee..eeab0607a028 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -97,6 +97,7 @@ static struct io_tlb_mem io_tlb_default_mem;
#endif /* CONFIG_SWIOTLB_DYNAMIC */
static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
+static unsigned long default_alignment = PAGE_SIZE;
static unsigned long default_nareas;
/**
@@ -223,6 +224,11 @@ void __init swiotlb_adjust_size(unsigned long size)
pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}
+/*
+ * swiotlb_adjust_alignment() - override the default alignment (PAGE_SIZE)
+ * used when allocating the SWIOTLB bounce-buffer region.
+ *
+ * NOTE(review): the value feeds swiotlb_memblock_alloc(), so presumably this
+ * must be called before the SWIOTLB pool is set up and @alignment must be a
+ * power of two no smaller than PAGE_SIZE — confirm; values below PAGE_SIZE
+ * would weaken the previous PAGE_ALIGN()/PAGE_SIZE behavior.
+ */
+void __init swiotlb_adjust_alignment(unsigned long alignment)
+{
+	default_alignment = alignment;
+}
+
void swiotlb_print_info(void)
{
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
@@ -315,7 +321,7 @@ static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
unsigned int flags,
int (*remap)(void *tlb, unsigned long nslabs))
{
- size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
+ size_t bytes = ALIGN(nslabs << IO_TLB_SHIFT, default_alignment);
void *tlb;
/*
@@ -324,9 +330,9 @@ static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
* memory encryption.
*/
if (flags & SWIOTLB_ANY)
- tlb = memblock_alloc(bytes, PAGE_SIZE);
+ tlb = memblock_alloc(bytes, default_alignment);
else
- tlb = memblock_alloc_low(bytes, PAGE_SIZE);
+ tlb = memblock_alloc_low(bytes, default_alignment);
if (!tlb) {
pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
--
2.43.0.275.g3460e3d667-goog
Powered by blists - more mailing lists