Message-Id: <20190902141043.27210-4-nsaenzjulienne@suse.de>
Date: Mon, 2 Sep 2019 16:10:41 +0200
From: Nicolas Saenz Julienne <nsaenzjulienne@...e.de>
To: catalin.marinas@....com, hch@....de, wahrenst@....net,
marc.zyngier@....com, robh+dt@...nel.org,
linux-arm-kernel@...ts.infradead.org, linux-mm@...ck.org,
linux-riscv@...ts.infradead.org, Will Deacon <will@...nel.org>
Cc: f.fainelli@...il.com, robin.murphy@....com, nsaenzjulienne@...e.de,
linux-kernel@...r.kernel.org, mbrugger@...e.com,
linux-rpi-kernel@...ts.infradead.org, phill@...pberrypi.org,
m.szyprowski@...sung.com
Subject: [PATCH v3 3/4] arm64: use both ZONE_DMA and ZONE_DMA32
So far, all arm64 devices have supported 32-bit DMA masks for their
peripherals. This is no longer true for the Raspberry Pi 4, as most of
its peripherals can only address the first GB of memory out of a total
of up to 4 GB.

This goes against ZONE_DMA32's intent, as ZONE_DMA32 is expected to be
fully addressable with a 32-bit mask. So it was decided to re-introduce
ZONE_DMA on arm64.

ZONE_DMA will contain the lower 1 GB of memory, which is currently the
memory area addressable by any peripheral on an arm64 device.
ZONE_DMA32 will contain the rest of the 32-bit addressable memory.
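
To illustrate, here is a minimal user-space sketch of the boundary
computation added below as max_zone_dma_phys(); dram_start/dram_end are
hypothetical stand-ins for memblock_start_of_DRAM()/memblock_end_of_DRAM(),
shown for a flat 4 GB memory map like the Raspberry Pi 4's:

	#include <stdio.h>
	#include <stdint.h>

	#define ARCH_ZONE_DMA_BITS	30	/* 1 GB, matching asm/page.h below */

	/* Stand-ins for memblock_start_of_DRAM()/memblock_end_of_DRAM() */
	static const uint64_t dram_start = 0x0;
	static const uint64_t dram_end = 0x100000000ULL;	/* 4 GB */

	static uint64_t max_zone_dma_phys(void)
	{
		/*
		 * Keep any DMA offset for memory starting above 4 GB;
		 * equivalent to GENMASK_ULL(63, 32) in the patch.
		 */
		uint64_t offset = dram_start & ~((1ULL << 32) - 1);
		uint64_t limit = offset + (1ULL << ARCH_ZONE_DMA_BITS);

		return limit < dram_end ? limit : dram_end;
	}

	int main(void)
	{
		/* Prints 0x40000000: ZONE_DMA covers the first GB */
		printf("ZONE_DMA limit: %#llx\n",
		       (unsigned long long)max_zone_dma_phys());
		return 0;
	}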
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@...e.de>
---
Changes in v3:
- Use a fixed-size ZONE_DMA
- Fix check before swiotlb_init()
Changes in v2:
- Update comment to reflect new zones split
- ZONE_DMA will never be left empty
arch/arm64/Kconfig | 4 +++
arch/arm64/include/asm/page.h | 2 ++
arch/arm64/mm/init.c | 51 ++++++++++++++++++++++++++++-------
3 files changed, 47 insertions(+), 10 deletions(-)
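
As a usage note, a driver for one of these limited peripherals would simply
declare its 30-bit mask and let the DMA API allocate from the new ZONE_DMA.
A hypothetical sketch (foo_probe_dma() and its parameters are made up for
illustration, not part of this patch):

	#include <linux/dma-mapping.h>

	/* Hypothetical probe-time helper for a peripheral that can only
	 * address the first GB of memory (e.g. on the Raspberry Pi 4). */
	static int foo_probe_dma(struct device *dev, size_t size)
	{
		dma_addr_t dma_handle;
		void *buf;

		/* Tell the DMA API about the 30-bit limitation */
		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(30)))
			return -EIO;

		/* The buffer now comes from below arm64_dma_phys_limit */
		buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		return 0;
	}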
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3adcec05b1f6..a9fd71d3bc8e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -266,6 +266,10 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
def_bool y
+config ZONE_DMA
+ bool "Support DMA zone" if EXPERT
+ default y
+
config ZONE_DMA32
bool "Support DMA32 zone" if EXPERT
default y
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index d39ddb258a04..7b8c98830101 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -38,4 +38,6 @@ extern int pfn_valid(unsigned long);
#include <asm-generic/getorder.h>
+#define ARCH_ZONE_DMA_BITS 30
+
#endif
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 8956c22634dd..f02a4945aeac 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -50,6 +50,13 @@
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
+/*
+ * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
+ * memory as some devices, namely the Raspberry Pi 4, have peripherals with
+ * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
+ * bit addressable memory area.
+ */
+phys_addr_t arm64_dma_phys_limit __ro_after_init;
phys_addr_t arm64_dma32_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
@@ -164,9 +171,9 @@ static void __init reserve_elfcorehdr(void)
}
#endif /* CONFIG_CRASH_DUMP */
/*
- * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
- * currently assumes that for memory starting above 4G, 32-bit devices will
- * use a DMA offset.
+ * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)) and
+ * ZONE_DMA (DMA_BIT_MASK(30)) respectively. It currently assumes that for
+ * memory starting above 4G, 32-bit devices will use a DMA offset.
*/
static phys_addr_t __init max_zone_dma32_phys(void)
{
@@ -174,12 +181,23 @@ static phys_addr_t __init max_zone_dma32_phys(void)
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
+static phys_addr_t __init max_zone_dma_phys(void)
+{
+ phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+
+ return min(offset + (1ULL << ARCH_ZONE_DMA_BITS),
+ memblock_end_of_DRAM());
+}
+
#ifdef CONFIG_NUMA
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
+#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
#endif
@@ -195,13 +213,17 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
struct memblock_region *reg;
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
unsigned long max_dma32 = min;
+ unsigned long max_dma = min;
memset(zone_size, 0, sizeof(zone_size));
- /* 4GB maximum for 32-bit only capable devices */
+#ifdef CONFIG_ZONE_DMA
+ max_dma = PFN_DOWN(arm64_dma_phys_limit);
+ zone_size[ZONE_DMA] = max_dma - min;
+#endif
#ifdef CONFIG_ZONE_DMA32
max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
- zone_size[ZONE_DMA32] = max_dma32 - min;
+ zone_size[ZONE_DMA32] = max_dma32 - max_dma;
#endif
zone_size[ZONE_NORMAL] = max - max_dma32;
@@ -213,11 +235,17 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
if (start >= max)
continue;
-
+#ifdef CONFIG_ZONE_DMA
+ if (start < max_dma) {
+ unsigned long dma_end = min_not_zero(end, max_dma);
+ zhole_size[ZONE_DMA] -= dma_end - start;
+ }
+#endif
#ifdef CONFIG_ZONE_DMA32
if (start < max_dma32) {
- unsigned long dma_end = min(end, max_dma32);
- zhole_size[ZONE_DMA32] -= dma_end - start;
+ unsigned long dma32_end = min(end, max_dma32);
+ unsigned long dma32_start = max(start, max_dma);
+ zhole_size[ZONE_DMA32] -= dma32_end - dma32_start;
}
#endif
if (end > max_dma32) {
@@ -405,7 +433,9 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
- /* 4GB maximum for 32-bit only capable devices */
+ if (IS_ENABLED(CONFIG_ZONE_DMA))
+ arm64_dma_phys_limit = max_zone_dma_phys();
+
if (IS_ENABLED(CONFIG_ZONE_DMA32))
arm64_dma32_phys_limit = max_zone_dma32_phys();
else
@@ -417,7 +447,7 @@ void __init arm64_memblock_init(void)
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
- dma_contiguous_reserve(arm64_dma32_phys_limit);
+ dma_contiguous_reserve(arm64_dma_phys_limit ? : arm64_dma32_phys_limit);
}
void __init bootmem_init(void)
@@ -521,6 +551,7 @@ static void __init free_unused_memmap(void)
void __init mem_init(void)
{
if (swiotlb_force == SWIOTLB_FORCE ||
+ max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT) ||
max_pfn > (arm64_dma32_phys_limit >> PAGE_SHIFT))
swiotlb_init(1);
else
--
2.23.0