Message-Id: <20240125164256.4147-25-alexandru.elisei@arm.com>
Date: Thu, 25 Jan 2024 16:42:45 +0000
From: Alexandru Elisei <alexandru.elisei@....com>
To: catalin.marinas@....com,
will@...nel.org,
oliver.upton@...ux.dev,
maz@...nel.org,
james.morse@....com,
suzuki.poulose@....com,
yuzenghui@...wei.com,
arnd@...db.de,
akpm@...ux-foundation.org,
mingo@...hat.com,
peterz@...radead.org,
juri.lelli@...hat.com,
vincent.guittot@...aro.org,
dietmar.eggemann@....com,
rostedt@...dmis.org,
bsegall@...gle.com,
mgorman@...e.de,
bristot@...hat.com,
vschneid@...hat.com,
mhiramat@...nel.org,
rppt@...nel.org,
hughd@...gle.com
Cc: pcc@...gle.com,
steven.price@....com,
anshuman.khandual@....com,
vincenzo.frascino@....com,
david@...hat.com,
eugenis@...gle.com,
kcc@...gle.com,
hyesoo.yu@...sung.com,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org,
kvmarm@...ts.linux.dev,
linux-fsdevel@...r.kernel.org,
linux-arch@...r.kernel.org,
linux-mm@...ck.org,
linux-trace-kernel@...r.kernel.org
Subject: [PATCH RFC v3 24/35] arm64: mte: Perform CMOs for tag blocks

Make sure the contents of the tag storage block are not corrupted by
performing:
1. A tag dcache inval when the associated tagged pages are freed, to avoid
dirty tag cache lines being evicted and corrupting the tag storage
block when it's being used to store data.
2. A data cache inval when the tag storage block is being reserved, to
ensure that no dirty data cache lines are present, which would
trigger a writeback that could corrupt the tags stored in the block.

Signed-off-by: Alexandru Elisei <alexandru.elisei@....com>
---
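A condensed C-level sketch of the two call sites added by the diff below,
for orientation only (the statements and names are taken from the tag
storage code touched here; this text is not part of the patch itself):

/*
 * Reserve path, tag_storage_reserve_block(): the block may still hold
 * dirty *data* cache lines from when it backed a data page; drop them so
 * a later writeback cannot overwrite the tags about to be stored there.
 */
block_va = (unsigned long)page_to_virt(pfn_to_page(block));
dcache_inval_poc(block_va, block_va + region->block_size_pages * PAGE_SIZE);

/*
 * Free path, free_tag_storage(): the tagged pages being freed may still
 * have dirty *allocation tag* cache lines; drop them so a later eviction
 * cannot write stale tags into the block once it is reused to store data.
 */
page_va = (unsigned long)page_to_virt(page);
dcache_inval_tags_poc(page_va, page_va + (PAGE_SIZE << order));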
arch/arm64/include/asm/assembler.h | 10 ++++++++++
arch/arm64/include/asm/mte_tag_storage.h | 2 ++
arch/arm64/kernel/mte_tag_storage.c | 11 +++++++++++
arch/arm64/lib/mte.S | 16 ++++++++++++++++
4 files changed, 39 insertions(+)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 513787e43329..65fe88cce72b 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -310,6 +310,16 @@ alternative_cb_end
lsl \reg, \reg, \tmp // actual cache line size
.endm
+/*
+ * tcache_line_size - get the safe tag cache line size across all CPUs
+ */
+ .macro tcache_line_size, reg, tmp
+ read_ctr \tmp
+ ubfm \tmp, \tmp, #32, #37 // tag cache line size encoding
+ mov \reg, #4 // bytes per word
+ lsl \reg, \reg, \tmp // actual tag cache line size
+ .endm
+
/*
* raw_icache_line_size - get the minimum I-cache line size on this CPU
* from the CTR register.
diff --git a/arch/arm64/include/asm/mte_tag_storage.h b/arch/arm64/include/asm/mte_tag_storage.h
index 09f1318d924e..423b19e0cc46 100644
--- a/arch/arm64/include/asm/mte_tag_storage.h
+++ b/arch/arm64/include/asm/mte_tag_storage.h
@@ -11,6 +11,8 @@
#include <asm/mte.h>
+extern void dcache_inval_tags_poc(unsigned long start, unsigned long end);
+
#ifdef CONFIG_ARM64_MTE_TAG_STORAGE
DECLARE_STATIC_KEY_FALSE(tag_storage_enabled_key);
diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
index 762c7c803a70..8c347f4855e4 100644
--- a/arch/arm64/kernel/mte_tag_storage.c
+++ b/arch/arm64/kernel/mte_tag_storage.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
#include <linux/xarray.h>
+#include <asm/cacheflush.h>
#include <asm/mte_tag_storage.h>
__ro_after_init DEFINE_STATIC_KEY_FALSE(tag_storage_enabled_key);
@@ -421,8 +422,13 @@ static bool tag_storage_block_is_reserved(unsigned long block)
static int tag_storage_reserve_block(unsigned long block, struct tag_region *region, int order)
{
+ unsigned long block_va;
int ret;
+ block_va = (unsigned long)page_to_virt(pfn_to_page(block));
+ /* Avoid writeback of dirty data cache lines corrupting tags. */
+ dcache_inval_poc(block_va, block_va + region->block_size_pages * PAGE_SIZE);
+
ret = xa_err(xa_store(&tag_blocks_reserved, block, pfn_to_page(block), GFP_KERNEL));
if (!ret)
block_ref_add(block, region, order);
@@ -570,6 +576,7 @@ void free_tag_storage(struct page *page, int order)
{
unsigned long block, start_block, end_block;
struct tag_region *region;
+ unsigned long page_va;
unsigned long flags;
int ret;
@@ -577,6 +584,10 @@ void free_tag_storage(struct page *page, int order)
if (WARN_ONCE(ret, "Missing tag storage block for pfn 0x%lx", page_to_pfn(page)))
return;
+ page_va = (unsigned long)page_to_virt(page);
+ /* Avoid writeback of dirty tag cache lines corrupting data. */
+ dcache_inval_tags_poc(page_va, page_va + (PAGE_SIZE << order));
+
end_block = start_block + order_to_num_blocks(order, region->block_size_pages);
xa_lock_irqsave(&tag_blocks_reserved, flags);
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 9f623e9da09f..bc02b4e95062 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -175,3 +175,19 @@ SYM_FUNC_START(mte_copy_page_tags_from_buf)
ret
SYM_FUNC_END(mte_copy_page_tags_from_buf)
+
+/*
+ * dcache_inval_tags_poc(start, end)
+ *
+ * Ensure that any tags in the D-cache for the interval [start, end)
+ * are invalidated to PoC.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+SYM_FUNC_START(__pi_dcache_inval_tags_poc)
+ tcache_line_size x2, x3
+ dcache_by_myline_op igvac, sy, x0, x1, x2, x3
+ ret
+SYM_FUNC_END(__pi_dcache_inval_tags_poc)
+SYM_FUNC_ALIAS(dcache_inval_tags_poc, __pi_dcache_inval_tags_poc)
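Not part of the patch, only an illustration of the stride used by the
DC IGVAC loop above: tcache_line_size derives it from CTR_EL0.TminLine
(bits [37:32], the tag cache line size encoded in 4-byte words), i.e.
roughly the following, where tcache_line_size_bytes() is a made-up name
for the example and not something the patch adds:

static inline unsigned long tcache_line_size_bytes(unsigned long ctr_el0)
{
	/* 4 bytes per word << TminLine, mirroring the assembler macro. */
	return 4UL << ((ctr_el0 >> 32) & 0x3f);
}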
--
2.43.0