Message-ID: <20200321121621.1600-2-yezhenyu2@huawei.com>
Date:   Sat, 21 Mar 2020 20:16:18 +0800
From:   Zhenyu Ye <yezhenyu2@...wei.com>
To:     <will@...nel.org>, <mark.rutland@....com>,
        <catalin.marinas@....com>, <aneesh.kumar@...ux.ibm.com>,
        <maz@...nel.org>, <steven.price@....com>, <broonie@...nel.org>,
        <guohanjun@...wei.com>
CC:     <yezhenyu2@...wei.com>, <linux-arm-kernel@...ts.infradead.org>,
        <linux-kernel@...r.kernel.org>, <linux-arch@...r.kernel.org>,
        <linux-mm@...ck.org>, <arm@...nel.org>, <xiexiangyou@...wei.com>,
        <prime.zeng@...ilicon.com>, <zhangshaokun@...ilicon.com>
Subject: [RFC PATCH v3 1/4] arm64: Add level-hinted TLB invalidation helper to tlbi_user

Add a level-hinted parameter to __tlbi_user, which is only used if
ARMv8.4-TTL is detected.

ARMv8.4-TTL provides the TTL field in the TLBI instruction to indicate
the level of the translation table walk that holds the leaf entry for
the address being invalidated.

This patch sets the default level value to 0.
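
For reference, the TTL hint encodes the translation granule in bits
[3:2] (1 = 4K, 2 = 16K, 3 = 64K) and the level of the leaf entry in
bits [1:0]; a level of 0 (the default used here) means the level is
unknown and keeps today's behaviour. Below is a minimal, stand-alone
user-space sketch of that encoding, mirroring what __tlbi_user_level()
does; the field position (TLBI_TTL_SHIFT/TLBI_TTL_MASK) inside the
TLBI argument is assumed here and in practice comes from the existing
TTL definitions this patch builds on:

#include <stdint.h>
#include <stdio.h>

/* Assumed position of the TTL hint inside the TLBI argument. */
#define TLBI_TTL_SHIFT	44
#define TLBI_TTL_MASK	(0xfULL << TLBI_TTL_SHIFT)

/* Mirror of the encoding performed by __tlbi_user_level(). */
static uint64_t tlbi_encode_ttl(uint64_t arg, unsigned int page_shift,
				unsigned int level)
{
	uint64_t ttl = level & 0x3;

	if (!level)
		return arg;		/* level unknown: no hint */

	switch (page_shift) {
	case 12: ttl |= 1 << 2; break;	/* 4K granule  */
	case 14: ttl |= 2 << 2; break;	/* 16K granule */
	case 16: ttl |= 3 << 2; break;	/* 64K granule */
	default: return arg;
	}

	arg &= ~TLBI_TTL_MASK;
	return arg | (ttl << TLBI_TTL_SHIFT);
}

int main(void)
{
	/* e.g. a 4K-granule, level-3 (last-level/PTE) invalidation hint */
	printf("0x%016llx\n",
	       (unsigned long long)tlbi_encode_ttl(0, 12, 3));
	return 0;
}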

Signed-off-by: Zhenyu Ye <yezhenyu2@...wei.com>
---
 arch/arm64/include/asm/tlbflush.h | 42 ++++++++++++++++++++++++++-----
 1 file changed, 36 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index a3f70778a325..d141c080e494 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -89,6 +89,36 @@
 		__tlbi(op,  arg);					\
 	} while(0)
 
+#define __tlbi_user_level(op, addr, level)				\
+	do {								\
+		u64 arg = addr;						\
+									\
+		if (!arm64_kernel_unmapped_at_el0())			\
+			break;						\
+									\
+		if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&	\
+		    level) {						\
+			u64 ttl = level;				\
+									\
+			switch (PAGE_SIZE) {				\
+			case SZ_4K:					\
+				ttl |= 1 << 2;				\
+				break;					\
+			case SZ_16K:					\
+				ttl |= 2 << 2;				\
+				break;					\
+			case SZ_64K:					\
+				ttl |= 3 << 2;				\
+				break;					\
+			}						\
+									\
+			arg &= ~TLBI_TTL_MASK;				\
+			arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);		\
+		}							\
+									\
+		__tlbi(op,  (arg) | USER_ASID_FLAG);			\
+	} while (0)
+
 /*
  *	TLB Invalidation
  *	================
@@ -190,8 +220,8 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 
 	dsb(ishst);
-	__tlbi(vale1is, addr);
-	__tlbi_user(vale1is, addr);
+	__tlbi_level(vale1is, addr, 0);
+	__tlbi_user_level(vale1is, addr, 0);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -231,11 +261,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ishst);
 	for (addr = start; addr < end; addr += stride) {
 		if (last_level) {
-			__tlbi(vale1is, addr);
-			__tlbi_user(vale1is, addr);
+			__tlbi_level(vale1is, addr, 0);
+			__tlbi_user_level(vale1is, addr, 0);
 		} else {
-			__tlbi(vae1is, addr);
-			__tlbi_user(vae1is, addr);
+			__tlbi_level(vae1is, addr, 0);
+			__tlbi_user_level(vae1is, addr, 0);
 		}
 	}
 	dsb(ish);
-- 
2.19.1

