Message-ID: <san7zg5rohy7q2oep2kx6awf6gltpuv2f4xlfljw5ezxqydfkv@zkd4k4udcezz>
Date: Tue, 18 Jun 2024 09:40:48 -0500
From: Maxwell Bland <mbland@...orola.com>
To: linux-mm@...ck.org
Cc: Catalin Marinas <catalin.marinas@....com>, Will Deacon <will@...nel.org>,
Jonathan Corbet <corbet@....net>,
Andrew Morton <akpm@...ux-foundation.org>,
Ard Biesheuvel <ardb@...nel.org>, Mark Rutland <mark.rutland@....com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Maxwell Bland <mbland@...orola.com>,
Alexandre Ghiti <alexghiti@...osinc.com>,
linux-arm-kernel@...ts.infradead.org, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v4 2/5] arm64: non-leaf ptdump support

Separate the pte_bits array used by ptdump from a new pxd_bits array
used for pmd, p4d, pud, and pgd descriptors, adding support for
printing key intermediate directory protection bits such as PXNTable,
and select the associated ARCH_SUPPORTS_NON_LEAF_PTDUMP Kconfig option.

Signed-off-by: Maxwell Bland <mbland@...orola.com>
---
arch/arm64/Kconfig | 1 +
arch/arm64/mm/ptdump.c | 140 ++++++++++++++++++++++++++++++++++++-----
2 files changed, 125 insertions(+), 16 deletions(-)
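
For context on how the new table is consumed: pxd_bits is walked the
same way as pte_bits, i.e. each entry's mask is applied to the
descriptor and either the "set" or "clear" string is printed, with NULL
strings skipped (the ATTRINDX entries only define .set). Below is a
rough, self-contained userspace sketch of that decode loop; it is a
simplified stand-in for ptdump's dump_prot(), not the kernel code
itself, and the bit positions in main() are illustrative only.

#include <stdio.h>
#include <stdint.h>

struct prot_bits {
	uint64_t mask;
	uint64_t val;
	const char *set;
	const char *clear;
};

/* Print one attribute string per table entry: "set" when the masked
 * descriptor matches val, "clear" otherwise, nothing when the chosen
 * string is NULL. */
static void dump_prot(uint64_t desc, const struct prot_bits *bits, int num)
{
	for (int i = 0; i < num; i++, bits++) {
		const char *s = (desc & bits->mask) == bits->val ?
				bits->set : bits->clear;
		if (s)
			printf("%s ", s);
	}
	printf("\n");
}

int main(void)
{
	/* Illustrative bit positions, not the real PMD_* definitions. */
	const uint64_t TABLE_BIT = 1ULL << 1;
	const uint64_t PXNTABLE  = 1ULL << 59;
	const struct prot_bits demo[] = {
		{ TABLE_BIT, TABLE_BIT, "TBL",   "BLK" },
		{ PXNTABLE,  PXNTABLE,  "NXTbl", "     " },
	};

	/* A table descriptor with PXNTable set prints "TBL NXTbl". */
	dump_prot(TABLE_BIT | PXNTABLE, demo, 2);
	return 0;
}
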
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5d91259ee7b5..f4c3290160db 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -98,6 +98,7 @@ config ARM64
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_NON_LEAF_PTDUMP
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 6986827e0d64..8f0b459c13ed 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -24,6 +24,7 @@
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptdump.h>
+#include <asm/pgalloc.h>
#define pt_dump_seq_printf(m, fmt, args...) \
@@ -105,11 +106,6 @@ static const struct prot_bits pte_bits[] = {
.val = PTE_CONT,
.set = "CON",
.clear = " ",
- }, {
- .mask = PTE_TABLE_BIT,
- .val = PTE_TABLE_BIT,
- .set = " ",
- .clear = "BLK",
}, {
.mask = PTE_UXN,
.val = PTE_UXN,
@@ -143,34 +139,129 @@ static const struct prot_bits pte_bits[] = {
}
};
+static const struct prot_bits pxd_bits[] = {
+ {
+ .mask = PMD_SECT_VALID,
+ .val = PMD_SECT_VALID,
+ .set = " ",
+ .clear = "F",
+ }, {
+ .mask = PMD_TABLE_BIT,
+ .val = PMD_TABLE_BIT,
+ .set = "TBL",
+ .clear = "BLK",
+ }, {
+ .mask = PMD_SECT_USER,
+ .val = PMD_SECT_USER,
+ .set = "USR",
+ .clear = " ",
+ }, {
+ .mask = PMD_SECT_RDONLY,
+ .val = PMD_SECT_RDONLY,
+ .set = "ro",
+ .clear = "RW",
+ }, {
+ .mask = PMD_SECT_S,
+ .val = PMD_SECT_S,
+ .set = "SHD",
+ .clear = " ",
+ }, {
+ .mask = PMD_SECT_AF,
+ .val = PMD_SECT_AF,
+ .set = "AF",
+ .clear = " ",
+ }, {
+ .mask = PMD_SECT_NG,
+ .val = PMD_SECT_NG,
+ .set = "NG",
+ .clear = " ",
+ }, {
+ .mask = PMD_SECT_CONT,
+ .val = PMD_SECT_CONT,
+ .set = "CON",
+ .clear = " ",
+ }, {
+ .mask = PMD_SECT_PXN,
+ .val = PMD_SECT_PXN,
+ .set = "NX",
+ .clear = "x ",
+ }, {
+ .mask = PMD_SECT_UXN,
+ .val = PMD_SECT_UXN,
+ .set = "UXN",
+ .clear = " ",
+ }, {
+ .mask = PMD_TABLE_PXN,
+ .val = PMD_TABLE_PXN,
+ .set = "NXTbl",
+ .clear = " ",
+ }, {
+ .mask = PMD_TABLE_UXN,
+ .val = PMD_TABLE_UXN,
+ .set = "UXNTbl",
+ .clear = " ",
+ }, {
+ .mask = PTE_GP,
+ .val = PTE_GP,
+ .set = "GP",
+ .clear = " ",
+ }, {
+ .mask = PMD_ATTRINDX_MASK,
+ .val = PMD_ATTRINDX(MT_DEVICE_nGnRnE),
+ .set = "DEVICE/nGnRnE",
+ }, {
+ .mask = PMD_ATTRINDX_MASK,
+ .val = PMD_ATTRINDX(MT_DEVICE_nGnRE),
+ .set = "DEVICE/nGnRE",
+ }, {
+ .mask = PMD_ATTRINDX_MASK,
+ .val = PMD_ATTRINDX(MT_NORMAL_NC),
+ .set = "MEM/NORMAL-NC",
+ }, {
+ .mask = PMD_ATTRINDX_MASK,
+ .val = PMD_ATTRINDX(MT_NORMAL),
+ .set = "MEM/NORMAL",
+ }, {
+ .mask = PMD_ATTRINDX_MASK,
+ .val = PMD_ATTRINDX(MT_NORMAL_TAGGED),
+ .set = "MEM/NORMAL-TAGGED",
+ }
+};
+
struct pg_level {
const struct prot_bits *bits;
char name[4];
int num;
u64 mask;
+ unsigned long size;
};
static struct pg_level pg_level[] __ro_after_init = {
{ /* pgd */
.name = "PGD",
- .bits = pte_bits,
- .num = ARRAY_SIZE(pte_bits),
+ .bits = pxd_bits,
+ .num = ARRAY_SIZE(pxd_bits),
+ .size = PGDIR_SIZE,
}, { /* p4d */
.name = "P4D",
- .bits = pte_bits,
- .num = ARRAY_SIZE(pte_bits),
+ .bits = pxd_bits,
+ .num = ARRAY_SIZE(pxd_bits),
+ .size = P4D_SIZE,
}, { /* pud */
.name = "PUD",
- .bits = pte_bits,
- .num = ARRAY_SIZE(pte_bits),
+ .bits = pxd_bits,
+ .num = ARRAY_SIZE(pxd_bits),
+ .size = PUD_SIZE,
}, { /* pmd */
.name = "PMD",
- .bits = pte_bits,
- .num = ARRAY_SIZE(pte_bits),
+ .bits = pxd_bits,
+ .num = ARRAY_SIZE(pxd_bits),
+ .size = PMD_SIZE,
}, { /* pte */
.name = "PTE",
.bits = pte_bits,
.num = ARRAY_SIZE(pte_bits),
+ .size = PAGE_SIZE
},
};
@@ -251,10 +342,27 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
note_prot_wx(st, addr);
}
- pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ",
- st->start_address, addr);
+ /*
+ * Non-leaf entries use a fixed size for their range
+ * specification, whereas leaf entries are grouped by
+ * attributes and may not have a range larger than the type
+ * specifier.
+ */
+ if (st->start_address == addr) {
+ if (check_add_overflow(addr, pg_level[st->level].size,
+ &delta))
+ delta = ULONG_MAX - addr + 1;
+ else
+ delta = pg_level[st->level].size;
+ pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ",
+ addr, addr + delta);
+ } else {
+ delta = (addr - st->start_address);
+ pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ",
+ st->start_address, addr);
+ }
- delta = (addr - st->start_address) >> 10;
+ delta >>= 10;
while (!(delta & 1023) && unit[1]) {
delta >>= 10;
unit++;
--
2.39.2
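
An aside on the range arithmetic in the note_page() hunk above: for a
non-leaf entry the printed range is the fixed span of one entry at the
current level, with an overflow check so an entry at the very top of
the address space does not produce a bogus delta. The following is a
minimal userspace sketch of the same computation; it uses
__builtin_add_overflow() as a stand-in for the kernel's
check_add_overflow() and a made-up level size, so it is an illustration
rather than the patch code itself.

#include <stdio.h>
#include <limits.h>

/* Mirror the non-leaf branch: delta is the level's span unless
 * addr + size overflows, in which case delta is trimmed to the bytes
 * remaining up to the top of the address space. */
static unsigned long nonleaf_delta(unsigned long addr, unsigned long size)
{
	unsigned long end;

	if (__builtin_add_overflow(addr, size, &end))
		return ULONG_MAX - addr + 1;
	return size;
}

int main(void)
{
	unsigned long size = 1UL << 30;			/* stand-in for PUD_SIZE */
	unsigned long addr = 0xffffffffc0000000UL;	/* topmost 1 GiB slot */
	unsigned long delta = nonleaf_delta(addr, size);

	/* Prints 0xffffffffc0000000-0x0000000000000000: the end address
	 * wraps to zero for the topmost entry, matching "addr + delta"
	 * in the patch. */
	printf("0x%016lx-0x%016lx\n", addr, addr + delta);
	return 0;
}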