Message-ID: <CAMj1kXEr579hDqV5OuEbBB-O9meCOcmcTe_SZg97UKSLbKj6pw@mail.gmail.com>
Date: Tue, 18 Jun 2024 16:59:22 +0200
From: Ard Biesheuvel <ardb@...nel.org>
To: Maxwell Bland <mbland@...orola.com>
Cc: linux-mm@...ck.org, Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Jonathan Corbet <corbet@....net>,
Andrew Morton <akpm@...ux-foundation.org>, Mark Rutland <mark.rutland@....com>,
Christophe Leroy <christophe.leroy@...roup.eu>, Alexandre Ghiti <alexghiti@...osinc.com>,
linux-arm-kernel@...ts.infradead.org, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 2/5] arm64: non leaf ptdump support
On Tue, 18 Jun 2024 at 16:40, Maxwell Bland <mbland@...orola.com> wrote:
>
> Separate the pte_bits used in ptdump from the pxd_bits used by pmd, p4d,
> pud, and pgd descriptors, thereby adding support for printing key
> intermediate directory protection bits, such as PXNTable, and enable the
> associated Kconfig option.
>
> Signed-off-by: Maxwell Bland <mbland@...orola.com>
> ---
> arch/arm64/Kconfig | 1 +
> arch/arm64/mm/ptdump.c | 140 ++++++++++++++++++++++++++++++++++++-----
> 2 files changed, 125 insertions(+), 16 deletions(-)
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 5d91259ee7b5..f4c3290160db 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -98,6 +98,7 @@ config ARM64
> select ARCH_SUPPORTS_NUMA_BALANCING
> select ARCH_SUPPORTS_PAGE_TABLE_CHECK
> select ARCH_SUPPORTS_PER_VMA_LOCK
> + select ARCH_SUPPORTS_NON_LEAF_PTDUMP
> select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
> select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
> select ARCH_WANT_DEFAULT_BPF_JIT
> diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
> index 6986827e0d64..8f0b459c13ed 100644
> --- a/arch/arm64/mm/ptdump.c
> +++ b/arch/arm64/mm/ptdump.c
> @@ -24,6 +24,7 @@
> #include <asm/memory.h>
> #include <asm/pgtable-hwdef.h>
> #include <asm/ptdump.h>
> +#include <asm/pgalloc.h>
>
>
> #define pt_dump_seq_printf(m, fmt, args...) \
> @@ -105,11 +106,6 @@ static const struct prot_bits pte_bits[] = {
> .val = PTE_CONT,
> .set = "CON",
> .clear = " ",
> - }, {
> - .mask = PTE_TABLE_BIT,
> - .val = PTE_TABLE_BIT,
> - .set = " ",
> - .clear = "BLK",
> }, {
> .mask = PTE_UXN,
> .val = PTE_UXN,
> @@ -143,34 +139,129 @@ static const struct prot_bits pte_bits[] = {
> }
> };
>
> +static const struct prot_bits pxd_bits[] = {

This table will need to distinguish between table and block entries.
In your sample output, I see
2M PMD TBL RW x UXNTbl MEM/NORMAL
for a table entry, which includes a memory type and access permissions
based on descriptor fields that are not used for table descriptors.
Some of the other attributes listed below are equally inapplicable to table
entries; they happen to be 0x0, so they don't show up in the output, but
they would if the IGNORED bit in the descriptor happened to be set.
So I suspect that the distinction pte_bits <-> pxd_bits is not so
useful here. It would be better to have tbl_bits[], with pointers to
it in the pg_level array, where the PTE level one is set to NULL.
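
Something along these lines, perhaps (rough, untested sketch just to
illustrate the shape -- the extra field and array names are made up):

static const struct prot_bits tbl_bits[] = {
	{
		.mask	= PMD_TABLE_PXN,
		.val	= PMD_TABLE_PXN,
		.set	= "NXTbl",
		.clear	= "     ",
	}, {
		.mask	= PMD_TABLE_UXN,
		.val	= PMD_TABLE_UXN,
		.set	= "UXNTbl",
		.clear	= "      ",
	},
};

struct pg_level {
	const struct prot_bits *bits;		/* block/page attributes */
	const struct prot_bits *tbl_bits;	/* table attributes, NULL at PTE level */
	char name[4];
	int num;
	int num_tbl;
	u64 mask;
	unsigned long size;
};

note_page() would then describe table descriptors using ->tbl_bits and
block/page entries using ->bits, rather than forcing both through a
single array.
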
> + {
> + .mask = PMD_SECT_VALID,
> + .val = PMD_SECT_VALID,
> + .set = " ",
> + .clear = "F",
> + }, {
> + .mask = PMD_TABLE_BIT,
> + .val = PMD_TABLE_BIT,
> + .set = "TBL",
> + .clear = "BLK",
> + }, {
> + .mask = PMD_SECT_USER,
> + .val = PMD_SECT_USER,
> + .set = "USR",
> + .clear = " ",
> + }, {
> + .mask = PMD_SECT_RDONLY,
> + .val = PMD_SECT_RDONLY,
> + .set = "ro",
> + .clear = "RW",
> + }, {
> + .mask = PMD_SECT_S,
> + .val = PMD_SECT_S,
> + .set = "SHD",
> + .clear = " ",
> + }, {
> + .mask = PMD_SECT_AF,
> + .val = PMD_SECT_AF,
> + .set = "AF",
> + .clear = " ",
> + }, {
> + .mask = PMD_SECT_NG,
> + .val = PMD_SECT_NG,
> + .set = "NG",
> + .clear = " ",
> + }, {
> + .mask = PMD_SECT_CONT,
> + .val = PMD_SECT_CONT,
> + .set = "CON",
> + .clear = " ",
> + }, {
> + .mask = PMD_SECT_PXN,
> + .val = PMD_SECT_PXN,
> + .set = "NX",
> + .clear = "x ",
> + }, {
> + .mask = PMD_SECT_UXN,
> + .val = PMD_SECT_UXN,
> + .set = "UXN",
> + .clear = " ",
> + }, {
> + .mask = PMD_TABLE_PXN,
> + .val = PMD_TABLE_PXN,
> + .set = "NXTbl",
> + .clear = " ",
> + }, {
> + .mask = PMD_TABLE_UXN,
> + .val = PMD_TABLE_UXN,
> + .set = "UXNTbl",
> + .clear = " ",
> + }, {
> + .mask = PTE_GP,
> + .val = PTE_GP,
> + .set = "GP",
> + .clear = " ",
> + }, {
> + .mask = PMD_ATTRINDX_MASK,
> + .val = PMD_ATTRINDX(MT_DEVICE_nGnRnE),
> + .set = "DEVICE/nGnRnE",
> + }, {
> + .mask = PMD_ATTRINDX_MASK,
> + .val = PMD_ATTRINDX(MT_DEVICE_nGnRE),
> + .set = "DEVICE/nGnRE",
> + }, {
> + .mask = PMD_ATTRINDX_MASK,
> + .val = PMD_ATTRINDX(MT_NORMAL_NC),
> + .set = "MEM/NORMAL-NC",
> + }, {
> + .mask = PMD_ATTRINDX_MASK,
> + .val = PMD_ATTRINDX(MT_NORMAL),
> + .set = "MEM/NORMAL",
> + }, {
> + .mask = PMD_ATTRINDX_MASK,
> + .val = PMD_ATTRINDX(MT_NORMAL_TAGGED),
> + .set = "MEM/NORMAL-TAGGED",
> + }
> +};
> +
> struct pg_level {
> const struct prot_bits *bits;
> char name[4];
> int num;
> u64 mask;
> + unsigned long size;
> };
>
> static struct pg_level pg_level[] __ro_after_init = {
> { /* pgd */
> .name = "PGD",
> - .bits = pte_bits,
> - .num = ARRAY_SIZE(pte_bits),
> + .bits = pxd_bits,
> + .num = ARRAY_SIZE(pxd_bits),
> + .size = PGDIR_SIZE,
> }, { /* p4d */
> .name = "P4D",
> - .bits = pte_bits,
> - .num = ARRAY_SIZE(pte_bits),
> + .bits = pxd_bits,
> + .num = ARRAY_SIZE(pxd_bits),
> + .size = P4D_SIZE,
> }, { /* pud */
> .name = "PUD",
> - .bits = pte_bits,
> - .num = ARRAY_SIZE(pte_bits),
> + .bits = pxd_bits,
> + .num = ARRAY_SIZE(pxd_bits),
> + .size = PUD_SIZE,
> }, { /* pmd */
> .name = "PMD",
> - .bits = pte_bits,
> - .num = ARRAY_SIZE(pte_bits),
> + .bits = pxd_bits,
> + .num = ARRAY_SIZE(pxd_bits),
> + .size = PMD_SIZE,
> }, { /* pte */
> .name = "PTE",
> .bits = pte_bits,
> .num = ARRAY_SIZE(pte_bits),
> + .size = PAGE_SIZE
> },
> };
>
> @@ -251,10 +342,27 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
> note_prot_wx(st, addr);
> }
>
> - pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ",
> - st->start_address, addr);
> + /*
> + * Non-leaf entries use a fixed size for their range
> + * specification, whereas leaf entries are grouped by
> + * attributes and may not have a range larger than the type
> + * specifier.
> + */
> + if (st->start_address == addr) {
> + if (check_add_overflow(addr, pg_level[st->level].size,
> + &delta))
> + delta = ULONG_MAX - addr + 1;
> + else
> + delta = pg_level[st->level].size;
> + pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ",
> + addr, addr + delta);
> + } else {
> + delta = (addr - st->start_address);
> + pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ",
> + st->start_address, addr);
> + }
>
> - delta = (addr - st->start_address) >> 10;
> + delta >>= 10;
> while (!(delta & 1023) && unit[1]) {
> delta >>= 10;
> unit++;
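
As an aside, for anyone reasoning about the overflow clamp and the unit
reduction above, they amount to something like this stand-alone sketch
(user space, untested, illustrative only):

#include <stdio.h>

static void show_range(unsigned long addr, unsigned long size)
{
	static const char units[] = "KMGTPE";
	const char *unit = units;
	unsigned long delta;

	/* check_add_overflow() boils down to this builtin */
	if (__builtin_add_overflow(addr, size, &delta))
		delta = ~0UL - addr + 1;	/* clamp at the top of the address space */
	else
		delta = size;

	printf("0x%016lx-0x%016lx ", addr, addr + delta);

	delta >>= 10;				/* start in KiB */
	while (!(delta & 1023) && unit[1]) {	/* reduce while evenly divisible */
		delta >>= 10;
		unit++;
	}
	printf("%9lu%c\n", delta, *unit);
}

int main(void)
{
	show_range(0xffff800080000000UL, 1UL << 21);	/* ordinary 2M PMD range */
	show_range(0xffffffffffe00000UL, 1UL << 21);	/* last 2M: addr + size wraps to 0 */
	return 0;
}
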
> --
> 2.39.2
>
>