Message-ID: <066fa4ca-5a46-ba86-607f-9c3e16f79cde@arm.com>
Date: Wed, 7 Aug 2019 13:58:21 +0100
From: Steven Price <steven.price@....com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Mark Rutland <Mark.Rutland@....com>, x86@...nel.org,
Arnd Bergmann <arnd@...db.de>,
Ard Biesheuvel <ard.biesheuvel@...aro.org>,
Peter Zijlstra <peterz@...radead.org>,
Catalin Marinas <catalin.marinas@....com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Jérôme Glisse <jglisse@...hat.com>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Andy Lutomirski <luto@...nel.org>,
"H. Peter Anvin" <hpa@...or.com>,
James Morse <james.morse@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Will Deacon <will@...nel.org>,
linux-arm-kernel@...ts.infradead.org,
"Liang, Kan" <kan.liang@...ux.intel.com>
Subject: Re: [PATCH v10 20/22] x86: mm: Convert dump_pagetables to use
walk_page_range
On 07/08/2019 00:58, Andrew Morton wrote:
> On Wed, 31 Jul 2019 16:46:01 +0100 Steven Price <steven.price@....com> wrote:
>
>> Make use of the new functionality in walk_page_range to remove the
>> arch page walking code and use the generic code to walk the page tables.
>>
>> The effective permissions are passed down the chain using new fields
>> in struct pg_state.
>>
>> The KASAN optimisation is implemented by including test_p?d callbacks
>> which can decide to skip an entire tree of entries.
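
To make the test_p?d idea concrete for anyone skimming: below is a rough,
illustrative sketch of the pattern, not the actual mm_walk/ptdump interface
from this series -- all the toy_* names and TOY_P4D_SIZE are invented. The
point is simply that a "test" hook gets asked about a whole range before the
walker descends, so something like the KASAN shadow (one zero page mapped at
every level) can be skipped in a single step.

#define TOY_P4D_SIZE	(1UL << 39)	/* hypothetical span of one p4d entry */

struct toy_walk_ops {
	/* return non-zero to skip the whole [addr, next) subtree */
	int (*test_p4d)(unsigned long addr, unsigned long next, void *priv);
	void (*note_entry)(unsigned long addr, int level, void *priv);
};

static void toy_walk_p4d_range(unsigned long addr, unsigned long end,
			       const struct toy_walk_ops *ops, void *priv)
{
	while (addr < end) {
		unsigned long next = addr + TOY_P4D_SIZE;

		if (next > end || next < addr)	/* clamp, guard wrap-around */
			next = end;
		if (ops->test_p4d && ops->test_p4d(addr, next, priv)) {
			addr = next;		/* whole subtree skipped */
			continue;
		}
		ops->note_entry(addr, 4, priv);	/* 4 == p4d level here */
		/* a real walker would descend to pud/pmd/pte entries next */
		addr = next;
	}
}
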
>>
>> ...
>>
>> +static const struct ptdump_range ptdump_ranges[] = {
>> +#ifdef CONFIG_X86_64
>>
>> -#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
>> -#define pgd_none(a) (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
>> +#define normalize_addr_shift (64 - (__VIRTUAL_MASK_SHIFT + 1))
>> +#define normalize_addr(u) ((signed long)(u << normalize_addr_shift) \
>> + >> normalize_addr_shift)
>>
>> -static inline bool is_hypervisor_range(int idx)
>> -{
>> -#ifdef CONFIG_X86_64
>> - /*
>> - * A hole in the beginning of kernel address space reserved
>> - * for a hypervisor.
>> - */
>> - return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
>> - (idx < pgd_index(GUARD_HOLE_END_ADDR));
>> + {0, PTRS_PER_PGD * PGD_LEVEL_MULT / 2},
>> + {normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT / 2), ~0UL},
>
> This blows up because PGD_LEVEL_MULT is sometimes not a constant.
>
> x86_64 allmodconfig:
>
> In file included from ./arch/x86/include/asm/pgtable_types.h:249:0,
> from ./arch/x86/include/asm/paravirt_types.h:45,
> from ./arch/x86/include/asm/ptrace.h:94,
> from ./arch/x86/include/asm/math_emu.h:5,
> from ./arch/x86/include/asm/processor.h:12,
> from ./arch/x86/include/asm/cpufeature.h:5,
> from ./arch/x86/include/asm/thread_info.h:53,
> from ./include/linux/thread_info.h:38,
> from ./arch/x86/include/asm/preempt.h:7,
> from ./include/linux/preempt.h:78,
> from ./include/linux/spinlock.h:51,
> from ./include/linux/wait.h:9,
> from ./include/linux/wait_bit.h:8,
> from ./include/linux/fs.h:6,
> from ./include/linux/debugfs.h:15,
> from arch/x86/mm/dump_pagetables.c:11:
> ./arch/x86/include/asm/pgtable_64_types.h:56:22: error: initializer element is not constant
> #define PTRS_PER_PGD 512
> ^
This is very unhelpful of GCC: the caret lands on PTRS_PER_PGD (which really
is the literal 512), but it's actually PTRS_PER_P4D, pulled in via
PGD_LEVEL_MULT, which isn't constant!
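
For the archives, the chain that drags the runtime value in looks roughly
like this (from memory, so worth double-checking against dump_pagetables.c
and pgtable_64_types.h):

/*
 * ptdump_ranges[0].end
 *   = PTRS_PER_PGD * PGD_LEVEL_MULT / 2
 *   = 512 * (PTRS_PER_P4D * P4D_LEVEL_MULT) / 2     -- dump_pagetables.c
 *
 * With CONFIG_X86_5LEVEL, PTRS_PER_P4D resolves to a boot-time variable
 * (512 when 5-level paging is actually enabled, 1 otherwise), so the whole
 * expression is not a constant expression -- but gcc's diagnostic only
 * names the outermost macro, PTRS_PER_PGD.
 */
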
> arch/x86/mm/dump_pagetables.c:363:6: note: in expansion of macro ‘PTRS_PER_PGD’
> {0, PTRS_PER_PGD * PGD_LEVEL_MULT / 2},
> ^~~~~~~~~~~~
> ./arch/x86/include/asm/pgtable_64_types.h:56:22: note: (near initialization for ‘ptdump_ranges[0].end’)
> #define PTRS_PER_PGD 512
> ^
> arch/x86/mm/dump_pagetables.c:363:6: note: in expansion of macro ‘PTRS_PER_PGD’
> {0, PTRS_PER_PGD * PGD_LEVEL_MULT / 2},
> ^~~~~~~~~~~~
> arch/x86/mm/dump_pagetables.c:360:27: error: initializer element is not constant
> #define normalize_addr(u) ((signed long)(u << normalize_addr_shift) \
> ^
> arch/x86/mm/dump_pagetables.c:364:3: note: in expansion of macro ‘normalize_addr’
> {normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT / 2), ~0UL},
> ^~~~~~~~~~~~~~
> arch/x86/mm/dump_pagetables.c:360:27: note: (near initialization for ‘ptdump_ranges[1].start’)
> #define normalize_addr(u) ((signed long)(u << normalize_addr_shift) \
> ^
> arch/x86/mm/dump_pagetables.c:364:3: note: in expansion of macro ‘normalize_addr’
> {normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT / 2), ~0UL},
>
> I don't know what to do about this so I'll drop the series.
My best solution to this is simply to make ptdump_ranges dynamic by moving
it into ptdump_walk_pgd_level_core(), so the initializer is evaluated at run
time (see below). There are other problems with this series as well (thanks
for spotting them), so I'll send out another version later.
Thanks,
Steve
----8<-----
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 998c7f46763c..8fc129ff985e 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -353,7 +353,10 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
 	}
 }
 
-static const struct ptdump_range ptdump_ranges[] = {
+static void ptdump_walk_pgd_level_core(struct seq_file *m, struct mm_struct *mm,
+				       bool checkwx, bool dmesg)
+{
+	const struct ptdump_range ptdump_ranges[] = {
 #ifdef CONFIG_X86_64
 
 #define normalize_addr_shift (64 - (__VIRTUAL_MASK_SHIFT + 1))
@@ -368,9 +371,6 @@ static const struct ptdump_range ptdump_ranges[] = {
 	{0, 0}
 };
 
-static void ptdump_walk_pgd_level_core(struct seq_file *m, struct mm_struct *mm,
-				       bool checkwx, bool dmesg)
-{
 	struct pg_state st = {
 		.ptdump = {
 			.note_page	= note_page,
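
For completeness, here is a stripped-down, standalone illustration (invented
names, nothing taken from the real file) of why moving the array inside the
function is enough: an object with static storage duration needs a constant
initializer, whereas a block-scope array may be initialised from values only
known at run time. ptdump_ranges is only a handful of entries, so putting it
on the function's stack should be harmless.

struct range { unsigned long start, end; };

unsigned int ptrs_per_p4d_runtime;	/* stand-in for the boot-time value */

#if 0	/* file scope => static storage duration => must be constant: fails */
static const struct range ranges[] = {
	{ 0, ptrs_per_p4d_runtime * 512UL },
};
#endif

void walk_ranges(void)
{
	/* automatic storage duration: the initializer may use runtime values */
	const struct range ranges[] = {
		{ 0, ptrs_per_p4d_runtime * 512UL },
		{ 0, 0 }			/* terminator, as in ptdump_ranges */
	};

	(void)ranges;				/* placeholder for the actual walk */
}
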