Message-ID: <7db28eaf-f556-8ca5-e6b9-b932d4e786e2@ghiti.fr>
Date: Wed, 10 Mar 2021 14:18:53 -0500
From: Alex Ghiti <alex@...ti.fr>
To: Palmer Dabbelt <palmer@...belt.com>
Cc: Paul Walmsley <paul.walmsley@...ive.com>, aou@...s.berkeley.edu,
nylon7@...estech.com, nickhu@...estech.com,
aryabinin@...tuozzo.com, glider@...gle.com, dvyukov@...gle.com,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
kasan-dev@...glegroups.com
Subject: Re: [PATCH v2] riscv: Improve KASAN_VMALLOC support
On 3/9/21 at 9:37 PM, Palmer Dabbelt wrote:
> On Fri, 26 Feb 2021 10:01:54 PST (-0800), alex@...ti.fr wrote:
>> When the KASAN vmalloc region is populated, there is no userspace process
>> and the page table in use is swapper_pg_dir, so there is no need to read
>> SATP. We can then use the same scheme as the kasan_populate_p*d functions
>> to walk the page table, which harmonizes the code.
>>
>> In addition, make use of set_pgd, which goes through all unused page table
>> levels, contrary to the p*d_populate functions; this makes the function
>> work regardless of the number of page table levels.
>>
>> And finally, make sure the writes to swapper_pg_dir are visible using
>> an sfence.vma.
>
> So I think this is actually a bug: without the fence we could get a
> kasan-related fault at any point (as the mappings might not be visible
> yet), and if we get one when inside do_page_fault() (or while holding a
> lock it wants) we'll end up deadlocking against ourselves. That'll
> probably never happen in practice, but it'd still be good to get the
> fence onto fixes. The rest are cleanups; they're for for-next (and
> should probably be part of your sv48 series, if you need to re-spin it
> -- I'll look at that next).
I only mentioned sv48 support in the changelog because it explains why I
replaced the p*d_populate functions with set_p*d; this is not directly linked
to the sv48 patchset, it's just a bonus that it works for both :)
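
For context: set_pgd works for any number of levels because the asm-generic
folding headers cascade it down to the first non-folded level, while the
populate helpers for folded levels are no-ops. Roughly, abridged (not quoted
verbatim) from include/asm-generic/pgtable-nop4d.h and pgtable-nopud.h, which
the current 3-level sv39 configuration pulls in:

  /* pgtable-nop4d.h: p4d folded into pgd */
  #define pgd_populate(mm, pgd, p4d)	do { } while (0)
  #define set_pgd(pgdp, pgd)	set_p4d((p4d_t *)(pgdp), (p4d_t) { pgd })

  /* pgtable-nopud.h: pud folded into p4d */
  #define p4d_populate(mm, p4d, pud)	do { } while (0)
  #define set_p4d(p4dp, p4d)	set_pud((pud_t *)(p4dp), (pud_t) { p4d })

So a single set_pgd() write lands in the right place whether the kernel has
3 or 4 levels, which is why it replaces the level-specific populate calls.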
>
> LMK if you want to split this up, or if you want me to do it. Either way,
I'll split it up: one patch for the cleanup and one patch for the fix.
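
For reference, the fix part boils down to the local_flush_tlb_all() call at
the end of kasan_shallow_populate() below: on riscv that is a bare sfence.vma
(roughly the current definition in arch/riscv/include/asm/tlbflush.h):

  static inline void local_flush_tlb_all(void)
  {
  	__asm__ __volatile__ ("sfence.vma" : : : "memory");
  }

sfence.vma orders the stores to swapper_pg_dir before any later implicit
page-table walk on this hart, so a KASAN shadow access can no longer fault
on a not-yet-visible mapping.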
>
> Reviewed-by: Palmer Dabbelt <palmerdabbelt@...gle.com>
Thanks,
Alex
>
> Thanks!
>
>> Signed-off-by: Alexandre Ghiti <alex@...ti.fr>
>> ---
>>
>> Changes in v2:
>> - Quiet kernel test robot warnings about missing prototypes by declaring
>> the introduced functions as static.
>>
>> arch/riscv/mm/kasan_init.c | 61 +++++++++++++-------------------------
>> 1 file changed, 20 insertions(+), 41 deletions(-)
>>
>> diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
>> index e3d91f334b57..aaa3bdc0ffc0 100644
>> --- a/arch/riscv/mm/kasan_init.c
>> +++ b/arch/riscv/mm/kasan_init.c
>> @@ -11,18 +11,6 @@
>> #include <asm/fixmap.h>
>> #include <asm/pgalloc.h>
>>
>> -static __init void *early_alloc(size_t size, int node)
>> -{
>> - void *ptr = memblock_alloc_try_nid(size, size,
>> - __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
>> -
>> - if (!ptr)
>> - panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d
>> from=%llx\n",
>> - __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
>> -
>> - return ptr;
>> -}
>> -
>> extern pgd_t early_pg_dir[PTRS_PER_PGD];
>> asmlinkage void __init kasan_early_init(void)
>> {
>> @@ -155,38 +143,29 @@ static void __init kasan_populate(void *start, void *end)
>> memset(start, KASAN_SHADOW_INIT, end - start);
>> }
>>
>> -void __init kasan_shallow_populate(void *start, void *end)
>> +static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
>> {
>> - unsigned long vaddr = (unsigned long)start & PAGE_MASK;
>> - unsigned long vend = PAGE_ALIGN((unsigned long)end);
>> - unsigned long pfn;
>> - int index;
>> + unsigned long next;
>> void *p;
>> - pud_t *pud_dir, *pud_k;
>> - pgd_t *pgd_dir, *pgd_k;
>> - p4d_t *p4d_dir, *p4d_k;
>> -
>> - while (vaddr < vend) {
>> - index = pgd_index(vaddr);
>> - pfn = csr_read(CSR_SATP) & SATP_PPN;
>> - pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
>> - pgd_k = init_mm.pgd + index;
>> - pgd_dir = pgd_offset_k(vaddr);
>> - set_pgd(pgd_dir, *pgd_k);
>> -
>> - p4d_dir = p4d_offset(pgd_dir, vaddr);
>> - p4d_k = p4d_offset(pgd_k, vaddr);
>> -
>> - vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
>> - pud_dir = pud_offset(p4d_dir, vaddr);
>> - pud_k = pud_offset(p4d_k, vaddr);
>> -
>> - if (pud_present(*pud_dir)) {
>> - p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
>> - pud_populate(&init_mm, pud_dir, p);
>> + pgd_t *pgd_k = pgd_offset_k(vaddr);
>> +
>> + do {
>> + next = pgd_addr_end(vaddr, end);
>> + if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
>> + p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
>> + set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
>> }
>> - vaddr += PAGE_SIZE;
>> - }
>> + } while (pgd_k++, vaddr = next, vaddr != end);
>> +}
>> +
>> +static void __init kasan_shallow_populate(void *start, void *end)
>> +{
>> + unsigned long vaddr = (unsigned long)start & PAGE_MASK;
>> + unsigned long vend = PAGE_ALIGN((unsigned long)end);
>> +
>> + kasan_shallow_populate_pgd(vaddr, vend);
>> +
>> + local_flush_tlb_all();
>> }
>>
>> void __init kasan_init(void)
>