[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <d148f86c-b27b-63fb-31d2-35b8f52ec540@huawei.com>
Date: Wed, 1 Jul 2020 15:10:35 +0800
From: Zefan Li <lizefan@...wei.com>
To: Nicholas Piggin <npiggin@...il.com>, <linux-mm@...ck.org>
CC: <linux-kernel@...r.kernel.org>, <linux-arch@...r.kernel.org>,
<linuxppc-dev@...ts.ozlabs.org>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
<linux-arm-kernel@...ts.infradead.org>,
"Thomas Gleixner" <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"Borislav Petkov" <bp@...en8.de>, <x86@...nel.org>,
"H. Peter Anvin" <hpa@...or.com>
Subject: Re: [PATCH v2 4/4] mm/vmalloc: Hugepage vmalloc mappings
> static void *__vmalloc_node(unsigned long size, unsigned long align,
> - gfp_t gfp_mask, pgprot_t prot,
> - int node, const void *caller);
> + gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags,
> + int node, const void *caller);
> static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
> - pgprot_t prot, int node)
> + pgprot_t prot, unsigned int page_shift,
> + int node)
> {
> struct page **pages;
> + unsigned long addr = (unsigned long)area->addr;
> + unsigned long size = get_vm_area_size(area);
> + unsigned int page_order = page_shift - PAGE_SHIFT;
> unsigned int nr_pages, array_size, i;
> const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
> const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
> const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
> - 0 :
> - __GFP_HIGHMEM;
> + 0 : __GFP_HIGHMEM;
>
> - nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
> + nr_pages = size >> page_shift;
While trying out this patchset, we encountered a BUG_ON in account_kernel_stack()
in kernel/fork.c:
BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
which obviously needs to be updated accordingly, since with huge vmalloc mappings
the stack area's nr_pages no longer matches THREAD_SIZE / PAGE_SIZE.
> array_size = (nr_pages * sizeof(struct page *));
>
> /* Please note that the recursion is strictly bounded. */
> if (array_size > PAGE_SIZE) {
> pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
> - PAGE_KERNEL, node, area->caller);
> + PAGE_KERNEL, 0, node, area->caller);
> } else {
> pages = kmalloc_node(array_size, nested_gfp, node);
> }
Powered by blists - more mailing lists