Date:   Tue, 5 Sep 2023 18:59:59 +0800
From:   Huacai Chen <chenhuacai@...nel.org>
To:     Bibo Mao <maobibo@...ngson.cn>
Cc:     WANG Xuerui <kernel@...0n.name>,
        Andrew Morton <akpm@...ux-foundation.org>,
        David Hildenbrand <david@...hat.com>,
        loongarch@...ts.linux.dev, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/2] LoongArch: Use static defined zero page rather than allocated

Hi, Bibo,

On Tue, Sep 5, 2023 at 4:01 PM Bibo Mao <maobibo@...ngson.cn> wrote:
>
> Like other popular architectures, define the zero page in the kernel
> BSS segment rather than in a dynamically allocated page. This is
> simpler.
>
> Signed-off-by: Bibo Mao <maobibo@...ngson.cn>
> ---
>  arch/loongarch/include/asm/mmzone.h  |  2 --
>  arch/loongarch/include/asm/pgtable.h |  6 ++----
>  arch/loongarch/kernel/numa.c         |  1 -
>  arch/loongarch/mm/init.c             | 21 +--------------------
>  4 files changed, 3 insertions(+), 27 deletions(-)
>
> diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
> index fe67d0b4b33d..2b9a90727e19 100644
> --- a/arch/loongarch/include/asm/mmzone.h
> +++ b/arch/loongarch/include/asm/mmzone.h
> @@ -13,6 +13,4 @@ extern struct pglist_data *node_data[];
>
>  #define NODE_DATA(nid) (node_data[(nid)])
>
> -extern void setup_zero_pages(void);
> -
>  #endif /* _ASM_MMZONE_H_ */
> diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
> index 342c5f9c25d2..70bd57daf42a 100644
> --- a/arch/loongarch/include/asm/pgtable.h
> +++ b/arch/loongarch/include/asm/pgtable.h
> @@ -70,10 +70,8 @@ struct vm_area_struct;
>   * for zero-mapped memory areas etc..
>   */
>
> -extern unsigned long empty_zero_page;
> -
> -#define ZERO_PAGE(vaddr) \
> -       (virt_to_page((void *)(empty_zero_page)))
> +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
> +#define ZERO_PAGE(vaddr)       ((void)(vaddr), virt_to_page(empty_zero_page))
Can we simply use virt_to_page(empty_zero_page) here? Only x86 uses the
((void)(vaddr), ...) form now.
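
For reference, a minimal sketch of the plainer form most other
architectures use (illustrative only, not the posted patch, and assuming
no caller needs vaddr to be evaluated):

	/* Sketch: ignore vaddr entirely; empty_zero_page is the
	 * page-aligned BSS array declared above. */
	#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)

The ((void)(vaddr), ...) cast that x86 keeps appears to be there only to
silence set-but-unused warnings in callers that do nothing else with
vaddr.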

Huacai
>
>  /*
>   * TLB refill handlers may also map the vmalloc area into xkvrange.
> diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
> index 708665895b47..6f464d49f0c2 100644
> --- a/arch/loongarch/kernel/numa.c
> +++ b/arch/loongarch/kernel/numa.c
> @@ -470,7 +470,6 @@ void __init mem_init(void)
>  {
>         high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
>         memblock_free_all();
> -       setup_zero_pages();     /* This comes from node 0 */
>  }
>
>  int pcibus_to_node(struct pci_bus *bus)
> diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
> index 8ec668f97b00..628ebe42b519 100644
> --- a/arch/loongarch/mm/init.c
> +++ b/arch/loongarch/mm/init.c
> @@ -35,27 +35,9 @@
>  #include <asm/pgalloc.h>
>  #include <asm/tlb.h>
>
> -unsigned long empty_zero_page;
> +unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
>  EXPORT_SYMBOL(empty_zero_page);
>
> -void setup_zero_pages(void)
> -{
> -       unsigned int order, i;
> -       struct page *page;
> -
> -       order = 0;
> -
> -       empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
> -       if (!empty_zero_page)
> -               panic("Oh boy, that early out of memory?");
> -
> -       page = virt_to_page((void *)empty_zero_page);
> -       split_page(page, order);
> -       for (i = 0; i < (1 << order); i++, page++)
> -               mark_page_reserved(page);
> -
> -}
> -
>  void copy_user_highpage(struct page *to, struct page *from,
>         unsigned long vaddr, struct vm_area_struct *vma)
>  {
> @@ -99,7 +81,6 @@ void __init mem_init(void)
>         high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
>
>         memblock_free_all();
> -       setup_zero_pages();     /* Setup zeroed pages.  */
>  }
>  #endif /* !CONFIG_NUMA */
>
> --
> 2.27.0
>
>
