Message-ID: <CAJZ5v0gE=iqsJVPrihox0JYpC4-q08p3ELnNst0g+ExYNYWT5g@mail.gmail.com>
Date: Mon, 19 Aug 2024 21:26:47 +0200
From: "Rafael J. Wysocki" <rafael@...nel.org>
To: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Cc: Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>, 
	Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org, 
	"H. Peter Anvin" <hpa@...or.com>, "Rafael J. Wysocki" <rafael@...nel.org>, Andy Lutomirski <luto@...nel.org>, 
	Peter Zijlstra <peterz@...radead.org>, Baoquan He <bhe@...hat.com>, Ard Biesheuvel <ardb@...nel.org>, 
	Tom Lendacky <thomas.lendacky@....com>, Andrew Morton <akpm@...ux-foundation.org>, 
	Thomas Zimmermann <tzimmermann@...e.de>, Sean Christopherson <seanjc@...gle.com>, linux-kernel@...r.kernel.org, 
	linux-acpi@...r.kernel.org, Kai Huang <kai.huang@...el.com>
Subject: Re: [PATCHv3 2/4] x86/acpi: Replace manual page table initialization
 with kernel_ident_mapping_init()

On Mon, Aug 19, 2024 at 9:08 AM Kirill A. Shutemov
<kirill.shutemov@...ux.intel.com> wrote:
>
> The function init_transition_pgtable() maps the page with
> asm_acpi_mp_play_dead() into an identity mapping.
>
> Replace manual page table initialization with kernel_ident_mapping_init()
> to avoid code duplication. Use x86_mapping_info::offset to get the page
> mapped at the correct location.
>
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
> Reviewed-by: Kai Huang <kai.huang@...el.com>
> Reviewed-by: Tom Lendacky <thomas.lendacky@....com>
> ---
>  arch/x86/kernel/acpi/madt_wakeup.c | 73 ++++++------------------------
>  1 file changed, 15 insertions(+), 58 deletions(-)
>
> diff --git a/arch/x86/kernel/acpi/madt_wakeup.c b/arch/x86/kernel/acpi/madt_wakeup.c
> index d5ef6215583b..78960b338be9 100644
> --- a/arch/x86/kernel/acpi/madt_wakeup.c
> +++ b/arch/x86/kernel/acpi/madt_wakeup.c
> @@ -70,58 +70,6 @@ static void __init free_pgt_page(void *pgt, void *dummy)
>         return memblock_free(pgt, PAGE_SIZE);
>  }
>
> -/*
> - * Make sure asm_acpi_mp_play_dead() is present in the identity mapping at
> - * the same place as in the kernel page tables. asm_acpi_mp_play_dead() switches
> - * to the identity mapping and the function has be present at the same spot in
> - * the virtual address space before and after switching page tables.
> - */
> -static int __init init_transition_pgtable(pgd_t *pgd)
> -{
> -       pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
> -       unsigned long vaddr, paddr;
> -       p4d_t *p4d;
> -       pud_t *pud;
> -       pmd_t *pmd;
> -       pte_t *pte;
> -
> -       vaddr = (unsigned long)asm_acpi_mp_play_dead;
> -       pgd += pgd_index(vaddr);
> -       if (!pgd_present(*pgd)) {
> -               p4d = (p4d_t *)alloc_pgt_page(NULL);
> -               if (!p4d)
> -                       return -ENOMEM;
> -               set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
> -       }
> -       p4d = p4d_offset(pgd, vaddr);
> -       if (!p4d_present(*p4d)) {
> -               pud = (pud_t *)alloc_pgt_page(NULL);
> -               if (!pud)
> -                       return -ENOMEM;
> -               set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
> -       }
> -       pud = pud_offset(p4d, vaddr);
> -       if (!pud_present(*pud)) {
> -               pmd = (pmd_t *)alloc_pgt_page(NULL);
> -               if (!pmd)
> -                       return -ENOMEM;
> -               set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
> -       }
> -       pmd = pmd_offset(pud, vaddr);
> -       if (!pmd_present(*pmd)) {
> -               pte = (pte_t *)alloc_pgt_page(NULL);
> -               if (!pte)
> -                       return -ENOMEM;
> -               set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
> -       }
> -       pte = pte_offset_kernel(pmd, vaddr);
> -
> -       paddr = __pa(vaddr);
> -       set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
> -
> -       return 0;
> -}
> -
>  static int __init acpi_mp_setup_reset(u64 reset_vector)
>  {
>         struct x86_mapping_info info = {
> @@ -130,6 +78,7 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
>                 .page_flag      = __PAGE_KERNEL_LARGE_EXEC,
>                 .kernpg_flag    = _KERNPG_TABLE_NOENC,
>         };
> +       unsigned long mstart, mend;
>         pgd_t *pgd;
>
>         pgd = alloc_pgt_page(NULL);
> @@ -137,8 +86,6 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
>                 return -ENOMEM;
>
>         for (int i = 0; i < nr_pfn_mapped; i++) {
> -               unsigned long mstart, mend;
> -
>                 mstart = pfn_mapped[i].start << PAGE_SHIFT;
>                 mend   = pfn_mapped[i].end << PAGE_SHIFT;
>                 if (kernel_ident_mapping_init(&info, pgd, mstart, mend)) {
> @@ -147,14 +94,24 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
>                 }
>         }
>
> -       if (kernel_ident_mapping_init(&info, pgd,
> -                                     PAGE_ALIGN_DOWN(reset_vector),
> -                                     PAGE_ALIGN(reset_vector + 1))) {
> +       mstart = PAGE_ALIGN_DOWN(reset_vector);
> +       mend = mstart + PAGE_SIZE;
> +       if (kernel_ident_mapping_init(&info, pgd, mstart, mend)) {
>                 kernel_ident_mapping_free(&info, pgd);
>                 return -ENOMEM;
>         }
>
> -       if (init_transition_pgtable(pgd)) {
> +       /*
> +        * Make sure asm_acpi_mp_play_dead() is present in the identity mapping
> +        * at the same place as in the kernel page tables.
> +        * asm_acpi_mp_play_dead() switches to the identity mapping and the
> +        * function has be present at the same spot in the virtual address space

s/has be/must be/

Otherwise LGTM

> +        * before and after switching page tables.
> +        */
> +       info.offset = __START_KERNEL_map - phys_base;
> +       mstart = PAGE_ALIGN_DOWN(__pa(asm_acpi_mp_play_dead));
> +       mend = mstart + PAGE_SIZE;
> +       if (kernel_ident_mapping_init(&info, pgd, mstart, mend)) {
>                 kernel_ident_mapping_free(&info, pgd);
>                 return -ENOMEM;
>         }
> --
> 2.43.0
>
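
For anyone following along, here is a minimal userspace sketch of the offset
arithmetic the patch relies on (not kernel code; the physical address below is
made up for illustration). With x86_mapping_info::offset left at 0,
kernel_ident_mapping_init() builds a plain identity mapping (vaddr == paddr);
with offset = __START_KERNEL_map - phys_base, the page holding
asm_acpi_mp_play_dead() lands at its kernel-text virtual address instead:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative constants: __START_KERNEL_map is fixed on x86-64,
	 * phys_base is the runtime relocation offset (0 here for simplicity). */
	#define START_KERNEL_MAP	0xffffffff80000000ULL
	#define PHYS_BASE		0x0ULL

	int main(void)
	{
		uint64_t play_dead_paddr = 0x1234000ULL;	/* made-up physical address */

		/* offset == 0: plain identity mapping, vaddr == paddr */
		uint64_t ident_vaddr = play_dead_paddr + 0;

		/* offset == __START_KERNEL_map - phys_base: the same physical
		 * page shows up at its kernel-text virtual address */
		uint64_t text_vaddr = play_dead_paddr + (START_KERNEL_MAP - PHYS_BASE);

		printf("identity mapping:    %#llx\n", (unsigned long long)ident_vaddr);
		printf("kernel-text mapping: %#llx\n", (unsigned long long)text_vaddr);
		return 0;
	}

That is why info.offset is set just before the second
kernel_ident_mapping_init() call in the patch: the trampoline page ends up at
the same virtual address before and after the page-table switch.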
