lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [day] [month] [year] [list]
Date:   Thu, 8 Apr 2021 19:30:59 +0800
From:   Huacai Chen <chenhuacai@...nel.org>
To:     Rui Wang <wangrui@...ngson.cn>
Cc:     Thomas Bogendoerfer <tsbogend@...ha.franken.de>,
        Jiaxun Yang <jiaxun.yang@...goat.com>,
        "open list:MIPS" <linux-mips@...r.kernel.org>,
        LKML <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v4 RESEND] MIPS: tlbex: Avoid access invalid address when
 pmd is modifying

Hi, Rui Wang,

On Fri, Feb 12, 2021 at 4:21 PM Rui Wang <wangrui@...ngson.cn> wrote:
>
> From: wangrui <wangrui@...ngson.cn>
>
> When a user-space program accesses a virtual address and falls into TLB invalid
> exception handling, and at almost the same time the pmd that contains this
> virtual address is hit by THP scanning, then an invalid address access may
> occur in the tlb handler:
>
>    CPU 0: (userspace)                 | CPU 1: (khugepaged)
> 1:                                    | scan hit: set pmde to invalid_pmd_table
>                                       |  (by pmd_clear)
> 2: handle_tlbl(tlb invalid):          |
>     load pmde for huge page testing,  |
>     pmde doesn't contain _PAGE_HUGE   |
>     bit                               |
> 3:                                    | collapsed: set pmde to huge page format
> 4: handle_tlbl(normal page case):     |
>     load pmde again as base address,  |
>     pmde doesn't contain an address,  |
>     access invalid address            |
>
> This patch avoids the inconsistency of two memory loads by reusing the result
> of one load.
>
You can CC stable@...r.kernel.org here.

> Signed-off-by: wangrui <wangrui@...ngson.cn>
> ---
>  arch/mips/mm/tlbex.c | 27 ++++++++++++---------------
>  1 file changed, 12 insertions(+), 15 deletions(-)
>
> diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
> index a7521b8f7658..2295f1e2db81 100644
> --- a/arch/mips/mm/tlbex.c
> +++ b/arch/mips/mm/tlbex.c
> @@ -721,13 +721,12 @@ static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
>   */
>  static void
>  build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
> -                 unsigned int pmd, int lid)
> +                 unsigned int pmde, int lid)
>  {
> -       UASM_i_LW(p, tmp, 0, pmd);
>         if (use_bbit_insns()) {
> -               uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
> +               uasm_il_bbit1(p, r, pmde, ilog2(_PAGE_HUGE), lid);
>         } else {
> -               uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
> +               uasm_i_andi(p, tmp, pmde, _PAGE_HUGE);
>                 uasm_il_bnez(p, r, tmp, lid);
>         }
>  }
> @@ -1103,7 +1102,6 @@ EXPORT_SYMBOL_GPL(build_update_entries);
>  struct mips_huge_tlb_info {
>         int huge_pte;
>         int restore_scratch;
> -       bool need_reload_pte;
>  };
>
>  static struct mips_huge_tlb_info
> @@ -1118,7 +1116,6 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
>
>         rv.huge_pte = scratch;
>         rv.restore_scratch = 0;
> -       rv.need_reload_pte = false;
>
>         if (check_for_high_segbits) {
>                 UASM_i_MFC0(p, tmp, C0_BADVADDR);
> @@ -1323,7 +1320,6 @@ static void build_r4000_tlb_refill_handler(void)
>         } else {
>                 htlb_info.huge_pte = K0;
>                 htlb_info.restore_scratch = 0;
> -               htlb_info.need_reload_pte = true;
>                 vmalloc_mode = refill_noscratch;
>                 /*
>                  * create the plain linear handler
> @@ -1348,11 +1344,14 @@ static void build_r4000_tlb_refill_handler(void)
>                 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
>  #endif
>
> +               UASM_i_LW(&p, K0, 0, K1); /* get pmd entry in K0 */
>  #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
> -               build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
> +               build_is_huge_pte(&p, &r, K1, K0, label_tlb_huge_update);
>  #endif
>
> -               build_get_ptep(&p, K0, K1);
You remove the build_get_ptep here, but this may be wrong for NEVADA.

Huacai
> +               GET_CONTEXT(&p, K1); /* get context reg */
> +               build_adjust_context(&p, K1);
> +               UASM_i_ADDU(&p, K1, K0, K1); /* add in offset */
>                 build_update_entries(&p, K0, K1);
>                 build_tlb_write_entry(&p, &l, &r, tlb_random);
>                 uasm_l_leave(&l, p);
> @@ -1360,8 +1359,6 @@ static void build_r4000_tlb_refill_handler(void)
>         }
>  #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
>         uasm_l_tlb_huge_update(&l, p);
> -       if (htlb_info.need_reload_pte)
> -               UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
>         build_huge_update_entries(&p, htlb_info.huge_pte, K1);
>         build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
>                                    htlb_info.restore_scratch);
> @@ -2059,20 +2056,20 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
>         build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
>  #endif
>
> +       UASM_i_LW(p, wr.r3, 0, wr.r2); /* get pmd entry in wr.r3 */
>  #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
>         /*
> -        * For huge tlb entries, pmd doesn't contain an address but
> +        * For huge tlb entries, pmde doesn't contain an address but
>          * instead contains the tlb pte. Check the PAGE_HUGE bit and
>          * see if we need to jump to huge tlb processing.
>          */
> -       build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
> +       build_is_huge_pte(p, r, wr.r1, wr.r3, label_tlb_huge_update);
>  #endif
>
>         UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
> -       UASM_i_LW(p, wr.r2, 0, wr.r2);
>         UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
>         uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
> -       UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
> +       UASM_i_ADDU(p, wr.r2, wr.r3, wr.r1);
>
>  #ifdef CONFIG_SMP
>         uasm_l_smp_pgtable_change(l, *p);
> --
> 2.30.1
>

Powered by blists - more mailing lists