[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <61833f93-31b5-206f-b8c7-a2e55091ac@google.com>
Date: Tue, 25 Jan 2022 22:45:08 -0800 (PST)
From: David Rientjes <rientjes@...gle.com>
To: Pasha Tatashin <pasha.tatashin@...een.com>
cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
akpm@...ux-foundation.org, pjt@...gle.com, weixugc@...gle.com,
gthelen@...gle.com, mingo@...hat.com, will@...nel.org,
rppt@...nel.org, dave.hansen@...ux.intel.com, hpa@...or.com,
aneesh.kumar@...ux.ibm.com, jirislaby@...nel.org,
songmuchun@...edance.com, qydwhotmail@...il.com, hughd@...gle.com,
ziy@...dia.com, anshuman.khandual@....com
Subject: Re: [PATCH v3 4/4] mm/page_table_check: check entries at pmd
levels
On Wed, 26 Jan 2022, Pasha Tatashin wrote:
> syzbot detected a case where the page table counters were not properly
> updated.
>
Is there a Reported-by tag that syzbot wants us to use to track this?
> syzkaller login: ------------[ cut here ]------------
> kernel BUG at mm/page_table_check.c:162!
> invalid opcode: 0000 [#1] PREEMPT SMP KASAN
> CPU: 0 PID: 3099 Comm: pasha Not tainted 5.16.0+ #48
> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIO4
> RIP: 0010:__page_table_check_zero+0x159/0x1a0
> Code: 7d 3a b2 ff 45 39 f5 74 2a e8 43 38 b2 ff 4d 85 e4 01
> RSP: 0018:ffff888010667418 EFLAGS: 00010293
> RAX: 0000000000000000 RBX: 0000000000000001 RCX: 0000000000
> RDX: ffff88800cea8680 RSI: ffffffff81becaf9 RDI: 0000000003
> RBP: ffff888010667450 R08: 0000000000000001 R09: 0000000000
> R10: ffffffff81becaab R11: 0000000000000001 R12: ffff888008
> R13: 0000000000000001 R14: 0000000000000200 R15: dffffc0000
> FS: 0000000000000000(0000) GS:ffff888035e00000(0000) knlG0
> CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> CR2: 00007ffd875cad00 CR3: 00000000094ce000 CR4: 0000000000
> DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000
> DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000
> Call Trace:
> <TASK>
> free_pcp_prepare+0x3be/0xaa0
> free_unref_page+0x1c/0x650
> ? trace_hardirqs_on+0x6a/0x1d0
> free_compound_page+0xec/0x130
> free_transhuge_page+0x1be/0x260
> __put_compound_page+0x90/0xd0
> release_pages+0x54c/0x1060
> ? filemap_remove_folio+0x161/0x210
> ? lock_downgrade+0x720/0x720
> ? __put_page+0x150/0x150
> ? filemap_free_folio+0x164/0x350
> __pagevec_release+0x7c/0x110
> shmem_undo_range+0x85e/0x1250
> ...
>
> The repro involved a huge page that was split because a uprobe event
> temporarily replaced one of the pages backing it. Later the huge page
> was collapsed again, but the counters were off, as the PTE level
> was not properly updated.
>
> Make sure that when a PMD entry is cleared, the page table check
> accounting for the underlying PTE level is updated before the PTE
> page is freed.
>
> Fixes: df4e817b7108 ("mm: page table check")
>
> Signed-off-by: Pasha Tatashin <pasha.tatashin@...een.com>
> ---
> include/linux/page_table_check.h | 18 ++++++++++++++++++
> mm/khugepaged.c | 2 ++
> mm/page_table_check.c | 21 +++++++++++++++++++++
> 3 files changed, 41 insertions(+)
>
> diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
> index 38cace1da7b6..e88bbe37727b 100644
> --- a/include/linux/page_table_check.h
> +++ b/include/linux/page_table_check.h
> @@ -26,6 +26,8 @@ void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
> pmd_t *pmdp, pmd_t pmd);
> void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
> pud_t *pudp, pud_t pud);
> +void __page_table_check_pmd_clear_full(struct mm_struct *mm, unsigned long addr,
> + pmd_t pmd);
>
> static inline void page_table_check_alloc(struct page *page, unsigned int order)
> {
> @@ -100,6 +102,16 @@ static inline void page_table_check_pud_set(struct mm_struct *mm,
> __page_table_check_pud_set(mm, addr, pudp, pud);
> }
>
> +static inline void page_table_check_pmd_clear_full(struct mm_struct *mm,
> + unsigned long addr,
> + pmd_t pmd)
> +{
> + if (static_branch_likely(&page_table_check_disabled))
> + return;
> +
> + __page_table_check_pmd_clear_full(mm, addr, pmd);
> +}
> +
> #else
>
> static inline void page_table_check_alloc(struct page *page, unsigned int order)
> @@ -143,5 +155,11 @@ static inline void page_table_check_pud_set(struct mm_struct *mm,
> {
> }
>
> +static inline void page_table_check_pmd_clear_full(struct mm_struct *mm,
> + unsigned long addr,
> + pmd_t pmd)
> +{
> +}
> +
> #endif /* CONFIG_PAGE_TABLE_CHECK */
> #endif /* __LINUX_PAGE_TABLE_CHECK_H */
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 440112355ffe..eefe3706f6c2 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -16,6 +16,7 @@
> #include <linux/hashtable.h>
> #include <linux/userfaultfd_k.h>
> #include <linux/page_idle.h>
> +#include <linux/page_table_check.h>
> #include <linux/swapops.h>
> #include <linux/shmem_fs.h>
>
> @@ -1424,6 +1425,7 @@ static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v
>
> spin_unlock(ptl);
> mm_dec_nr_ptes(mm);
> + page_table_check_pmd_clear_full(mm, addr, pmd);
> pte_free(mm, pmd_pgtable(pmd));
> }
This looks right. I'm wondering whether we should also add a
mmap_assert_write_locked(mm) to collapse_and_free_pmd().
>
> diff --git a/mm/page_table_check.c b/mm/page_table_check.c
> index c61d7ebe13b1..251f95a808b4 100644
> --- a/mm/page_table_check.c
> +++ b/mm/page_table_check.c
> @@ -247,3 +247,24 @@ void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
> }
> }
> EXPORT_SYMBOL(__page_table_check_pud_set);
> +
> +/*
> + * Called when a PMD entry has been cleared and the PTE page it pointed
> + * to is about to be freed: walk all PTRS_PER_PTE entries and update the
> + * page table check counters for each still-present PTE. If the PMD was
> + * a leaf (huge) entry instead, account for it directly.
> + */
> +void __page_table_check_pmd_clear_full(struct mm_struct *mm, unsigned long addr,
> +				       pmd_t pmd)
> +{
> +	/* Kernel page tables are not tracked. */
> +	if (&init_mm == mm)
> +		return;
> +
> +	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
> +		pte_t *ptep = pte_offset_map(&pmd, addr);
> +		unsigned long i;
> +
> +		for (i = 0; i < PTRS_PER_PTE; i++) {
> +			__page_table_check_pte_clear(mm, addr, *ptep);
> +			addr += PAGE_SIZE;
> +			ptep++;
> +		}
> +		/*
> +		 * Unmap only after the last *ptep access above; unmapping
> +		 * first would leave the loop reading through an unmapped
> +		 * pointer on CONFIG_HIGHPTE. ptep was advanced past the
> +		 * end, so rewind to the pointer pte_offset_map() returned.
> +		 */
> +		pte_unmap(ptep - PTRS_PER_PTE);
> +	} else {
> +		__page_table_check_pmd_clear(mm, addr, pmd);
> +	}
> +}
> --
> 2.35.0.rc0.227.g00780c9af4-goog
>
>
Powered by blists - more mailing lists