[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251223094227.1771-1-hdanton@sina.com>
Date: Tue, 23 Dec 2025 17:42:26 +0800
From: Hillf Danton <hdanton@...a.com>
To: syzbot <syzbot+b165fc2e11771c66d8ba@...kaller.appspotmail.com>
Cc: linux-kernel@...r.kernel.org,
syzkaller-bugs@...glegroups.com
Subject: Re: [syzbot] [mm?] WARNING in folio_remove_rmap_ptes
> Date: Mon, 22 Dec 2025 21:23:17 -0800
> Hello,
>
> syzbot found the following issue on:
>
> HEAD commit: 9094662f6707 Merge tag 'ata-6.19-rc2' of git://git.kernel...
> git tree: upstream
> console output: https://syzkaller.appspot.com/x/log.txt?x=1411f77c580000
> kernel config: https://syzkaller.appspot.com/x/.config?x=a11e0f726bfb6765
> dashboard link: https://syzkaller.appspot.com/bug?extid=b165fc2e11771c66d8ba
> compiler: gcc (Debian 12.2.0-14+deb12u1) 12.2.0, GNU ld (GNU Binutils for Debian) 2.40
> syz repro: https://syzkaller.appspot.com/x/repro.syz?x=11998b1a580000
> C reproducer: https://syzkaller.appspot.com/x/repro.c?x=128cdb1a580000
#syz test
--- x/include/linux/mm.h
+++ y/include/linux/mm.h
@@ -2626,6 +2626,9 @@ static inline void zap_vma_pages(struct
void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *start_vma, unsigned long start,
unsigned long end, unsigned long tree_end);
+void lock_unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
+ struct vm_area_struct *start_vma, unsigned long start,
+ unsigned long end, unsigned long tree_end);
struct mmu_notifier_range;
--- x/mm/memory.c
+++ y/mm/memory.c
@@ -2104,7 +2104,31 @@ void unmap_vmas(struct mmu_gather *tlb,
} while (vma && likely(!xa_is_zero(vma)));
mmu_notifier_invalidate_range_end(&range);
}
+/*
+ * lock_unmap_vmas - unmap a range of (detached) VMAs, write-locking each one.
+ *
+ * Variant of unmap_vmas(): iterates the VMAs supplied via @mas starting at
+ * @vma, tearing down user page mappings in [@start_addr, @end_addr) under a
+ * single mmu_notifier invalidate_range_start/end pair.  The one behavioral
+ * difference from unmap_vmas() is the vma_start_write() call before each
+ * unmap_single_vma(), which takes the per-VMA write lock so that lockless
+ * (per-VMA-lock) page faults cannot race with the teardown — presumably the
+ * intended fix for the folio_remove_rmap_ptes warning; to be confirmed by
+ * the syzbot test run.
+ *
+ * @tlb:       caller's mmu_gather for batched TLB flushing
+ * @mas:       maple-tree state positioned over the VMAs to unmap
+ * @vma:       first VMA to process
+ * @start_addr/@end_addr: virtual address range to zap in each VMA
+ * @tree_end:  exclusive maple-tree index bound for mas_find()
+ */
+void lock_unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
+ struct vm_area_struct *vma, unsigned long start_addr,
+ unsigned long end_addr, unsigned long tree_end)
+{
+ struct mmu_notifier_range range;
+ struct zap_details details = {
+ .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
+ /* Careful - we need to zap private pages too! */
+ .even_cows = true,
+ };
+ /* One notifier window covers the whole batch of VMAs. */
+ mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
+ start_addr, end_addr);
+ mmu_notifier_invalidate_range_start(&range);
+ do {
+ /*
+  * Per-iteration copies: hugetlb_zap_begin() may widen the
+  * range to hugepage boundaries for this VMA only.
+  */
+ unsigned long start = start_addr;
+ unsigned long end = end_addr;
+ hugetlb_zap_begin(vma, &start, &end);
+ /* Exclude concurrent per-VMA-lock faults before zapping. */
+ vma_start_write(vma);
+ unmap_single_vma(tlb, vma, start, end, &details);
+ hugetlb_zap_end(vma, &details);
+ /* Advance to the next VMA; a zero entry terminates the walk. */
+ vma = mas_find(mas, tree_end - 1);
+ } while (vma && likely(!xa_is_zero(vma)));
+ mmu_notifier_invalidate_range_end(&range);
+}
/**
* zap_page_range_single_batched - remove user pages in a given range
* @tlb: pointer to the caller's struct mmu_gather
--- x/mm/vma.c
+++ y/mm/vma.c
@@ -1228,7 +1228,7 @@ static inline void vms_clear_ptes(struct
mas_set(mas_detach, 1);
tlb_gather_mmu(&tlb, vms->vma->vm_mm);
update_hiwater_rss(vms->vma->vm_mm);
- unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
+ lock_unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
vms->vma_count);
mas_set(mas_detach, 1);
@@ -1271,8 +1271,6 @@ static void vms_complete_munmap_vmas(str
mm = current->mm;
mm->map_count -= vms->vma_count;
mm->locked_vm -= vms->locked_vm;
- if (vms->unlock)
- mmap_write_downgrade(mm);
if (!vms->nr_pages)
return;
@@ -1298,7 +1296,7 @@ static void vms_complete_munmap_vmas(str
vm_unacct_memory(vms->nr_accounted);
validate_mm(mm);
if (vms->unlock)
- mmap_read_unlock(mm);
+ mmap_write_unlock(mm);
__mt_destroy(mas_detach->tree);
}
--
Powered by blists - more mailing lists