lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [day] [month] [year] [list]
Message-ID: <tencent_60B7A97D0D7317998DF607DE3B264E2E0406@qq.com>
Date: Wed, 2 Apr 2025 22:04:14 -0400
From: "ffhgfv" <xnxc22xnxc22@...com>
To: "konishi.ryusuke" <konishi.ryusuke@...il.com>
Cc: "linux-nilfs" <linux-nilfs@...r.kernel.org>, "linux-kernel" <linux-kernel@...r.kernel.org>
Subject: Linux6.14-rc5 BUG:  possible deadlock in nilfs_bmap_lookup_at_level 

Hello, I found a bug titled "BUG: possible deadlock in nilfs_bmap_lookup_at_level" with a modified syzkaller in Linux 6.14-rc5.
If you fix this issue, please add the following tag to the commit: Reported-by: Jianzhou Zhao <xnxc22xnxc22@...com>, xingwei lee <xrivendell7@...il.com>
I use the same kernel as the upstream syzbot instance: 7eb172143d5508b4da468ed59ee857c6e5e01da6
kernel config: https://syzkaller.appspot.com/text?tag=KernelConfig&x=da4b04ae798b7ef6
compiler: gcc version 11.4.0
------------[ cut here ]-----------------------------------------
 TITLE: possible deadlock in nilfs_bmap_lookup_at_level
==================================================================
WARNING: possible circular locking dependency detected
6.14.0-rc5-dirty #17 Not tainted
------------------------------------------------------
segctord/13049 is trying to acquire lock:
ffff88805a1400c0 (&nilfs_bmap_dat_lock_key){++++}-{4:4}, at: nilfs_bmap_lookup_at_level+0x7f/0x460 fs/nilfs2/bmap.c:65

but task is already holding lock:
ffff88805a142d60 (&bmap->b_sem){++++}-{4:4}, at: nilfs_bmap_propagate+0x25/0x170 fs/nilfs2/bmap.c:328

which lock already depends on the new lock.


the existing dependency chain (in reverse order) is:

-> #4 (&bmap->b_sem){++++}-{4:4}:
       down_write+0x92/0x200 kernel/locking/rwsem.c:1577
       nilfs_bmap_clear+0x1c/0xa0 fs/nilfs2/bmap.c:305
       nilfs_clear_inode+0x28a/0x330 fs/nilfs2/inode.c:836
       nilfs_evict_inode+0x3c1/0x530 fs/nilfs2/inode.c:856
       evict+0x3ef/0x860 fs/inode.c:796
       dispose_list+0x124/0x1f0 fs/inode.c:845
       prune_icache_sb+0xf0/0x150 fs/inode.c:1033
       super_cache_scan+0x2d2/0x490 fs/super.c:223
       do_shrink_slab+0x464/0x11d0 mm/shrinker.c:437
       shrink_slab_memcg mm/shrinker.c:550 [inline]
       shrink_slab+0x92e/0x12c0 mm/shrinker.c:628
       shrink_one+0x4a8/0x7d0 mm/vmscan.c:4868
       shrink_many mm/vmscan.c:4929 [inline]
       lru_gen_shrink_node mm/vmscan.c:5007 [inline]
       shrink_node+0x2355/0x3c10 mm/vmscan.c:5978
       kswapd_shrink_node mm/vmscan.c:6807 [inline]
       balance_pgdat+0xa85/0x1740 mm/vmscan.c:6999
       kswapd+0x4c0/0xbe0 mm/vmscan.c:7264
       kthread+0x427/0x880 kernel/kthread.c:464
       ret_from_fork+0x45/0x80 arch/x86/kernel/process.c:148
       ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244

-> #3 (fs_reclaim){+.+.}-{0:0}:
       __fs_reclaim_acquire mm/page_alloc.c:3853 [inline]
       fs_reclaim_acquire+0x102/0x150 mm/page_alloc.c:3867
       might_alloc include/linux/sched/mm.h:318 [inline]
       slab_pre_alloc_hook mm/slub.c:4066 [inline]
       slab_alloc_node mm/slub.c:4144 [inline]
       kmem_cache_alloc_node_noprof+0x52/0x4b0 mm/slub.c:4216
       __alloc_skb+0x2ad/0x370 net/core/skbuff.c:596
       alloc_skb include/linux/skbuff.h:1331 [inline]
       alloc_uevent_skb+0x7b/0x210 lib/kobject_uevent.c:289
       uevent_net_broadcast_untagged lib/kobject_uevent.c:326 [inline]
       kobject_uevent_net_broadcast lib/kobject_uevent.c:410 [inline]
       kobject_uevent_env+0xc87/0x16c0 lib/kobject_uevent.c:608
       kobject_synth_uevent+0x716/0x870 lib/kobject_uevent.c:207
       bus_uevent_store+0x3f/0xa0 drivers/base/bus.c:832
       bus_attr_store+0x71/0xb0 drivers/base/bus.c:172
       sysfs_kf_write+0x114/0x170 fs/sysfs/file.c:139
       kernfs_fop_write_iter+0x344/0x510 fs/kernfs/file.c:334
       new_sync_write fs/read_write.c:586 [inline]
       vfs_write fs/read_write.c:679 [inline]
       vfs_write+0xb9f/0x1100 fs/read_write.c:659
       ksys_write+0x122/0x240 fs/read_write.c:731
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0xcf/0x250 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #2 (uevent_sock_mutex){+.+.}-{4:4}:
       __mutex_lock_common kernel/locking/mutex.c:585 [inline]
       __mutex_lock+0x16f/0x2020 kernel/locking/mutex.c:730
       uevent_net_broadcast_untagged lib/kobject_uevent.c:317 [inline]
       kobject_uevent_net_broadcast lib/kobject_uevent.c:410 [inline]
       kobject_uevent_env+0xb0f/0x16c0 lib/kobject_uevent.c:608
       loop_set_size drivers/block/loop.c:235 [inline]
       loop_set_size.isra.0+0x9b/0xc0 drivers/block/loop.c:232
       loop_set_status+0x4d9/0xbe0 drivers/block/loop.c:1292
       loop_set_status_old+0x148/0x1c0 drivers/block/loop.c:1391
       lo_ioctl+0xd83/0x1c30 drivers/block/loop.c:1561
       blkdev_ioctl+0x27b/0x6d0 block/ioctl.c:693
       vfs_ioctl fs/ioctl.c:51 [inline]
       __do_sys_ioctl fs/ioctl.c:906 [inline]
       __se_sys_ioctl fs/ioctl.c:892 [inline]
       __x64_sys_ioctl+0x19b/0x210 fs/ioctl.c:892
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0xcf/0x250 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #1 (&q->q_usage_counter(io)#21){++++}-{0:0}:
       bio_queue_enter block/blk.h:78 [inline]
       blk_mq_submit_bio+0x1ca5/0x2570 block/blk-mq.c:3091
       __submit_bio+0x63b/0x10b0 block/blk-core.c:628
       __submit_bio_noacct_mq block/blk-core.c:715 [inline]
       submit_bio_noacct_nocheck block/blk-core.c:744 [inline]
       submit_bio_noacct_nocheck+0x678/0xd30 block/blk-core.c:721
       submit_bio_noacct+0x6c9/0x1ea0 block/blk-core.c:867
       nilfs_btnode_submit_block+0x523/0x6f0 fs/nilfs2/btnode.c:139
       __nilfs_btree_get_block+0x10a/0x6a0 fs/nilfs2/btree.c:481
       nilfs_btree_do_lookup+0x3f5/0x8f0 fs/nilfs2/btree.c:579
       nilfs_btree_lookup+0x45/0x70 fs/nilfs2/btree.c:695
       nilfs_bmap_lookup_at_level+0xd4/0x460 fs/nilfs2/bmap.c:66
       nilfs_bmap_lookup fs/nilfs2/bmap.h:182 [inline]
       nilfs_mdt_submit_block+0x1a1/0x890 fs/nilfs2/mdt.c:144
       nilfs_mdt_read_block+0x93/0x3c0 fs/nilfs2/mdt.c:178
       nilfs_mdt_get_block+0xd2/0xa10 fs/nilfs2/mdt.c:254
       nilfs_palloc_get_block.isra.0+0xc2/0x310 fs/nilfs2/alloc.c:239
       nilfs_palloc_get_entry_block+0x90/0xb0 fs/nilfs2/alloc.c:355
       nilfs_dat_translate+0x82/0x3b0 fs/nilfs2/dat.c:406
       nilfs_bmap_lookup_at_level+0x250/0x460 fs/nilfs2/bmap.c:71
       nilfs_bmap_lookup fs/nilfs2/bmap.h:182 [inline]
       nilfs_mdt_submit_block+0x1a1/0x890 fs/nilfs2/mdt.c:144
       nilfs_mdt_read_block+0x93/0x3c0 fs/nilfs2/mdt.c:178
       nilfs_mdt_get_block+0xd2/0xa10 fs/nilfs2/mdt.c:254
       nilfs_sufile_read+0x20e/0x4f0 fs/nilfs2/sufile.c:1243
       nilfs_load_super_root fs/nilfs2/the_nilfs.c:128 [inline]
       load_nilfs+0x6b5/0x13f0 fs/nilfs2/the_nilfs.c:308
       nilfs_fill_super fs/nilfs2/super.c:1075 [inline]
       nilfs_get_tree+0xa71/0x1100 fs/nilfs2/super.c:1228
       vfs_get_tree+0x90/0x340 fs/super.c:1814
       do_new_mount fs/namespace.c:3560 [inline]
       path_mount+0x1290/0x1bc0 fs/namespace.c:3887
       do_mount+0xb4/0x110 fs/namespace.c:3900
       __do_sys_mount fs/namespace.c:4111 [inline]
       __se_sys_mount fs/namespace.c:4088 [inline]
       __x64_sys_mount+0x193/0x230 fs/namespace.c:4088
       do_syscall_x64 arch/x86/entry/common.c:52 [inline]
       do_syscall_64+0xcf/0x250 arch/x86/entry/common.c:83
       entry_SYSCALL_64_after_hwframe+0x77/0x7f

-> #0 (&nilfs_bmap_dat_lock_key){++++}-{4:4}:
       check_prev_add kernel/locking/lockdep.c:3163 [inline]
       check_prevs_add kernel/locking/lockdep.c:3282 [inline]
       validate_chain kernel/locking/lockdep.c:3906 [inline]
       __lock_acquire+0x2846/0x46b0 kernel/locking/lockdep.c:5228
       lock_acquire kernel/locking/lockdep.c:5851 [inline]
       lock_acquire+0x1b6/0x570 kernel/locking/lockdep.c:5816
       down_read+0x9c/0x480 kernel/locking/rwsem.c:1524
       nilfs_bmap_lookup_at_level+0x7f/0x460 fs/nilfs2/bmap.c:65
       nilfs_bmap_lookup fs/nilfs2/bmap.h:182 [inline]
       nilfs_mdt_submit_block+0x1a1/0x890 fs/nilfs2/mdt.c:144
       nilfs_mdt_read_block+0x93/0x3c0 fs/nilfs2/mdt.c:178
       nilfs_mdt_get_block+0xd2/0xa10 fs/nilfs2/mdt.c:254
       nilfs_palloc_get_block.isra.0+0xc2/0x310 fs/nilfs2/alloc.c:239
       nilfs_palloc_get_desc_block+0x91/0xb0 fs/nilfs2/alloc.c:296
       nilfs_palloc_prepare_alloc_entry+0x27e/0xbb0 fs/nilfs2/alloc.c:607
       nilfs_dat_prepare_alloc+0x24/0x90 fs/nilfs2/dat.c:78
       nilfs_dat_prepare_update+0x59/0x80 fs/nilfs2/dat.c:250
       nilfs_direct_propagate fs/nilfs2/direct.c:279 [inline]
       nilfs_direct_propagate+0x1e4/0x330 fs/nilfs2/direct.c:261
       nilfs_bmap_propagate+0x79/0x170 fs/nilfs2/bmap.c:329
       nilfs_collect_file_data+0x49/0xe0 fs/nilfs2/segment.c:589
       nilfs_segctor_apply_buffers+0x149/0x490 fs/nilfs2/segment.c:1010
       nilfs_segctor_scan_file+0x3ed/0x570 fs/nilfs2/segment.c:1059
       nilfs_segctor_collect_blocks+0x7cf/0x2b80 fs/nilfs2/segment.c:1221
       nilfs_segctor_collect fs/nilfs2/segment.c:1547 [inline]
       nilfs_segctor_do_construct+0xf7b/0x5dc0 fs/nilfs2/segment.c:2122
       nilfs_segctor_construct+0x5e1/0x840 fs/nilfs2/segment.c:2478
       nilfs_segctor_thread_construct fs/nilfs2/segment.c:2586 [inline]
       nilfs_segctor_thread+0x565/0xa50 fs/nilfs2/segment.c:2700
       kthread+0x427/0x880 kernel/kthread.c:464
       ret_from_fork+0x45/0x80 arch/x86/kernel/process.c:148
       ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244

other info that might help us debug this:

Chain exists of:
  &nilfs_bmap_dat_lock_key --> fs_reclaim --> &bmap->b_sem

 Possible unsafe locking scenario:

       CPU0                                                CPU1
       ----                                                       ----
  lock(&bmap->b_sem);
                                                           lock(fs_reclaim);
                                                           lock(&bmap->b_sem);
  rlock(&nilfs_bmap_dat_lock_key);

 *** DEADLOCK ***

2 locks held by segctord/13049:
 #0: ffff88801e9af2a0 (&nilfs->ns_segctor_sem){++++}-{4:4}, at: nilfs_transaction_lock+0x1eb/0x7c0 fs/nilfs2/segment.c:357
 #1: ffff88805a142d60 (&bmap->b_sem){++++}-{4:4}, at: nilfs_bmap_propagate+0x25/0x170 fs/nilfs2/bmap.c:328

stack backtrace:
CPU: 0 UID: 0 PID: 13049 Comm: segctord Not tainted 6.14.0-rc5-dirty #17
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
Call Trace:
 <task>
 __dump_stack lib/dump_stack.c:94 [inline]
 dump_stack_lvl+0x116/0x1b0 lib/dump_stack.c:120
 print_circular_bug.isra.0+0x505/0x740 kernel/locking/lockdep.c:2076
 check_noncircular+0x2f1/0x3d0 kernel/locking/lockdep.c:2208
 check_prev_add kernel/locking/lockdep.c:3163 [inline]
 check_prevs_add kernel/locking/lockdep.c:3282 [inline]
 validate_chain kernel/locking/lockdep.c:3906 [inline]
 __lock_acquire+0x2846/0x46b0 kernel/locking/lockdep.c:5228
 lock_acquire kernel/locking/lockdep.c:5851 [inline]
 lock_acquire+0x1b6/0x570 kernel/locking/lockdep.c:5816
 down_read+0x9c/0x480 kernel/locking/rwsem.c:1524
 nilfs_bmap_lookup_at_level+0x7f/0x460 fs/nilfs2/bmap.c:65
 nilfs_bmap_lookup fs/nilfs2/bmap.h:182 [inline]
 nilfs_mdt_submit_block+0x1a1/0x890 fs/nilfs2/mdt.c:144
 nilfs_mdt_read_block+0x93/0x3c0 fs/nilfs2/mdt.c:178
 nilfs_mdt_get_block+0xd2/0xa10 fs/nilfs2/mdt.c:254
 nilfs_palloc_get_block.isra.0+0xc2/0x310 fs/nilfs2/alloc.c:239
 nilfs_palloc_get_desc_block+0x91/0xb0 fs/nilfs2/alloc.c:296
 nilfs_palloc_prepare_alloc_entry+0x27e/0xbb0 fs/nilfs2/alloc.c:607
 nilfs_dat_prepare_alloc+0x24/0x90 fs/nilfs2/dat.c:78
 nilfs_dat_prepare_update+0x59/0x80 fs/nilfs2/dat.c:250
 nilfs_direct_propagate fs/nilfs2/direct.c:279 [inline]
 nilfs_direct_propagate+0x1e4/0x330 fs/nilfs2/direct.c:261
 nilfs_bmap_propagate+0x79/0x170 fs/nilfs2/bmap.c:329
 nilfs_collect_file_data+0x49/0xe0 fs/nilfs2/segment.c:589
 nilfs_segctor_apply_buffers+0x149/0x490 fs/nilfs2/segment.c:1010
 nilfs_segctor_scan_file+0x3ed/0x570 fs/nilfs2/segment.c:1059
 nilfs_segctor_collect_blocks+0x7cf/0x2b80 fs/nilfs2/segment.c:1221
 nilfs_segctor_collect fs/nilfs2/segment.c:1547 [inline]
 nilfs_segctor_do_construct+0xf7b/0x5dc0 fs/nilfs2/segment.c:2122
 nilfs_segctor_construct+0x5e1/0x840 fs/nilfs2/segment.c:2478
 nilfs_segctor_thread_construct fs/nilfs2/segment.c:2586 [inline]
 nilfs_segctor_thread+0x565/0xa50 fs/nilfs2/segment.c:2700
 kthread+0x427/0x880 kernel/kthread.c:464
 ret_from_fork+0x45/0x80 arch/x86/kernel/process.c:148
 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244
 </task>


==================================================================

I hope it helps.
Best regards,
Jianzhou Zhao

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ