Message-Id: <20240908083424.13212-1-aha310510@gmail.com>
Date: Sun, 8 Sep 2024 17:34:24 +0900
From: Jeongjun Park <aha310510@...il.com>
To: syzbot+702361cf7e3d95758761@...kaller.appspotmail.com
Cc: linux-kernel@...r.kernel.org,
syzkaller-bugs@...glegroups.com
Subject: Re: KCSAN: data-race in generic_fillattr / shmem_mknod (2)

#syz test git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
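
Not part of the patch, only for context: below is a minimal userspace sketch
of the annotation pattern the ext4 hunks in this patch are testing, i.e. a
field that is written under a lock but also read locklessly, with the accesses
marked through WRITE_ONCE()/READ_ONCE() so the racy pair is explicit to KCSAN.
The macro definitions and the counter/lock names here are simplified stand-ins
made up for the example, not the kernel's own.

/* race-annotation sketch: writer updates under a mutex, reader loads locklessly */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() */
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t counter;	/* written under 'lock', read without it */

static void *writer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&lock);
		/* a plain store here is what a race detector would flag */
		WRITE_ONCE(counter, counter + 1);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

static void *reader(void *arg)
{
	uint32_t last = 0;

	(void)arg;
	for (int i = 0; i < 100000; i++) {
		/* lockless read: mark it so the single, untorn load is intentional */
		uint32_t v = READ_ONCE(counter);
		if (v > last)
			last = v;
	}
	printf("reader last saw %u\n", last);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	printf("final counter %u\n", counter);
	return 0;
}

This builds with gcc -pthread and only illustrates the marking; it does not
model the kernel code paths touched below.
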
---
fs/ext4/orphan.c | 2 +-
fs/ext4/super.c | 4 ++--
mm/percpu.c | 5 ++---
3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
index e5b47dda3317..08299b2a1b3b 100644
--- a/fs/ext4/orphan.c
+++ b/fs/ext4/orphan.c
@@ -293,7 +293,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
mutex_unlock(&sbi->s_orphan_lock);
goto out_brelse;
}
- NEXT_ORPHAN(i_prev) = ino_next;
+ WRITE_ONCE(NEXT_ORPHAN(i_prev), ino_next);
err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
mutex_unlock(&sbi->s_orphan_lock);
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e72145c4ae5a..8cc5e19bfe78 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -346,7 +346,7 @@ __u32 ext4_free_group_clusters(struct super_block *sb,
__u32 ext4_free_inodes_count(struct super_block *sb,
struct ext4_group_desc *bg)
{
- return le16_to_cpu(bg->bg_free_inodes_count_lo) |
+ return le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_lo)) |
(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
(__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}
@@ -402,7 +402,7 @@ void ext4_free_group_clusters_set(struct super_block *sb,
void ext4_free_inodes_set(struct super_block *sb,
struct ext4_group_desc *bg, __u32 count)
{
- bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
+ WRITE_ONCE(bg->bg_free_inodes_count_lo, cpu_to_le16((__u16)count));
if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}
diff --git a/mm/percpu.c b/mm/percpu.c
index 20d91af8c033..5c958a54da51 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1864,7 +1864,6 @@ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,

area_found:
pcpu_stats_area_alloc(chunk, size);
- spin_unlock_irqrestore(&pcpu_lock, flags);

/* populate if not all pages are already there */
if (!is_atomic) {
@@ -1878,14 +1877,12 @@ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,

ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);

- spin_lock_irqsave(&pcpu_lock, flags);
if (ret) {
pcpu_free_area(chunk, off);
err = "failed to populate";
goto fail_unlock;
}
pcpu_chunk_populated(chunk, rs, re);
- spin_unlock_irqrestore(&pcpu_lock, flags);
}

mutex_unlock(&pcpu_alloc_mutex);
@@ -1894,6 +1891,8 @@ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
pcpu_schedule_balance_work();

+ spin_unlock_irqrestore(&pcpu_lock, flags);
+
/* clear the areas and return address relative to base address */
for_each_possible_cpu(cpu)
memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
--