Message-ID: <2000105566.816789.1765132208865@kpc.webmail.kpnmail.nl>
Date: Sun, 7 Dec 2025 19:30:08 +0100 (CET)
From: Jori Koolstra <jkoolstra@...all.nl>
To: Viacheslav Dubeyko <Slava.Dubeyko@....com>,
"linux-fsdevel@...r.kernel.org" <linux-fsdevel@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Cc: "glaubitz@...sik.fu-berlin.de" <glaubitz@...sik.fu-berlin.de>,
"frank.li@...o.com" <frank.li@...o.com>,
"slava@...eyko.com" <slava@...eyko.com>,
"skhan@...uxfoundation.org" <skhan@...uxfoundation.org>,
"syzbot+17cc9bb6d8d69b4139f0@...kaller.appspotmail.com"
<syzbot+17cc9bb6d8d69b4139f0@...kaller.appspotmail.com>
Subject: RE: [PATCH v2] hfs: replace BUG_ONs with error handling
Hi Viacheslav,
>
> > + return -EINVAL;
>
> It's not about input values, from my point of view. We have
> folder_count/file_count out of range. I think -ERANGE is a better fit here.
There is not really any errno that indicates programmer error or internal
inconsistency. I saw that ext2 sometimes uses EINVAL for this, although I agree
it has nothing to do with user input. In any case, I have no objection to
changing it to ERANGE.
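To illustrate, this is roughly the pattern the new checks follow (see the
hfs_remove() hunk in the diff below), where sbi is the usual HFS_SB() pointer:

	if (atomic64_read(&sbi->file_count) > U32_MAX) {
		/* counter no longer fits the on-disk 32-bit field */
		pr_err("cannot remove file/folder: count exceeds limit\n");
		return -ERANGE;
	}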
> >
> > +static bool hfs_mdb_verify(struct super_block *sb) {
> > + struct hfs_sb_info *sbi = HFS_SB(sb);
> > +
> > + /* failure of one of the following checks indicates programmer error */
> > + if (atomic64_read(&sbi->next_id) > U32_MAX)
> > + pr_err("mdb invalid: next CNID exceeds limit\n");
> > + else if (atomic64_read(&sbi->file_count) > U32_MAX)
> > + pr_err("mdb invalid: file count exceeds limit\n");
> > + else if (atomic64_read(&sbi->folder_count) > U32_MAX)
> > + pr_err("mdb invalid: folder count exceeds limit\n");
> > + else
> > + return true;
> > +
> > + return false;
> > +}
> > +
> > static int hfs_sync_fs(struct super_block *sb, int wait)
> > {
> > + if (!hfs_mdb_verify(sb)) {
> > + pr_err("cannot sync fs because hfs_mdb_verify() failed\n");
> > + return -EINVAL;
>
> OK. I think that skipping the commit is not a good strategy here anyway. First
> of all, we are protecting next_id/folder_count/file_count from exceeding
> U32_MAX, so this is a really low-probability event. We could potentially still
> see such corruption during file system driver operations, but even if the
> next_id/folder_count/file_count values become inconsistent, FSCK can easily
> recover the file system.
>
> So, I suggest executing the check and reporting the potential corruption with a
> recommendation to run FSCK. But we need to execute the commit anyway, because
> the event is low probability and FSCK recovers from such corruption easily. So,
> don't return an error here; continue the logic. Breaking the commit is more
> harmful here than executing it, from my point of view.
>
I have incorporated your suggestions and retested. Please let me know what you
think, and thanks again for your time. I am learning quite a lot from this
discussion :)
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 86a6b317b474..9df3d5b9c87e 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -196,8 +196,8 @@ static int hfs_create(struct mnt_idmap *idmap, struct inode *dir,
int res;
inode = hfs_new_inode(dir, &dentry->d_name, mode);
- if (!inode)
- return -ENOMEM;
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode);
if (res) {
@@ -226,8 +226,8 @@ static struct dentry *hfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
int res;
inode = hfs_new_inode(dir, &dentry->d_name, S_IFDIR | mode);
- if (!inode)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode);
if (res) {
@@ -254,11 +254,19 @@ static struct dentry *hfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
*/
static int hfs_remove(struct inode *dir, struct dentry *dentry)
{
+ struct hfs_sb_info *sbi = HFS_SB(dir->i_sb);
struct inode *inode = d_inode(dentry);
int res;
if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
return -ENOTEMPTY;
+
+ if (unlikely(atomic64_read(&sbi->folder_count) > U32_MAX
+ || atomic64_read(&sbi->file_count) > U32_MAX)) {
+ pr_err("cannot remove file/folder: count exceeds limit\n");
+ return -ERANGE;
+ }
+
res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
if (res)
return res;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index fff149af89da..76b6f19c251f 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -188,6 +188,7 @@ extern void hfs_delete_inode(struct inode *);
extern const struct xattr_handler * const hfs_xattr_handlers[];
/* mdb.c */
+extern bool hfs_mdb_verify_limits(struct super_block *);
extern int hfs_mdb_get(struct super_block *);
extern void hfs_mdb_commit(struct super_block *);
extern void hfs_mdb_close(struct super_block *);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 81ad93e6312f..98089ac490db 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -186,16 +186,23 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
s64 next_id;
s64 file_count;
s64 folder_count;
+ int err = -ENOMEM;
if (!inode)
- return NULL;
+ goto out_err;
+
+ err = -ERANGE;
mutex_init(&HFS_I(inode)->extents_lock);
INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
spin_lock_init(&HFS_I(inode)->open_dir_lock);
hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
next_id = atomic64_inc_return(&HFS_SB(sb)->next_id);
- BUG_ON(next_id > U32_MAX);
+ if (next_id > U32_MAX) {
+ atomic64_dec(&HFS_SB(sb)->next_id);
+ pr_err("cannot create new inode: next CNID exceeds limit\n");
+ goto out_discard;
+ }
inode->i_ino = (u32)next_id;
inode->i_mode = mode;
inode->i_uid = current_fsuid();
@@ -209,7 +216,11 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
if (S_ISDIR(mode)) {
inode->i_size = 2;
folder_count = atomic64_inc_return(&HFS_SB(sb)->folder_count);
- BUG_ON(folder_count > U32_MAX);
+ if (folder_count > U32_MAX) {
+ atomic64_dec(&HFS_SB(sb)->folder_count);
+ pr_err("cannot create new inode: folder count exceeds limit\n");
+ goto out_discard;
+ }
if (dir->i_ino == HFS_ROOT_CNID)
HFS_SB(sb)->root_dirs++;
inode->i_op = &hfs_dir_inode_operations;
@@ -219,7 +230,11 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
} else if (S_ISREG(mode)) {
HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
file_count = atomic64_inc_return(&HFS_SB(sb)->file_count);
- BUG_ON(file_count > U32_MAX);
+ if (file_count > U32_MAX) {
+ atomic64_dec(&HFS_SB(sb)->file_count);
+ pr_err("cannot create new inode: file count exceeds limit\n");
+ goto out_discard;
+ }
if (dir->i_ino == HFS_ROOT_CNID)
HFS_SB(sb)->root_files++;
inode->i_op = &hfs_file_inode_operations;
@@ -243,6 +258,11 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
hfs_mark_mdb_dirty(sb);
return inode;
+
+ out_discard:
+ iput(inode);
+ out_err:
+ return ERR_PTR(err);
}
void hfs_delete_inode(struct inode *inode)
@@ -251,7 +271,6 @@ void hfs_delete_inode(struct inode *inode)
hfs_dbg("ino %lu\n", inode->i_ino);
if (S_ISDIR(inode->i_mode)) {
- BUG_ON(atomic64_read(&HFS_SB(sb)->folder_count) > U32_MAX);
atomic64_dec(&HFS_SB(sb)->folder_count);
if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
HFS_SB(sb)->root_dirs--;
@@ -260,7 +279,6 @@ void hfs_delete_inode(struct inode *inode)
return;
}
- BUG_ON(atomic64_read(&HFS_SB(sb)->file_count) > U32_MAX);
atomic64_dec(&HFS_SB(sb)->file_count);
if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
HFS_SB(sb)->root_files--;
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 53f3fae60217..84acb67d2551 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -64,6 +64,22 @@ static int hfs_get_last_session(struct super_block *sb,
return 0;
}
+bool hfs_mdb_verify_limits(struct super_block *sb)
+{
+ struct hfs_sb_info *sbi = HFS_SB(sb);
+
+ if (atomic64_read(&sbi->next_id) > U32_MAX)
+ pr_warn("mdb invalid: next CNID exceeds limit\n");
+ else if (atomic64_read(&sbi->file_count) > U32_MAX)
+ pr_warn("mdb invalid: file count exceeds limit\n");
+ else if (atomic64_read(&sbi->folder_count) > U32_MAX)
+ pr_warn("mdb invalid: folder count exceeds limit\n");
+ else
+ return true;
+
+ return false;
+}
+
/*
* hfs_mdb_get()
*
@@ -156,6 +172,12 @@ int hfs_mdb_get(struct super_block *sb)
atomic64_set(&HFS_SB(sb)->file_count, be32_to_cpu(mdb->drFilCnt));
atomic64_set(&HFS_SB(sb)->folder_count, be32_to_cpu(mdb->drDirCnt));
+ if (!hfs_mdb_verify_limits(sb)) {
+ pr_warn("filesystem possibly corrupted, running fsck.hfs is recommended.\n");
+ pr_warn("mounting read only.\n");
+ sb->s_flags |= SB_RDONLY;
+ }
+
/* TRY to get the alternate (backup) MDB. */
sect = part_start + part_size - 2;
bh = sb_bread512(sb, sect, mdb2);
@@ -209,7 +231,7 @@ int hfs_mdb_get(struct super_block *sb)
attrib = mdb->drAtrb;
if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
- pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. mounting read-only.\n");
+ pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. Mounting read-only.\n");
sb->s_flags |= SB_RDONLY;
}
if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) {
@@ -273,15 +295,12 @@ void hfs_mdb_commit(struct super_block *sb)
/* These parameters may have been modified, so write them back */
mdb->drLsMod = hfs_mtime();
mdb->drFreeBks = cpu_to_be16(HFS_SB(sb)->free_ablocks);
- BUG_ON(atomic64_read(&HFS_SB(sb)->next_id) > U32_MAX);
mdb->drNxtCNID =
cpu_to_be32((u32)atomic64_read(&HFS_SB(sb)->next_id));
mdb->drNmFls = cpu_to_be16(HFS_SB(sb)->root_files);
mdb->drNmRtDirs = cpu_to_be16(HFS_SB(sb)->root_dirs);
- BUG_ON(atomic64_read(&HFS_SB(sb)->file_count) > U32_MAX);
mdb->drFilCnt =
cpu_to_be32((u32)atomic64_read(&HFS_SB(sb)->file_count));
- BUG_ON(atomic64_read(&HFS_SB(sb)->folder_count) > U32_MAX);
mdb->drDirCnt =
cpu_to_be32((u32)atomic64_read(&HFS_SB(sb)->folder_count));
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 47f50fa555a4..ca5f9b5a296e 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -34,6 +34,9 @@ MODULE_LICENSE("GPL");
static int hfs_sync_fs(struct super_block *sb, int wait)
{
+ if (!hfs_mdb_verify_limits(sb))
+ pr_warn("hfs_mdb_verify_limits() failed during filesystem sync\n");
+
hfs_mdb_commit(sb);
return 0;
}
@@ -65,6 +68,9 @@ static void flush_mdb(struct work_struct *work)
sbi->work_queued = 0;
spin_unlock(&sbi->work_lock);
+ if (!hfs_mdb_verify_limits(sb))
+ pr_warn("hfs_mdb_verify_limits() failed during mdb flushing\n");
+
hfs_mdb_commit(sb);
}
Thanks,
Jori.