Message-ID: <20210316075530.GS21246@kadam>
Date: Tue, 16 Mar 2021 10:55:30 +0300
From: Dan Carpenter <dan.carpenter@...cle.com>
To: Harshad Shirwadkar <harshadshirwadkar@...il.com>
Cc: linux-ext4@...r.kernel.org, tytso@....edu,
kernel test robot <lkp@...el.com>
Subject: Re: [PATCH v4 5/6] ext4: improve cr 0 / cr 1 group scanning
On Mon, Mar 15, 2021 at 10:37:15AM -0700, Harshad Shirwadkar wrote:
> @@ -744,6 +801,251 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
> }
> }
>
> +static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
> + int (*cmp)(struct rb_node *, struct rb_node *))
> +{
> + struct rb_node **iter = &root->rb_node, *parent = NULL;
> +
> + while (*iter) {
> + parent = *iter;
> + if (cmp(new, *iter))
> + iter = &((*iter)->rb_left);
> + else
> + iter = &((*iter)->rb_right);
> + }
This would be neater like so:
	while (*iter) {
		parent = *iter;
		if (cmp(new, parent))
			iter = &parent->rb_left;
		else
			iter = &parent->rb_right;
	}
It's unexpected that the cmp() function returns bool instead of -1, 0, 1
like other cmp() functions do (a sketch of the conventional pattern is
below, after the quoted hunk).
> +
> + rb_link_node(new, parent, iter);
> + rb_insert_color(new, root);
> +}
> +
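Roughly, with a conventional three-way cmp() returning <0/0/>0 (assuming
the usual <linux/rbtree.h> API), the helper would read like this -- just
a sketch, and the cmp() callbacks would need updating to match:

	static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
				      int (*cmp)(struct rb_node *, struct rb_node *))
	{
		struct rb_node **iter = &root->rb_node, *parent = NULL;

		while (*iter) {
			parent = *iter;
			/* three-way compare: left for "less than", right otherwise */
			if (cmp(new, parent) < 0)
				iter = &parent->rb_left;
			else
				iter = &parent->rb_right;
		}

		rb_link_node(new, parent, iter);
		rb_insert_color(new, root);
	}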
[ snip ]
> @@ -2909,6 +3240,22 @@ int ext4_mb_init(struct super_block *sb)
> i++;
> } while (i < MB_NUM_ORDERS(sb));
>
> + sbi->s_mb_avg_fragment_size_root = RB_ROOT;
> + sbi->s_mb_largest_free_orders =
> + kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
> + GFP_KERNEL);
> + if (!sbi->s_mb_largest_free_orders)
> + goto out;
Missing error code: ret = -ENOMEM; (both failure paths are sketched
below, after this hunk).
> + sbi->s_mb_largest_free_orders_locks =
> + kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
> + GFP_KERNEL);
> + if (!sbi->s_mb_largest_free_orders_locks)
> + goto out;
ret = -ENOMEM;
> + for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
> + INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
> + rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
> + }
> + rwlock_init(&sbi->s_mb_rb_lock);
>
> spin_lock_init(&sbi->s_md_lock);
> sbi->s_mb_free_pending = 0;
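i.e. something along these lines for both allocations (sketch only,
assuming the existing "ret" variable in ext4_mb_init()):

	sbi->s_mb_largest_free_orders =
		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
			      GFP_KERNEL);
	if (!sbi->s_mb_largest_free_orders) {
		ret = -ENOMEM;
		goto out;
	}
	sbi->s_mb_largest_free_orders_locks =
		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
			      GFP_KERNEL);
	if (!sbi->s_mb_largest_free_orders_locks) {
		ret = -ENOMEM;
		goto out;
	}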
> @@ -2961,6 +3308,10 @@ int ext4_mb_init(struct super_block *sb)
> spin_lock_init(&lg->lg_prealloc_lock);
> }
>
> + if (blk_queue_nonrot(bdev_get_queue(sb->s_bdev)))
> + sbi->s_mb_linear_limit = 0;
> + else
> + sbi->s_mb_linear_limit = MB_DEFAULT_LINEAR_LIMIT;
> /* init file for buddy data */
> ret = ext4_mb_init_backend(sb);
> if (ret != 0)
> @@ -2972,6 +3323,8 @@ int ext4_mb_init(struct super_block *sb)
> free_percpu(sbi->s_locality_groups);
> sbi->s_locality_groups = NULL;
> out:
> + kfree(sbi->s_mb_largest_free_orders);
> + kfree(sbi->s_mb_largest_free_orders_locks);
> kfree(sbi->s_mb_offsets);
> sbi->s_mb_offsets = NULL;
> kfree(sbi->s_mb_maxs);
> @@ -3028,6 +3381,7 @@ int ext4_mb_release(struct super_block *sb)
> kvfree(group_info);
> rcu_read_unlock();
> }
> + kfree(sbi->s_mb_largest_free_orders);
Add a kfree(sbi->s_mb_largest_free_orders_locks); here as well (see the
sketch below the quoted lines).
> kfree(sbi->s_mb_offsets);
> kfree(sbi->s_mb_maxs);
> iput(sbi->s_buddy_cache);
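i.e. the release path would end up freeing both arrays (sketch):

	kfree(sbi->s_mb_largest_free_orders);
	kfree(sbi->s_mb_largest_free_orders_locks);
	kfree(sbi->s_mb_offsets);
	kfree(sbi->s_mb_maxs);
	iput(sbi->s_buddy_cache);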
regards,
dan carpenter