Message-Id: <20200819073104.1141705-3-harshadshirwadkar@gmail.com>
Date: Wed, 19 Aug 2020 00:30:57 -0700
From: Harshad Shirwadkar <harshadshirwadkar@...il.com>
To: linux-ext4@...r.kernel.org
Cc: tytso@....edu, lyx1209@...il.com,
Harshad Shirwadkar <harshadshirwadkar@...il.com>
Subject: [PATCH 2/9] ext4: rename ext4_mb_load_buddy to ext4_mb_load_allocator
This patch renames ext4_mb_load_buddy and ext4_mb_unload_buddy to
ext4_mb_load_allocator and ext4_mb_unload_allocator. It also adds a
flags argument to ext4_mb_load_allocator, which is currently unused.
This significantly reduces the size of the following patch, "ext4:
add freespace tree allocator". In the interest of keeping this
patchset shorter, I have not renamed the ext4_buddy structure or the
e4b variable names, but have added that as a TODO item.
Signed-off-by: Harshad Shirwadkar <harshadshirwadkar@...il.com>
---
fs/ext4/mballoc.c | 86 ++++++++++++++++++++++++-----------------------
1 file changed, 44 insertions(+), 42 deletions(-)
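
[Not part of the patch: a minimal standalone sketch of the wrapper shape
the diff below keeps, for readers skimming the hunks. The types, the
GFP_NOFS value, and the load_allocator* names here are simplified
stand-ins for the real ext4 structures and functions; only the overall
pattern (a _gfp worker plus a thin default-gfp wrapper, with a new
pass-through flags parameter that callers currently set to 0) mirrors
the actual change.]

	/* Standalone sketch, not kernel code. */
	#include <stdio.h>

	typedef unsigned int gfp_t;
	#define GFP_NOFS 0x10u          /* placeholder value, not the kernel's */

	struct ext4_buddy_stub { int group; };  /* stand-in for struct ext4_buddy */

	static int load_allocator_gfp(int group, struct ext4_buddy_stub *e4b,
				      gfp_t gfp, int flags)
	{
		/* The real function pins the bitmap and buddy pages here;
		 * flags is accepted but ignored until a later patch uses it. */
		(void)gfp;
		(void)flags;
		e4b->group = group;
		return 0;
	}

	static int load_allocator(int group, struct ext4_buddy_stub *e4b, int flags)
	{
		/* Thin wrapper, mirroring ext4_mb_load_allocator():
		 * default gfp mask, flags passed straight through. */
		return load_allocator_gfp(group, e4b, GFP_NOFS, flags);
	}

	int main(void)
	{
		struct ext4_buddy_stub e4b;

		/* Existing callers in the patch all pass 0 for flags. */
		if (load_allocator(7, &e4b, 0) == 0)
			printf("loaded allocator state for group %d\n", e4b.group);
		return 0;
	}
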
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 47de61e44db2..2d8d3d9c7918 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -29,6 +29,7 @@
* - don't normalize tails
* - quota
* - reservation for superuser
+ * - rename ext4_buddy to ext4_allocator and e4b variables to allocator
*
* TODO v3:
* - bitmap read-ahead (proposed by Oleg Drokin aka green)
@@ -92,7 +93,7 @@
* mapped to the buddy and bitmap information regarding different
* groups. The buddy information is attached to buddy cache inode so that
* we can access them through the page cache. The information regarding
- * each group is loaded via ext4_mb_load_buddy. The information involve
+ * each group is loaded via ext4_mb_load_allocator. The information involve
* block bitmap and buddy information. The information are stored in the
* inode as:
*
@@ -845,7 +846,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
/* The buddy information is attached the buddy cache inode
* for convenience. The information regarding each group
- * is loaded via ext4_mb_load_buddy. The information involve
+ * is loaded via ext4_mb_load_allocator. The information involve
* block bitmap and buddy information. The information are
* stored in the inode as
*
@@ -1105,7 +1106,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
* This ensures that we don't reinit the buddy cache
* page which map to the group from which we are already
* allocating. If we are looking at the buddy cache we would
- * have taken a reference using ext4_mb_load_buddy and that
+ * have taken a reference using ext4_mb_load_allocator and that
* would have pinned buddy page to page cache.
* The call to ext4_mb_get_buddy_page_lock will mark the
* page accessed.
@@ -1157,8 +1158,8 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
* calling this routine!
*/
static noinline_for_stack int
-ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
- struct ext4_buddy *e4b, gfp_t gfp)
+ext4_mb_load_allocator_gfp(struct super_block *sb, ext4_group_t group,
+ struct ext4_buddy *e4b, gfp_t gfp, int flags)
{
int blocks_per_page;
int block;
@@ -1296,13 +1297,13 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
return ret;
}
-static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
- struct ext4_buddy *e4b)
+static int ext4_mb_load_allocator(struct super_block *sb, ext4_group_t group,
+ struct ext4_buddy *e4b, int flags)
{
- return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
+ return ext4_mb_load_allocator_gfp(sb, group, e4b, GFP_NOFS, flags);
}
-static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
+static void ext4_mb_unload_allocator(struct ext4_buddy *e4b)
{
if (e4b->bd_bitmap_page)
put_page(e4b->bd_bitmap_page);
@@ -1866,7 +1867,7 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
int err;
BUG_ON(ex.fe_len <= 0);
- err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
+ err = ext4_mb_load_allocator(ac->ac_sb, group, e4b, 0);
if (err)
return err;
@@ -1879,7 +1880,7 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
}
ext4_unlock_group(ac->ac_sb, group);
- ext4_mb_unload_buddy(e4b);
+ ext4_mb_unload_allocator(e4b);
return 0;
}
@@ -1900,12 +1901,12 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
if (grp->bb_free == 0)
return 0;
- err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
+ err = ext4_mb_load_allocator(ac->ac_sb, group, e4b, 0);
if (err)
return err;
if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
- ext4_mb_unload_buddy(e4b);
+ ext4_mb_unload_allocator(e4b);
return 0;
}
@@ -1943,7 +1944,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
ext4_mb_use_best_found(ac, e4b);
}
ext4_unlock_group(ac->ac_sb, group);
- ext4_mb_unload_buddy(e4b);
+ ext4_mb_unload_allocator(e4b);
return 0;
}
@@ -2424,7 +2425,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
continue;
}
- err = ext4_mb_load_buddy(sb, group, &e4b);
+ err = ext4_mb_load_allocator(sb, group, &e4b, 0);
if (err)
goto out;
@@ -2437,7 +2438,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
ret = ext4_mb_good_group(ac, group, cr);
if (ret == 0) {
ext4_unlock_group(sb, group);
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
continue;
}
@@ -2451,7 +2452,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
ext4_mb_complex_scan_group(ac, &e4b);
ext4_unlock_group(sb, group);
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
if (ac->ac_status != AC_STATUS_CONTINUE)
break;
@@ -2548,7 +2549,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
grinfo = ext4_get_group_info(sb, group);
/* Load the group info in memory only if not already loaded. */
if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
- err = ext4_mb_load_buddy(sb, group, &e4b);
+ err = ext4_mb_load_allocator(sb, group, &e4b, 0);
if (err) {
seq_printf(seq, "#%-5u: I/O error\n", group);
return 0;
@@ -2559,7 +2560,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
memcpy(&sg, ext4_get_group_info(sb, group), i);
if (buddy_loaded)
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
sg.info.bb_fragments, sg.info.bb_first_free);
@@ -3053,7 +3054,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
entry->efd_count, entry->efd_group, entry);
- err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
+ err = ext4_mb_load_allocator(sb, entry->efd_group, &e4b, 0);
/* we expect to find existing buddy because it's pinned */
BUG_ON(err != 0);
@@ -3088,7 +3089,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
}
ext4_unlock_group(sb, entry->efd_group);
kmem_cache_free(ext4_free_data_cachep, entry);
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
mb_debug(sb, "freed %d blocks in %d structures\n", count,
count2);
@@ -3562,12 +3563,13 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
if (pa == NULL) {
if (ac->ac_f_ex.fe_len == 0)
return;
- err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
+ err = ext4_mb_load_allocator(ac->ac_sb, ac->ac_f_ex.fe_group,
+ &e4b, 0);
if (err) {
/*
* This should never happen since we pin the
* pages in the ext4_allocation_context so
- * ext4_mb_load_buddy() should never fail.
+ * ext4_mb_load_allocator() should never fail.
*/
WARN(1, "mb_load_buddy failed (%d)", err);
return;
@@ -3576,7 +3578,7 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
ac->ac_f_ex.fe_len);
ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
return;
}
if (pa->pa_type == MB_INODE_PA)
@@ -4158,7 +4160,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
goto out_dbg;
}
- err = ext4_mb_load_buddy(sb, group, &e4b);
+ err = ext4_mb_load_allocator(sb, group, &e4b, 0);
if (err) {
ext4_warning(sb, "Error %d loading buddy information for %u",
err, group);
@@ -4233,7 +4235,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
out:
ext4_unlock_group(sb, group);
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
put_bh(bitmap_bh);
out_dbg:
mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
@@ -4325,8 +4327,8 @@ void ext4_discard_preallocations(struct inode *inode)
BUG_ON(pa->pa_type != MB_INODE_PA);
group = ext4_get_group_number(sb, pa->pa_pstart);
- err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
- GFP_NOFS|__GFP_NOFAIL);
+ err = ext4_mb_load_allocator_gfp(sb, group, &e4b,
+ GFP_NOFS|__GFP_NOFAIL, 0);
if (err) {
ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
err, group);
@@ -4338,7 +4340,7 @@ void ext4_discard_preallocations(struct inode *inode)
err = PTR_ERR(bitmap_bh);
ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
err, group);
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
continue;
}
@@ -4347,7 +4349,7 @@ void ext4_discard_preallocations(struct inode *inode)
ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
ext4_unlock_group(sb, group);
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
put_bh(bitmap_bh);
list_del(&pa->u.pa_tmp_list);
@@ -4620,8 +4622,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
int err;
group = ext4_get_group_number(sb, pa->pa_pstart);
- err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
- GFP_NOFS|__GFP_NOFAIL);
+ err = ext4_mb_load_allocator_gfp(sb, group, &e4b,
+ GFP_NOFS|__GFP_NOFAIL, 0);
if (err) {
ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
err, group);
@@ -4632,7 +4634,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
ext4_mb_release_group_pa(&e4b, pa);
ext4_unlock_group(sb, group);
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
list_del(&pa->u.pa_tmp_list);
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}
@@ -5189,8 +5191,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
- err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
- GFP_NOFS|__GFP_NOFAIL);
+ err = ext4_mb_load_allocator_gfp(sb, block_group, &e4b,
+ GFP_NOFS|__GFP_NOFAIL, 0);
if (err)
goto error_return;
@@ -5264,7 +5266,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
count_clusters);
}
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
/* We dirtied the bitmap block */
BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
@@ -5382,7 +5384,7 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
}
}
- err = ext4_mb_load_buddy(sb, block_group, &e4b);
+ err = ext4_mb_load_allocator(sb, block_group, &e4b, 0);
if (err)
goto error_return;
@@ -5410,7 +5412,7 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
flex_group)->free_clusters);
}
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
/* We dirtied the bitmap block */
BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
@@ -5498,7 +5500,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
trace_ext4_trim_all_free(sb, group, start, max);
- ret = ext4_mb_load_buddy(sb, group, &e4b);
+ ret = ext4_mb_load_allocator(sb, group, &e4b, 0);
if (ret) {
ext4_warning(sb, "Error %d loading buddy information for %u",
ret, group);
@@ -5552,7 +5554,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
}
out:
ext4_unlock_group(sb, group);
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
ext4_debug("trimmed %d blocks in the group %d\n",
count, group);
@@ -5666,7 +5668,7 @@ ext4_mballoc_query_range(
struct ext4_buddy e4b;
int error;
- error = ext4_mb_load_buddy(sb, group, &e4b);
+ error = ext4_mb_load_allocator(sb, group, &e4b, 0);
if (error)
return error;
bitmap = e4b.bd_bitmap;
@@ -5695,7 +5697,7 @@ ext4_mballoc_query_range(
ext4_unlock_group(sb, group);
out_unload:
- ext4_mb_unload_buddy(&e4b);
+ ext4_mb_unload_allocator(&e4b);
return error;
}
--
2.28.0.220.ged08abb693-goog