Message-Id: <20221011215634.478330-4-vishal.moola@gmail.com>
Date: Tue, 11 Oct 2022 14:56:33 -0700
From: "Vishal Moola (Oracle)" <vishal.moola@...il.com>
To: akpm@...ux-foundation.org
Cc: willy@...radead.org, hughd@...gle.com,
linux-fsdevel@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
"Vishal Moola (Oracle)" <vishal.moola@...il.com>
Subject: [PATCH 3/4] truncate: Remove indices argument from truncate_folio_batch_exceptionals()
The indices array is unnecessary. Folios keep track of their own xarray
indices in the folio->index field, which can simply be read as needed.
This change is in preparation for the removal of the indices arguments of
find_get_entries() and find_lock_entries().
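As background (illustration only, not part of this diff), a minimal
sketch of the access pattern this relies on: each folio carries its own
page-cache offset in folio->index, so no side array of indices is
needed. The helper name below is hypothetical.

	/* Sketch assumes <linux/pagevec.h> and <linux/xarray.h>. */
	static void report_batch_indices(struct folio_batch *fbatch)
	{
		int i;

		for (i = 0; i < folio_batch_count(fbatch); i++) {
			struct folio *folio = fbatch->folios[i];

			/* Value entries (shadow/DAX) are not real folio
			 * pointers; skip them in this sketch. */
			if (xa_is_value(folio))
				continue;

			/* folio->index is a pgoff_t (unsigned long). */
			pr_debug("folio at index %lu\n", folio->index);
		}
	}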
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@...il.com>
---
mm/truncate.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/mm/truncate.c b/mm/truncate.c
index 846ddbdb27a4..4e63d885498a 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -58,7 +58,7 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
* exceptional entries similar to what folio_batch_remove_exceptionals() does.
*/
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
- struct folio_batch *fbatch, pgoff_t *indices)
+ struct folio_batch *fbatch)
{
int i, j;
bool dax;
@@ -82,7 +82,6 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
for (i = j; i < folio_batch_count(fbatch); i++) {
struct folio *folio = fbatch->folios[i];
- pgoff_t index = indices[i];
if (!xa_is_value(folio)) {
fbatch->folios[j++] = folio;
@@ -90,11 +89,11 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
}
if (unlikely(dax)) {
- dax_delete_mapping_entry(mapping, index);
+ dax_delete_mapping_entry(mapping, folio->index);
continue;
}
- __clear_shadow_entry(mapping, index, folio);
+ __clear_shadow_entry(mapping, folio->index, folio);
}
if (!dax) {
@@ -363,7 +362,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
index = start;
while (index < end && find_lock_entries(mapping, &index, end - 1,
&fbatch, indices)) {
- truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
+ truncate_folio_batch_exceptionals(mapping, &fbatch);
for (i = 0; i < folio_batch_count(&fbatch); i++)
truncate_cleanup_folio(fbatch.folios[i]);
delete_from_page_cache_batch(mapping, &fbatch);
@@ -424,7 +423,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
truncate_inode_folio(mapping, folio);
folio_unlock(folio);
}
- truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
+ truncate_folio_batch_exceptionals(mapping, &fbatch);
folio_batch_release(&fbatch);
}
}
--
2.36.1