Message-Id: <20220608150249.3033815-11-willy@infradead.org>
Date: Wed, 8 Jun 2022 16:02:40 +0100
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: linux-fsdevel@...r.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
linux-aio@...ck.org, linux-btrfs@...r.kernel.org,
linux-ext4@...r.kernel.org, linux-f2fs-devel@...ts.sourceforge.net,
cluster-devel@...hat.com, linux-mm@...ck.org,
linux-xfs@...r.kernel.org, linux-nfs@...r.kernel.org,
linux-ntfs-dev@...ts.sourceforge.net, ocfs2-devel@....oracle.com,
linux-mtd@...ts.infradead.org,
virtualization@...ts.linux-foundation.org,
Christoph Hellwig <hch@....de>
Subject: [PATCH v2 10/19] mm/migrate: Convert migrate_page() to migrate_folio()
Convert all callers to pass a folio; most already have the folio
available. Switch all users from aops->migratepage to
aops->migrate_folio. Also convert the documentation to kerneldoc.
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Reviewed-by: Christoph Hellwig <hch@....de>
---
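For context, the conversion for a filesystem that uses the common
helper is mechanical: drop the &folio->page round-trips and switch the
aops entry. A minimal sketch, using a hypothetical foo_aops that is not
part of this patch:

	/* Before: page-based entry point, delegating to migrate_page(). */
	static const struct address_space_operations foo_aops_old = {
		.migratepage	= migrate_page,
	};

	/* After: folio-based entry point, delegating to migrate_folio(). */
	static const struct address_space_operations foo_aops = {
		.migrate_folio	= migrate_folio,
	};

This is exactly the shape of the shmem and swap_state changes below.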
drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 4 +--
fs/btrfs/disk-io.c | 2 +-
fs/nfs/write.c | 2 +-
include/linux/migrate.h | 5 ++-
mm/migrate.c | 37 +++++++++++----------
mm/migrate_device.c | 3 +-
mm/shmem.c | 2 +-
mm/swap_state.c | 2 +-
8 files changed, 30 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 094f06b4ce33..8423df021b71 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -216,8 +216,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
* However...!
*
* The mmu-notifier can be invalidated for a
- * migrate_page, that is alreadying holding the lock
- * on the page. Such a try_to_unmap() will result
+ * migrate_folio, that is already holding the lock
+ * on the folio. Such a try_to_unmap() will result
* in us calling put_pages() and so recursively try
* to lock the page. We avoid that deadlock with
* a trylock_page() and in exchange we risk missing
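The deadlock this comment guards against is plain lock recursion: the
notifier can run while our caller already holds the folio lock. A
minimal sketch of the trylock-based avoidance, with a hypothetical
foo_invalidate that is not taken from this patch:

	static void foo_invalidate(struct folio *folio)
	{
		/* The caller may already hold the folio lock; never block on it. */
		if (!folio_trylock(folio))
			return;	/* bail out and risk missing an unmap */
		/* ... tear down the mappings under the lock ... */
		folio_unlock(folio);
	}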
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9ceb73f683af..8e5f1fa1e972 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -968,7 +968,7 @@ static int btree_migrate_folio(struct address_space *mapping,
if (folio_get_private(src) &&
!filemap_release_folio(src, GFP_KERNEL))
return -EAGAIN;
- return migrate_page(mapping, &dst->page, &src->page, mode);
+ return migrate_folio(mapping, dst, src, mode);
}
#else
#define btree_migrate_folio NULL
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 649b9e633459..69569696dde0 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -2139,7 +2139,7 @@ int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
folio_wait_fscache(src);
}
- return migrate_page(mapping, &dst->page, &src->page, mode);
+ return migrate_folio(mapping, dst, src, mode);
}
#endif
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 48aa4be04108..82f00ad69a54 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -32,9 +32,8 @@ extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode);
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
diff --git a/mm/migrate.c b/mm/migrate.c
index 2e2f41572066..785e32d0cf1b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -593,34 +593,37 @@ EXPORT_SYMBOL(folio_migrate_copy);
* Migration functions
***********************************************************/
-/*
- * Common logic to directly migrate a single LRU page suitable for
- * pages that do not use PagePrivate/PagePrivate2.
+/**
+ * migrate_folio() - Simple folio migration.
+ * @mapping: The address_space containing the folio.
+ * @dst: The folio to migrate the data to.
+ * @src: The folio containing the current data.
+ * @mode: How to migrate the folio.
*
- * Pages are locked upon entry and exit.
+ * Common logic to directly migrate a single LRU folio suitable for
+ * folios that do not use PagePrivate/PagePrivate2.
+ *
+ * Folios are locked upon entry and exit.
*/
-int migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode)
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode)
{
- struct folio *newfolio = page_folio(newpage);
- struct folio *folio = page_folio(page);
int rc;
- BUG_ON(folio_test_writeback(folio)); /* Writeback must be complete */
+ BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
- rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
+ rc = folio_migrate_mapping(mapping, dst, src, 0);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
if (mode != MIGRATE_SYNC_NO_COPY)
- folio_migrate_copy(newfolio, folio);
+ folio_migrate_copy(dst, src);
else
- folio_migrate_flags(newfolio, folio);
+ folio_migrate_flags(dst, src);
return MIGRATEPAGE_SUCCESS;
}
-EXPORT_SYMBOL(migrate_page);
+EXPORT_SYMBOL(migrate_folio);
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
@@ -671,7 +674,7 @@ static int __buffer_migrate_folio(struct address_space *mapping,
head = folio_buffers(src);
if (!head)
- return migrate_page(mapping, &dst->page, &src->page, mode);
+ return migrate_folio(mapping, dst, src, mode);
/* Check whether page does not have extra refs before we do more work */
expected_count = folio_expected_refs(mapping, src);
@@ -848,7 +851,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
!filemap_release_folio(src, GFP_KERNEL))
return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
- return migrate_page(mapping, &dst->page, &src->page, mode);
+ return migrate_folio(mapping, dst, src, mode);
}
/*
@@ -875,7 +878,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
struct address_space *mapping = folio_mapping(src);
if (!mapping)
- rc = migrate_page(mapping, &dst->page, &src->page, mode);
+ rc = migrate_folio(mapping, dst, src, mode);
else if (mapping->a_ops->migrate_folio)
/*
* Most folios have a mapping and most filesystems
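As the new kerneldoc spells out, both folios must be locked on entry
and the return value checked against MIGRATEPAGE_SUCCESS. A minimal
caller sketch with a hypothetical foo_move_folio, for illustration
only:

	static int foo_move_folio(struct address_space *mapping,
			struct folio *dst, struct folio *src,
			enum migrate_mode mode)
	{
		int rc;

		/* Both dst and src are locked by our caller. */
		rc = migrate_folio(mapping, dst, src, mode);
		if (rc != MIGRATEPAGE_SUCCESS)
			return rc;	/* e.g. -EAGAIN on unexpected references */

		/* dst has taken over src (data copied unless MIGRATE_SYNC_NO_COPY). */
		return MIGRATEPAGE_SUCCESS;
	}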
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 5052093d0262..5dd97c39ca6a 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -718,7 +718,8 @@ void migrate_vma_pages(struct migrate_vma *migrate)
continue;
}
- r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
+ r = migrate_folio(mapping, page_folio(newpage),
+ page_folio(page), MIGRATE_SYNC_NO_COPY);
if (r != MIGRATEPAGE_SUCCESS)
migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 313ae7df59d8..e37ff6a1a6d0 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3801,7 +3801,7 @@ const struct address_space_operations shmem_aops = {
.write_end = shmem_write_end,
#endif
#ifdef CONFIG_MIGRATION
- .migratepage = migrate_page,
+ .migrate_folio = migrate_folio,
#endif
.error_remove_page = shmem_error_remove_page,
};
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f5b6f5638908..0a2021fc55ad 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -33,7 +33,7 @@ static const struct address_space_operations swap_aops = {
.writepage = swap_writepage,
.dirty_folio = noop_dirty_folio,
#ifdef CONFIG_MIGRATION
- .migratepage = migrate_page,
+ .migrate_folio = migrate_folio,
#endif
};
--
2.35.1