Message-ID: <20250213200755.196832-2-imbrenda@linux.ibm.com>
Date: Thu, 13 Feb 2025 21:07:54 +0100
From: Claudio Imbrenda <imbrenda@...ux.ibm.com>
To: kvm@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, linux-s390@...r.kernel.org,
frankja@...ux.ibm.com, borntraeger@...ibm.com, david@...hat.com,
nrb@...ux.ibm.com, seiden@...ux.ibm.com, nsg@...ux.ibm.com,
schlameuss@...ux.ibm.com, hca@...ux.ibm.com
Subject: [PATCH v1 1/2] KVM: s390: fix issues when splitting folios
When splitting a folio with split_folio(), the extra reference held on
the folio is transferred to the first (head) page of the old folio. The
caller, however, might be holding its reference through a different page
of that folio. Use split_huge_page_to_list_to_order() instead, which
transfers the extra reference to the page that is passed in.
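
For illustration, the pattern this fix switches to looks roughly like the
sketch below. This is a minimal kernel-style C sketch, not the actual
patch: the wrapper name split_keeping_ref_on_page() is made up for the
example, while the helpers it calls are the ones used in the
arch/s390/mm/gmap.c hunk below.

/*
 * Illustrative only: split the large folio containing @page so that the
 * caller's extra reference ends up on @page itself instead of on the
 * head page of the old folio.
 */
#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/pagemap.h>

static int split_keeping_ref_on_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int rc;

	folio_lock(folio);
	/* Folios that cannot be split down to order 0 are not handled here. */
	rc = min_order_for_split(folio);
	if (rc > 0)
		rc = -EINVAL;
	if (!rc)
		/* Unlike split_folio(), the extra reference stays on @page. */
		rc = split_huge_page_to_list_to_order(page, NULL, 0);
	folio_unlock(folio);
	return rc;
}
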
Fixes: 5cbe24350b7d ("KVM: s390: move pv gmap functions into kvm")
Signed-off-by: Claudio Imbrenda <imbrenda@...ux.ibm.com>
---
arch/s390/include/asm/gmap.h | 2 +-
arch/s390/kvm/gmap.c | 4 ++--
arch/s390/mm/gmap.c | 11 ++++++++---
3 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 4e73ef46d4b2..563df4d8ba90 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -139,7 +139,7 @@ int s390_replace_asce(struct gmap *gmap);
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
unsigned long end, bool interruptible);
-int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split);
+int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct page *page, bool split);
unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
/**
diff --git a/arch/s390/kvm/gmap.c b/arch/s390/kvm/gmap.c
index 02adf151d4de..fc4d490d25a2 100644
--- a/arch/s390/kvm/gmap.c
+++ b/arch/s390/kvm/gmap.c
@@ -72,7 +72,7 @@ static int __gmap_make_secure(struct gmap *gmap, struct page *page, void *uvcb)
return -EFAULT;
if (folio_test_large(folio)) {
mmap_read_unlock(gmap->mm);
- rc = kvm_s390_wiggle_split_folio(gmap->mm, folio, true);
+ rc = kvm_s390_wiggle_split_folio(gmap->mm, page, true);
mmap_read_lock(gmap->mm);
if (rc)
return rc;
@@ -100,7 +100,7 @@ static int __gmap_make_secure(struct gmap *gmap, struct page *page, void *uvcb)
/* The folio has too many references, try to shake some off */
if (rc == -EBUSY) {
mmap_read_unlock(gmap->mm);
- kvm_s390_wiggle_split_folio(gmap->mm, folio, false);
+ kvm_s390_wiggle_split_folio(gmap->mm, page, false);
mmap_read_lock(gmap->mm);
return -EAGAIN;
}
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 94d927785800..8117597419d3 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2630,14 +2630,15 @@ EXPORT_SYMBOL_GPL(s390_replace_asce);
/**
* kvm_s390_wiggle_split_folio() - try to drain extra references to a folio and optionally split
* @mm: the mm containing the folio to work on
- * @folio: the folio
+ * @page: one of the pages of the folio that needs to be split
* @split: whether to split a large folio
*
* Context: Must be called while holding an extra reference to the folio;
* the mm lock should not be held.
*/
-int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split)
+int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct page *page, bool split)
{
+ struct folio *folio = page_folio(page);
int rc;
lockdep_assert_not_held(&mm->mmap_lock);
@@ -2645,7 +2646,11 @@ int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool
lru_add_drain_all();
if (split) {
folio_lock(folio);
- rc = split_folio(folio);
+ rc = min_order_for_split(folio);
+ if (rc > 0)
+ rc = -EINVAL;
+ if (!rc)
+ rc = split_huge_page_to_list_to_order(page, NULL, 0);
folio_unlock(folio);
if (rc != -EBUSY)
--
2.48.1