Message-ID: <2477a817-b482-43ed-9fd3-a7f8f948495f@pankajraghav.com>
Date: Fri, 30 Aug 2024 16:59:57 +0200
From: Pankaj Raghav <kernel@...kajraghav.com>
To: Luis Chamberlain <mcgrof@...nel.org>, Zi Yan <ziy@...dia.com>
Cc: Matthew Wilcox <willy@...radead.org>, Sven Schnelle
<svens@...ux.ibm.com>, brauner@...nel.org, akpm@...ux-foundation.org,
chandan.babu@...cle.com, linux-fsdevel@...r.kernel.org, djwong@...nel.org,
hare@...e.de, gost.dev@...sung.com, linux-xfs@...r.kernel.org, hch@....de,
david@...morbit.com, yang@...amperecomputing.com,
linux-kernel@...r.kernel.org, linux-mm@...ck.org, john.g.garry@...cle.com,
cl@...amperecomputing.com, p.raghav@...sung.com, ryan.roberts@....com,
David Howells <dhowells@...hat.com>, linux-s390@...r.kernel.org
Subject: Re: [PATCH v13 04/10] mm: split a folio in minimum folio order chunks
On 30/08/2024 01:41, Luis Chamberlain wrote:
> On Thu, Aug 29, 2024 at 06:12:26PM -0400, Zi Yan wrote:
>> The issue is that the change to split_huge_page() makes split_huge_page_to_list_to_order()
>> unlock the wrong subpage. split_huge_page() used to pass the “page” pointer
>> to split_huge_page_to_list_to_order(), which keeps that “page” locked after
>> the split. But this patch changes the “page” passed into
>> split_huge_page_to_list_to_order() to always be the head page.
>>
>> This fixes the crash on my x86 VM, but it can be improved:
>>
>> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
>> index 7c50aeed0522..eff5d2fb5d4e 100644
>> --- a/include/linux/huge_mm.h
>> +++ b/include/linux/huge_mm.h
>> @@ -320,10 +320,7 @@ bool can_split_folio(struct folio *folio, int *pextra_pins);
>> int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>> unsigned int new_order);
>> int split_folio_to_list(struct folio *folio, struct list_head *list);
>> -static inline int split_huge_page(struct page *page)
>> -{
>> - return split_folio(page_folio(page));
>> -}
>> +int split_huge_page(struct page *page);
>> void deferred_split_folio(struct folio *folio);
>>
>> void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index c29af9451d92..4d723dab4336 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -3297,6 +3297,25 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>> return ret;
>> }
>>
>> +int split_huge_page(struct page *page)
>> +{
>> + unsigned int min_order = 0;
>> + struct folio *folio = page_folio(page);
>> +
>> + if (folio_test_anon(folio))
>> + goto out;
>> +
>> + if (!folio->mapping) {
>> + if (folio_test_pmd_mappable(folio))
>> + count_vm_event(THP_SPLIT_PAGE_FAILED);
>> + return -EBUSY;
>> + }
>> +
>> + min_order = mapping_min_folio_order(folio->mapping);
>> +out:
>> + return split_huge_page_to_list_to_order(page, NULL, min_order);
>> +}
>> +
>> int split_folio_to_list(struct folio *folio, struct list_head *list)
>> {
>> unsigned int min_order = 0;
>
>
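To restate the contract in play with a minimal sketch (a hypothetical
caller, the names are only illustrative):

	static int caller_example(struct page *page)
	{
		int ret;

		/* page is some subpage of a large folio, chosen by the caller */
		lock_page(page);
		ret = split_huge_page(page);
		/*
		 * On success the caller expects this same subpage, not the
		 * head page, to still be locked, so the original "page"
		 * pointer must reach split_huge_page_to_list_to_order().
		 */
		if (!ret)
			unlock_page(page);
		return ret;
	}
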
> Confirmed. And since you suggest it can be improved, I thought we could
> do that by sharing more code and putting things in the headers. The
> below also fixes this and tries to share more code, but I think it is
> perhaps harder to understand than your patch.
>
It feels a bit weird to pass both the folio and the page to `split_page_folio_to_list()`.
How about we extract the code that returns the min order, so that we don't repeat it?
Something like this:
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c275aa9cc105..d27febd5c639 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -331,10 +331,24 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
+int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
- return split_folio(page_folio(page));
+ struct folio *folio = page_folio(page);
+ int ret = min_order_for_split(folio);
+
+	if (ret < 0)
+ return ret;
+
+ /*
+ * split_huge_page() locks the page before splitting and
+ * expects the same page that has been split to be locked when
+ * returned. split_folio_to_list() cannot be used here because
+ * it converts the page to folio and passes the head page to be
+ * split.
+ */
+ return split_huge_page_to_list_to_order(page, NULL, ret);
}
void deferred_split_folio(struct folio *folio);
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 169f1a71c95d..b167e036d01b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3529,12 +3529,10 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
return ret;
}
 
-int split_folio_to_list(struct folio *folio, struct list_head *list)
+int min_order_for_split(struct folio *folio)
{
- unsigned int min_order = 0;
-
if (folio_test_anon(folio))
- goto out;
+ return 0;
 
if (!folio->mapping) {
if (folio_test_pmd_mappable(folio))
@@ -3542,10 +3540,17 @@ int split_folio_to_list(struct folio *folio, struct list_head *list)
return -EBUSY;
}
 
- min_order = mapping_min_folio_order(folio->mapping);
-out:
- return split_huge_page_to_list_to_order(&folio->page, list,
- min_order);
+ return mapping_min_folio_order(folio->mapping);
+}
+
+int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ int ret = min_order_for_split(folio);
+
+	if (ret < 0)
+ return ret;
+
+ return split_huge_page_to_list_to_order(&folio->page, list, ret);
}
 
void __folio_undo_large_rmappable(struct folio *folio)
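
To make the return convention of min_order_for_split() explicit (a
sketch, assuming the semantics above): it folds an error code and an
order into one value, so callers must only bail out on negatives. A
plain `if (ret)` would make split_huge_page() skip the split and return
the min order itself (e.g. 2 for a 16k min folio size on 4k pages)
whenever it is non-zero.

	int ret = min_order_for_split(folio);

	if (ret < 0)	/* -EBUSY: the folio was truncated */
		return ret;

	/* ret >= 0: the minimum folio order the mapping supports */
	return split_huge_page_to_list_to_order(page, NULL, ret);
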
> So I think your patch is cleaner and easier as a fix.
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index c275aa9cc105..99cd9c7bf55b 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -97,6 +97,7 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
> (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
>
> #define split_folio(f) split_folio_to_list(f, NULL)
> +#define split_folio_to_list(f, list) split_page_folio_to_list(&f->page, f, list)
>
> #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
> #define HPAGE_PMD_SHIFT PMD_SHIFT
> @@ -331,10 +332,11 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
> bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
> int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> unsigned int new_order);
> -int split_folio_to_list(struct folio *folio, struct list_head *list);
> +int split_page_folio_to_list(struct page *page, struct folio *folio,
> + struct list_head *list);
> static inline int split_huge_page(struct page *page)
> {
> - return split_folio(page_folio(page));
> + return split_page_folio_to_list(page, page_folio(page), NULL);
> }
> void deferred_split_folio(struct folio *folio);
>
> @@ -511,7 +513,9 @@ static inline int split_huge_page(struct page *page)
> return 0;
> }
>
> -static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
> +static inline int split_page_folio_to_list(struct page *page,
> + struct folio *folio,
> + struct list_head *list)
> {
> return 0;
> }
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 169f1a71c95d..b115bfe63b52 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -3529,7 +3529,8 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> return ret;
> }
>
> -int split_folio_to_list(struct folio *folio, struct list_head *list)
> +int split_page_folio_to_list(struct page *page, struct folio *folio,
> + struct list_head *list)
> {
> unsigned int min_order = 0;
>
> @@ -3544,8 +3545,7 @@ int split_folio_to_list(struct folio *folio, struct list_head *list)
>
> min_order = mapping_min_folio_order(folio->mapping);
> out:
> - return split_huge_page_to_list_to_order(&folio->page, list,
> - min_order);
> + return split_huge_page_to_list_to_order(page, list, min_order);
> }
>
> void __folio_undo_large_rmappable(struct folio *folio)
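
For completeness, the part that feels weird to me is the #define: it
shadows the existing function name, so the preprocessor rewrites every
split_folio_to_list() call site. A sketch of what a hypothetical call
site turns into:

	/* as written in the source */
	err = split_folio_to_list(folio, &list);

	/* after preprocessing, with the #define above */
	err = split_page_folio_to_list(&folio->page, folio, &list);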