Message-ID: <ZtDyGHTSy3Bi3FkS@casper.infradead.org>
Date: Thu, 29 Aug 2024 23:11:36 +0100
From: Matthew Wilcox <willy@...radead.org>
To: "Pankaj Raghav (Samsung)" <kernel@...kajraghav.com>
Cc: brauner@...nel.org, akpm@...ux-foundation.org, chandan.babu@...cle.com,
linux-fsdevel@...r.kernel.org, djwong@...nel.org, hare@...e.de,
gost.dev@...sung.com, linux-xfs@...r.kernel.org, hch@....de,
david@...morbit.com, Zi Yan <ziy@...dia.com>,
yang@...amperecomputing.com, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, john.g.garry@...cle.com,
cl@...amperecomputing.com, p.raghav@...sung.com, mcgrof@...nel.org,
ryan.roberts@....com, David Howells <dhowells@...hat.com>
Subject: Re: [PATCH v13 04/10] mm: split a folio in minimum folio order chunks
On Thu, Aug 22, 2024 at 03:50:12PM +0200, Pankaj Raghav (Samsung) wrote:
> @@ -317,9 +319,10 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
> bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
> int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> unsigned int new_order);
> +int split_folio_to_list(struct folio *folio, struct list_head *list);
> static inline int split_huge_page(struct page *page)
> {
> - return split_huge_page_to_list_to_order(page, NULL, 0);
> + return split_folio(page_folio(page));
Oh! You can't do this!
split_huge_page() takes a precise page, NOT a folio. That page is
locked. When we return from split_huge_page(), the new folio that
contains the precise page is locked.

split_folio() ends up passing the folio's head page down, so after the
split it is the folio containing the head page that stays locked. You've
made it so that the caller's page's folio won't necessarily be locked.
More testing was needed ;-P
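
For what it's worth, one possible shape (just a sketch, untested) that
keeps the precise-page contract while still respecting the mapping's
minimum order would be to leave split_huge_page() passing the caller's
page and only derive the order from its folio; this assumes the
mapping_min_folio_order() helper this series already uses:

static inline int split_huge_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	unsigned int min_order = 0;

	/* Anonymous folios have no mapping-imposed minimum order. */
	if (!folio_test_anon(folio) && folio->mapping)
		min_order = mapping_min_folio_order(folio->mapping);

	/*
	 * Pass the precise page down so the folio that contains it is
	 * the one left locked when the split returns.
	 */
	return split_huge_page_to_list_to_order(page, NULL, min_order);
}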
> }
> void deferred_split_folio(struct folio *folio);
>
> @@ -495,6 +498,12 @@ static inline int split_huge_page(struct page *page)
> {
> return 0;
> }
> +
> +static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
> +{
> + return 0;
> +}
> +
> static inline void deferred_split_folio(struct folio *folio) {}
> #define split_huge_pmd(__vma, __pmd, __address) \
> do { } while (0)
> @@ -622,7 +631,4 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
> return split_folio_to_list_to_order(folio, NULL, new_order);
> }
>
> -#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
> -#define split_folio(f) split_folio_to_order(f, 0)
> -
> #endif /* _LINUX_HUGE_MM_H */
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index cf8e34f62976f..06384b85a3a20 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -3303,6 +3303,9 @@ bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
> * released, or if some unexpected race happened (e.g., anon VMA disappeared,
> * truncation).
> *
> + * Callers should ensure that the order respects the address space mapping
> + * min-order if one is set for non-anonymous folios.
> + *
> * Returns -EINVAL when trying to split to an order that is incompatible
> * with the folio. Splitting to order 0 is compatible with all folios.
> */
> @@ -3384,6 +3387,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> mapping = NULL;
> anon_vma_lock_write(anon_vma);
> } else {
> + unsigned int min_order;
> gfp_t gfp;
>
> mapping = folio->mapping;
> @@ -3394,6 +3398,14 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> goto out;
> }
>
> + min_order = mapping_min_folio_order(folio->mapping);
> + if (new_order < min_order) {
> + VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
> + min_order);
> + ret = -EINVAL;
> + goto out;
> + }
> +
> gfp = current_gfp_context(mapping_gfp_mask(mapping) &
> GFP_RECLAIM_MASK);
>
> @@ -3506,6 +3518,25 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> return ret;
> }
>
> +int split_folio_to_list(struct folio *folio, struct list_head *list)
> +{
> + unsigned int min_order = 0;
> +
> + if (folio_test_anon(folio))
> + goto out;
> +
> + if (!folio->mapping) {
> + if (folio_test_pmd_mappable(folio))
> + count_vm_event(THP_SPLIT_PAGE_FAILED);
> + return -EBUSY;
> + }
> +
> + min_order = mapping_min_folio_order(folio->mapping);
> +out:
> + return split_huge_page_to_list_to_order(&folio->page, list,
> + min_order);
> +}
> +
> void __folio_undo_large_rmappable(struct folio *folio)
> {
> struct deferred_split *ds_queue;
> @@ -3736,6 +3767,8 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
> struct vm_area_struct *vma = vma_lookup(mm, addr);
> struct folio_walk fw;
> struct folio *folio;
> + struct address_space *mapping;
> + unsigned int target_order = new_order;
>
> if (!vma)
> break;
> @@ -3753,7 +3786,13 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
> if (!is_transparent_hugepage(folio))
> goto next;
>
> - if (new_order >= folio_order(folio))
> + if (!folio_test_anon(folio)) {
> + mapping = folio->mapping;
> + target_order = max(new_order,
> + mapping_min_folio_order(mapping));
> + }
> +
> + if (target_order >= folio_order(folio))
> goto next;
>
> total++;
> @@ -3771,9 +3810,14 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
> folio_get(folio);
> folio_walk_end(&fw, vma);
>
> - if (!split_folio_to_order(folio, new_order))
> + if (!folio_test_anon(folio) && folio->mapping != mapping)
> + goto unlock;
> +
> + if (!split_folio_to_order(folio, target_order))
> split++;
>
> +unlock:
> +
> folio_unlock(folio);
> folio_put(folio);
>
> @@ -3802,6 +3846,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
> pgoff_t index;
> int nr_pages = 1;
> unsigned long total = 0, split = 0;
> + unsigned int min_order;
> + unsigned int target_order;
>
> file = getname_kernel(file_path);
> if (IS_ERR(file))
> @@ -3815,6 +3861,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
> file_path, off_start, off_end);
>
> mapping = candidate->f_mapping;
> + min_order = mapping_min_folio_order(mapping);
> + target_order = max(new_order, min_order);
>
> for (index = off_start; index < off_end; index += nr_pages) {
> struct folio *folio = filemap_get_folio(mapping, index);
> @@ -3829,15 +3877,19 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
> total++;
> nr_pages = folio_nr_pages(folio);
>
> - if (new_order >= folio_order(folio))
> + if (target_order >= folio_order(folio))
> goto next;
>
> if (!folio_trylock(folio))
> goto next;
>
> - if (!split_folio_to_order(folio, new_order))
> + if (folio->mapping != mapping)
> + goto unlock;
> +
> + if (!split_folio_to_order(folio, target_order))
> split++;
>
> +unlock:
> folio_unlock(folio);
> next:
> folio_put(folio);
> --
> 2.44.1
>