Message-ID: <20230615221346.GA29046@monkey>
Date: Thu, 15 Jun 2023 15:13:46 -0700
From: Mike Kravetz <mike.kravetz@...cle.com>
To: Sidhartha Kumar <sidhartha.kumar@...cle.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
akpm@...ux-foundation.org, songmuchun@...edance.com,
willy@...radead.org, david@...hat.com, nphamcs@...il.com,
jthoughton@...gle.com
Subject: Re: [PATCH 1/2] mm/filemap: remove hugetlb special casing in
filemap.c

On 06/09/23 12:49, Sidhartha Kumar wrote:
> This patch aims to remove special cased hugetlb handling code within the
> page cache by changing the granularity of each index to the base page size
> rather than the huge page size.
>
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@...cle.com>
> ---
> include/linux/pagemap.h | 6 ------
> mm/filemap.c | 36 +++++++++++-------------------------
> 2 files changed, 11 insertions(+), 31 deletions(-)

I agree with Matthew that this patch cannot be sent independently of, or
prior to, the patch with the hugetlb changes.

The code changes below to remove the hugetlb special casing look fine.

It does not matter for your code changes, but I believe some of the
routines where you are removing hugetlb checks cannot be passed hugetlb
folios/vmas today. Specifically: folio_more_pages,
filemap_get_folios_contig and filemap_get_folios_tag.
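
For anyone following along who is less familiar with the special casing
being removed: hugetlbfs currently indexes the page cache in
huge-page-sized units, while everything else uses base pages. A minimal
sketch of the difference, assuming 2MB huge pages and 4KB base pages
(illustrative userspace arithmetic, not the kernel code):

	#include <stdio.h>

	int main(void)
	{
		/* a file offset 4MB into a hugetlbfs file */
		unsigned long long off = 4ULL << 20;

		/* today: the page cache index is in huge-page units */
		printf("huge-page index: %llu\n", off >> 21);	/* 2 */

		/* after this series: base-page units, like other files */
		printf("base-page index: %llu\n", off >> 12);	/* 1024 */

		return 0;
	}

With base-page indexing, a hugetlb folio becomes an ordinary multi-order
entry spanning folio_nr_pages() slots, which is why the generic checks
in the diff below can simply cover it.
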
--
Mike Kravetz
>
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index 716953ee1ebdb..17c414fc2136e 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -723,9 +723,6 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
> */
> static inline bool folio_contains(struct folio *folio, pgoff_t index)
> {
> - /* HugeTLBfs indexes the page cache in units of hpage_size */
> - if (folio_test_hugetlb(folio))
> - return folio->index == index;
> return index - folio_index(folio) < folio_nr_pages(folio);
> }
>
> @@ -850,12 +847,9 @@ static inline loff_t folio_file_pos(struct folio *folio)
>
> /*
> * Get the offset in PAGE_SIZE (even for hugetlb folios).
> - * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
> */
> static inline pgoff_t folio_pgoff(struct folio *folio)
> {
> - if (unlikely(folio_test_hugetlb(folio)))
> - return hugetlb_basepage_index(&folio->page);
> return folio->index;
> }
>
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 60f6f63cfacba..7462d33f70e2f 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -134,11 +134,8 @@ static void page_cache_delete(struct address_space *mapping,
>
> mapping_set_update(&xas, mapping);
>
> - /* hugetlb pages are represented by a single entry in the xarray */
> - if (!folio_test_hugetlb(folio)) {
> - xas_set_order(&xas, folio->index, folio_order(folio));
> - nr = folio_nr_pages(folio);
> - }
> + xas_set_order(&xas, folio->index, folio_order(folio));
> + nr = folio_nr_pages(folio);
>
> VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
>
> @@ -237,7 +234,7 @@ void filemap_free_folio(struct address_space *mapping, struct folio *folio)
> if (free_folio)
> free_folio(folio);
>
> - if (folio_test_large(folio) && !folio_test_hugetlb(folio))
> + if (folio_test_large(folio))
> refs = folio_nr_pages(folio);
> folio_put_refs(folio, refs);
> }
> @@ -858,14 +855,15 @@ noinline int __filemap_add_folio(struct address_space *mapping,
>
> if (!huge) {
> int error = mem_cgroup_charge(folio, NULL, gfp);
> - VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
> if (error)
> return error;
> charged = true;
> - xas_set_order(&xas, index, folio_order(folio));
> - nr = folio_nr_pages(folio);
> }
>
> + VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
> + xas_set_order(&xas, index, folio_order(folio));
> + nr = folio_nr_pages(folio);
> +
> gfp &= GFP_RECLAIM_MASK;
> folio_ref_add(folio, nr);
> folio->mapping = mapping;
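
(To spell out the effect of hoisting the xas_set_order()/nr setup out of
the !huge branch: hugetlb folios were previously added as a single
xarray entry at a huge-page index, and now get a multi-order entry at a
base-page index, the same as THPs. Roughly, for a PMD-sized folio on
x86 -- the numbers here are illustrative:)

	/* sketch only: a 2MB (order-9) folio at base-page index 1024 */
	xas_set_order(&xas, 1024, 9);	/* entry spans indices 1024-1535 */
	nr = 512;			/* folio_nr_pages() for order 9 */
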
> @@ -2048,7 +2046,7 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
> int idx = folio_batch_count(fbatch) - 1;
>
> folio = fbatch->folios[idx];
> - if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
> + if (!xa_is_value(folio))
> nr = folio_nr_pages(folio);
> *start = indices[idx] + nr;
> }
> @@ -2112,7 +2110,7 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
> int idx = folio_batch_count(fbatch) - 1;
>
> folio = fbatch->folios[idx];
> - if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
> + if (!xa_is_value(folio))
> nr = folio_nr_pages(folio);
> *start = indices[idx] + nr;
> }
> @@ -2153,9 +2151,6 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
> continue;
> if (!folio_batch_add(fbatch, folio)) {
> unsigned long nr = folio_nr_pages(folio);
> -
> - if (folio_test_hugetlb(folio))
> - nr = 1;
> *start = folio->index + nr;
> goto out;
> }
> @@ -2181,7 +2176,7 @@ EXPORT_SYMBOL(filemap_get_folios);
> static inline
> bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
> {
> - if (!folio_test_large(folio) || folio_test_hugetlb(folio))
> + if (!folio_test_large(folio))
> return false;
> if (index >= max)
> return false;
> @@ -2231,9 +2226,6 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
>
> if (!folio_batch_add(fbatch, folio)) {
> nr = folio_nr_pages(folio);
> -
> - if (folio_test_hugetlb(folio))
> - nr = 1;
> *start = folio->index + nr;
> goto out;
> }
> @@ -2250,10 +2242,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
>
> if (nr) {
> folio = fbatch->folios[nr - 1];
> - if (folio_test_hugetlb(folio))
> - *start = folio->index + 1;
> - else
> - *start = folio->index + folio_nr_pages(folio);
> + *start = folio->index + folio_nr_pages(folio);
> }
> out:
> rcu_read_unlock();
> @@ -2291,9 +2280,6 @@ unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
> continue;
> if (!folio_batch_add(fbatch, folio)) {
> unsigned long nr = folio_nr_pages(folio);
> -
> - if (folio_test_hugetlb(folio))
> - nr = 1;
> *start = folio->index + nr;
> goto out;
> }
> --
> 2.40.1