Message-ID: <20200821160759.GE3432@quack2.suse.cz>
Date: Fri, 21 Aug 2020 18:07:59 +0200
From: Jan Kara <jack@...e.cz>
To: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Cc: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
Hugh Dickins <hughd@...gle.com>,
William Kucharski <william.kucharski@...cle.com>,
Johannes Weiner <hannes@...xchg.org>, Jan Kara <jack@...e.cz>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 3/7] mm: Add an 'end' parameter to find_get_entries
On Wed 19-08-20 16:05:51, Matthew Wilcox (Oracle) wrote:
> This simplifies the callers and leads to a more efficient implementation
> since the XArray has this functionality already.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
The patch looks good to me. I'd just note that you could now also drop
some of the

	if (index >= end)
		break;

checks in shmem_undo_range(), since the lookup itself is bounded by
'end'.
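For reference, the checks I mean sit at the top of the pagevec walks,
roughly like this (a from-memory sketch, not the exact shmem.c code):

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i];

		index = indices[i];
		if (index >= end)	/* now dead: lookup stops at end - 1 */
			break;
		...
	}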
In the past I considered moving find_get_entries() to the same API as
find_get_pages_range() (which is essentially what you do now, except
that I also made 'start' a pgoff_t * so that we could report back where
the iteration ended within the range). But I decided the churn wasn't
worth the few removed lines and never pushed the patch. What you did in
this patch seems to be a reasonable middle ground :)
Honza
> ---
> include/linux/pagemap.h | 4 ++--
> mm/filemap.c | 9 +++++----
> mm/shmem.c | 10 ++++------
> mm/swap.c | 2 +-
> 4 files changed, 12 insertions(+), 13 deletions(-)
>
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index 7de11dcd534d..3f0dc8d00f2a 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -387,8 +387,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
> struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
> struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
> unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
> - unsigned int nr_entries, struct page **entries,
> - pgoff_t *indices);
> + pgoff_t end, unsigned int nr_entries, struct page **entries,
> + pgoff_t *indices);
> unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
> pgoff_t end, unsigned int nr_pages,
> struct page **pages);
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 1aaea26556cc..159cf3d6f1ae 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -1742,6 +1742,7 @@ EXPORT_SYMBOL(pagecache_get_page);
> * find_get_entries - gang pagecache lookup
> * @mapping: The address_space to search
> * @start: The starting page cache index
> + * @end: The highest page cache index to return.
> * @nr_entries: The maximum number of entries
> * @entries: Where the resulting entries are placed
> * @indices: The cache indices corresponding to the entries in @entries
> @@ -1765,9 +1766,9 @@ EXPORT_SYMBOL(pagecache_get_page);
> *
> * Return: the number of pages and shadow entries which were found.
> */
> -unsigned find_get_entries(struct address_space *mapping,
> - pgoff_t start, unsigned int nr_entries,
> - struct page **entries, pgoff_t *indices)
> +unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
> + pgoff_t end, unsigned int nr_entries, struct page **entries,
> + pgoff_t *indices)
> {
> XA_STATE(xas, &mapping->i_pages, start);
> struct page *page;
> @@ -1777,7 +1778,7 @@ unsigned find_get_entries(struct address_space *mapping,
> return 0;
>
> rcu_read_lock();
> - xas_for_each(&xas, page, ULONG_MAX) {
> + xas_for_each(&xas, page, end) {
> if (xas_retry(&xas, page))
> continue;
> /*
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 0f9f149f4b5e..abdbe61a1aa7 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -906,9 +906,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
> pagevec_init(&pvec);
> index = start;
> while (index < end) {
> - pvec.nr = find_get_entries(mapping, index,
> - min(end - index, (pgoff_t)PAGEVEC_SIZE),
> - pvec.pages, indices);
> + pvec.nr = find_get_entries(mapping, index, end - 1,
> + PAGEVEC_SIZE, pvec.pages, indices);
> if (!pvec.nr)
> break;
> for (i = 0; i < pagevec_count(&pvec); i++) {
> @@ -977,9 +976,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
> while (index < end) {
> cond_resched();
>
> - pvec.nr = find_get_entries(mapping, index,
> - min(end - index, (pgoff_t)PAGEVEC_SIZE),
> - pvec.pages, indices);
> + pvec.nr = find_get_entries(mapping, index, end - 1,
> + PAGEVEC_SIZE, pvec.pages, indices);
> if (!pvec.nr) {
> /* If all gone or hole-punch or unfalloc, we're done */
> if (index == start || end != -1)
> diff --git a/mm/swap.c b/mm/swap.c
> index d16d65d9b4e0..fcf6ccb94b09 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -1060,7 +1060,7 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec,
> pgoff_t start, unsigned nr_entries,
> pgoff_t *indices)
> {
> - pvec->nr = find_get_entries(mapping, start, nr_entries,
> + pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries,
> pvec->pages, indices);
> return pagevec_count(pvec);
> }
> --
> 2.28.0
>
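One subtlety worth spelling out for callers (my reading of the patch,
it isn't in the changelog): the new 'end' is an _inclusive_ page cache
index. That is why shmem_undo_range(), whose local 'end' is exclusive,
passes end - 1, and why pagevec_lookup_entries() passes ULONG_MAX for
an unbounded walk. A hypothetical caller wanting the half-open range
[start, stop) would thus do:

	/* 'end' is inclusive, so pass stop - 1 for [start, stop) */
	pvec.nr = find_get_entries(mapping, start, stop - 1,
				   PAGEVEC_SIZE, pvec.pages, indices);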
--
Jan Kara <jack@...e.com>
SUSE Labs, CR