Message-ID: <95e1afd00e550ee227dd5d76a5947a2176730e1d.camel@kernel.org>
Date: Fri, 28 Oct 2022 13:20:58 -0400
From: Jeff Layton <jlayton@...nel.org>
To: "Vishal Moola (Oracle)" <vishal.moola@...il.com>,
linux-fsdevel@...r.kernel.org, David Howells <dhowells@...hat.com>
Cc: linux-afs@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-btrfs@...r.kernel.org, ceph-devel@...r.kernel.org,
linux-cifs@...r.kernel.org, linux-ext4@...r.kernel.org,
linux-f2fs-devel@...ts.sourceforge.net, cluster-devel@...hat.com,
linux-nilfs@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH v3 08/23] ceph: Convert ceph_writepages_start() to use
filemap_get_folios_tag()
On Mon, 2022-10-17 at 13:24 -0700, Vishal Moola (Oracle) wrote:
> Convert function to use a folio_batch instead of pagevec. This is in
> preparation for the removal of find_get_pages_range_tag().
>
> Also some minor renaming for consistency.
>
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@...il.com>
> ---
> fs/ceph/addr.c | 58 ++++++++++++++++++++++++++------------------------
> 1 file changed, 30 insertions(+), 28 deletions(-)
>
> diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
> index dcf701b05cc1..d2361d51db39 100644
> --- a/fs/ceph/addr.c
> +++ b/fs/ceph/addr.c
> @@ -792,7 +792,7 @@ static int ceph_writepages_start(struct address_space *mapping,
> struct ceph_vino vino = ceph_vino(inode);
> pgoff_t index, start_index, end = -1;
> struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
> - struct pagevec pvec;
> + struct folio_batch fbatch;
> int rc = 0;
> unsigned int wsize = i_blocksize(inode);
> struct ceph_osd_request *req = NULL;
> @@ -821,7 +821,7 @@ static int ceph_writepages_start(struct address_space *mapping,
> if (fsc->mount_options->wsize < wsize)
> wsize = fsc->mount_options->wsize;
>
> - pagevec_init(&pvec);
> + folio_batch_init(&fbatch);
>
> start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
> index = start_index;
> @@ -869,7 +869,7 @@ static int ceph_writepages_start(struct address_space *mapping,
>
> while (!done && index <= end) {
> int num_ops = 0, op_idx;
> - unsigned i, pvec_pages, max_pages, locked_pages = 0;
> + unsigned i, nr_folios, max_pages, locked_pages = 0;
> struct page **pages = NULL, **data_pages;
> struct page *page;
> pgoff_t strip_unit_end = 0;
> @@ -879,13 +879,13 @@ static int ceph_writepages_start(struct address_space *mapping,
> max_pages = wsize >> PAGE_SHIFT;
>
> get_more_pages:
> - pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
> - end, PAGECACHE_TAG_DIRTY);
> - dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
> - if (!pvec_pages && !locked_pages)
> + nr_folios = filemap_get_folios_tag(mapping, &index,
> + end, PAGECACHE_TAG_DIRTY, &fbatch);
> + dout("pagevec_lookup_range_tag got %d\n", nr_folios);
> + if (!nr_folios && !locked_pages)
> break;
> - for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
> - page = pvec.pages[i];
> + for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
> + page = &fbatch.folios[i]->page;
> dout("? %p idx %lu\n", page, page->index);
> if (locked_pages == 0)
> lock_page(page); /* first page */
> @@ -995,7 +995,7 @@ static int ceph_writepages_start(struct address_space *mapping,
> len = 0;
> }
>
> - /* note position of first page in pvec */
> + /* note position of first page in fbatch */
> dout("%p will write page %p idx %lu\n",
> inode, page, page->index);
>
> @@ -1005,30 +1005,30 @@ static int ceph_writepages_start(struct address_space *mapping,
> fsc->write_congested = true;
>
> pages[locked_pages++] = page;
> - pvec.pages[i] = NULL;
> + fbatch.folios[i] = NULL;
>
> len += thp_size(page);
> }
>
> /* did we get anything? */
> if (!locked_pages)
> - goto release_pvec_pages;
> + goto release_folios;
> if (i) {
> unsigned j, n = 0;
> - /* shift unused page to beginning of pvec */
> - for (j = 0; j < pvec_pages; j++) {
> - if (!pvec.pages[j])
> + /* shift unused page to beginning of fbatch */
> + for (j = 0; j < nr_folios; j++) {
> + if (!fbatch.folios[j])
> continue;
> if (n < j)
> - pvec.pages[n] = pvec.pages[j];
> + fbatch.folios[n] = fbatch.folios[j];
> n++;
> }
> - pvec.nr = n;
> + fbatch.nr = n;
>
> - if (pvec_pages && i == pvec_pages &&
> + if (nr_folios && i == nr_folios &&
> locked_pages < max_pages) {
> - dout("reached end pvec, trying for more\n");
> - pagevec_release(&pvec);
> + dout("reached end fbatch, trying for more\n");
> + folio_batch_release(&fbatch);
> goto get_more_pages;
> }
> }
> @@ -1164,10 +1164,10 @@ static int ceph_writepages_start(struct address_space *mapping,
> if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
> done = true;
>
> -release_pvec_pages:
> - dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
> - pvec.nr ? pvec.pages[0] : NULL);
> - pagevec_release(&pvec);
> +release_folios:
> + dout("folio_batch release on %d folios (%p)\n", (int)fbatch.nr,
> + fbatch.nr ? fbatch.folios[0] : NULL);
> + folio_batch_release(&fbatch);
> }
>
> if (should_loop && !done) {
> @@ -1184,15 +1184,17 @@ static int ceph_writepages_start(struct address_space *mapping,
> unsigned i, nr;
> index = 0;
> while ((index <= end) &&
> - (nr = pagevec_lookup_tag(&pvec, mapping, &index,
> - PAGECACHE_TAG_WRITEBACK))) {
> + (nr = filemap_get_folios_tag(mapping, &index,
> + (pgoff_t)-1,
> + PAGECACHE_TAG_WRITEBACK,
> + &fbatch))) {
> for (i = 0; i < nr; i++) {
> - page = pvec.pages[i];
> + page = &fbatch.folios[i]->page;
> if (page_snap_context(page) != snapc)
> continue;
> wait_on_page_writeback(page);
> }
> - pagevec_release(&pvec);
> + folio_batch_release(&fbatch);
> cond_resched();
> }
> }
I took a brief look and this looks like a fairly straightforward
conversion. It definitely needs testing, however.
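For anyone skimming the diff, the conversion boils down to the folio_batch
lookup idiom below. This is a minimal, generic sketch rather than the
ceph-specific logic above; write_dirty_range() is just an illustrative
helper name, not anything in the tree:

	#include <linux/pagevec.h>
	#include <linux/pagemap.h>
	#include <linux/sched.h>

	static void write_dirty_range(struct address_space *mapping,
				      pgoff_t index, pgoff_t end)
	{
		struct folio_batch fbatch;
		unsigned int i, nr;

		folio_batch_init(&fbatch);
		while (index <= end) {
			/* grab up to a batch of dirty folios, advancing index */
			nr = filemap_get_folios_tag(mapping, &index, end,
						    PAGECACHE_TAG_DIRTY, &fbatch);
			if (!nr)
				break;
			for (i = 0; i < nr; i++) {
				struct folio *folio = fbatch.folios[i];
				/* lock, recheck the dirty tag, and write folio */
			}
			folio_batch_release(&fbatch);	/* drop the references */
			cond_resched();
		}
	}

The interesting part of this patch is everything layered on top of that
loop (the NULL-ing and compaction of fbatch.folios[], snap context
handling, etc.), which is why it needs real testing rather than just
review by inspection.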
The hope was to get ceph converted over to using the netfs write
helpers, but that's taking a lot longer than expected. It's really up to
Xiubo at this point, but I don't have an issue in principle with taking
this patch in before the netfs conversion, particularly if it's blocking
other work.
Acked-by: Jeff Layton <jlayton@...nel.org>