Message-ID: <20230222134927.459b036b@canb.auug.org.au>
Date: Wed, 22 Feb 2023 13:49:27 +1100
From: Stephen Rothwell <sfr@...b.auug.org.au>
To: David Howells <dhowells@...hat.com>
Cc: Matthew Wilcox <willy@...radead.org>,
"Vishal Moola (Oracle)" <vishal.moola@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Steve French <smfrench@...il.com>,
Steve French <stfrench@...rosoft.com>,
Shyam Prasad N <nspmangalore@...il.com>,
Rohith Surabattula <rohiths.msft@...il.com>,
Tom Talpey <tom@...pey.com>, Paulo Alcantara <pc@....nz>,
Jeff Layton <jlayton@...nel.org>, linux-cifs@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-next@...r.kernel.org
Subject: Re: linux-next: manual merge of the mm-stable tree with the cifs
tree

Hi David,

On Tue, 21 Feb 2023 14:39:24 +0000 David Howells <dhowells@...hat.com> wrote:
>
> Stephen Rothwell <sfr@...b.auug.org.au> wrote:
>
> > Andrew has already asked for it to be merged, so it's up to Linus.
> >
> > You could fetch it yourself and do a trial merge and send me your
> > resolution ...
> >
> > git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm tags/mm-stable-2023-02-20-13-37
>
> Okay, did that. See attached. Also here:
>
> https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git/log/?h=iov-cifs-mm
>
> David
> ---
> commit 71ad4f67439e60fe04bbf7aed8870e6f83a5d15e
> Author: David Howells <dhowells@...hat.com>
> Date: Tue Feb 21 13:23:05 2023 +0000
>
> cifs: Handle transition to filemap_get_folios_tag()

OK, so in the merge of mm-stable, I used the cifs version of
fs/cifs/file.c with this patch applied.  The merge resolution for that
file looks like this (a short sketch of the underlying API change
follows after the diff):

diff --cc fs/cifs/file.c
index 0e602173ac76,162fab5a4583..000000000000
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@@ -2850,154 -2853,29 +2850,161 @@@ err_xid
return rc;
}
-static int
-cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
+/*
+ * write a region of pages back to the server
+ */
+static int cifs_writepages_region(struct address_space *mapping,
+ struct writeback_control *wbc,
+ loff_t start, loff_t end, loff_t *_next)
{
- int rc;
- unsigned int xid;
++ struct folio_batch fbatch;
+ struct folio *folio;
- struct page *head_page;
++ unsigned int i;
+ ssize_t ret;
+ int n, skips = 0;
- xid = get_xid();
-/* BB add check for wbc flags */
- get_page(page);
- if (!PageUptodate(page))
- cifs_dbg(FYI, "ppw - page not up to date\n");
++ folio_batch_init(&fbatch);
+
- /*
- * Set the "writeback" flag, and clear "dirty" in the radix tree.
- *
- * A writepage() implementation always needs to do either this,
- * or re-dirty the page with "redirty_page_for_writepage()" in
- * the case of a failure.
- *
- * Just unlocking the page will cause the radix tree tag-bits
- * to fail to update with the state of the page correctly.
- */
- set_page_writeback(page);
+ do {
+ pgoff_t index = start / PAGE_SIZE;
+
- n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
- PAGECACHE_TAG_DIRTY, 1, &head_page);
++ n = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE,
++ PAGECACHE_TAG_DIRTY, &fbatch);
+ if (!n)
+ break;
+
- folio = page_folio(head_page);
- start = folio_pos(folio); /* May regress with THPs */
++ for (i = 0; i < n; i++) {
++ folio = fbatch.folios[i];
++ start = folio_pos(folio); /* May regress with THPs */
+
- /* At this point we hold neither the i_pages lock nor the
- * page lock: the page may be truncated or invalidated
- * (changing page->mapping to NULL), or even swizzled
- * back from swapper_space to tmpfs file mapping
- */
- if (wbc->sync_mode != WB_SYNC_NONE) {
- ret = folio_lock_killable(folio);
- if (ret < 0) {
- folio_put(folio);
- return ret;
- }
- } else {
- if (!folio_trylock(folio)) {
- folio_put(folio);
- return 0;
++ /* At this point we hold neither the i_pages lock nor the
++ * page lock: the page may be truncated or invalidated
++ * (changing page->mapping to NULL), or even swizzled
++ * back from swapper_space to tmpfs file mapping
++ */
++ if (wbc->sync_mode != WB_SYNC_NONE) {
++ ret = folio_lock_killable(folio);
++ if (ret < 0) {
++ folio_batch_release(&fbatch);
++ return ret;
++ }
++ } else {
++ if (!folio_trylock(folio))
++ continue;
+ }
- }
+
- if (folio_mapping(folio) != mapping ||
- !folio_test_dirty(folio)) {
- start += folio_size(folio);
- folio_unlock(folio);
- folio_put(folio);
- continue;
- }
++ if (folio->mapping != mapping ||
++ !folio_test_dirty(folio)) {
++ start += folio_size(folio);
++ folio_unlock(folio);
++ continue;
++ }
+
- if (folio_test_writeback(folio) ||
- folio_test_fscache(folio)) {
- folio_unlock(folio);
- if (wbc->sync_mode != WB_SYNC_NONE) {
- folio_wait_writeback(folio);
++ if (folio_test_writeback(folio) ||
++ folio_test_fscache(folio)) {
++ folio_unlock(folio);
++ if (wbc->sync_mode != WB_SYNC_NONE) {
++ folio_wait_writeback(folio);
+#ifdef CONFIG_CIFS_FSCACHE
- folio_wait_fscache(folio);
++ folio_wait_fscache(folio);
+#endif
- } else {
- start += folio_size(folio);
- }
- folio_put(folio);
- if (wbc->sync_mode == WB_SYNC_NONE) {
- if (skips >= 5 || need_resched())
- break;
- skips++;
++ } else {
++ start += folio_size(folio);
++ }
++ if (wbc->sync_mode == WB_SYNC_NONE) {
++ if (skips >= 5 || need_resched()) {
++ *_next = start;
++ return 0;
++ }
++ skips++;
++ }
++ continue;
+ }
- continue;
- }
+
- if (!folio_clear_dirty_for_io(folio))
- /* We hold the page lock - it should've been dirty. */
- WARN_ON(1);
++ if (!folio_clear_dirty_for_io(folio))
++ /* We hold the page lock - it should've been dirty. */
++ WARN_ON(1);
+
- ret = cifs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
- folio_put(folio);
- if (ret < 0)
- return ret;
++ ret = cifs_write_back_from_locked_folio(mapping, wbc,
++ folio, start, end);
++ if (ret < 0) {
++ folio_batch_release(&fbatch);
++ return ret;
++ }
++
++ start += ret;
++ }
+
- start += ret;
++ folio_batch_release(&fbatch);
+ cond_resched();
+ } while (wbc->nr_to_write > 0);
+
+ *_next = start;
+ return 0;
+}
+
+/*
+ * Write some of the pending data back to the server
+ */
+static int cifs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ loff_t start, next;
+ int ret;
+
+ /* We have to be careful as we can end up racing with setattr()
+ * truncating the pagecache since the caller doesn't take a lock here
+ * to prevent it.
+ */
+
+ if (wbc->range_cyclic) {
+ start = mapping->writeback_index * PAGE_SIZE;
+ ret = cifs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
+ if (ret == 0) {
+ mapping->writeback_index = next / PAGE_SIZE;
+ if (start > 0 && wbc->nr_to_write > 0) {
+ ret = cifs_writepages_region(mapping, wbc, 0,
+ start, &next);
+ if (ret == 0)
+ mapping->writeback_index =
+ next / PAGE_SIZE;
+ }
+ }
+ } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
+ ret = cifs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
+ if (wbc->nr_to_write > 0 && ret == 0)
+ mapping->writeback_index = next / PAGE_SIZE;
+ } else {
+ ret = cifs_writepages_region(mapping, wbc,
+ wbc->range_start, wbc->range_end, &next);
+ }
+
+ return ret;
+}
+
+static int
+cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
+{
+ int rc;
+ unsigned int xid;
+
+ xid = get_xid();
+/* BB add check for wbc flags */
+ get_page(page);
+ if (!PageUptodate(page))
+ cifs_dbg(FYI, "ppw - page not up to date\n");
+
+ /*
+ * Set the "writeback" flag, and clear "dirty" in the radix tree.
+ *
+ * A writepage() implementation always needs to do either this,
+ * or re-dirty the page with "redirty_page_for_writepage()" in
+ * the case of a failure.
+ *
+ * Just unlocking the page will cause the radix tree tag-bits
+ * to fail to update with the state of the page correctly.
+ */
+ set_page_writeback(page);
retry_write:
rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
if (is_retryable_error(rc)) {

Which is much less obvious :-)
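
For reference, the API change driving this conflict is mm-stable's
replacement of find_get_pages_range_tag() with filemap_get_folios_tag():
the old call returned at most one tagged page carrying its own
reference, while the new one fills a folio_batch whose references are
dropped in one go by folio_batch_release() - which is why the per-folio
folio_put() calls disappear above.  A minimal sketch of the pattern
(not the actual cifs code; the writeback step itself is elided):

	/* filemap_get_folios_tag() is declared in <linux/pagemap.h>,
	 * struct folio_batch in <linux/pagevec.h>.
	 */
	struct folio_batch fbatch;
	unsigned int i, n;

	folio_batch_init(&fbatch);

	/* Old API: at most one tagged page per call, and the caller
	 * had to put the page reference itself:
	 *
	 *	n = find_get_pages_range_tag(mapping, &index, end,
	 *				     PAGECACHE_TAG_DIRTY, 1,
	 *				     &head_page);
	 */

	/* New API: gather a batch of dirty-tagged folios in one call. */
	n = filemap_get_folios_tag(mapping, &index, end,
				   PAGECACHE_TAG_DIRTY, &fbatch);
	for (i = 0; i < n; i++) {
		struct folio *folio = fbatch.folios[i];

		/* Lock the folio, recheck folio->mapping and the dirty
		 * flag, then write it back (elided).
		 */
	}
	/* Drop the batch's reference on every folio at once. */
	folio_batch_release(&fbatch);
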
--
Cheers,
Stephen Rothwell