Message-ID: <20211031152028.3724121-9-ruansy.fnst@fujitsu.com>
Date: Sun, 31 Oct 2021 23:20:28 +0800
From: Shiyang Ruan <ruansy.fnst@...itsu.com>
To: <linux-kernel@...r.kernel.org>, <linux-xfs@...r.kernel.org>,
<nvdimm@...ts.linux.dev>, <linux-mm@...ck.org>,
<linux-fsdevel@...r.kernel.org>
CC: <djwong@...nel.org>, <dan.j.williams@...el.com>,
<david@...morbit.com>, <hch@...radead.org>, <jane.chu@...cle.com>
Subject: [PATCH v8 8/8] fsdax: add exception for reflinked files
For reflinked files, one dax page may be associated more than once, with
different file mappings and indexes. This currently triggers a warning.
Now that dax-RMAP has been introduced to handle this case, while the old
behavior must be kept for filesystems that do not support rmap, add a
'cow' flag that is set true for CoW (reflink) operations. Normal cases
work as before.

The 'cow' flag will be wired up in the dax-reflink (v10) patchset, so it
is always passed as false here for now.
Signed-off-by: Shiyang Ruan <ruansy.fnst@...itsu.com>
---
fs/dax.c | 39 +++++++++++++++++++++++++++------------
1 file changed, 27 insertions(+), 12 deletions(-)
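
As an illustration of the intended semantics (not part of the patch), the
cow-aware associate/disassociate logic can be modeled in a small userspace
program. This is only a sketch: struct page, page_associate() and
page_disassociate() below are hypothetical stand-ins for the kernel's
page->mapping/page->index handling, and assert() models WARN_ON_ONCE().

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's struct page ownership fields. */
struct page {
	void *mapping;		/* owning address_space, NULL if none */
	unsigned long index;	/* page offset within that mapping */
};

/* Models the per-page body of dax_associate_entry(). */
static void page_associate(struct page *page, void *mapping,
			   unsigned long index, bool cow)
{
	if (cow) {
		/*
		 * Reflink: several files may share this page.  Keep the
		 * first association; further owners are tracked via rmap.
		 */
		if (!page->mapping) {
			page->mapping = mapping;
			page->index = index;
		}
	} else {
		/* Exclusive ownership: a second association is a bug. */
		assert(!page->mapping);		/* models WARN_ON_ONCE() */
		page->mapping = mapping;
		page->index = index;
	}
}

/* Models the per-page body of dax_disassociate_entry(). */
static void page_disassociate(struct page *page, void *mapping, bool cow)
{
	if (cow) {
		/* Only clear the fields if we are the recorded owner. */
		if (page->mapping == mapping) {
			page->mapping = NULL;
			page->index = 0;
		}
	} else {
		assert(!page->mapping || page->mapping == mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

int main(void)
{
	struct page pg = { NULL, 0 };
	int file_a, file_b;	/* stand-ins for two address_spaces */

	/* Two reflinked files touch the same page: no warning fires and
	 * the first association wins. */
	page_associate(&pg, &file_a, 1, true);
	page_associate(&pg, &file_b, 7, true);
	assert(pg.mapping == &file_a && pg.index == 1);

	/* file_b is not the recorded owner, so this is a no-op. */
	page_disassociate(&pg, &file_b, true);
	assert(pg.mapping == &file_a);

	page_disassociate(&pg, &file_a, true);
	assert(pg.mapping == NULL);
	printf("cow association model ok\n");
	return 0;
}

Built and run with e.g. "cc -Wall -o cow_model cow_model.c && ./cow_model";
the file name is arbitrary.
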
diff --git a/fs/dax.c b/fs/dax.c
index 7e46569c6129..bc269852d91a 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -340,7 +340,7 @@ static unsigned long dax_end_pfn(void *entry)
  * offsets.
  */
 static void dax_associate_entry(void *entry, struct address_space *mapping,
-		struct vm_area_struct *vma, unsigned long address)
+		struct vm_area_struct *vma, unsigned long address, bool cow)
 {
 	unsigned long size = dax_entry_size(entry), pfn, index;
 	int i = 0;
@@ -352,14 +352,21 @@ static void dax_associate_entry(void *entry, struct address_space *mapping,
 	for_each_mapped_pfn(entry, pfn) {
 		struct page *page = pfn_to_page(pfn);

-		WARN_ON_ONCE(page->mapping);
-		page->mapping = mapping;
-		page->index = index + i++;
+		if (cow) {
+			if (!page->mapping) {
+				page->mapping = mapping;
+				page->index = index + i++;
+			}
+		} else {
+			WARN_ON_ONCE(page->mapping);
+			page->mapping = mapping;
+			page->index = index + i++;
+		}
 	}
 }

 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
-		bool trunc)
+		bool trunc, bool cow)
 {
 	unsigned long pfn;

@@ -370,9 +377,16 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
 		struct page *page = pfn_to_page(pfn);

 		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
-		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
-		page->mapping = NULL;
-		page->index = 0;
+		if (cow) {
+			if (page->mapping == mapping) {
+				page->mapping = NULL;
+				page->index = 0;
+			}
+		} else {
+			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
+			page->mapping = NULL;
+			page->index = 0;
+		}
 	}
 }

@@ -597,7 +611,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
 			xas_lock_irq(xas);
 		}

-		dax_disassociate_entry(entry, mapping, false);
+		dax_disassociate_entry(entry, mapping, false, false);
 		xas_store(xas, NULL);	/* undo the PMD join */
 		dax_wake_entry(xas, entry, WAKE_ALL);
 		mapping->nrpages -= PG_PMD_NR;
@@ -734,7 +748,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
 	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
 	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
 		goto out;
-	dax_disassociate_entry(entry, mapping, trunc);
+	dax_disassociate_entry(entry, mapping, trunc, false);
 	xas_store(&xas, NULL);
 	mapping->nrpages -= 1UL << dax_entry_order(entry);
 	ret = 1;
@@ -827,8 +841,9 @@ static void *dax_insert_entry(struct xa_state *xas,
 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 		void *old;

-		dax_disassociate_entry(entry, mapping, false);
-		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
+		dax_disassociate_entry(entry, mapping, false, false);
+		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
+				false);
 		/*
 		 * Only swap our new entry into the page cache if the current
 		 * entry is a zero page or an empty entry. If a normal PTE or
--
2.33.0