Message-ID: <20210125225526.1048877-11-ruansy.fnst@cn.fujitsu.com>
Date: Tue, 26 Jan 2021 06:55:26 +0800
From: Shiyang Ruan <ruansy.fnst@...fujitsu.com>
To: <linux-kernel@...r.kernel.org>, <linux-xfs@...r.kernel.org>,
<linux-nvdimm@...ts.01.org>, <linux-mm@...ck.org>
CC: <linux-fsdevel@...r.kernel.org>, <linux-raid@...r.kernel.org>,
<darrick.wong@...cle.com>, <dan.j.williams@...el.com>,
<david@...morbit.com>, <hch@....de>, <song@...nel.org>,
<rgoldwyn@...e.de>, <qi.fuli@...itsu.com>, <y-goto@...itsu.com>
Subject: [PATCH v2 10/10] fs/dax: Remove useless functions

Since the owner tracking is now triggered by the pmem device driver,
these functions are no longer used, so remove them.
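
The reverse mapping introduced earlier in this series inverts the old
design: instead of each page caching a single forward pointer to its
owner (page->mapping/page->index, set by dax_associate_entry() at fault
time), the filesystem resolves the owner(s) of a failed range when the
pmem device reports an error, which also allows one page to have
several owners under reflink. A rough userspace sketch of that
inversion follows; the names (struct owner, report_failure(), ...) are
illustrative only, not the kernel interfaces:

/* Illustrative sketch only -- these names are not the kernel API. */
#include <stdio.h>

/* Stand-in for "which file owns this device page, at which offset". */
struct owner {
	const char *file;	/* stand-in for an address_space */
	unsigned long index;	/* stand-in for linear_page_index() */
	struct owner *next;	/* reflink: one page, several owners */
};

struct range_entry {
	unsigned long pfn;	/* device page frame */
	struct owner *owners;
};

/*
 * Owners are looked up when the device reports a failure, instead of
 * being cached in page->mapping/page->index at fault time.
 */
static void report_failure(struct range_entry *tbl, int n,
			   unsigned long pfn)
{
	int i;
	struct owner *o;

	for (i = 0; i < n; i++) {
		if (tbl[i].pfn != pfn)
			continue;
		for (o = tbl[i].owners; o; o = o->next)
			printf("notify %s @ index %lu\n", o->file, o->index);
	}
}

int main(void)
{
	/* Two reflinked files sharing one pmem page. */
	struct owner b = { "fileB", 0, NULL };
	struct owner a = { "fileA", 4, &b };
	struct range_entry tbl[1] = { { 42, &a } };

	report_failure(tbl, 1, 42);
	return 0;
}

Because nothing has to be kept in sync at fault time,
dax_associate_entry()/dax_disassociate_entry() have no remaining
callers, and the patch below just deletes them.
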
Signed-off-by: Shiyang Ruan <ruansy.fnst@...fujitsu.com>
---
 fs/dax.c | 46 ----------------------------------------------
 1 file changed, 46 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index c64c3a0e76a6..e20a5df03eec 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -323,48 +323,6 @@ static unsigned long dax_end_pfn(void *entry)
 	for (pfn = dax_to_pfn(entry); \
 			pfn < dax_end_pfn(entry); pfn++)
 
-/*
- * TODO: for reflink+dax we need a way to associate a single page with
- * multiple address_space instances at different linear_page_index()
- * offsets.
- */
-static void dax_associate_entry(void *entry, struct address_space *mapping,
-		struct vm_area_struct *vma, unsigned long address)
-{
-	unsigned long size = dax_entry_size(entry), pfn, index;
-	int i = 0;
-
-	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
-		return;
-
-	index = linear_page_index(vma, address & ~(size - 1));
-	for_each_mapped_pfn(entry, pfn) {
-		struct page *page = pfn_to_page(pfn);
-
-		WARN_ON_ONCE(page->mapping);
-		page->mapping = mapping;
-		page->index = index + i++;
-	}
-}
-
-static void dax_disassociate_entry(void *entry, struct address_space *mapping,
-		bool trunc)
-{
-	unsigned long pfn;
-
-	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
-		return;
-
-	for_each_mapped_pfn(entry, pfn) {
-		struct page *page = pfn_to_page(pfn);
-
-		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
-		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
-		page->mapping = NULL;
-		page->index = 0;
-	}
-}
-
 static struct page *dax_busy_page(void *entry)
 {
 	unsigned long pfn;
@@ -543,7 +501,6 @@ static void *grab_mapping_entry(struct xa_state *xas,
 		xas_lock_irq(xas);
 	}
 
-	dax_disassociate_entry(entry, mapping, false);
 	xas_store(xas, NULL);	/* undo the PMD join */
 	dax_wake_entry(xas, entry, true);
 	mapping->nrexceptional--;
@@ -680,7 +637,6 @@ static int __dax_invalidate_entry(struct address_space *mapping,
 	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
 	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
 		goto out;
-	dax_disassociate_entry(entry, mapping, trunc);
 	xas_store(&xas, NULL);
 	mapping->nrexceptional--;
 	ret = 1;
@@ -774,8 +730,6 @@ static void *dax_insert_entry(struct xa_state *xas,
 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 		void *old;
 
-		dax_disassociate_entry(entry, mapping, false);
-		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
 		/*
 		 * Only swap our new entry into the page cache if the current
 		 * entry is a zero page or an empty entry.  If a normal PTE or
--
2.30.0