[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20191108000855.25209-4-t-fukasawa@vx.jp.nec.com>
Date: Fri, 8 Nov 2019 00:08:13 +0000
From: Toshiki Fukasawa <t-fukasawa@...jp.nec.com>
To: "linux-mm@...ck.org" <linux-mm@...ck.org>,
"dan.j.williams@...el.com" <dan.j.williams@...el.com>
CC: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
"mhocko@...nel.org" <mhocko@...nel.org>,
"adobriyan@...il.com" <adobriyan@...il.com>,
"hch@....de" <hch@....de>,
"longman@...hat.com" <longman@...hat.com>,
"sfr@...b.auug.org.au" <sfr@...b.auug.org.au>,
"mst@...hat.com" <mst@...hat.com>, "cai@....pw" <cai@....pw>,
Naoya Horiguchi <n-horiguchi@...jp.nec.com>,
Junichi Nomura <j-nomura@...jp.nec.com>
Subject: [PATCH 3/3] mm: make pfn walker support ZONE_DEVICE
This patch allows the pfn walker to read pages on ZONE_DEVICE.
Note the following:
a) The reserved pages indicated by vmem_altmap->reserve
are uninitialized, so they must be skipped when read.
b) To get vmem_altmap, we need to use get_dev_pagemap(),
but doing it for all pfns is too slow.
This patch solves both of them. Since vmem_altmap can reserve
only the first few pages, we can reduce the number of checks by
counting sequential valid pages.
Signed-off-by: Toshiki Fukasawa <t-fukasawa@...jp.nec.com>
---
fs/proc/page.c | 22 ++++++++++++++++++----
include/linux/memremap.h | 6 ++++++
mm/memremap.c | 29 +++++++++++++++++++++++++++++
3 files changed, 53 insertions(+), 4 deletions(-)
diff --git a/fs/proc/page.c b/fs/proc/page.c
index a49b638..b6241ea 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -33,6 +33,7 @@ static ssize_t kpage_common_read(struct file *file, char __user *buf,
struct page *ppage;
unsigned long src = *ppos;
unsigned long pfn;
+ unsigned long valid_pages = 0;
ssize_t ret = 0;
pfn = src / KPMSIZE;
@@ -41,11 +42,24 @@ static ssize_t kpage_common_read(struct file *file, char __user *buf,
return -EINVAL;
while (count > 0) {
- /*
- * TODO: ZONE_DEVICE support requires to identify
- * memmaps that were actually initialized.
- */
ppage = pfn_to_online_page(pfn);
+ if (!ppage && pfn_zone_device(pfn)) {
+ /*
+ * Skip reading the first few uninitialized pages on
+ * ZONE_DEVICE, and count the valid pages starting at
+ * this pfn to minimize the number of calls to
+ * nr_valid_pages_zone_device().
+ */
+ if (!valid_pages)
+ valid_pages = nr_valid_pages_zone_device(pfn);
+ if (valid_pages) {
+ ppage = pfn_to_page(pfn);
+ valid_pages--;
+ }
+ } else if (valid_pages) {
+ /* ZONE_DEVICE has been hot removed */
+ valid_pages = 0;
+ }
if (put_user(read_fn(ppage), out)) {
ret = -EFAULT;
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 6fefb09..d111ae3 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -123,6 +123,7 @@ static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
}
#ifdef CONFIG_ZONE_DEVICE
+unsigned long nr_valid_pages_zone_device(unsigned long pfn);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
@@ -133,6 +134,11 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
#else
+static inline unsigned long nr_valid_pages_zone_device(unsigned long pfn)
+{
+ return 0;
+}
+
static inline void *devm_memremap_pages(struct device *dev,
struct dev_pagemap *pgmap)
{
diff --git a/mm/memremap.c b/mm/memremap.c
index 8a97fd4..307c73e 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -73,6 +73,35 @@ static unsigned long pfn_next(unsigned long pfn)
return pfn + 1;
}
+/*
+ * This returns the number of sequential valid pages starting from @pfn
+ * on ZONE_DEVICE. The invalid pages reserved by the driver are the
+ * first few pages on ZONE_DEVICE.
+ */
+unsigned long nr_valid_pages_zone_device(unsigned long pfn)
+{
+ struct dev_pagemap *pgmap;
+ struct vmem_altmap *altmap;
+ unsigned long pages;
+
+ pgmap = get_dev_pagemap(pfn, NULL);
+ if (!pgmap)
+ return 0;
+ altmap = pgmap_altmap(pgmap);
+ if (altmap && pfn < (altmap->base_pfn + altmap->reserve))
+ pages = 0;
+ else
+ /*
+ * PHYS_PFN(pgmap->res.end) is the last pfn covered by
+ * this pgmap (not the first pfn of the next mapping).
+ */
+ pages = PHYS_PFN(pgmap->res.end) - pfn + 1;
+
+ put_dev_pagemap(pgmap);
+
+ return pages;
+}
+
#define for_each_device_pfn(pfn, map) \
for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
--
1.8.3.1
Powered by blists - more mailing lists