Message-Id: <20200110190313.17144-9-joao.m.martins@oracle.com>
Date: Fri, 10 Jan 2020 19:03:11 +0000
From: Joao Martins <joao.m.martins@...cle.com>
To: linux-nvdimm@...ts.01.org
Cc: Dan Williams <dan.j.williams@...el.com>,
Vishal Verma <vishal.l.verma@...el.com>,
Dave Jiang <dave.jiang@...el.com>,
Ira Weiny <ira.weiny@...el.com>,
Alex Williamson <alex.williamson@...hat.com>,
Cornelia Huck <cohuck@...hat.com>, kvm@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
"H . Peter Anvin" <hpa@...or.com>, x86@...nel.org,
Liran Alon <liran.alon@...cle.com>,
Nikita Leshenko <nikita.leshchenko@...cle.com>,
Barret Rhoden <brho@...gle.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Matthew Wilcox <willy@...radead.org>,
Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
Subject: [PATCH RFC 08/10] dax/pmem: Add device-dax support for PFN_MODE_NONE
Allowing the dax pmem driver to work without struct pages means
that the user requests that no PFN metadata be created, by writing
PFN_MODE_NONE to the seed device's mode.
When the underlying nd_pfn->mode is PFN_MODE_NONE, most dax_pmem
initialization steps can be skipped because we do not have (or need)
a pfn superblock for the pagemap/struct pages. We only allocate an
opaque zeroed pfn_sb object carrying the requested alignment, and
finally add PFN_SPECIAL to the region pfn_flags.
Signed-off-by: Joao Martins <joao.m.martins@...cle.com>
---
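For illustration, a rough userspace sketch of the mode selection the
changelog refers to: writing "none" to the seed dax device's 'mode'
attribute before the device is enabled. The sysfs path, the "none"
value, the "dax0.0" device name and the set_seed_mode() helper are
assumptions made for this sketch (ndctl would normally drive this),
not something this patch itself adds.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/*
 * Sketch only: request PFN_MODE_NONE (no struct-page metadata) for a
 * seed device by writing "none" to its sysfs 'mode' attribute. The
 * path, attribute value and device name are illustrative assumptions.
 */
static int set_seed_mode(const char *seed, const char *mode)
{
	char path[128];
	int fd, ret = 0;

	/* e.g. /sys/bus/nd/devices/dax0.0/mode */
	snprintf(path, sizeof(path), "/sys/bus/nd/devices/%s/mode", seed);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, mode, strlen(mode)) < 0) {
		perror("write");
		ret = -1;
	}
	close(fd);
	return ret;
}

int main(void)
{
	return set_seed_mode("dax0.0", "none") ? 1 : 0;
}
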
drivers/dax/pmem/core.c | 36 ++++++++++++++++++++++++++++++------
1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index 2bedf8414fff..67f5604a8291 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -17,15 +17,38 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
 	struct nd_namespace_io *nsio;
 	struct dax_region *dax_region;
 	struct dev_pagemap pgmap = { };
+	struct dev_pagemap *devmap = NULL;
 	struct nd_namespace_common *ndns;
 	struct nd_dax *nd_dax = to_nd_dax(dev);
 	struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
 	struct nd_region *nd_region = to_nd_region(dev->parent);
+	unsigned long long pfn_flags = PFN_DEV;
 
 	ndns = nvdimm_namespace_common_probe(dev);
 	if (IS_ERR(ndns))
 		return ERR_CAST(ndns);
 
+	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
+	if (rc != 2)
+		return ERR_PTR(-EINVAL);
+
+	if (is_nd_dax(&nd_pfn->dev) && nd_pfn->mode == PFN_MODE_NONE) {
+		/* allocate a dummy super block */
+		pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb),
+				GFP_KERNEL);
+		if (!pfn_sb)
+			return ERR_PTR(-ENOMEM);
+
+		memset(pfn_sb, 0, sizeof(*pfn_sb));
+		pfn_sb->align = nd_pfn->align;
+		nd_pfn->pfn_sb = pfn_sb;
+		pfn_flags |= PFN_SPECIAL;
+
+		nsio = to_nd_namespace_io(&ndns->dev);
+		memcpy(&res, &nsio->res, sizeof(res));
+		goto no_pfn_sb;
+	}
+
 	/* parse the 'pfn' info block via ->rw_bytes */
 	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
 	if (rc)
@@ -45,20 +68,21 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
 		return ERR_PTR(-EBUSY);
 	}
 
-	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
-	if (rc != 2)
-		return ERR_PTR(-EINVAL);
-
 	/* adjust the dax_region resource to the start of data */
 	memcpy(&res, &pgmap.res, sizeof(res));
 	res.start += offset;
+	devmap = &pgmap;
+	pfn_flags |= PFN_MAP;
+
+no_pfn_sb:
 	dax_region = alloc_dax_region(dev, region_id, &res,
 			nd_region->target_node, le32_to_cpu(pfn_sb->align),
-			PFN_DEV|PFN_MAP);
+			pfn_flags);
 	if (!dax_region)
 		return ERR_PTR(-ENOMEM);
 
-	dev_dax = __devm_create_dev_dax(dax_region, id, &pgmap, subsys);
+
+	dev_dax = __devm_create_dev_dax(dax_region, id, devmap, subsys);
 
 	/* child dev_dax instances now own the lifetime of the dax_region */
 	dax_region_put(dax_region);
--
2.17.1