Message-ID: <159625242681.3040297.14551750051856153463.stgit@dwillia2-desk3.amr.corp.intel.com>
Date: Fri, 31 Jul 2020 20:27:06 -0700
From: Dan Williams <dan.j.williams@...el.com>
To: akpm@...ux-foundation.org
Cc: Joao Martins <joao.m.martins@...cle.com>, peterz@...radead.org,
vishal.l.verma@...el.com, dave.hansen@...ux.intel.com,
ard.biesheuvel@...aro.org, linux-mm@...ck.org,
linux-nvdimm@...ts.01.org, linux-kernel@...r.kernel.org,
linux-acpi@...r.kernel.org, dri-devel@...ts.freedesktop.org
Subject: [PATCH v3 23/23] device-dax: Add a range mapping allocation
attribute
From: Joao Martins <joao.m.martins@...cle.com>
Add a sysfs attribute which denotes a range from the dax region
to be allocated. It's a write-only @mapping sysfs attribute in
the format of '<start>-<end>' to allocate a range. @start and
@end are hexadecimal values, and the @pgoff is implicitly ordered
with respect to previous writes to @mapping, e.g. after a first
write of a 1G range @pgoff spans 0..1G(-4K), and a second write
will continue with @pgoff from 1G onward up to <size>.
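For illustration, a minimal userspace sketch that drives the
attribute (the dax0.1 device name and the addresses are assumptions
for the example; the ranges must fall inside the dax_region and
honor the device alignment):

  /* sketch only: allocate two ranges via the @mapping attribute */
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  static int write_mapping(const char *range)
  {
          /* range is hex '<start>-<end>', @end inclusive */
          int fd = open("/sys/bus/dax/devices/dax0.1/mapping", O_WRONLY);
          ssize_t rc;

          if (fd < 0)
                  return -1;
          rc = write(fd, range, strlen(range));
          close(fd);
          return rc < 0 ? -1 : 0;
  }

  int main(void)
  {
          /* first 1G range: @pgoff implicitly spans 0..1G(-4K) */
          if (write_mapping("100000000-13fffffff"))
                  perror("first range");
          /* second write: @pgoff continues after the first range */
          if (write_mapping("140000000-17fffffff"))
                  perror("second range");
          return 0;
  }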
This range mapping interface is useful for:
1) Applications which want to implement their own allocation logic,
and thus pick the desired ranges from the dax_region.
2) Use cases like VMM fast restart[0] where after kexec we want to
restore the same gpa<->phys mappings (as originally created
before kexec).
[0] https://static.sched.com/hosted_files/kvmforum2019/66/VMM-fast-restart_kvmforum2019.pdf
Signed-off-by: Joao Martins <joao.m.martins@...cle.com>
Link: https://lore.kernel.org/r/20200716172913.19658-5-joao.m.martins@oracle.com
Signed-off-by: Dan Williams <dan.j.williams@...el.com>
---
drivers/dax/bus.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 64 insertions(+)
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 7a9439132573..aa67555ba183 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -1040,6 +1040,67 @@ static ssize_t size_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(size);
+static ssize_t range_parse(const char *opt, size_t len, struct range *range)
+{
+ unsigned long long addr = 0;
+ char *start, *end, *str;
+ ssize_t rc = -EINVAL;
+
+ str = kstrdup(opt, GFP_KERNEL);
+ if (!str)
+ return rc;
+
+ end = str;
+ start = strsep(&end, "-");
+ if (!start || !end)
+ goto err;
+
+ rc = kstrtoull(start, 16, &addr);
+ if (rc)
+ goto err;
+ range->start = addr;
+
+ rc = kstrtoull(end, 16, &addr);
+ if (rc)
+ goto err;
+ range->end = addr;
+
+err:
+ kfree(str);
+ return rc;
+}
+
+static ssize_t mapping_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+ size_t to_alloc;
+ struct range r;
+ ssize_t rc;
+
+ rc = range_parse(buf, len, &r);
+ if (rc)
+ return rc;
+
+ rc = -ENXIO;
+ device_lock(dax_region->dev);
+ if (!dax_region->dev->driver) {
+ device_unlock(dax_region->dev);
+ return rc;
+ }
+ device_lock(dev);
+
+ to_alloc = range_len(&r);
+ if (alloc_is_aligned(dev_dax, to_alloc))
+ rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
+ device_unlock(dev);
+ device_unlock(dax_region->dev);
+
+ return rc == 0 ? len : rc;
+}
+static DEVICE_ATTR_WO(mapping);
+
static ssize_t align_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1181,6 +1242,8 @@ static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
return 0;
if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
return 0;
+ if (a == &dev_attr_mapping.attr && is_static(dax_region))
+ return 0;
if ((a == &dev_attr_align.attr ||
a == &dev_attr_size.attr) && is_static(dax_region))
return 0444;
@@ -1190,6 +1253,7 @@ static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
static struct attribute *dev_dax_attributes[] = {
&dev_attr_modalias.attr,
&dev_attr_size.attr,
+ &dev_attr_mapping.attr,
&dev_attr_target_node.attr,
&dev_attr_align.attr,
&dev_attr_resource.attr,