Message-ID: <7e7dfcbd-22a0-c1c3-de1b-7a99edb7f22a@intel.com>
Date: Wed, 30 Aug 2023 15:46:00 -0700
From: Dave Jiang <dave.jiang@...el.com>
To: <ira.weiny@...el.com>, Dan Williams <dan.j.williams@...el.com>
CC: Navneet Singh <navneet.singh@...el.com>,
Fan Ni <fan.ni@...sung.com>,
Jonathan Cameron <Jonathan.Cameron@...wei.com>,
Davidlohr Bueso <dave@...olabs.net>,
Alison Schofield <alison.schofield@...el.com>,
"Vishal Verma" <vishal.l.verma@...el.com>,
<linux-cxl@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH RFC v2 07/18] cxl/mem: Expose device dynamic capacity
configuration
On 8/28/23 22:20, ira.weiny@...el.com wrote:
> From: Navneet Singh <navneet.singh@...el.com>
>
> To properly configure CXL regions on Dynamic Capacity Devices (DCD),
> user space will need to know the details of the DC Regions available on
> a device.
>
> Expose driver dynamic capacity configuration through sysfs
> attributes.
>
> Co-developed-by: Navneet Singh <navneet.singh@...el.com>
> Signed-off-by: Navneet Singh <navneet.singh@...el.com>
> Signed-off-by: Ira Weiny <ira.weiny@...el.com>
>
> ---
> Changes for v2:
> [iweiny: Rebased on latest master/type2 work]
> [iweiny: add documentation for sysfs entries]
> [iweiny: s/dc_regions_count/region_count/]
> [iweiny: s/dcY_size/regionY_size/]
> [alison: change size format to %#llx]
> [iweiny: change count format to %d]
> [iweiny: Formatting updates]
> [iweiny: Fix crash when device is not a mem device: found with cxl-test]
> ---
> Documentation/ABI/testing/sysfs-bus-cxl | 17 ++++++++
> drivers/cxl/core/memdev.c | 77 +++++++++++++++++++++++++++++++++
> 2 files changed, 94 insertions(+)
>
> diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl
> index 2268ffcdb604..aa65dc5b4e13 100644
> --- a/Documentation/ABI/testing/sysfs-bus-cxl
> +++ b/Documentation/ABI/testing/sysfs-bus-cxl
> @@ -37,6 +37,23 @@ Description:
> identically named field in the Identify Memory Device Output
> Payload in the CXL-2.0 specification.
>
> +What: /sys/bus/cxl/devices/memX/dc/region_count
> +Date: July, 2023
> +KernelVersion: v6.6
> +Contact: linux-cxl@...r.kernel.org
> +Description:
> + (RO) Number of Dynamic Capacity (DC) regions supported on the
> + device. May be 0 if the device does not support Dynamic
> + Capacity.
> +
> +What: /sys/bus/cxl/devices/memX/dc/regionY_size
> +Date: July, 2023
> +KernelVersion: v6.6
> +Contact: linux-cxl@...r.kernel.org
> +Description:
> + (RO) Size of the Dynamic Capacity (DC) region Y. Only
> + available on devices which support DC and only for those
> + region indexes supported by the device.
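For anyone following along, a read of these should look roughly like
this (mem0 and the sizes are made up; the count is %d and the sizes
are %#llx per the show routines below):

  # cat /sys/bus/cxl/devices/mem0/dc/region_count
  2
  # cat /sys/bus/cxl/devices/mem0/dc/region0_size
  0x40000000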
>
> What: /sys/bus/cxl/devices/memX/serial
> Date: January, 2022
> diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
> index 492486707fd0..397262e0ebd2 100644
> --- a/drivers/cxl/core/memdev.c
> +++ b/drivers/cxl/core/memdev.c
> @@ -101,6 +101,20 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
> static struct device_attribute dev_attr_pmem_size =
> __ATTR(size, 0444, pmem_size_show, NULL);
>
> +static ssize_t region_count_show(struct device *dev, struct device_attribute *attr,
> + char *buf)
> +{
> + struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
> + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
> + int len = 0;
> +
> + len = sysfs_emit(buf, "%d\n", mds->nr_dc_region);
> + return len;
> +}
> +
> +struct device_attribute dev_attr_region_count =
> + __ATTR(region_count, 0444, region_count_show, NULL);
> +
> static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
> char *buf)
> {
> @@ -454,6 +468,62 @@ static struct attribute *cxl_memdev_security_attributes[] = {
> NULL,
> };
>
> +static ssize_t show_size_regionN(struct cxl_memdev *cxlmd, char *buf, int pos)
> +{
> + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
> +
> + return sysfs_emit(buf, "%#llx\n", mds->dc_region[pos].decode_len);
> +}
> +
> +#define REGION_SIZE_ATTR_RO(n) \
> +static ssize_t region##n##_size_show(struct device *dev, \
> + struct device_attribute *attr, \
> + char *buf) \
> +{ \
> + return show_size_regionN(to_cxl_memdev(dev), buf, (n)); \
> +} \
> +static DEVICE_ATTR_RO(region##n##_size)
> +REGION_SIZE_ATTR_RO(0);
> +REGION_SIZE_ATTR_RO(1);
> +REGION_SIZE_ATTR_RO(2);
> +REGION_SIZE_ATTR_RO(3);
> +REGION_SIZE_ATTR_RO(4);
> +REGION_SIZE_ATTR_RO(5);
> +REGION_SIZE_ATTR_RO(6);
> +REGION_SIZE_ATTR_RO(7);
> +
> +static struct attribute *cxl_memdev_dc_attributes[] = {
> + &dev_attr_region0_size.attr,
> + &dev_attr_region1_size.attr,
> + &dev_attr_region2_size.attr,
> + &dev_attr_region3_size.attr,
> + &dev_attr_region4_size.attr,
> + &dev_attr_region5_size.attr,
> + &dev_attr_region6_size.attr,
> + &dev_attr_region7_size.attr,
> + &dev_attr_region_count.attr,
> + NULL,
> +};
> +
> +static umode_t cxl_dc_visible(struct kobject *kobj, struct attribute *a, int n)
> +{
> + struct device *dev = kobj_to_dev(kobj);
> + struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
> + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
> +
> + /* Not a memory device */
> + if (!mds)
> + return 0;
> +
> + if (a == &dev_attr_region_count.attr)
> + return a->mode;
> +
> + if (n < mds->nr_dc_region)
> + return a->mode;
I would add a comment on what 'n' is being checked against
nr_dc_region to make it obvious; see the sketch below.
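Something along these lines, maybe (the comment wording is only a
suggestion, based on the ordering of cxl_memdev_dc_attributes[]
above):

	/*
	 * 'n' indexes cxl_memdev_dc_attributes[], which lists
	 * region0_size..region7_size first, so only the regionY_size
	 * entries for regions the device actually reports stay
	 * visible.
	 */
	if (n < mds->nr_dc_region)
		return a->mode;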
DJ
> +
> + return 0;
> +}
> +
> static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
> int n)
> {
> @@ -482,11 +552,18 @@ static struct attribute_group cxl_memdev_security_attribute_group = {
> .attrs = cxl_memdev_security_attributes,
> };
>
> +static struct attribute_group cxl_memdev_dc_attribute_group = {
> + .name = "dc",
> + .attrs = cxl_memdev_dc_attributes,
> + .is_visible = cxl_dc_visible,
> +};
> +
> static const struct attribute_group *cxl_memdev_attribute_groups[] = {
> &cxl_memdev_attribute_group,
> &cxl_memdev_ram_attribute_group,
> &cxl_memdev_pmem_attribute_group,
> &cxl_memdev_security_attribute_group,
> + &cxl_memdev_dc_attribute_group,
> NULL,
> };
>
>