Message-ID: <1983025922.01750943882252.JavaMail.epsvc@epcpadp2new>
Date: Thu, 26 Jun 2025 15:24:07 +0530
From: Neeraj Kumar <s.neeraj@...sung.com>
To: Jonathan Cameron <Jonathan.Cameron@...wei.com>
Cc: dan.j.williams@...el.com, dave@...olabs.net, dave.jiang@...el.com,
alison.schofield@...el.com, vishal.l.verma@...el.com, ira.weiny@...el.com,
a.manzanares@...sung.com, nifan.cxl@...il.com, anisa.su@...sung.com,
vishak.g@...sung.com, krish.reddy@...sung.com, arun.george@...sung.com,
alok.rathore@...sung.com, neeraj.kernel@...il.com,
linux-kernel@...r.kernel.org, linux-cxl@...r.kernel.org,
nvdimm@...ts.linux.dev, gost.dev@...sung.com, cpgs@...sung.com
Subject: Re: [RFC PATCH 05/20] nvdimm/region_label: Add region label
updation routine
On 23/06/25 10:05AM, Jonathan Cameron wrote:
>On Tue, 17 Jun 2025 18:09:29 +0530
>Neeraj Kumar <s.neeraj@...sung.com> wrote:
>
>Add region label update routine
>
>
>> Added __pmem_region_label_update(), a routine to update the region label.
>>
>> Signed-off-by: Neeraj Kumar <s.neeraj@...sung.com>
>A few trivial comments inline.
>
>Jonathan
>
>> ---
>> drivers/nvdimm/label.c | 142 ++++++++++++++++++++++++++++++++
>> drivers/nvdimm/label.h | 2 +
>> drivers/nvdimm/namespace_devs.c | 12 +++
>> drivers/nvdimm/nd.h | 20 +++++
>> include/linux/libnvdimm.h | 8 ++
>> 5 files changed, 184 insertions(+)
>>
>> diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
>> index d5cfaa99f976..7f33d14ce0ef 100644
>> --- a/drivers/nvdimm/label.c
>> +++ b/drivers/nvdimm/label.c
>> @@ -381,6 +381,16 @@ static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
>> nsl_set_checksum(ndd, nd_label, sum);
>> }
>>
>> +static void rgl_calculate_checksum(struct nvdimm_drvdata *ndd,
>> + struct cxl_region_label *rg_label)
>> +{
>> + u64 sum;
>> +
>> + rgl_set_checksum(rg_label, 0);
>> + sum = nd_fletcher64(rg_label, sizeof_namespace_label(ndd), 1);
>> + rgl_set_checksum(rg_label, sum);
>> +}
>> +
>> static bool slot_valid(struct nvdimm_drvdata *ndd,
>> struct nd_lsa_label *nd_label, u32 slot)
>> {
>> @@ -1117,6 +1127,138 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
>> return 0;
>> }
>>
>> +static int __pmem_region_label_update(struct nd_region *nd_region,
>> + struct nd_mapping *nd_mapping, int pos, unsigned long flags)
>> +{
>> + struct nd_interleave_set *nd_set = nd_region->nd_set;
>> + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
>> + struct nd_lsa_label *nd_label;
>> + struct cxl_region_label *rg_label;
>> + struct nd_namespace_index *nsindex;
>> + struct nd_label_ent *label_ent;
>> + unsigned long *free;
>> + u32 nslot, slot;
>> + size_t offset;
>> + int rc;
>> + uuid_t tmp;
>> +
>> + if (!preamble_next(ndd, &nsindex, &free, &nslot))
>> + return -ENXIO;
>> +
>> + /* allocate and write the label to the staging (next) index */
>> + slot = nd_label_alloc_slot(ndd);
>> + if (slot == UINT_MAX)
>> + return -ENXIO;
>> + dev_dbg(ndd->dev, "allocated: %d\n", slot);
>> +
>> + nd_label = to_label(ndd, slot);
>> +
>> + memset(nd_label, 0, sizeof_namespace_label(ndd));
>> + rg_label = &nd_label->rg_label;
>> +
>> + /* Set Region Label Format identification UUID */
>> + uuid_parse(CXL_REGION_UUID, &tmp);
>> + export_uuid(nd_label->rg_label.type, &tmp);
>> +
>> + /* Set Current Region Label UUID */
>> + export_uuid(nd_label->rg_label.uuid, &nd_set->uuid);
>> +
>> + rg_label->flags = __cpu_to_le32(flags);
>> + rg_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
>> + rg_label->position = __cpu_to_le16(pos);
>> + rg_label->dpa = __cpu_to_le64(nd_mapping->start);
>> + rg_label->rawsize = __cpu_to_le64(nd_mapping->size);
>> + rg_label->hpa = __cpu_to_le64(nd_set->res->start);
>> + rg_label->slot = __cpu_to_le32(slot);
>> + rg_label->ig = __cpu_to_le32(nd_set->interleave_granularity);
>> + rg_label->align = __cpu_to_le16(0);
>> +
>> + /* Update fletcher64 Checksum */
>> + rgl_calculate_checksum(ndd, rg_label);
>> +
>> + /* update label */
>> + offset = nd_label_offset(ndd, nd_label);
>> + rc = nvdimm_set_config_data(ndd, offset, nd_label,
>> + sizeof_namespace_label(ndd));
>> + if (rc < 0) {
>> + nd_label_free_slot(ndd, slot);
>> + return rc;
>> + }
>> +
>> + /* Garbage collect the previous label */
>> + mutex_lock(&nd_mapping->lock);
>
>Perhaps use
>
> guard(mutex)(&nd_mapping->lock);
>
>so you can directly return in the error path below and simplify the flow
>a tiny bit.
>
Sure, let me look at guard(mutex). I will update it in V1.
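If I am reading the cleanup.h helpers right, that would just be (untested):

	/* Garbage collect the previous label */
	guard(mutex)(&nd_mapping->lock);	/* unlocked automatically on return */

with the mutex_unlock() at the end of the function dropped, so the error
paths below can return directly.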
>> + list_for_each_entry(label_ent, &nd_mapping->labels, list) {
>> + if (!label_ent->label)
>> + continue;
>> + if (rgl_uuid_equal(&label_ent->label->rg_label, &nd_set->uuid))
>> + reap_victim(nd_mapping, label_ent);
>> + }
>> +
>> + /* update index */
>> + rc = nd_label_write_index(ndd, ndd->ns_next,
>> + nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
>> +
>With guard this can be
> if (rc)
>		return rc;
>
> list_for_each...
>
Sure, I will update it in V1
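Putting both suggestions together, the tail of __pmem_region_label_update()
after the garbage-collect loop could then look something like this (untested
sketch of what I have in mind for V1, not a tested change):

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		return rc;

	/* hand the new label over to the tracking list */
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		if (!label_ent->label) {
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
	dev_WARN_ONCE(&nd_region->dev, nd_label,
		      "failed to track label: %d\n",
		      to_slot(ndd, nd_label));
	if (nd_label)
		return -ENXIO;

	return 0;
}

The guard() then takes care of the unlock on every return path.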
>> + if (rc == 0) {
>> + list_for_each_entry(label_ent, &nd_mapping->labels, list)
>> + if (!label_ent->label) {
>> + label_ent->label = nd_label;
>> + nd_label = NULL;
>> + break;
>> + }
>> + dev_WARN_ONCE(&nd_region->dev, nd_label,
>> + "failed to track label: %d\n",
>> + to_slot(ndd, nd_label));
>> + if (nd_label)
>> + rc = -ENXIO;
>> + }
>> + mutex_unlock(&nd_mapping->lock);
>> +
>> + return rc;
>> +}
>