Message-ID: <CACygaLDfaV24aWUWFcsHCQaz-P3eSB5Ls=jP8Z0fJ_w=3Vo-cA@mail.gmail.com>
Date:	Wed, 27 Jan 2016 22:58:33 +0800
From:	Wenwei Tao <ww.tao0320@...il.com>
To:	Matias Bjørling <mb@...htnvm.io>
Cc:	linux-kernel@...r.kernel.org, linux-block@...r.kernel.org
Subject: Re: [PATCH v2 1/2] lightnvm: specify target's logical address area

static int nvm_core_init(struct nvm_dev *dev)
{
        ...
        dev->sec_per_pg = grp->fpg_sz / grp->csecs;
        ...
        /* calculated values */
        dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
        dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
        dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
        ...
}

static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
        ...
        for (i = 0; i < rrpc->nr_luns; i++) {
                ...
                rrpc->nr_pages += dev->sec_per_lun;
                ...
        }
        ...
}

I would prefer rrpc->nr_pages to be the number of pages allocated to
rrpc, but the code above makes me confused about the distinction
between sectors and pages.
Hope I haven't misunderstood the code.
PS: I'm not an expert on flash; if the confusion is caused by my lack
of knowledge about flash, please let me know.
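
To make the sector/page mixup concrete, here is the arithmetic with
made-up geometry (illustrative numbers only, not from a real device):

        /* hypothetical geometry */
        fpg_sz = 16384, csecs = 4096  ->  sec_per_pg  = 4
        nr_planes = 4                 ->  sec_per_pl  = 4 * 4       = 16
        pgs_per_blk = 256             ->  sec_per_blk = 16 * 256    = 4096
        blks_per_lun = 1024           ->  sec_per_lun = 4096 * 1024 = 4194304

        /* with a single lun, rrpc_luns_init then ends up with */
        rrpc->nr_pages = 4194304;     /* a sector count, not a page count */
        /* the page count would be sec_per_lun / sec_per_pg = 1048576 */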

2016-01-27 21:26 GMT+08:00 Matias Bjørling <mb@...htnvm.io>:
> On 01/27/2016 01:47 PM, Wenwei Tao wrote:
>> 2016-01-27 17:36 GMT+08:00 Matias Bjørling <mb@...htnvm.io>:
>>> On 01/27/2016 07:06 AM, Wenwei Tao wrote:
>>>> Thanks.
>>>>
>>>> 2016-01-27 13:52 GMT+08:00 Matias Bjørling <mb@...htnvm.io>:
>>>>> On 01/27/2016 03:21 AM, Wenwei Tao wrote:
>>>>>>
>>>>>> Yes, it's a spelling mistake; I will correct it in the next version.
>>>>>
>>>>>
>>>>> I can fix it in the version I apply. No problem.
>>>
>>> Hi Wenwei,
>>>
>>> I've changed it to this and cleaned up the variables a bit. Is that OK with you?
>>>
>>> Thanks
>>>
>>> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
>>> index 33224cb..27a59e8 100644
>>> --- a/drivers/lightnvm/core.c
>>> +++ b/drivers/lightnvm/core.c
>>> @@ -470,6 +470,7 @@ static int nvm_core_init(struct nvm_dev *dev)
>>>         dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
>>>         INIT_LIST_HEAD(&dev->online_targets);
>>>         mutex_init(&dev->mlock);
>>> +       spin_lock_init(&dev->lock);
>>>
>>>         return 0;
>>>  }
>>> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
>>> index 7fb725b..6e2685d 100644
>>> --- a/drivers/lightnvm/gennvm.c
>>> +++ b/drivers/lightnvm/gennvm.c
>>> @@ -20,6 +20,63 @@
>>>
>>>  #include "gennvm.h"
>>>
>>> +static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
>>> +                                                       sector_t size)
>>> +{
>>> +       struct gen_nvm *gn = dev->mp;
>>> +       struct gennvm_area *area, *prev;
>>> +       int page_size = dev->sec_size * dev->sec_per_pg;
>>> +       sector_t begin = 0;
>>> +       sector_t max_sectors = (page_size * dev->total_pages) >> 9;
>>> +
>>> +       if (size > max_sectors)
>>> +               return -EINVAL;
>>> +
>>> +       area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
>>> +       if (!area)
>>> +               return -ENOMEM;
>>> +
>>> +       spin_lock(&dev->lock);
>>> +       list_for_each_entry(prev, &gn->area_list, list) {
>>> +               if (begin + size > prev->begin) {
>>> +                       begin = prev->end;
>>> +                       continue;
>>> +               }
>>> +               break;
>>> +       }
>>> +
>>> +       if ((begin + size) > max_sectors) {
>>> +               spin_unlock(&dev->lock);
>>> +               kfree(area);
>>> +               return -EINVAL;
>>> +       }
>>> +
>>> +       area->begin = *begin_sect = begin;
>>> +       area->end = begin + size;
>>> +       list_add(&area->list, &prev->list);
>>
>> I think I have made a mistake here. Inserting the new area after prev
>> will not keep the list sorted in increasing order of area->begin. And
>> prev is not trustworthy outside the loop: it may point to
>> list_entry((head)->next, typeof(*pos), member). Below is the changed
>> code:
>>
>> +static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
>> +                                                       sector_t size)
>> +{
>> +       struct gen_nvm *gn = dev->mp;
>> +       struct gennvm_area *area, *prev, *next;
>> +       sector_t begin = 0;
>> +       int page_size = dev->sec_size * dev->sec_per_pg;
>> +       sector_t max_sectors = (page_size * dev->total_pages) >> 9;
>> +
>> +       if (size > max_sectors)
>> +               return -EINVAL;
>> +       area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
>> +       if (!area)
>> +               return -ENOMEM;
>> +
>> +       prev = NULL;
>> +
>> +       spin_lock(&dev->lock);
>> +       list_for_each_entry(next, &gn->area_list, list) {
>> +               if (begin + size > next->begin) {
>> +                       begin = next->end;
>> +                       prev = next;
>> +                       continue;
>> +               }
>> +               break;
>> +       }
>> +
>> +       if ((begin + size) > max_sectors) {
>> +               spin_unlock(&dev->lock);
>> +               kfree(area);
>> +               return -EINVAL;
>> +       }
>> +
>> +       area->begin = *begin_sect = begin;
>> +       area->end = begin + size;
>> +       if (prev)
>> +               list_add(&area->list, &prev->list);
>> +       else
>> +               list_add(&area->list, &gn->area_list);
>> +       spin_unlock(&dev->lock);
>> +       return 0;
>> +}
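>>
>> For reference, this is roughly how I intend a target to use the new
>> hooks (a sketch only; the size argument below is a stand-in name, not
>> from the final patch):
>>
>>         int ret;
>>         sector_t soffset;
>>
>>         /* reserve the target's 512B-sector range once, at init time */
>>         ret = dev->mt->get_area(dev, &soffset, size_in_512b_sectors);
>>         if (ret)
>>                 return ret;
>>         rrpc->soffset = soffset;
>>         ...
>>         /* and give the range back again on exit */
>>         dev->mt->put_area(dev, rrpc->soffset);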
>>
>>
>>> +       spin_unlock(&dev->lock);
>>> +
>>> +       return 0;
>>> +}
>>> +
>>> +static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
>>> +{
>>> +       struct gen_nvm *gn = dev->mp;
>>> +       struct gennvm_area *area;
>>> +
>>> +       spin_lock(&dev->lock);
>>> +       list_for_each_entry(area, &gn->area_list, list) {
>>> +               if (area->begin != begin)
>>> +                       continue;
>>> +
>>> +               list_del(&area->list);
>>> +               spin_unlock(&dev->lock);
>>> +               kfree(area);
>>> +               return;
>>> +       }
>>> +       spin_unlock(&dev->lock);
>>> +}
>>> +
>>>  static void gennvm_blocks_free(struct nvm_dev *dev)
>>>  {
>>>         struct gen_nvm *gn = dev->mp;
>>> @@ -230,6 +287,7 @@ static int gennvm_register(struct nvm_dev *dev)
>>>
>>>         gn->dev = dev;
>>>         gn->nr_luns = dev->nr_luns;
>>> +       INIT_LIST_HEAD(&gn->area_list);
>>>         dev->mp = gn;
>>>
>>>         ret = gennvm_luns_init(dev, gn);
>>> @@ -466,6 +524,10 @@ static struct nvmm_type gennvm = {
>>>
>>>         .get_lun                = gennvm_get_lun,
>>>         .lun_info_print         = gennvm_lun_info_print,
>>> +
>>> +       .get_area               = gennvm_get_area,
>>> +       .put_area               = gennvm_put_area,
>>> +
>>>  };
>>>
>>>  static int __init gennvm_module_init(void)
>>> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
>>> index 9c24b5b..04d7c23 100644
>>> --- a/drivers/lightnvm/gennvm.h
>>> +++ b/drivers/lightnvm/gennvm.h
>>> @@ -39,8 +39,14 @@ struct gen_nvm {
>>>
>>>         int nr_luns;
>>>         struct gen_lun *luns;
>>> +       struct list_head area_list;
>>>  };
>>>
>>> +struct gennvm_area {
>>> +       struct list_head list;
>>> +       sector_t begin;
>>> +       sector_t end;   /* end is excluded */
>>> +};
>>>  #define gennvm_for_each_lun(bm, lun, i) \
>>>                 for ((i) = 0, lun = &(bm)->luns[0]; \
>>>                         (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
>>> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
>>> index e2710da..20afe1c 100644
>>> --- a/drivers/lightnvm/rrpc.c
>>> +++ b/drivers/lightnvm/rrpc.c
>>> @@ -1039,7 +1039,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
>>>  {
>>>         struct nvm_dev *dev = rrpc->dev;
>>>         sector_t i;
>>> -       int ret;
>>> +       u64 slba;
>>> +       int ret, page_size;
>>> +
>>> +       page_size = dev->sec_per_pg * dev->sec_size;
>>> +       slba = rrpc->soffset >> (ilog2(page_size) - 9);
>>>
>>>         rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
>>>         if (!rrpc->trans_map)
>>> @@ -1062,8 +1066,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
>>>                 return 0;
>>>
>>>         /* Bring up the mapping table from device */
>>> -       ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
>>> -                                                       rrpc_l2p_update, rrpc);
>>> +       ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_pages, rrpc_l2p_update,
>>> +                                                                       rrpc);
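>>
>> To make the slba conversion above concrete (illustrative numbers
>> only): with sec_size = 4096 and sec_per_pg = 1, page_size = 4096 and
>> ilog2(page_size) - 9 = 3, so slba = rrpc->soffset >> 3, i.e. the
>> area's start in 512B sectors divided by 8 gives its first 4K page.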
>>
>> In rrpc_luns_init, rrpc->nr_pages seems to be the target's sector
>> count, while previously dev->total_pages was used; dev->total_pages is
>> the nvm device's page count, so I am a little confused here.
>>
>
> dev->total_pages is all pages on the media. rrpc->nr_pages is the
> number of pages allocated to rrpc... which should be dev->total_pages
> here, as we want to retrieve the full l2p table and then reconstruct
> the state. Thanks. Feel free to send an updated patch with the changes.
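>
> Concretely, I would expect the lookup to keep covering the whole
> table, i.e. something like (sketch):
>
>         ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
>                                                 rrpc_l2p_update, rrpc);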
>
