lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CACygaLA3Qo3veaOu5Dny37a84hVpBMm+Ff=O01ntpFK_2rF9wA@mail.gmail.com>
Date:	Wed, 27 Jan 2016 20:47:22 +0800
From:	Wenwei Tao <ww.tao0320@...il.com>
To:	Matias Bjørling <mb@...htnvm.io>
Cc:	linux-kernel@...r.kernel.org, linux-block@...r.kernel.org
Subject: Re: [PATCH v2 1/2] lightnvm: specify target's logical address area

2016-01-27 17:36 GMT+08:00 Matias Bjørling <mb@...htnvm.io>:
> On 01/27/2016 07:06 AM, Wenwei Tao wrote:
>> Thanks.
>>
>> 2016-01-27 13:52 GMT+08:00 Matias Bjørling <mb@...htnvm.io>:
>>> On 01/27/2016 03:21 AM, Wenwei Tao wrote:
>>>>
>>>> Yes, It's a spelling mistake, will correct it in next version.
>>>
>>>
>>> I can fix it in the version I apply. No problem.
>
> Hi Wenwei,
>
> I've changed it to this. Clean up the variables a bit. Is that ok with you?
>
> Thanks
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index 33224cb..27a59e8 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -470,6 +470,7 @@ static int nvm_core_init(struct nvm_dev *dev)
>         dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
>         INIT_LIST_HEAD(&dev->online_targets);
>         mutex_init(&dev->mlock);
> +       spin_lock_init(&dev->lock);
>
>         return 0;
>  }
> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
> index 7fb725b..6e2685d 100644
> --- a/drivers/lightnvm/gennvm.c
> +++ b/drivers/lightnvm/gennvm.c
> @@ -20,6 +20,63 @@
>
>  #include "gennvm.h"
>
> +static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
> +                                                       sector_t size)
> +{
> +       struct gen_nvm *gn = dev->mp;
> +       struct gennvm_area *area, *prev;
> +       int page_size = dev->sec_size * dev->sec_per_pg;
> +       sector_t begin = 0;
> +       sector_t max_sectors = (page_size * dev->total_pages) >> 9;
> +
> +       if (size > max_sectors)
> +               return -EINVAL;
> +
> +       area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
> +       if (!area)
> +               return -ENOMEM;
> +
> +       spin_lock(&dev->lock);
> +       list_for_each_entry(prev, &gn->area_list, list) {
> +               if (begin + size > prev->begin) {
> +                       begin = prev->end;
> +                       continue;
> +               }
> +               break;
> +       }
> +
> +       if ((begin + size) > max_sectors) {
> +               spin_unlock(&dev->lock);
> +               kfree(area);
> +               return -EINVAL;
> +       }
> +
> +       area->begin = *begin_sect = begin;
> +       area->end = begin + size;
> +       list_add(&area->list, &prev->list);

I think I have made a mistake here. Inserting the new area after prev
will not keep the list sorted in increasing order of area->begin. Also,
prev is not trustworthy after the loop exits: if the loop runs to
completion without breaking, it points to list_entry((head)->next,
typeof(*pos), member), which is not a valid entry. Below is the changed code:

+static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
+                                                       sector_t size)
+{
+       struct gen_nvm *gn = dev->mp;
+       struct gennvm_area *area, *prev, *next;
+       sector_t begin = 0;
+       int page_size = dev->sec_size * dev->sec_per_pg;
+       sector_t max_sectors = (page_size * dev->total_pages) >> 9;
+
+       if (size > max_sectors)
+               return -EINVAL;
+       area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+       if (!area)
+               return -ENOMEM;
+
+       prev = NULL;
+
+       spin_lock(&dev->lock);
+       list_for_each_entry(next, &gn->area_list, list) {
+               if (begin + size > next->begin) {
+                       begin = next->end;
+                       prev = next;
+                       continue;
+               }
+               break;
+       }
+
+       if ((begin + size) > max_sectors) {
+               spin_unlock(&dev->lock);
+               kfree(area);
+               return -EINVAL;
+       }
+
+       area->begin = *begin_sect =  begin;
+       area->end = begin + size;
+       if (prev)
+               list_add(&area->list, &prev->list);
+       else
+               list_add(&area->list, &gn->area_list);
+       spin_unlock(&dev->lock);
+       return 0;
+}


> +       spin_unlock(&dev->lock);
> +
> +       return 0;
> +}
> +
> +static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
> +{
> +       struct gen_nvm *gn = dev->mp;
> +       struct gennvm_area *area;
> +
> +       spin_lock(&dev->lock);
> +       list_for_each_entry(area, &gn->area_list, list) {
> +               if (area->begin != begin)
> +                       continue;
> +
> +               list_del(&area->list);
> +               spin_unlock(&dev->lock);
> +               kfree(area);
> +               return;
> +       }
> +       spin_unlock(&dev->lock);
> +}
> +
>  static void gennvm_blocks_free(struct nvm_dev *dev)
>  {
>         struct gen_nvm *gn = dev->mp;
> @@ -230,6 +287,7 @@ static int gennvm_register(struct nvm_dev *dev)
>
>         gn->dev = dev;
>         gn->nr_luns = dev->nr_luns;
> +       INIT_LIST_HEAD(&gn->area_list);
>         dev->mp = gn;
>
>         ret = gennvm_luns_init(dev, gn);
> @@ -466,6 +524,10 @@ static struct nvmm_type gennvm = {
>
>         .get_lun                = gennvm_get_lun,
>         .lun_info_print         = gennvm_lun_info_print,
> +
> +       .get_area               = gennvm_get_area,
> +       .put_area               = gennvm_put_area,
> +
>  };
>
>  static int __init gennvm_module_init(void)
> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
> index 9c24b5b..04d7c23 100644
> --- a/drivers/lightnvm/gennvm.h
> +++ b/drivers/lightnvm/gennvm.h
> @@ -39,8 +39,14 @@ struct gen_nvm {
>
>         int nr_luns;
>         struct gen_lun *luns;
> +       struct list_head area_list;
>  };
>
> +struct gennvm_area {
> +       struct list_head list;
> +       sector_t begin;
> +       sector_t end;   /* end is excluded */
> +};
>  #define gennvm_for_each_lun(bm, lun, i) \
>                 for ((i) = 0, lun = &(bm)->luns[0]; \
>                         (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index e2710da..20afe1c 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -1039,7 +1039,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
>  {
>         struct nvm_dev *dev = rrpc->dev;
>         sector_t i;
> -       int ret;
> +       u64 slba;
> +       int ret, page_size;
> +
> +       page_size = dev->sec_per_pg * dev->sec_size;
> +       slba = rrpc->soffset >> (ilog2(page_size) - 9);
>
>         rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
>         if (!rrpc->trans_map)
> @@ -1062,8 +1066,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
>                 return 0;
>
>         /* Bring up the mapping table from device */
> -       ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
> -                                                       rrpc_l2p_update, rrpc);
> +       ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_pages, rrpc_l2p_update,
> +                                                                       rrpc);

In rrpc_luns_init, rrpc->nr_pages seems to be the target's sector
count, whereas previously dev->total_pages was used; dev->total_pages is
the nvm device's page count, so I am a little confused here.

>         if (ret) {
>                 pr_err("nvm: rrpc: could not read L2P table.\n");
>                 return -EINVAL;
> @@ -1072,7 +1076,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
>         return 0;
>  }
>
> -
>  /* Minimum pages needed within a lun */
>  #define PAGE_POOL_SIZE 16
>  #define ADDR_POOL_SIZE 64
> @@ -1186,12 +1189,32 @@ err:
>         return -ENOMEM;
>  }
>
> +/* returns 0 on success and stores the beginning address in *begin */
> +static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
> +{
> +       struct nvm_dev *dev = rrpc->dev;
> +       struct nvmm_type *mt = dev->mt;
> +       sector_t size = rrpc->nr_luns * dev->sec_per_lun * dev->sec_size;
> +
> +       size >>= 9;
> +       return mt->get_area(dev, begin, size);
> +}
> +
> +static void rrpc_area_free(struct rrpc *rrpc)
> +{
> +       struct nvm_dev *dev = rrpc->dev;
> +       struct nvmm_type *mt = dev->mt;
> +
> +       mt->put_area(dev, rrpc->soffset);
> +}
> +
>  static void rrpc_free(struct rrpc *rrpc)
>  {
>         rrpc_gc_free(rrpc);
>         rrpc_map_free(rrpc);
>         rrpc_core_free(rrpc);
>         rrpc_luns_free(rrpc);
> +       rrpc_area_free(rrpc);
>
>         kfree(rrpc);
>  }
> @@ -1312,6 +1335,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct
> gendisk *tdisk,
>         struct request_queue *bqueue = dev->q;
>         struct request_queue *tqueue = tdisk->queue;
>         struct rrpc *rrpc;
> +       sector_t soffset;
>         int ret;
>
>         if (!(dev->identity.dom & NVM_RSP_L2P)) {
> @@ -1337,6 +1361,13 @@ static void *rrpc_init(struct nvm_dev *dev,
> struct gendisk *tdisk,
>         /* simple round-robin strategy */
>         atomic_set(&rrpc->next_lun, -1);
>
> +       ret = rrpc_area_init(rrpc, &soffset);
> +       if (ret < 0) {
> +               pr_err("nvm: rrpc: could not initialize area\n");
> +               return ERR_PTR(ret);
> +       }
> +       rrpc->soffset = soffset;
> +
>         ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
>         if (ret) {
>                 pr_err("nvm: rrpc: could not initialize luns\n");
> diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
> index ef13ac7..9380c68 100644
> --- a/drivers/lightnvm/rrpc.h
> +++ b/drivers/lightnvm/rrpc.h
> @@ -97,6 +97,7 @@ struct rrpc {
>         struct nvm_dev *dev;
>         struct gendisk *disk;
>
> +       sector_t soffset; /* logical sector offset */
>         u64 poffset; /* physical page offset */
>         int lun_offset;
>
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index d675011..18f1bb0 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -351,6 +351,7 @@ struct nvm_dev {
>         char name[DISK_NAME_LEN];
>
>         struct mutex mlock;
> +       spinlock_t lock;
>  };
>
>  static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
> @@ -463,6 +464,9 @@ typedef int (nvmm_erase_blk_fn)(struct nvm_dev *,
> struct nvm_block *,
>  typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
>  typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
>
> +typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
> +typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
> +
>  struct nvmm_type {
>         const char *name;
>         unsigned int version[3];
> @@ -487,6 +491,10 @@ struct nvmm_type {
>
>         /* Statistics */
>         nvmm_lun_info_print_fn *lun_info_print;
> +
> +       nvmm_get_area_fn *get_area;
> +       nvmm_put_area_fn *put_area;
> +
>         struct list_head list;
>  };
>
>
>
>

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ