[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <569F8588.4040405@lightnvm.io>
Date: Wed, 20 Jan 2016 14:03:04 +0100
From: Matias Bjørling <mb@...htnvm.io>
To: Wenwei Tao <ww.tao0320@...il.com>
Cc: linux-kernel@...r.kernel.org, linux-block@...r.kernel.org
Subject: Re: [RFC PATCH 1/2] lightnvm: specify target's logical address area
On 01/15/2016 12:44 PM, Wenwei Tao wrote:
> We can create more than one target on a lightnvm
> device by specifying its begin lun and end lun.
>
> But only specifying the physical address area is not
> enough; we also need to get a corresponding non-
> intersecting logical address area division from
> the backend device's logical address space.
> Otherwise the targets on the device might use
> the same logical addresses and this will cause
> incorrect information in the device's l2p table.
>
> Signed-off-by: Wenwei Tao <ww.tao0320@...il.com>
> ---
> drivers/lightnvm/core.c | 1 +
> drivers/lightnvm/gennvm.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++
> drivers/lightnvm/gennvm.h | 7 ++++++
> drivers/lightnvm/rrpc.c | 44 ++++++++++++++++++++++++++++++++----
> drivers/lightnvm/rrpc.h | 1 +
> include/linux/lightnvm.h | 8 +++++++
> 6 files changed, 114 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index 8f41b24..d938636 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -238,6 +238,7 @@ static int nvm_core_init(struct nvm_dev *dev)
> dev->nr_chnls;
> dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
> INIT_LIST_HEAD(&dev->online_targets);
> + spin_lock_init(&dev->lock);
>
> return 0;
> }
> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
> index 62c6f4d..f7c4495 100644
> --- a/drivers/lightnvm/gennvm.c
> +++ b/drivers/lightnvm/gennvm.c
> @@ -20,6 +20,59 @@
>
> #include "gennvm.h"
>
> +static sector_t gennvm_get_area(struct nvm_dev *dev, sector_t size)
> +{
> + struct gen_nvm *gn = dev->mp;
> + struct gennvm_area *area, *prev;
> + sector_t start = 0;
Rename `start` to `begin`?
> + int page_size = dev->sec_size * dev->sec_per_pg;
> + sector_t max = page_size * dev->total_pages >> 9;
Can we put parentheses around this, just for clarity? Maybe also rename
the variable to max_sect/max_sectors?
> +
> + if (size > max)
> + return -EINVAL;
> + area = kmalloc(sizeof(*area), GFP_KERNEL);
I prefer sizeof(struct gennvm_area)
> + if (!area)
> + return -ENOMEM;
> +
> + spin_lock(&dev->lock);
> + list_for_each_entry(prev, &gn->area_list, list) {
> + if (start + size > prev->start) {
> + start = prev->end;
> + continue;
> + }
> + break;
> + }
> +
> + if (start + size > max) {
Same with parentheses here, just for clarity.
> + spin_unlock(&dev->lock);
> + kfree(area);
> + return -EINVAL;
> + }
> +
> + area->start = start;
> + area->end = start + size;
> + list_add(&area->list, &prev->list);
> + spin_unlock(&dev->lock);
> + return start;
> +}
> +
> +static void gennvm_put_area(struct nvm_dev *dev, sector_t start)
> +{
> + struct gen_nvm *gn = dev->mp;
> + struct gennvm_area *area;
> +
> + spin_lock(&dev->lock);
> + list_for_each_entry(area, &gn->area_list, list) {
> + if (area->start == start) {
> + list_del(&area->list);
> + spin_unlock(&dev->lock);
> + kfree(area);
> + return;
> + }
> + }
> + spin_unlock(&dev->lock);
> +}
> +
> static void gennvm_blocks_free(struct nvm_dev *dev)
> {
> struct gen_nvm *gn = dev->mp;
> @@ -228,6 +281,7 @@ static int gennvm_register(struct nvm_dev *dev)
>
> gn->dev = dev;
> gn->nr_luns = dev->nr_luns;
> + INIT_LIST_HEAD(&gn->area_list);
> dev->mp = gn;
>
> ret = gennvm_luns_init(dev, gn);
> @@ -506,6 +560,9 @@ static struct nvmm_type gennvm = {
>
> .get_lun = gennvm_get_lun,
> .lun_info_print = gennvm_lun_info_print,
> +
> + .get_area = gennvm_get_area,
> + .put_area = gennvm_put_area,
> };
>
> static int __init gennvm_module_init(void)
> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
> index 9c24b5b..b51813a 100644
> --- a/drivers/lightnvm/gennvm.h
> +++ b/drivers/lightnvm/gennvm.h
> @@ -39,6 +39,13 @@ struct gen_nvm {
>
> int nr_luns;
> struct gen_lun *luns;
> + struct list_head area_list;
> +};
> +
> +struct gennvm_area {
> + struct list_head list;
> + sector_t start;
`begin`/`end` fit better as field names here too.
> + sector_t end; /* end is excluded */
> };
>
> #define gennvm_for_each_lun(bm, lun, i) \
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index 8628a5d..ab1d17a 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -1017,7 +1017,17 @@ static int rrpc_map_init(struct rrpc *rrpc)
> {
> struct nvm_dev *dev = rrpc->dev;
> sector_t i;
> - int ret;
> + u64 slba;
> + int ret, page_size;
> + int page_shfit, nr_pages;
> +
> + page_size = dev->sec_per_pg * dev->sec_size;
> + page_shfit = ilog2(page_size);
> + nr_pages = rrpc->nr_luns *
> + dev->nr_planes *
> + dev->blks_per_lun *
> + dev->pgs_per_blk;
Can the last three be replaced with dev->sec_per_lun?
> + slba = rrpc->soffset >> (page_shfit - 9);
>
> rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
> if (!rrpc->trans_map)
> @@ -1040,8 +1050,7 @@ static int rrpc_map_init(struct rrpc *rrpc)
> return 0;
>
> /* Bring up the mapping table from device */
> - ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
> - rrpc_l2p_update, rrpc);
> + ret = dev->ops->get_l2p_tbl(dev, slba, nr_pages, rrpc_l2p_update, rrpc);
> if (ret) {
> pr_err("nvm: rrpc: could not read L2P table.\n");
> return -EINVAL;
> @@ -1050,7 +1059,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
> return 0;
> }
>
> -
> /* Minimum pages needed within a lun */
> #define PAGE_POOL_SIZE 16
> #define ADDR_POOL_SIZE 64
> @@ -1160,12 +1168,33 @@ err:
> return -ENOMEM;
> }
>
> +static int rrpc_area_init(struct rrpc *rrpc)
> +{
> + struct nvm_dev *dev = rrpc->dev;
> + struct nvmm_type *mt = dev->mt;
> + sector_t size = rrpc->nr_luns *
> + dev->sec_per_lun *
> + dev->sec_size;
> +
> + size >>= 9;
> + return mt->get_area(dev, size);
> +}
> +
> +static void rrpc_area_free(struct rrpc *rrpc)
> +{
> + struct nvm_dev *dev = rrpc->dev;
> + struct nvmm_type *mt = dev->mt;
> +
> + mt->put_area(dev, rrpc->soffset);
> +}
> +
> static void rrpc_free(struct rrpc *rrpc)
> {
> rrpc_gc_free(rrpc);
> rrpc_map_free(rrpc);
> rrpc_core_free(rrpc);
> rrpc_luns_free(rrpc);
> + rrpc_area_free(rrpc);
>
> kfree(rrpc);
> }
> @@ -1311,6 +1340,13 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
> /* simple round-robin strategy */
> atomic_set(&rrpc->next_lun, -1);
>
> + ret = rrpc_area_init(rrpc);
gennvm_get_area returns sector_t, while ret is int.
> + if (ret < 0) {
> + pr_err("nvm: rrpc: could not initialize area\n");
> + return ERR_PTR(ret);
> + }
> + rrpc->soffset = ret;
> +
> ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
> if (ret) {
> pr_err("nvm: rrpc: could not initialize luns\n");
> diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
> index a9696a0..f26ba5b 100644
> --- a/drivers/lightnvm/rrpc.h
> +++ b/drivers/lightnvm/rrpc.h
> @@ -86,6 +86,7 @@ struct rrpc {
> struct nvm_dev *dev;
> struct gendisk *disk;
>
> + sector_t soffset; /* logical sector offset */
> u64 poffset; /* physical page offset */
> int lun_offset;
>
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index 034117b..4f3db10 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -240,6 +240,8 @@ struct nvm_block {
> struct nvm_dev {
> struct nvm_dev_ops *ops;
>
> + spinlock_t lock;
> +
> struct list_head devices;
> struct list_head online_targets;
>
> @@ -388,6 +390,8 @@ typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
> unsigned long);
> typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
> typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
> +typedef sector_t (nvmm_get_area_fn)(struct nvm_dev *, sector_t);
> +typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
>
> struct nvmm_type {
> const char *name;
> @@ -412,6 +416,10 @@ struct nvmm_type {
>
> /* Statistics */
> nvmm_lun_info_print_fn *lun_info_print;
> +
> + nvmm_get_area_fn *get_area;
> + nvmm_put_area_fn *put_area;
> +
> struct list_head list;
> };
>
>
Both patches could use a rebase on top of the for-next branch.
Powered by blists - more mailing lists