[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date: Thu, 5 May 2016 11:34:39 +0200
From: Matias Bjørling <mb@...htnvm.io>
To: Javier González <jg@...htnvm.io>
Cc: linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
Javier González <javier@...xlabs.com>
Subject: Re: [PATCH 2/4] lightnvm: rename nr_pages to nr_ppas on nvm_rq
On 05/04/2016 05:31 PM, Javier González wrote:
> The number of ppas contained in a request is not necessarily the number
> of pages that it maps to, either on the target or on the device side.
> In order to avoid confusion, rename nr_pages to nr_ppas since it is what
> the variable actually contains.
>
> Signed-off-by: Javier González <javier@...xlabs.com>
> ---
> drivers/lightnvm/core.c | 16 ++++++++--------
> drivers/lightnvm/gennvm.c | 2 +-
> drivers/lightnvm/rrpc.c | 6 +++---
> drivers/lightnvm/rrpc.h | 2 +-
> drivers/lightnvm/sysblk.c | 2 +-
> drivers/nvme/host/lightnvm.c | 4 ++--
> include/linux/lightnvm.h | 2 +-
> 7 files changed, 17 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index 32375b6..4cd9803 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -254,8 +254,8 @@ void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
> {
> int i;
>
> - if (rqd->nr_pages > 1) {
> - for (i = 0; i < rqd->nr_pages; i++)
> + if (rqd->nr_ppas > 1) {
> + for (i = 0; i < rqd->nr_ppas; i++)
> rqd->ppa_list[i] = dev_to_generic_addr(dev,
> rqd->ppa_list[i]);
> } else {
> @@ -268,8 +268,8 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
> {
> int i;
>
> - if (rqd->nr_pages > 1) {
> - for (i = 0; i < rqd->nr_pages; i++)
> + if (rqd->nr_ppas > 1) {
> + for (i = 0; i < rqd->nr_ppas; i++)
> rqd->ppa_list[i] = generic_to_dev_addr(dev,
> rqd->ppa_list[i]);
> } else {
> @@ -284,13 +284,13 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
> int i, plane_cnt, pl_idx;
>
> if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
> - rqd->nr_pages = nr_ppas;
> + rqd->nr_ppas = nr_ppas;
> rqd->ppa_addr = ppas[0];
>
> return 0;
> }
>
> - rqd->nr_pages = nr_ppas;
> + rqd->nr_ppas = nr_ppas;
> rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
> if (!rqd->ppa_list) {
> pr_err("nvm: failed to allocate dma memory\n");
> @@ -302,7 +302,7 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
> rqd->ppa_list[i] = ppas[i];
> } else {
> plane_cnt = dev->plane_mode;
> - rqd->nr_pages *= plane_cnt;
> + rqd->nr_ppas *= plane_cnt;
>
> for (i = 0; i < nr_ppas; i++) {
> for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
> @@ -423,7 +423,7 @@ int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
>
> memset(&rqd, 0, sizeof(struct nvm_rq));
>
> - rqd.nr_pages = nr_ppas;
> + rqd.nr_ppas = nr_ppas;
> if (nr_ppas > 1)
> rqd.ppa_list = ppa_list;
> else
> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
> index 211d7f7..dc726d9 100644
> --- a/drivers/lightnvm/gennvm.c
> +++ b/drivers/lightnvm/gennvm.c
> @@ -446,7 +446,7 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
> nvm_addr_to_generic_mode(dev, rqd);
>
> /* look up blocks and mark them as bad */
> - if (rqd->nr_pages == 1) {
> + if (rqd->nr_ppas == 1) {
> gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
> return;
> }
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index 48862ead..72aca96 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -695,7 +695,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
> {
> struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
> struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
> - uint8_t npages = rqd->nr_pages;
> + uint8_t npages = rqd->nr_ppas;
> sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
>
> if (bio_data_dir(rqd->bio) == WRITE)
> @@ -883,7 +883,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
> bio_get(bio);
> rqd->bio = bio;
> rqd->ins = &rrpc->instance;
> - rqd->nr_pages = nr_pages;
> + rqd->nr_ppas = nr_pages;
> rrq->flags = flags;
>
> err = nvm_submit_io(rrpc->dev, rqd);
> @@ -892,7 +892,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
> bio_put(bio);
> if (!(flags & NVM_IOTYPE_GC)) {
> rrpc_unlock_rq(rrpc, rqd);
> - if (rqd->nr_pages > 1)
> + if (rqd->nr_ppas > 1)
> nvm_dev_dma_free(rrpc->dev,
> rqd->ppa_list, rqd->dma_ppa_list);
> }
> diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
> index 2653484..87e84b5 100644
> --- a/drivers/lightnvm/rrpc.h
> +++ b/drivers/lightnvm/rrpc.h
> @@ -251,7 +251,7 @@ static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
> static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
> {
> struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
> - uint8_t pages = rqd->nr_pages;
> + uint8_t pages = rqd->nr_ppas;
>
> BUG_ON((r->l_start + pages) > rrpc->nr_sects);
>
> diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
> index b98ca19..994697a 100644
> --- a/drivers/lightnvm/sysblk.c
> +++ b/drivers/lightnvm/sysblk.c
> @@ -280,7 +280,7 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
> nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
> nvm_generic_to_addr_mode(dev, &rqd);
>
> - ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_pages, type);
> + ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
> nvm_free_rqd_ppalist(dev, &rqd);
> if (ret) {
> pr_err("nvm: sysblk failed bb mark\n");
> diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
> index 65de1e5..a0af055 100644
> --- a/drivers/nvme/host/lightnvm.c
> +++ b/drivers/nvme/host/lightnvm.c
> @@ -471,7 +471,7 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
> c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
> c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
> c->ph_rw.control = cpu_to_le16(rqd->flags);
> - c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);
> + c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
>
> if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
> c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
> @@ -542,7 +542,7 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
> c.erase.opcode = NVM_OP_ERASE;
> c.erase.nsid = cpu_to_le32(ns->ns_id);
> c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
> - c.erase.length = cpu_to_le16(rqd->nr_pages - 1);
> + c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
>
> return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
> }
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index 6c02209..272a98b 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -246,7 +246,7 @@ struct nvm_rq {
> nvm_end_io_fn *end_io;
>
> uint8_t opcode;
> - uint16_t nr_pages;
> + uint16_t nr_ppas;
> uint16_t flags;
>
> u64 ppa_status; /* ppa media status */
>
Thanks, applied for 4.7.
Powered by blists - more mailing lists