Message-ID: <20171001132555.GA5763@hercules.tuxera.com>
Date: Sun, 1 Oct 2017 16:25:56 +0300
From: Rakesh Pandit <rakesh@...era.com>
To: Matias Bjørling <mb@...htnvm.io>,
<linux-block@...r.kernel.org>, <linux-kernel@...r.kernel.org>
CC: Javier González <jg@...htnvm.io>
Subject: [PATCH 5/6] lightnvm: pblk: free up mempool allocation for erases
correctly

While separating the read and erase mempools in 22da65a1b,
pblk_g_rq_cache was used twice to set aside memory for both erase and
read requests. Because the same kmem cache backs both, a single call
to kmem_cache_destroy does not deallocate everything, so repeatedly
loading and unloading the pblk module eventually leaks memory.

The fix is to use a genuinely separate kmem cache for erase requests
and track it appropriately.

Fixes: 22da65a1b ("lightnvm: pblk: decouple read/erase mempools")
Signed-off-by: Rakesh Pandit <rakesh@...era.com>
---
 drivers/lightnvm/pblk-init.c | 16 ++++++++++++++--
 drivers/lightnvm/pblk.h      |  1 +
 2 files changed, 15 insertions(+), 2 deletions(-)
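
For reference, below is a minimal, self-contained sketch (kernel context
assumed) of the pattern this patch establishes: one backing kmem cache per
mempool, and exactly one kmem_cache_destroy() per cache on teardown. The
names used here (example_caches_init, "ex_g_rq", "ex_e_rq", ...) are
illustrative only and do not exist in the driver; sizes and locking are
simplified compared to the real pblk_init_global_caches()/pblk_core_init().

#include <linux/slab.h>
#include <linux/mempool.h>

static struct kmem_cache *g_rq_cache, *e_rq_cache;	/* one cache per pool */
static mempool_t *r_rq_pool, *e_rq_pool;

static int example_caches_init(unsigned int nr_luns, size_t rq_size)
{
	/* Two caches of the same object size, but tracked separately. */
	g_rq_cache = kmem_cache_create("ex_g_rq", rq_size, 0, 0, NULL);
	if (!g_rq_cache)
		return -ENOMEM;

	e_rq_cache = kmem_cache_create("ex_e_rq", rq_size, 0, 0, NULL);
	if (!e_rq_cache)
		goto free_g_cache;

	r_rq_pool = mempool_create_slab_pool(nr_luns, g_rq_cache);
	if (!r_rq_pool)
		goto free_e_cache;

	e_rq_pool = mempool_create_slab_pool(nr_luns, e_rq_cache);
	if (!e_rq_pool)
		goto free_r_pool;

	return 0;

free_r_pool:
	mempool_destroy(r_rq_pool);
free_e_cache:
	kmem_cache_destroy(e_rq_cache);
free_g_cache:
	kmem_cache_destroy(g_rq_cache);
	return -ENOMEM;
}

static void example_caches_exit(void)
{
	mempool_destroy(e_rq_pool);
	mempool_destroy(r_rq_pool);
	/* One destroy per cache: nothing stays pinned across module reloads. */
	kmem_cache_destroy(e_rq_cache);
	kmem_cache_destroy(g_rq_cache);
}

With a dedicated cache for erase requests, the destroy path in
pblk_core_free() mirrors the create path one-to-one.
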
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 9d9adcf..519e5cf 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -21,7 +21,7 @@
 #include "pblk.h"
 
 static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
-				*pblk_w_rq_cache;
+				*pblk_w_rq_cache, *pblk_e_rq_cache;
 static DECLARE_RWSEM(pblk_lock);
 
 struct bio_set *pblk_bio_set;
@@ -206,12 +206,23 @@ static int pblk_init_global_caches(struct pblk *pblk)
 		return -ENOMEM;
 	}
 
+	pblk_e_rq_cache = kmem_cache_create("pblk_e_rq", pblk_e_rq_size,
+				0, 0, NULL);
+	if (!pblk_e_rq_cache) {
+		kmem_cache_destroy(pblk_ws_cache);
+		kmem_cache_destroy(pblk_rec_cache);
+		kmem_cache_destroy(pblk_g_rq_cache);
+		up_write(&pblk_lock);
+		return -ENOMEM;
+	}
+
 	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
 				0, 0, NULL);
 	if (!pblk_w_rq_cache) {
 		kmem_cache_destroy(pblk_ws_cache);
 		kmem_cache_destroy(pblk_rec_cache);
 		kmem_cache_destroy(pblk_g_rq_cache);
+		kmem_cache_destroy(pblk_e_rq_cache);
 		up_write(&pblk_lock);
 		return -ENOMEM;
 	}
@@ -252,7 +263,7 @@ static int pblk_core_init(struct pblk *pblk)
 		goto free_rec_pool;
 
 	pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
-							pblk_g_rq_cache);
+							pblk_e_rq_cache);
 	if (!pblk->e_rq_pool)
 		goto free_r_rq_pool;
 
@@ -327,6 +338,7 @@ static void pblk_core_free(struct pblk *pblk)
 	kmem_cache_destroy(pblk_ws_cache);
 	kmem_cache_destroy(pblk_rec_cache);
 	kmem_cache_destroy(pblk_g_rq_cache);
+	kmem_cache_destroy(pblk_e_rq_cache);
 	kmem_cache_destroy(pblk_w_rq_cache);
 }
 
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index fcac246..03834d1 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -651,6 +651,7 @@ struct pblk_line_ws {
 
 #define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
 #define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
+#define pblk_e_rq_size pblk_g_rq_size
 
 /*
  * pblk ring buffer operations
--
2.7.4