Message-ID: <20200924111641.28922-1-linmiaohe@huawei.com>
Date: Thu, 24 Sep 2020 07:16:41 -0400
From: Miaohe Lin <linmiaohe@...wei.com>
To: <akpm@...ux-foundation.org>
CC: <linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>,
<linmiaohe@...wei.com>
Subject: [PATCH v2] mm/mempool: Add 'else' to split mutually exclusive case
Add 'else' to split the mutually exclusive cases and avoid some unnecessary
checks. It doesn't seem to change code generation (the compiler is smart
enough), but I think it helps readability.
Signed-off-by: Miaohe Lin <linmiaohe@...wei.com>
---
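Not part of the patch itself, just a note for reviewers: below is a minimal
userspace sketch of the pattern the change applies. The names (toy_pool,
free_slab_backed, free_page_backed) are illustrative stand-ins, not the
kernel's mempool API; the point is only that the two backends are mutually
exclusive, so chaining the tests with "else if" skips the second comparison
once the first one matches.

#include <stdio.h>

/* Illustrative stand-ins for the two mutually exclusive backends. */
static void free_slab_backed(void *element) { (void)element; }
static void free_page_backed(void *element) { (void)element; }

struct toy_pool {
	void (*free)(void *element);
};

static void check_backend(struct toy_pool *pool)
{
	/*
	 * The two cases can never both be true, so use "else if": once the
	 * slab test matches, the page test is not evaluated at all.
	 */
	if (pool->free == free_slab_backed) {
		printf("slab-backed pool\n");
	} else if (pool->free == free_page_backed) {
		printf("page-backed pool\n");
	}
}

int main(void)
{
	struct toy_pool slab_pool = { .free = free_slab_backed };
	struct toy_pool page_pool = { .free = free_page_backed };

	check_backend(&slab_pool);	/* prints "slab-backed pool" */
	check_backend(&page_pool);	/* prints "page-backed pool" */
	return 0;
}

With two independent ifs, the second comparison would still run for
slab-backed pools even though it can never match; the else-if makes that
exclusivity explicit to the reader.
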
mm/mempool.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/mm/mempool.c b/mm/mempool.c
index 79bff63ecf27..95fa8e601730 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -58,11 +58,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
 static void check_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
+	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
 		__check_element(pool, element, ksize(element));
-
 	/* Mempools backed by page allocator */
-	if (pool->free == mempool_free_pages) {
+	} else if (pool->free == mempool_free_pages) {
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);
@@ -82,11 +81,10 @@ static void __poison_element(void *element, size_t size)
 static void poison_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
 		__poison_element(element, ksize(element));
-
 	/* Mempools backed by page allocator */
-	if (pool->alloc == mempool_alloc_pages) {
+	} else if (pool->alloc == mempool_alloc_pages) {
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);
@@ -107,7 +105,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_poison_kfree(element, _RET_IP_);
-	if (pool->alloc == mempool_alloc_pages)
+	else if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
@@ -115,7 +113,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_unpoison_slab(element);
-	if (pool->alloc == mempool_alloc_pages)
+	else if (pool->alloc == mempool_alloc_pages)
 		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
--
2.19.1