Message-Id: <20220301075839.4156-5-xiam0nd.tong@gmail.com>
Date: Tue, 1 Mar 2022 15:58:37 +0800
From: Xiaomeng Tong <xiam0nd.tong@...il.com>
To: torvalds@...ux-foundation.org
Cc: arnd@...db.de, jakobkoschel@...il.com,
linux-kernel@...r.kernel.org, gregkh@...uxfoundation.org,
keescook@...omium.org, jannh@...gle.com,
linux-kbuild@...r.kernel.org, linux-mm@...ck.org,
netdev@...r.kernel.org, Xiaomeng Tong <xiam0nd.tong@...il.com>
Subject: [PATCH 4/6] mm: remove iterator use outside the loop

Demonstrations (a brief sketch of the macro shapes follows the list) for:
- list_for_each_entry_inside
- list_for_each_entry_reverse_inside
- list_for_each_entry_safe_inside
- list_for_each_entry_from_inside
- list_for_each_entry_continue_reverse_inside
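
The *_inside variants take the iterator's type as an explicit argument
and declare the iterator in the loop's own scope, so the variable can
no longer be referenced once the loop ends. As a reference for the
call sites below, a minimal sketch of two of the forms, assuming the
usual helpers from include/linux/list.h (the authoritative definitions
live in the earlier patch of this series that adds the macros):

#define list_for_each_entry_inside(pos, type, head, member)		\
	for (type *pos = list_first_entry(head, type, member);		\
	     !list_entry_is_head(pos, head, member);			\
	     pos = list_next_entry(pos, member))

#define list_for_each_entry_continue_reverse_inside(pos, type, begin,	\
						    head, member)	\
	for (type *pos = list_prev_entry(begin, member);		\
	     !list_entry_is_head(pos, head, member);			\
	     pos = list_prev_entry(pos, member))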
Signed-off-by: Xiaomeng Tong <xiam0nd.tong@...il.com>
---
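Below the "---" cut, so it stays out of the commit itself: a
self-contained toy, not part of the patch, illustrating why scoping
the iterator inside the loop removes the use-after-loop bug class.
struct item, the hand-built list, and the local macro are invented for
the illustration and only mirror the shape of the real helpers:

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-in for the kernel's struct list_head. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The macro itself declares 'pos', so 'pos' exists only inside
 * the loop body. */
#define list_for_each_entry_inside(pos, type, head, member)		\
	for (type *pos = container_of((head)->next, type, member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, type, member))

struct item { int val; struct list_head link; };

int main(void)
{
	struct list_head head = { &head, &head };
	struct item a = { .val = 1 }, b = { .val = 2 };

	/* Hand-link the nodes: head <-> a <-> b <-> head. */
	head.next = &a.link; a.link.prev = &head;
	a.link.next = &b.link; b.link.prev = &a.link;
	b.link.next = &head; head.prev = &b.link;

	list_for_each_entry_inside(it, struct item, &head, link)
		printf("%d\n", it->val);	/* prints 1 then 2 */

	/* 'it' is out of scope here; referencing it would fail to
	 * compile, which is exactly what the series enforces. */
	return 0;
}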
mm/list_lru.c | 10 ++++++----
mm/slab_common.c | 7 ++-----
mm/vmalloc.c | 6 +++---
3 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/mm/list_lru.c b/mm/list_lru.c
index 0cd5e89ca..d8aab53a7 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -493,20 +493,22 @@ static void memcg_cancel_update_list_lru(struct list_lru *lru,
int memcg_update_all_list_lrus(int new_size)
{
int ret = 0;
- struct list_lru *lru;
+ struct list_lru *ll = NULL;
int old_size = memcg_nr_cache_ids;

mutex_lock(&list_lrus_mutex);
- list_for_each_entry(lru, &memcg_list_lrus, list) {
+ list_for_each_entry_inside(lru, struct list_lru, &memcg_list_lrus, list) {
ret = memcg_update_list_lru(lru, old_size, new_size);
- if (ret)
+ if (ret) {
+ ll = lru;
goto fail;
+ }
}
out:
mutex_unlock(&list_lrus_mutex);
return ret;
fail:
- list_for_each_entry_continue_reverse(lru, &memcg_list_lrus, list)
+ list_for_each_entry_continue_reverse_inside(lru, struct list_lru, ll, &memcg_list_lrus, list)
memcg_cancel_update_list_lru(lru, old_size, new_size);
goto out;
}
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 23f2ab071..68a25d385 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -186,8 +186,6 @@ int slab_unmergeable(struct kmem_cache *s)
struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
slab_flags_t flags, const char *name, void (*ctor)(void *))
{
- struct kmem_cache *s;
-
if (slab_nomerge)
return NULL;

@@ -202,7 +200,7 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
if (flags & SLAB_NEVER_MERGE)
return NULL;

- list_for_each_entry_reverse(s, &slab_caches, list) {
+ list_for_each_entry_reverse_inside(s, struct kmem_cache, &slab_caches, list) {
if (slab_unmergeable(s))
continue;

@@ -419,7 +417,6 @@ EXPORT_SYMBOL(kmem_cache_create);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
LIST_HEAD(to_destroy);
- struct kmem_cache *s, *s2;

/*
* On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
@@ -439,7 +436,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
rcu_barrier();

- list_for_each_entry_safe(s, s2, &to_destroy, list) {
+ list_for_each_entry_safe_inside(s, s2, struct kmem_cache, &to_destroy, list) {
debugfs_slab_release(s);
kfence_shutdown_cache(s);
#ifdef SLAB_SUPPORTS_SYSFS
sysfs_slab_release(s);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4165304d3..65a9f1db7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3417,14 +3417,14 @@ long vread(char *buf, char *addr, unsigned long count)
if ((unsigned long)addr + count <= va->va_start)
goto finished;

- list_for_each_entry_from(va, &vmap_area_list, list) {
+ list_for_each_entry_from_inside(iter, struct vmap_area, va, &vmap_area_list, list) {
if (!count)
break;

- if (!va->vm)
+ if (!iter->vm)
continue;

- vm = va->vm;
+ vm = iter->vm;
vaddr = (char *) vm->addr;
if (addr >= vaddr + get_vm_area_size(vm))
continue;
--
2.17.1