[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1621938235-11947-2-git-send-email-yejunedeng@gmail.com>
Date: Tue, 25 May 2021 18:23:55 +0800
From: Yejune Deng <yejune.deng@...il.com>
To: cl@...ux.com, penberg@...nel.org, rientjes@...gle.com,
iamjoonsoo.kim@....com, akpm@...ux-foundation.org, vbabka@...e.cz
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Yejune Deng <yejunedeng@...il.com>
Subject: [PATCH 2/2] mm: slub: use DEFINE_RAW_SPINLOCK init object_map_lock
Use DEFINE_RAW_SPINLOCK instead of DEFINE_SPINLOCK for object_map_lock,
so that the lock cannot be preempted on both mainline and PREEMPT_RT
kernels (on PREEMPT_RT a plain spinlock_t becomes a sleeping lock, which
is invalid here since get_map() runs with interrupts disabled).
Signed-off-by: Yejune Deng <yejunedeng@...il.com>
---
mm/slub.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index c2f63c3..995f3d0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -445,7 +445,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
-static DEFINE_SPINLOCK(object_map_lock);
+static DEFINE_RAW_SPINLOCK(object_map_lock);
#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
@@ -481,7 +481,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
VM_BUG_ON(!irqs_disabled());
- spin_lock(&object_map_lock);
+ raw_spin_lock(&object_map_lock);
bitmap_zero(object_map, page->objects);
@@ -494,7 +494,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
static void put_map(unsigned long *map) __releases(&object_map_lock)
{
VM_BUG_ON(map != object_map);
- spin_unlock(&object_map_lock);
+ raw_spin_unlock(&object_map_lock);
}
static inline unsigned int size_from_object(struct kmem_cache *s)
--
2.7.4
Powered by blists - more mailing lists