[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1479474236-4139-10-git-send-email-hch@lst.de>
Date: Fri, 18 Nov 2016 14:03:55 +0100
From: Christoph Hellwig <hch@....de>
To: akpm@...ux-foundation.org
Cc: aryabinin@...tuozzo.com, joelaf@...gle.com, jszhang@...vell.com,
chris@...is-wilson.co.uk, joaodias@...gle.com, linux-mm@...ck.org,
linux-rt-users@...r.kernel.org, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 09/10] mm: turn vmap_purge_lock into a mutex
The purge_lock spinlock causes high latencies with non-RT kernels. This
has been reported multiple times on lkml [1] [2] and affects
applications like audio.
This patch replaces it with a mutex to allow preemption while holding
the lock.
Thanks to Joel Fernandes for the detailed report and analysis as well
as an earlier attempt at fixing this issue.
[1] http://lists.openwall.net/linux-kernel/2016/03/23/29
[2] https://lkml.org/lkml/2016/10/9/59
Signed-off-by: Christoph Hellwig <hch@....de>
Tested-by: Jisheng Zhang <jszhang@...vell.com>
---
mm/vmalloc.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 25283af..dccf242 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -606,7 +606,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
* by this look, but we want to avoid concurrent calls for performance
* reasons and to make the pcpu_get_vm_areas more deterministic.
*/
-static DEFINE_SPINLOCK(vmap_purge_lock);
+static DEFINE_MUTEX(vmap_purge_lock);
/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
@@ -660,9 +660,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
*/
static void try_purge_vmap_area_lazy(void)
{
- if (spin_trylock(&vmap_purge_lock)) {
+ if (mutex_trylock(&vmap_purge_lock)) {
__purge_vmap_area_lazy(ULONG_MAX, 0);
- spin_unlock(&vmap_purge_lock);
+ mutex_unlock(&vmap_purge_lock);
}
}
@@ -671,10 +671,10 @@ static void try_purge_vmap_area_lazy(void)
*/
static void purge_vmap_area_lazy(void)
{
- spin_lock(&vmap_purge_lock);
+ mutex_lock(&vmap_purge_lock);
purge_fragmented_blocks_allcpus();
__purge_vmap_area_lazy(ULONG_MAX, 0);
- spin_unlock(&vmap_purge_lock);
+ mutex_unlock(&vmap_purge_lock);
}
/*
@@ -1063,11 +1063,11 @@ void vm_unmap_aliases(void)
rcu_read_unlock();
}
- spin_lock(&vmap_purge_lock);
+ mutex_lock(&vmap_purge_lock);
purge_fragmented_blocks_allcpus();
if (!__purge_vmap_area_lazy(start, end) && flush)
flush_tlb_kernel_range(start, end);
- spin_unlock(&vmap_purge_lock);
+ mutex_unlock(&vmap_purge_lock);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
--
2.1.4
Powered by blists - more mailing lists