Message-ID: <d9b06aeb-3408-75c6-f55d-9143d831ae6c@loongson.cn>
Date: Tue, 15 Mar 2022 20:56:23 +0800
From: wangjianxing <wangjianxing@...ngson.cn>
To: will@...nel.org, aneesh.kumar@...ux.ibm.com,
akpm@...ux-foundation.org, npiggin@...il.com, peterz@...radead.org
Cc: linux-arch@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/1] mm/mmu_gather: limit tlb batch count and add schedule
point in tlb_batch_pages_flush
1. I increase the CPU overcommit ratio of the KVM hypervisor to
1:2~1:3: each VM gets the same number of vcpus as the host has CPUs,
and 2 or 3 such VMs are set up.
2. Run the ltpstress (20180926) test in each VM. Both host and guest
run non-preemptible kernels, and the VM dmesg then throws some
rcu_sched warnings.
3. PAGE_SIZE in my kernel config is 16K, so the TLB batch max count is
2015, which takes too long to process in non-preemptible state (see the
back-of-envelope sketch below).
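
For reference, a rough version of that count. This is a sketch only: it
assumes 64-bit pointers and a 16-byte struct mmu_gather_batch header
(next + nr + max), so the exact figure can differ from the 2015 quoted
above depending on config:

#include <stdio.h>

/* Rough model of MAX_GATHER_BATCH from include/asm-generic/tlb.h:
 * (batch size - header) / sizeof(struct page *). */
#define BATCH_HDR 16UL
#define PTR_SZ    8UL

int main(void)
{
	/* Unpatched, 16K PAGE_SIZE: one batch spans the whole page. */
	printf("16K batch: %lu pages\n", (16384UL - BATCH_HDR) / PTR_SZ);
	/* Patched: MAX_GATHER_BATCH_SZ caps the batch at 4096 bytes. */
	printf("4K batch:  %lu pages\n", (4096UL - BATCH_HDR) / PTR_SZ);
	return 0;
}

This prints 2046 and 510 here, i.e. the cap cuts each per-batch free
burst by roughly 4x on a 16K-page kernel.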
The issue's original link:
https://patchwork.kernel.org/project/linux-mm/patch/20220302013825.2290315-1-wangjianxing@loongson.cn/
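
As background for the "can't detect atomic context" point in the patch
description quoted below: with CONFIG_PREEMPTION=n (and thus no
CONFIG_PREEMPT_COUNT), preempt_count() is not maintained, so
preemptible() degrades to a constant. Sketched from
include/linux/preempt.h (simplified, not verbatim):

/* With preempt counting, atomic context is detectable. */
#ifdef CONFIG_PREEMPT_COUNT
#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
#else
/* Without it, this is always 0, so free_unref_page_list() has no
 * reliable "is it safe to cond_resched() here?" test. */
#define preemptible()	0
#endif

Hence the schedule point has to live in a caller that is known to be
preemptible, such as tlb_batch_pages_flush().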
On 03/15/2022 08:55 PM, Jianxing Wang wrote:
> Freeing a large list of pages may cause rcu_sched starvation on
> non-preemptible kernels. However, free_unref_page_list() cannot
> cond_resched() itself, since it may be called from interrupt or atomic
> context; in particular, atomic context cannot be detected when
> CONFIG_PREEMPTION=n.
>
> The TLB flush batch count depends on PAGE_SIZE and becomes too large
> when PAGE_SIZE > 4K, so limit the max batch size to 4K and add a
> schedule point in tlb_batch_pages_flush().
>
> rcu: rcu_sched kthread starved for 5359 jiffies! g454793 f0x0
> RCU_GP_WAIT_FQS(5) ->state=0x0 ->cpu=19
> [...]
> Call Trace:
> free_unref_page_list+0x19c/0x270
> release_pages+0x3cc/0x498
> tlb_flush_mmu_free+0x44/0x70
> zap_pte_range+0x450/0x738
> unmap_page_range+0x108/0x240
> unmap_vmas+0x74/0xf0
> unmap_region+0xb0/0x120
> do_munmap+0x264/0x438
> vm_munmap+0x58/0xa0
> sys_munmap+0x10/0x20
> syscall_common+0x24/0x38
>
> Signed-off-by: Jianxing Wang <wangjianxing@...ngson.cn>
> ---
>  include/asm-generic/tlb.h | 7 ++++++-
>  mm/mmu_gather.c           | 7 +++++--
>  2 files changed, 11 insertions(+), 3 deletions(-)
>
> diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
> index 2c68a545ffa7..47c7f93ca695 100644
> --- a/include/asm-generic/tlb.h
> +++ b/include/asm-generic/tlb.h
> @@ -230,8 +230,13 @@ struct mmu_gather_batch {
>  	struct page *pages[0];
>  };
>
> +#if PAGE_SIZE > 4096UL
> +#define MAX_GATHER_BATCH_SZ 4096
> +#else
> +#define MAX_GATHER_BATCH_SZ PAGE_SIZE
> +#endif
>  #define MAX_GATHER_BATCH	\
> -	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
> +	((MAX_GATHER_BATCH_SZ - sizeof(struct mmu_gather_batch)) / sizeof(void *))
>
>  /*
>   * Limit the maximum number of mmu_gather batches to reduce a risk of soft
> diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
> index afb7185ffdc4..f2c105810b3f 100644
> --- a/mm/mmu_gather.c
> +++ b/mm/mmu_gather.c
> @@ -8,6 +8,7 @@
>  #include <linux/rcupdate.h>
>  #include <linux/smp.h>
>  #include <linux/swap.h>
> +#include <linux/slab.h>
>
>  #include <asm/pgalloc.h>
>  #include <asm/tlb.h>
> @@ -27,7 +28,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
>  	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
>  		return false;
>
> -	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
> +	batch = kmalloc(MAX_GATHER_BATCH_SZ, GFP_NOWAIT | __GFP_NOWARN);
>  	if (!batch)
>  		return false;
>
> @@ -49,6 +50,8 @@ static void tlb_batch_pages_flush(struct mmu_gather *tlb)
>  	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
>  		free_pages_and_swap_cache(batch->pages, batch->nr);
>  		batch->nr = 0;
> +
> +		cond_resched();
>  	}
>  	tlb->active = &tlb->local;
>  }
> @@ -59,7 +62,7 @@ static void tlb_batch_list_free(struct mmu_gather *tlb)
>
>  	for (batch = tlb->local.next; batch; batch = next) {
>  		next = batch->next;
> -		free_pages((unsigned long)batch, 0);
> +		kfree(batch);
>  	}
>  	tlb->local.next = NULL;
>  }
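
For completeness, the call trace above can be driven from userspace
with a single large munmap(). A minimal sketch (a hypothetical
reproducer, not the ltpstress case itself; the region size and the 16K
stride are assumptions matching my config):

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 8UL << 30;	/* 8G; scale to available memory */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	/* Fault in every 16K page so zap_pte_range() has PTEs to clear. */
	for (size_t off = 0; off < len; off += 16384)
		p[off] = 1;
	/* One big munmap() -> unmap_region() -> tlb_flush_mmu_free(),
	 * which frees the gathered pages batch by batch. */
	munmap(p, len);
	return 0;
}

Side note on the patch itself: switching tlb_next_batch() from
__get_free_pages() to kmalloc() looks like a direct consequence of the
size cap, since MAX_GATHER_BATCH_SZ can now be smaller than a page.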