Date:   Wed, 16 Mar 2022 09:57:52 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     Jianxing Wang <wangjianxing@...ngson.cn>
Cc:     will@...nel.org, aneesh.kumar@...ux.ibm.com,
        akpm@...ux-foundation.org, npiggin@...il.com,
        linux-arch@...r.kernel.org, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/1] mm/mmu_gather: limit tlb batch count and add
 schedule point in tlb_batch_pages_flush

On Tue, Mar 15, 2022 at 08:55:36AM -0400, Jianxing Wang wrote:
> Freeing a large list of pages may cause rcu_sched starvation on
> non-preemptible kernels. However, free_unref_page_list can't call
> cond_resched, since it may run in interrupt or atomic context, and with
> CONFIG_PREEMPTION=n atomic context cannot be detected at all.
> 
> The TLB flush batch count depends on PAGE_SIZE and becomes too large
> when PAGE_SIZE > 4K, so limit the maximum batch size to 4K (see the
> sizing sketch after this quoted patch) and add a schedule point in
> tlb_batch_pages_flush.
> 
> rcu: rcu_sched kthread starved for 5359 jiffies! g454793 f0x0
> RCU_GP_WAIT_FQS(5) ->state=0x0 ->cpu=19
> [...]
> Call Trace:
>    free_unref_page_list+0x19c/0x270
>    release_pages+0x3cc/0x498
>    tlb_flush_mmu_free+0x44/0x70
>    zap_pte_range+0x450/0x738
>    unmap_page_range+0x108/0x240
>    unmap_vmas+0x74/0xf0
>    unmap_region+0xb0/0x120
>    do_munmap+0x264/0x438
>    vm_munmap+0x58/0xa0
>    sys_munmap+0x10/0x20
>    syscall_common+0x24/0x38
> 
> Signed-off-by: Jianxing Wang <wangjianxing@...ngson.cn>
> ---
>  include/asm-generic/tlb.h | 7 ++++++-
>  mm/mmu_gather.c           | 7 +++++--
>  2 files changed, 11 insertions(+), 3 deletions(-)
> 
> diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
> index 2c68a545ffa7..47c7f93ca695 100644
> --- a/include/asm-generic/tlb.h
> +++ b/include/asm-generic/tlb.h
> @@ -230,8 +230,13 @@ struct mmu_gather_batch {
>  	struct page		*pages[0];
>  };
>  
> +#if PAGE_SIZE > 4096UL
> +#define MAX_GATHER_BATCH_SZ	4096
> +#else
> +#define MAX_GATHER_BATCH_SZ	PAGE_SIZE
> +#endif
>  #define MAX_GATHER_BATCH	\
> -	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
> +	((MAX_GATHER_BATCH_SZ - sizeof(struct mmu_gather_batch)) / sizeof(void *))
>  
>  /*
>   * Limit the maximum number of mmu_gather batches to reduce a risk of soft
> diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
> index afb7185ffdc4..f2c105810b3f 100644
> --- a/mm/mmu_gather.c
> +++ b/mm/mmu_gather.c
> @@ -8,6 +8,7 @@
>  #include <linux/rcupdate.h>
>  #include <linux/smp.h>
>  #include <linux/swap.h>
> +#include <linux/slab.h>
>  
>  #include <asm/pgalloc.h>
>  #include <asm/tlb.h>
> @@ -27,7 +28,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
>  	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
>  		return false;
>  
> -	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
> +	batch = kmalloc(MAX_GATHER_BATCH_SZ, GFP_NOWAIT | __GFP_NOWARN);
>  	if (!batch)
>  		return false;
>  
> @@ -49,6 +50,8 @@ static void tlb_batch_pages_flush(struct mmu_gather *tlb)
>  	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
>  		free_pages_and_swap_cache(batch->pages, batch->nr);
>  		batch->nr = 0;
> +
> +		cond_resched();
>  	}
>  	tlb->active = &tlb->local;
>  }
> @@ -59,7 +62,7 @@ static void tlb_batch_list_free(struct mmu_gather *tlb)
>  
>  	for (batch = tlb->local.next; batch; batch = next) {
>  		next = batch->next;
> -		free_pages((unsigned long)batch, 0);
> +		kfree(batch);
>  	}
>  	tlb->local.next = NULL;
>  }
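
As a back-of-the-envelope check on the batch sizes in question (an
editorial sketch in plain userspace C, not kernel code, assuming the
64-bit layout of struct mmu_gather_batch quoted above, i.e. a 16-byte
header and 8-byte pointers):

#include <stdio.h>

int main(void)
{
	const unsigned long header = 16;	/* sizeof(struct mmu_gather_batch) on 64-bit */
	const unsigned long page_sizes[] = { 4096, 16384, 65536 };

	for (int i = 0; i < 3; i++) {
		/* mirrors MAX_GATHER_BATCH: page pointers that fit in one batch */
		unsigned long batch = (page_sizes[i] - header) / sizeof(void *);

		printf("PAGE_SIZE %6lu -> %4lu pages per batch\n",
		       page_sizes[i], batch);
	}
	return 0;
}

This prints 510 for 4K pages, 2046 for 16K and 8190 for 64K, so a single
free_pages_and_swap_cache() call may cover roughly 16 times more pages on
a 64K-page kernel before the flush loop reaches a reschedule point.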

This seems like a really complicated way of writing something like the
below...

diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index afb7185ffdc4..b382e86c1b47 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -47,8 +47,17 @@ static void tlb_batch_pages_flush(struct mmu_gather *tlb)
 	struct mmu_gather_batch *batch;
 
 	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
-		free_pages_and_swap_cache(batch->pages, batch->nr);
-		batch->nr = 0;
+		struct page **pages = batch->pages;
+
+		do {
+			unsigned int nr = min(512U, batch->nr);
+
+			free_pages_and_swap_cache(pages, nr);
+			pages += nr;
+			batch->nr -= nr;
+
+			cond_resched();
+		} while (batch->nr);
 	}
 	tlb->active = &tlb->local;
 }
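
The chunked walk above is easy to check in isolation. A minimal userspace
analogue (an editorial sketch, with sched_yield() standing in for
cond_resched() and release_one() as a hypothetical per-page free):

#include <sched.h>

#define CHUNK	512

static void release_one(void *p)
{
	(void)p;	/* stand-in for freeing a single page */
}

static void release_all(void **items, unsigned int nr)
{
	while (nr) {
		/* never handle more than CHUNK items between yield points */
		unsigned int n = nr < CHUNK ? nr : CHUNK;

		for (unsigned int i = 0; i < n; i++)
			release_one(items[i]);
		items += n;
		nr -= n;

		sched_yield();	/* userspace stand-in for cond_resched() */
	}
}

The point of this shape is that it leaves the allocation side alone:
rather than shrinking every batch to 4K (and paying extra allocations on
16K/64K-page kernels), the free path just bounds how much work happens
between reschedule points.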
