Message-ID: <1291086332.32570.334.camel@pasglop>
Date:	Tue, 30 Nov 2010 14:05:32 +1100
From:	Benjamin Herrenschmidt <benh@...nel.crashing.org>
To:	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc:	Andrea Arcangeli <aarcange@...hat.com>,
	Avi Kivity <avi@...hat.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Rik van Riel <riel@...hat.com>, Ingo Molnar <mingo@...e.hu>,
	akpm@...ux-foundation.org,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	linux-kernel@...r.kernel.org, linux-arch@...r.kernel.org,
	linux-mm@...ck.org, David Miller <davem@...emloft.net>,
	Hugh Dickins <hugh.dickins@...cali.co.uk>,
	Mel Gorman <mel@....ul.ie>, Nick Piggin <npiggin@...nel.dk>,
	Paul McKenney <paulmck@...ux.vnet.ibm.com>,
	Yanmin Zhang <yanmin_zhang@...ux.intel.com>,
	Stephen Rothwell <sfr@...b.auug.org.au>
Subject: Re: [PATCH 16/21] mm, powerpc: Move the RCU page-table freeing
 into generic code

On Fri, 2010-11-26 at 15:38 +0100, Peter Zijlstra wrote:
> plain text document attachment (mm-preempt-tlb-gather-rcu.patch)
> In case other architectures require RCU-freed page tables to implement
> gup_fast(), software-filled hash tables and similar things, provide the
> means to do so by moving the logic into generic code.

This patch causes a little bit of build breakage on our side, which
seems to be mostly fixed by changing:

+	select HAVE_RCU_TABLE_FREE if PPC64

to:

+	select HAVE_RCU_TABLE_FREE if SMP

There's some additional breakage in the 64-bit BookE case, but that's a
different patch (and a different email) :-) I'll do some runtime testing
now.
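
For context (this is my reading of the breakage, not something the patch
itself states): the CONFIG_SMP side of pgalloc.h now references
tlb_remove_table(), whose generic definition in mm/memory.c is only built
when HAVE_RCU_TABLE_FREE is selected, so the two conditions have to line
up. Roughly:

	/* Sketch of the dependency, not code from the patch:
	 *
	 * arch/powerpc/include/asm/pgalloc.h pulls this in on every
	 * SMP build, 32-bit included ...
	 */
	#ifdef CONFIG_SMP
	extern void tlb_remove_table(struct mmu_gather *, void *);
	#endif
	/*
	 * ... but mm/memory.c only provides the definition under
	 * CONFIG_HAVE_RCU_TABLE_FREE, so "select ... if PPC64" leaves
	 * 32-bit SMP with an unresolved reference, while "if SMP"
	 * keeps the select in step with its only user.
	 */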

Cheers,
Ben.


> Requested-by: David Miller <davem@...emloft.net>
> Cc: Benjamin Herrenschmidt <benh@...nel.crashing.org>
> Signed-off-by: Peter Zijlstra <a.p.zijlstra@...llo.nl>
> ---
>  arch/Kconfig                       |    3 +
>  arch/powerpc/Kconfig               |    1 
>  arch/powerpc/include/asm/pgalloc.h |   21 ++++++-
>  arch/powerpc/include/asm/tlb.h     |   10 ---
>  arch/powerpc/mm/pgtable.c          |   98 -------------------------------------
>  arch/powerpc/mm/tlb_hash32.c       |    3 -
>  arch/powerpc/mm/tlb_hash64.c       |    3 -
>  arch/powerpc/mm/tlb_nohash.c       |    3 -
>  include/asm-generic/tlb.h          |   57 +++++++++++++++++++--
>  mm/memory.c                        |   77 +++++++++++++++++++++++++++++
>  10 files changed, 151 insertions(+), 125 deletions(-)
> 
> Index: linux-2.6/arch/powerpc/include/asm/pgalloc.h
> ===================================================================
> --- linux-2.6.orig/arch/powerpc/include/asm/pgalloc.h
> +++ linux-2.6/arch/powerpc/include/asm/pgalloc.h
> @@ -31,14 +31,29 @@ static inline void pte_free(struct mm_st
>  #endif
>  
>  #ifdef CONFIG_SMP
> -extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift);
> -extern void pte_free_finish(struct mmu_gather *tlb);
> +struct mmu_gather;
> +extern void tlb_remove_table(struct mmu_gather *, void *);
> +
> +static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
> +{
> +	unsigned long pgf = (unsigned long)table;
> +	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
> +	pgf |= shift;
> +	tlb_remove_table(tlb, (void *)pgf);
> +}
> +
> +static inline void __tlb_remove_table(void *_table)
> +{
> +	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
> +	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
> +
> +	pgtable_free(table, shift);
> +}
>  #else /* CONFIG_SMP */
>  static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
>  {
>  	pgtable_free(table, shift);
>  }
> -static inline void pte_free_finish(struct mmu_gather *tlb) { }
>  #endif /* !CONFIG_SMP */
>  
>  static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
> Index: linux-2.6/arch/powerpc/include/asm/tlb.h
> ===================================================================
> --- linux-2.6.orig/arch/powerpc/include/asm/tlb.h
> +++ linux-2.6/arch/powerpc/include/asm/tlb.h
> @@ -28,16 +28,6 @@
>  #define tlb_start_vma(tlb, vma)	do { } while (0)
>  #define tlb_end_vma(tlb, vma)	do { } while (0)
>  
> -#define HAVE_ARCH_MMU_GATHER 1
> -
> -struct pte_freelist_batch;
> -
> -struct arch_mmu_gather {
> -	struct pte_freelist_batch *batch;
> -};
> -
> -#define ARCH_MMU_GATHER_INIT (struct arch_mmu_gather){ .batch = NULL, }
> -
>  extern void tlb_flush(struct mmu_gather *tlb);
>  
>  /* Get the generic bits... */
> Index: linux-2.6/arch/powerpc/mm/pgtable.c
> ===================================================================
> --- linux-2.6.orig/arch/powerpc/mm/pgtable.c
> +++ linux-2.6/arch/powerpc/mm/pgtable.c
> @@ -33,104 +33,6 @@
>  
>  #include "mmu_decl.h"
>  
> -#ifdef CONFIG_SMP
> -
> -/*
> - * Handle batching of page table freeing on SMP. Page tables are
> - * queued up and send to be freed later by RCU in order to avoid
> - * freeing a page table page that is being walked without locks
> - */
> -
> -static unsigned long pte_freelist_forced_free;
> -
> -struct pte_freelist_batch
> -{
> -	struct rcu_head	rcu;
> -	unsigned int	index;
> -	unsigned long	tables[0];
> -};
> -
> -#define PTE_FREELIST_SIZE \
> -	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
> -	  / sizeof(unsigned long))
> -
> -static void pte_free_smp_sync(void *arg)
> -{
> -	/* Do nothing, just ensure we sync with all CPUs */
> -}
> -
> -/* This is only called when we are critically out of memory
> - * (and fail to get a page in pte_free_tlb).
> - */
> -static void pgtable_free_now(void *table, unsigned shift)
> -{
> -	pte_freelist_forced_free++;
> -
> -	smp_call_function(pte_free_smp_sync, NULL, 1);
> -
> -	pgtable_free(table, shift);
> -}
> -
> -static void pte_free_rcu_callback(struct rcu_head *head)
> -{
> -	struct pte_freelist_batch *batch =
> -		container_of(head, struct pte_freelist_batch, rcu);
> -	unsigned int i;
> -
> -	for (i = 0; i < batch->index; i++) {
> -		void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
> -		unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;
> -
> -		pgtable_free(table, shift);
> -	}
> -
> -	free_page((unsigned long)batch);
> -}
> -
> -static void pte_free_submit(struct pte_freelist_batch *batch)
> -{
> -	call_rcu_sched(&batch->rcu, pte_free_rcu_callback);
> -}
> -
> -void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
> -{
> -	struct pte_freelist_batch **batchp = &tlb->arch.batch;
> -	unsigned long pgf;
> -
> -	if (atomic_read(&tlb->mm->mm_users) < 2) {
> -		pgtable_free(table, shift);
> -		return;
> -	}
> -
> -	if (*batchp == NULL) {
> -		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
> -		if (*batchp == NULL) {
> -			pgtable_free_now(table, shift);
> -			return;
> -		}
> -		(*batchp)->index = 0;
> -	}
> -	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
> -	pgf = (unsigned long)table | shift;
> -	(*batchp)->tables[(*batchp)->index++] = pgf;
> -	if ((*batchp)->index == PTE_FREELIST_SIZE) {
> -		pte_free_submit(*batchp);
> -		*batchp = NULL;
> -	}
> -}
> -
> -void pte_free_finish(struct mmu_gather *tlb)
> -{
> -	struct pte_freelist_batch **batchp = &tlb->arch.batch;
> -
> -	if (*batchp == NULL)
> -		return;
> -	pte_free_submit(*batchp);
> -	*batchp = NULL;
> -}
> -
> -#endif /* CONFIG_SMP */
> -
>  static inline int is_exec_fault(void)
>  {
>  	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
> Index: linux-2.6/arch/powerpc/mm/tlb_hash32.c
> ===================================================================
> --- linux-2.6.orig/arch/powerpc/mm/tlb_hash32.c
> +++ linux-2.6/arch/powerpc/mm/tlb_hash32.c
> @@ -71,9 +71,6 @@ void tlb_flush(struct mmu_gather *tlb)
>  		 */
>  		_tlbia();
>  	}
> -
> -	/* Push out batch of freed page tables */
> -	pte_free_finish(tlb);
>  }
>  
>  /*
> Index: linux-2.6/arch/powerpc/mm/tlb_hash64.c
> ===================================================================
> --- linux-2.6.orig/arch/powerpc/mm/tlb_hash64.c
> +++ linux-2.6/arch/powerpc/mm/tlb_hash64.c
> @@ -165,9 +165,6 @@ void tlb_flush(struct mmu_gather *tlb)
>  		__flush_tlb_pending(tlbbatch);
>  
>  	put_cpu_var(ppc64_tlb_batch);
> -
> -	/* Push out batch of freed page tables */
> -	pte_free_finish(tlb);
>  }
>  
>  /**
> Index: linux-2.6/arch/powerpc/mm/tlb_nohash.c
> ===================================================================
> --- linux-2.6.orig/arch/powerpc/mm/tlb_nohash.c
> +++ linux-2.6/arch/powerpc/mm/tlb_nohash.c
> @@ -299,9 +299,6 @@ EXPORT_SYMBOL(flush_tlb_range);
>  void tlb_flush(struct mmu_gather *tlb)
>  {
>  	flush_tlb_mm(tlb->mm);
> -
> -	/* Push out batch of freed page tables */
> -	pte_free_finish(tlb);
>  }
>  
>  /*
> Index: linux-2.6/include/asm-generic/tlb.h
> ===================================================================
> --- linux-2.6.orig/include/asm-generic/tlb.h
> +++ linux-2.6/include/asm-generic/tlb.h
> @@ -27,6 +27,49 @@
>    #define tlb_fast_mode(tlb) 1
>  #endif
>  
> +#ifdef CONFIG_HAVE_RCU_TABLE_FREE
> +/*
> + * Semi RCU freeing of the page directories.
> + *
> + * This is needed by some architectures to implement software pagetable walkers.
> + *
> + * gup_fast() and other software pagetable walkers do a lockless page-table
> + * walk and therefore need some synchronization with the freeing of the page
> + * directories. The chosen means to accomplish that is by disabling IRQs over
> + * the walk.
> + *
> + * Architectures that use IPIs to flush TLBs will then automagically DTRT,
> + * since we unlink the page, flush TLBs, then free the page. Since the
> + * disabling of IRQs delays the completion of the TLB flush we can never
> + * observe an already freed page.
> + *
> + * Architectures that do not have this (PPC) need to delay the freeing by
> + * some other means; this is that means.
> + *
> + * What we do is batch the freed directory pages (tables) and RCU free them.
> + * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
> + * holds off grace periods.
> + *
> + * However, in order to batch these pages we need to allocate storage; this
> + * allocation is deep inside the MM code and can thus easily fail on memory
> + * pressure. To guarantee progress we fall back to single table freeing, see
> + * the implementation of tlb_remove_table_one().
> + *
> + */
> +struct mmu_table_batch {
> +	struct rcu_head		rcu;
> +	unsigned int		nr;
> +	void			*tables[0];
> +};
> +
> +#define MAX_TABLE_BATCH		\
> +	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
> +
> +extern void tlb_table_flush(struct mmu_gather *tlb);
> +extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
> +
> +#endif
> +
>  /* struct mmu_gather is an opaque type used by the mm code for passing around
>   * any data needed by arch specific code for tlb_remove_page.
>   */
> @@ -36,11 +79,12 @@ struct mmu_gather {
>  	unsigned int		max;	/* nr < max */
>  	unsigned int		need_flush;/* Really unmapped some ptes? */
>  	unsigned int		fullmm; /* non-zero means full mm flush */
> -#ifdef HAVE_ARCH_MMU_GATHER
> -	struct arch_mmu_gather	arch;
> -#endif
>  	struct page		**pages;
>  	struct page		*local[8];
> +
> +#ifdef CONFIG_HAVE_RCU_TABLE_FREE
> +	struct mmu_table_batch	*batch;
> +#endif
>  };
>  
>  static inline void __tlb_alloc_pages(struct mmu_gather *tlb)
> @@ -72,8 +116,8 @@ tlb_gather_mmu(struct mmu_gather *tlb, s
>  
>  	tlb->fullmm = full_mm_flush;
>  
> -#ifdef HAVE_ARCH_MMU_GATHER
> -	tlb->arch = ARCH_MMU_GATHER_INIT;
> +#ifdef CONFIG_HAVE_RCU_TABLE_FREE
> +	tlb->batch = NULL;
>  #endif
>  }
>  
> @@ -84,6 +128,9 @@ tlb_flush_mmu(struct mmu_gather *tlb, un
>  		return;
>  	tlb->need_flush = 0;
>  	tlb_flush(tlb);
> +#ifdef CONFIG_HAVE_RCU_TABLE_FREE
> +	tlb_table_flush(tlb);
> +#endif
>  	if (!tlb_fast_mode(tlb)) {
>  		free_pages_and_swap_cache(tlb->pages, tlb->nr);
>  		tlb->nr = 0;
> Index: linux-2.6/mm/memory.c
> ===================================================================
> --- linux-2.6.orig/mm/memory.c
> +++ linux-2.6/mm/memory.c
> @@ -193,6 +193,83 @@ static void check_sync_rss_stat(struct t
>  
>  #endif
>  
> +#ifdef CONFIG_HAVE_RCU_TABLE_FREE
> +
> +/*
> + * See the comment near struct mmu_table_batch.
> + */
> +
> +static void tlb_remove_table_smp_sync(void *arg)
> +{
> +	/* Simply deliver the interrupt */
> +}
> +
> +static void tlb_remove_table_one(void *table)
> +{
> +	/*
> +	 * This isn't an RCU grace period and hence the page-tables cannot be
> +	 * assumed to be actually RCU-freed.
> +	 *
> +	 * It is however sufficient for software page-table walkers that rely on
> +	 * IRQ disabling. See the comment near struct mmu_table_batch.
> +	 */
> +	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
> +	__tlb_remove_table(table);
> +}
> +
> +static void tlb_remove_table_rcu(struct rcu_head *head)
> +{
> +	struct mmu_table_batch *batch;
> +	int i;
> +
> +	batch = container_of(head, struct mmu_table_batch, rcu);
> +
> +	for (i = 0; i < batch->nr; i++)
> +		__tlb_remove_table(batch->tables[i]);
> +
> +	free_page((unsigned long)batch);
> +}
> +
> +void tlb_table_flush(struct mmu_gather *tlb)
> +{
> +	struct mmu_table_batch **batch = &tlb->batch;
> +
> +	if (*batch) {
> +		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
> +		*batch = NULL;
> +	}
> +}
> +
> +void tlb_remove_table(struct mmu_gather *tlb, void *table)
> +{
> +	struct mmu_table_batch **batch = &tlb->batch;
> +
> +	tlb->need_flush = 1;
> +
> +	/*
> +	 * When there's less than two users of this mm there cannot be a
> +	 * concurrent page-table walk.
> +	 */
> +	if (atomic_read(&tlb->mm->mm_users) < 2) {
> +		__tlb_remove_table(table);
> +		return;
> +	}
> +
> +	if (*batch == NULL) {
> +		*batch = (struct mmu_table_batch *)__get_free_page(GFP_ATOMIC);
> +		if (*batch == NULL) {
> +			tlb_remove_table_one(table);
> +			return;
> +		}
> +		(*batch)->nr = 0;
> +	}
> +	(*batch)->tables[(*batch)->nr++] = table;
> +	if ((*batch)->nr == MAX_TABLE_BATCH)
> +		tlb_table_flush(tlb);
> +}
> +
> +#endif
> +
>  /*
>   * If a p?d_bad entry is found while walking page tables, report
>   * the error, before resetting entry to p?d_none.  Usually (but
> Index: linux-2.6/arch/Kconfig
> ===================================================================
> --- linux-2.6.orig/arch/Kconfig
> +++ linux-2.6/arch/Kconfig
> @@ -175,4 +175,7 @@ config HAVE_PERF_EVENTS_NMI
>  config HAVE_ARCH_JUMP_LABEL
>  	bool
>  
> +config HAVE_RCU_TABLE_FREE
> +	bool
> +
>  source "kernel/gcov/Kconfig"
> Index: linux-2.6/arch/powerpc/Kconfig
> ===================================================================
> --- linux-2.6.orig/arch/powerpc/Kconfig
> +++ linux-2.6/arch/powerpc/Kconfig
> @@ -140,6 +140,7 @@ config PPC
>  	select HAVE_PERF_EVENTS
>  	select HAVE_REGS_AND_STACK_ACCESS_API
>  	select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
> +	select HAVE_RCU_TABLE_FREE if PPC64
>  
>  config EARLY_PRINTK
>  	bool
> 
> 
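PS. For anyone trying to follow the big comment near struct
mmu_table_batch above, this is roughly the walker pattern it protects.
A hand-written sketch, not code from the patch; lockless_walk_sketch()
is a made-up stand-in for a real gup_fast()-style descent:

	#include <linux/mm.h>
	#include <asm/pgtable.h>

	static int lockless_walk_sketch(struct mm_struct *mm, unsigned long addr)
	{
		unsigned long flags;
		pgd_t *pgd;
		int present = 0;

		/*
		 * Disabling IRQs holds off both the call_rcu_sched() grace
		 * period and the smp_call_function() IPI issued by
		 * tlb_remove_table_one(), so any page-table page we can
		 * still reach below cannot be freed out from under us.
		 */
		local_irq_save(flags);

		pgd = pgd_offset(mm, addr);
		if (!pgd_none(*pgd)) {
			/* ... descend pud/pmd/pte without taking locks ... */
			present = 1;
		}

		local_irq_restore(flags);	/* freeing may now complete */
		return present;
	}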