Date:	Wed, 31 Oct 2007 21:16:59 -0700 (PDT)
From:	Christoph Lameter <clameter@....com>
To:	David Miller <davem@...emloft.net>
cc:	akpm@...ux-foundation.org, linux-arch@...r.kernel.org,
	linux-kernel@...r.kernel.org, mathieu.desnoyers@...ymtl.ca,
	penberg@...helsinki.fi
Subject:	Re: [patch 0/7] [RFC] SLUB: Improve allocpercpu to reduce per cpu access overhead

Hmmmm... Got this to run on an ia64 big iron. One problem is the sizing of
the pool: it is a fixed compile-time constant, but it really needs to be
dynamic. For now the fix below scales it with MAX_NUMNODES and enlarges the
ia64 per-CPU mapping to match.

Apply this fix on top of the others.
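
For reference, here is roughly how the sizing arithmetic works out. This is a
standalone userspace sketch of the constants touched below, not kernel code;
MAX_NUMNODES is modeled as a plain example value:

/* Standalone sketch of the pool sizing math from mm/allocpercpu.c.
 * MAX_NUMNODES is a stand-in; the real value is config-dependent. */
#include <stdio.h>

#define MAX_NUMNODES 1024		/* example value for a big NUMA box */
#define PER_CPU_ALLOC_SIZE (32768 + MAX_NUMNODES * 512)
#define UNIT_SIZE sizeof(unsigned long long)
#define UNITS_PER_CPU (PER_CPU_ALLOC_SIZE / UNIT_SIZE)

int main(void)
{
	printf("pool per cpu:  %d bytes\n", PER_CPU_ALLOC_SIZE);
	printf("unit size:     %zu bytes\n", UNIT_SIZE);
	printf("units per cpu: %zu\n", (size_t)UNITS_PER_CPU);
	return 0;
}

With 1024 nodes that comes to 544 KB of pool per cpu, which would no longer
fit in ia64's old 64 KB per-CPU mapping; hence the PERCPU_PAGE_SHIFT bump
from 16 to 20 (64 KB to 1 MB) in the page.h hunk below.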

---
 include/asm-ia64/page.h   |    2 +-
 include/asm-ia64/percpu.h |    9 ++++++---
 mm/allocpercpu.c          |   12 ++++++++++--
 3 files changed, 17 insertions(+), 6 deletions(-)

Index: linux-2.6/mm/allocpercpu.c
===================================================================
--- linux-2.6.orig/mm/allocpercpu.c	2007-10-31 20:53:16.565486654 -0700
+++ linux-2.6/mm/allocpercpu.c	2007-10-31 21:00:27.553486484 -0700
@@ -28,7 +28,12 @@
 /*
  * Maximum allowed per cpu data per cpu
  */
+#ifdef CONFIG_NUMA
+#define PER_CPU_ALLOC_SIZE (32768 + MAX_NUMNODES * 512)
+#else
 #define PER_CPU_ALLOC_SIZE 32768
+#endif
+
 
 #define UNIT_SIZE sizeof(unsigned long long)
 #define UNITS_PER_CPU (PER_CPU_ALLOC_SIZE / UNIT_SIZE)
@@ -37,7 +42,7 @@ enum unit_type { FREE, END, USED };
 
 static u8 cpu_alloc_map[UNITS_PER_CPU] = { 1, };
 static DEFINE_SPINLOCK(cpu_alloc_map_lock);
-static DEFINE_PER_CPU(int, cpu_area)[UNITS_PER_CPU];
+static DEFINE_PER_CPU(unsigned long long, cpu_area)[UNITS_PER_CPU];
 
 #define CPU_DATA_OFFSET ((unsigned long)&per_cpu__cpu_area)
 
@@ -97,8 +102,11 @@ static void *cpu_alloc(unsigned long siz
 		while (start < UNITS_PER_CPU &&
 				cpu_alloc_map[start] != FREE)
 			start++;
-		if (start == UNITS_PER_CPU)
+		if (start == UNITS_PER_CPU) {
+			spin_unlock(&cpu_alloc_map_lock);
+			printk(KERN_CRIT "Dynamic per cpu memory exhausted\n");
 			return NULL;
+		}
 
 		end = start + 1;
 		while (end < UNITS_PER_CPU && end - start < units &&
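
A side note on the hunk above: before this fix the exhaustion case returned
NULL with cpu_alloc_map_lock still held. A minimal standalone illustration of
the unlock-on-every-exit-path pattern, with a pthread mutex standing in for
the kernel spinlock (names are hypothetical):

/* Illustration of the error-path fix: the lock must be released on
 * every way out of the critical section.  Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static int units_free;			/* 0: pretend the map is already full */

static void *alloc_unit(void)
{
	pthread_mutex_lock(&map_lock);
	if (units_free == 0) {
		/* the lock must be dropped on this exit path too */
		pthread_mutex_unlock(&map_lock);
		fprintf(stderr, "dynamic per cpu memory exhausted\n");
		return NULL;
	}
	units_free--;
	pthread_mutex_unlock(&map_lock);
	return &units_free;		/* placeholder for a real unit */
}

int main(void)
{
	printf("alloc_unit() -> %p\n", alloc_unit());
	return 0;
}

The point is simply that the failure path now releases the lock before
bailing out, as the patched cpu_alloc() does.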
Index: linux-2.6/include/asm-ia64/page.h
===================================================================
--- linux-2.6.orig/include/asm-ia64/page.h	2007-10-31 20:53:16.573486483 -0700
+++ linux-2.6/include/asm-ia64/page.h	2007-10-31 20:56:19.372870091 -0700
@@ -44,7 +44,7 @@
 #define PAGE_MASK		(~(PAGE_SIZE - 1))
 #define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
 
-#define PERCPU_PAGE_SHIFT	16	/* log2() of max. size of per-CPU area */
+#define PERCPU_PAGE_SHIFT	20	/* log2() of max. size of per-CPU area */
 #define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
 
 
Index: linux-2.6/include/asm-ia64/percpu.h
===================================================================
--- linux-2.6.orig/include/asm-ia64/percpu.h	2007-10-31 20:53:30.424553062 -0700
+++ linux-2.6/include/asm-ia64/percpu.h	2007-10-31 20:53:36.248486656 -0700
@@ -40,6 +40,12 @@
 #endif
 
 /*
+ * This will make per cpu access to the local area use the virtually mapped
+ * areas.
+ */
+#define this_cpu_offset()			0
+
+/*
  * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
  * external routine, to avoid include-hell.
  */
@@ -51,8 +57,6 @@ extern unsigned long __per_cpu_offset[NR
 /* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
 DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
 
-#define this_cpu_offset() __ia64_per_cpu_var(local_per_cpu_offset)
-
 #define per_cpu(var, cpu)  (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
 #define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
@@ -67,7 +71,6 @@ extern void *per_cpu_init(void);
 #define __get_cpu_var(var)			per_cpu__##var
 #define __raw_get_cpu_var(var)			per_cpu__##var
 #define per_cpu_init()				(__phys_per_cpu_start)
-#define this_cpu_offset()			0
 
 #endif	/* SMP */
 
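A note on the percpu.h change: ia64 maps the current CPU's per-cpu area at a
fixed virtual address, so the local copy is always reachable at offset 0,
while other CPUs' copies are still reached through __per_cpu_offset[]. That
is why this_cpu_offset() can now be 0 unconditionally. A toy userspace model
of the idea (all names hypothetical, not the kernel's macros):

/* Toy model of per-cpu addressing with a fixed local mapping. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4
#define AREA_SIZE 64

static char pcpu_area[NR_CPUS][AREA_SIZE];	/* one area per cpu */
static long cpu_offset[NR_CPUS];		/* like __per_cpu_offset[] */
static char *local_alias;			/* "virtually mapped" local area */

/* remote access: base address plus that cpu's offset, like per_cpu() */
static char *remote_ptr(int cpu, size_t off)
{
	return (char *)pcpu_area + cpu_offset[cpu] + off;
}

/* local access: through the fixed alias, i.e. this_cpu_offset() == 0 */
static char *local_ptr(size_t off)
{
	return local_alias + off;
}

int main(void)
{
	int cpu, this_cpu = 2;			/* pretend we run on cpu 2 */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpu_offset[cpu] = pcpu_area[cpu] - (char *)pcpu_area;
	local_alias = pcpu_area[this_cpu];	/* set up the fixed mapping */

	strcpy(local_ptr(0), "hello");		/* offset-0 store via the alias */
	printf("seen via per-cpu offset: %s\n", remote_ptr(this_cpu, 0));
	return 0;
}

The win is that the local fast path no longer has to load
local_per_cpu_offset at all; the fixed mapping does the redirection, which is
what the comment in the hunk means by using the virtually mapped areas.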
