Date:	Fri, 17 Apr 2009 21:11:56 -0400
From:	"H Hartley Sweeten" <hartleys@...ionengravers.com>
To:	<linux-kernel@...r.kernel.org>
Subject: [PATCH] mm/slab.c: fix sparse warnings

Fix the following sparse warnings in mm/slab.c by giving the flagged symbols static linkage. For drain_array(), the definition is moved up to replace its existing static forward declaration.

  warning: symbol 'initkmem_list3' was not declared. Should it be static?
  warning: symbol 'drain_array' was not declared. Should it be static?
  warning: symbol 'slabinfo_write' was not declared. Should it be static?
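
(Background, in case anyone wants to reproduce these: sparse flags any
file-scope definition that has external linkage but no declaration in a
visible header. A minimal illustration, with a made-up symbol name:

	/* before: external linkage, sparse warns */
	int nobj;	/* warning: symbol 'nobj' was not declared.
			   Should it be static? */

	/* after: internal linkage, warning gone */
	static int nobj;

The warnings above can be regenerated via the kernel's sparse hook,
e.g. "make C=2 mm/slab.o".)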

Signed-off-by: H Hartley Sweeten <hsweeten@...ionengravers.com>

---

diff --git a/mm/slab.c b/mm/slab.c
index 9a90b00..d647b3f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -306,7 +306,7 @@ struct kmem_list3 {
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define	CACHE_CACHE 0
 #define	SIZE_AC MAX_NUMNODES
 #define	SIZE_L3 (2 * MAX_NUMNODES)
@@ -2434,9 +2434,34 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 #define check_spinlock_acquired_node(x, y) do { } while(0)
 #endif
 
+/*
+ * Drain an array if it contains any elements taking the l3 lock only if
+ * necessary. Note that the l3 listlock also protects the array_cache
+ * if drain_array() is used on the shared array.
+ */
 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
-			struct array_cache *ac,
-			int force, int node);
+			 struct array_cache *ac, int force, int node)
+{
+	int tofree;
+
+	if (!ac || !ac->avail)
+		return;
+	if (ac->touched && !force) {
+		ac->touched = 0;
+	} else {
+		spin_lock_irq(&l3->list_lock);
+		if (ac->avail) {
+			tofree = force ? ac->avail : (ac->limit + 4) / 5;
+			if (tofree > ac->avail)
+				tofree = (ac->avail + 1) / 2;
+			free_block(cachep, ac->entry, tofree, node);
+			ac->avail -= tofree;
+			memmove(ac->entry, &(ac->entry[tofree]),
+				sizeof(void *) * ac->avail);
+		}
+		spin_unlock_irq(&l3->list_lock);
+	}
+}
 
 static void do_drain(void *arg)
 {
@@ -4001,35 +4026,6 @@ static int enable_cpucache(struct kmem_cache *cachep)
 	return err;
 }
 
-/*
- * Drain an array if it contains any elements taking the l3 lock only if
- * necessary. Note that the l3 listlock also protects the array_cache
- * if drain_array() is used on the shared array.
- */
-void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
-			 struct array_cache *ac, int force, int node)
-{
-	int tofree;
-
-	if (!ac || !ac->avail)
-		return;
-	if (ac->touched && !force) {
-		ac->touched = 0;
-	} else {
-		spin_lock_irq(&l3->list_lock);
-		if (ac->avail) {
-			tofree = force ? ac->avail : (ac->limit + 4) / 5;
-			if (tofree > ac->avail)
-				tofree = (ac->avail + 1) / 2;
-			free_block(cachep, ac->entry, tofree, node);
-			ac->avail -= tofree;
-			memmove(ac->entry, &(ac->entry[tofree]),
-				sizeof(void *) * ac->avail);
-		}
-		spin_unlock_irq(&l3->list_lock);
-	}
-}
-
 /**
  * cache_reap - Reclaim memory from caches.
  * @w: work descriptor
@@ -4269,7 +4265,7 @@ static const struct seq_operations slabinfo_op = {
  * @count: data length
  * @ppos: unused
  */
-ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+static ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 		       size_t count, loff_t *ppos)
 {
 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; 
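
(Note on the drain_array() hunks: the function body is unchanged; the
definition simply moves up to replace the old static forward
declaration, so it picks up internal linkage. The drain heuristic is
untouched: a non-forced drain frees (ac->limit + 4) / 5 entries,
roughly a fifth of the cache (24 objects for a limit of 120), and when
fewer than that are available only (ac->avail + 1) / 2 are freed.)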