Date:	Sun,  5 Dec 2010 19:49:03 +0200
From:	Alexey Dobriyan <adobriyan@...il.com>
To:	akpm@...ux-foundation.org
Cc:	linux-kernel@...r.kernel.org, Alexey Dobriyan <adobriyan@...il.com>
Subject: [PATCH 06/45] kstrtox: convert mm/

Convert mm/ from strict_strtoul() to the kstrto*() functions.

In mm/hugetlb.c:
return the error from a failed parse instead of 0; a ->store() that
returns 0 makes the calling write(2) spin forever.

In mm/vmscan.c:
make "scan_unevictable_pages" write-only; reading it always returned "0".

Signed-off-by: Alexey Dobriyan <adobriyan@...il.com>
---
 include/linux/slub_def.h |    2 +-
 mm/hugetlb.c             |   12 ++++++------
 mm/kmemleak.c            |    4 ++--
 mm/ksm.c                 |   28 ++++++++++------------------
 mm/slub.c                |   19 ++++++++++---------
 mm/vmscan.c              |   25 ++++++++++---------------
 6 files changed, 39 insertions(+), 51 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e4f5ed1..37f439d 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -95,7 +95,7 @@ struct kmem_cache {
 	/*
 	 * Defragmentation by allocating from a remote node.
 	 */
-	int remote_node_defrag_ratio;
+	unsigned int remote_node_defrag_ratio;
 #endif
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8585524..a9a5460 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1438,9 +1438,9 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 	struct hstate *h;
 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 
-	err = strict_strtoul(buf, 10, &count);
-	if (err)
-		return 0;
+	err = kstrtoul(buf, 10, &count);
+	if (err < 0)
+		return err;
 
 	h = kobj_to_hstate(kobj, &nid);
 	if (nid == NUMA_NO_NODE) {
@@ -1517,9 +1517,9 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
 	unsigned long input;
 	struct hstate *h = kobj_to_hstate(kobj, NULL);
 
-	err = strict_strtoul(buf, 10, &input);
-	if (err)
-		return 0;
+	err = kstrtoul(buf, 10, &input);
+	if (err < 0)
+		return err;
 
 	spin_lock(&hugetlb_lock);
 	h->nr_overcommit_huge_pages = input;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index bd9bc21..b3fb3f5 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1557,9 +1557,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
 	else if (strncmp(buf, "scan=off", 8) == 0)
 		stop_scan_thread();
 	else if (strncmp(buf, "scan=", 5) == 0) {
-		unsigned long secs;
+		unsigned int secs;
 
-		ret = strict_strtoul(buf + 5, 0, &secs);
+		ret = kstrtouint(buf + 5, 0, &secs);
 		if (ret < 0)
 			goto out;
 		stop_scan_thread();
diff --git a/mm/ksm.c b/mm/ksm.c
index 43bc893..c03345b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1773,15 +1773,11 @@ static ssize_t sleep_millisecs_store(struct kobject *kobj,
 				     struct kobj_attribute *attr,
 				     const char *buf, size_t count)
 {
-	unsigned long msecs;
 	int err;
 
-	err = strict_strtoul(buf, 10, &msecs);
-	if (err || msecs > UINT_MAX)
-		return -EINVAL;
-
-	ksm_thread_sleep_millisecs = msecs;
-
+	err = kstrtouint(buf, 10, &ksm_thread_sleep_millisecs);
+	if (err < 0)
+		return err;
 	return count;
 }
 KSM_ATTR(sleep_millisecs);
@@ -1797,14 +1793,10 @@ static ssize_t pages_to_scan_store(struct kobject *kobj,
 				   const char *buf, size_t count)
 {
 	int err;
-	unsigned long nr_pages;
-
-	err = strict_strtoul(buf, 10, &nr_pages);
-	if (err || nr_pages > UINT_MAX)
-		return -EINVAL;
-
-	ksm_thread_pages_to_scan = nr_pages;
 
+	err = kstrtouint(buf, 10, &ksm_thread_pages_to_scan);
+	if (err < 0)
+		return err;
 	return count;
 }
 KSM_ATTR(pages_to_scan);
@@ -1819,11 +1811,11 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
 			 const char *buf, size_t count)
 {
 	int err;
-	unsigned long flags;
+	unsigned int flags;
 
-	err = strict_strtoul(buf, 10, &flags);
-	if (err || flags > UINT_MAX)
-		return -EINVAL;
+	err = kstrtouint(buf, 10, &flags);
+	if (err < 0)
+		return err;
 	if (flags > KSM_RUN_UNMERGE)
 		return -EINVAL;
 
diff --git a/mm/slub.c b/mm/slub.c
index 981fb73..3170e52 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3928,11 +3928,11 @@ SLAB_ATTR_RO(objs_per_slab);
 static ssize_t order_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	unsigned long order;
+	int order;
 	int err;
 
-	err = strict_strtoul(buf, 10, &order);
-	if (err)
+	err = kstrtoint(buf, 10, &order);
+	if (err < 0)
 		return err;
 
 	if (order > slub_max_order || order < slub_min_order)
@@ -3959,7 +3959,7 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
 	unsigned long min;
 	int err;
 
-	err = strict_strtoul(buf, 10, &min);
+	err = kstrtoul(buf, 10, &min);
 	if (err)
 		return err;
 
@@ -4219,21 +4219,22 @@ SLAB_ATTR(shrink);
 #ifdef CONFIG_NUMA
 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
+	return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10);
 }
 
 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	unsigned long ratio;
+	unsigned int ratio;
 	int err;
 
-	err = strict_strtoul(buf, 10, &ratio);
+	err = kstrtouint(buf, 10, &ratio);
 	if (err)
 		return err;
+	if (ratio > 100)
+		return -EINVAL;
 
-	if (ratio <= 100)
-		s->remote_node_defrag_ratio = ratio * 10;
+	s->remote_node_defrag_ratio = ratio * 10;
 
 	return length;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d31d7ce..61acce3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3055,37 +3055,32 @@ int scan_unevictable_handler(struct ctl_table *table, int write,
  * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
  * a specified node's per zone unevictable lists for evictable pages.
  */
-
-static ssize_t read_scan_unevictable_node(struct sys_device *dev,
-					  struct sysdev_attribute *attr,
-					  char *buf)
-{
-	return sprintf(buf, "0\n");	/* always zero; should fit... */
-}
-
 static ssize_t write_scan_unevictable_node(struct sys_device *dev,
 					   struct sysdev_attribute *attr,
 					const char *buf, size_t count)
 {
 	struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
 	struct zone *zone;
-	unsigned long res;
-	unsigned long req = strict_strtoul(buf, 10, &res);
+	unsigned long val;
+	int rv;
 
-	if (!req)
-		return 1;	/* zero is no-op */
+	rv = kstrtoul(buf, 10, &val);
+	if (rv < 0)
+		return rv;
+	if (val == 0)
+		return count;
 
 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
 		if (!populated_zone(zone))
 			continue;
 		scan_zone_unevictable_pages(zone);
 	}
-	return 1;
+	return count;
 }
 
 
-static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
-			read_scan_unevictable_node,
+static SYSDEV_ATTR(scan_unevictable_pages, S_IWUSR,
+			NULL,
 			write_scan_unevictable_node);
 
 int scan_unevictable_register_node(struct node *node)
-- 
1.7.2.2
