Date:	Thu, 18 Feb 2010 19:21:04 +0100 (MET)
From:	Patrick McHardy <kaber@...sh.net>
To:	davem@...emloft.net
Cc:	netdev@...r.kernel.org, Patrick McHardy <kaber@...sh.net>,
	netfilter-devel@...r.kernel.org
Subject: netfilter 01/09: ebtables: split do_replace into two functions

commit e788759f44b29e5b1bc27a265dece7dcfa4234af
Author: Florian Westphal <fw@...len.de>
Date:   Thu Feb 4 18:38:53 2010 +0100

    netfilter: ebtables: split do_replace into two functions
    
    Once CONFIG_COMPAT support is merged, this allows do_replace_finish()
    to be called after the CONFIG_COMPAT conversion is done, instead of
    copying and pasting this code.
    
    Signed-off-by: Florian Westphal <fw@...len.de>
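
For illustration only (not part of the commit): once the compat code lands, a
32-bit entry point could amount to little more than the userspace conversion
followed by a call into the shared helper. A minimal sketch, assuming a
hypothetical ebt_compat_copy_and_convert() conversion routine; the actual
compat structures and conversion logic are not in this patch:

#ifdef CONFIG_COMPAT
/* Illustrative only: a possible compat entry point.  The conversion
 * helper below is hypothetical; the point is that the tail of the
 * replace operation no longer needs to be duplicated, it is just
 * do_replace_finish(). */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	struct ebt_replace repl;	/* native header */
	struct ebt_table_info *newinfo;
	int ret;

	/* Hypothetical helper: copy the 32-bit header and entry blob
	 * from userspace and widen them into the native layout,
	 * allocating and filling 'newinfo' on success. */
	ret = ebt_compat_copy_and_convert(user, len, &repl, &newinfo);
	if (ret != 0)
		return ret;

	/* Shared tail introduced by this patch: pointer verification,
	 * translate_table(), counter snapshot and the table swap. */
	ret = do_replace_finish(net, &repl, newinfo);
	if (ret == 0)
		return ret;

	vfree(newinfo->entries);
	vfree(newinfo);
	return ret;
}
#endif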

diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 4370e96..a707dbd 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -959,91 +959,45 @@ static void get_counters(const struct ebt_counter *oldcounters,
 	}
 }
 
-/* replace the table */
-static int do_replace(struct net *net, const void __user *user,
-		      unsigned int len)
+static int do_replace_finish(struct net *net, struct ebt_replace *repl,
+			      struct ebt_table_info *newinfo)
 {
-	int ret, i, countersize;
-	struct ebt_table_info *newinfo;
-	struct ebt_replace tmp;
-	struct ebt_table *t;
+	int ret, i;
 	struct ebt_counter *counterstmp = NULL;
 	/* used to be able to unlock earlier */
 	struct ebt_table_info *table;
-
-	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
-		return -EFAULT;
-
-	if (len != sizeof(tmp) + tmp.entries_size) {
-		BUGPRINT("Wrong len argument\n");
-		return -EINVAL;
-	}
-
-	if (tmp.entries_size == 0) {
-		BUGPRINT("Entries_size never zero\n");
-		return -EINVAL;
-	}
-	/* overflow check */
-	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
-			SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
-		return -ENOMEM;
-	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
-		return -ENOMEM;
-
-	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
-	newinfo = vmalloc(sizeof(*newinfo) + countersize);
-	if (!newinfo)
-		return -ENOMEM;
-
-	if (countersize)
-		memset(newinfo->counters, 0, countersize);
-
-	newinfo->entries = vmalloc(tmp.entries_size);
-	if (!newinfo->entries) {
-		ret = -ENOMEM;
-		goto free_newinfo;
-	}
-	if (copy_from_user(
-	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
-		BUGPRINT("Couldn't copy entries from userspace\n");
-		ret = -EFAULT;
-		goto free_entries;
-	}
+	struct ebt_table *t;
 
 	/* the user wants counters back
 	   the check on the size is done later, when we have the lock */
-	if (tmp.num_counters) {
-		counterstmp = vmalloc(tmp.num_counters * sizeof(*counterstmp));
-		if (!counterstmp) {
-			ret = -ENOMEM;
-			goto free_entries;
-		}
+	if (repl->num_counters) {
+		unsigned long size = repl->num_counters * sizeof(*counterstmp);
+		counterstmp = vmalloc(size);
+		if (!counterstmp)
+			return -ENOMEM;
 	}
-	else
-		counterstmp = NULL;
 
-	/* this can get initialized by translate_table() */
 	newinfo->chainstack = NULL;
-	ret = ebt_verify_pointers(&tmp, newinfo);
+	ret = ebt_verify_pointers(repl, newinfo);
 	if (ret != 0)
 		goto free_counterstmp;
 
-	ret = translate_table(net, tmp.name, newinfo);
+	ret = translate_table(net, repl->name, newinfo);
 
 	if (ret != 0)
 		goto free_counterstmp;
 
-	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
+	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
 	if (!t) {
 		ret = -ENOENT;
 		goto free_iterate;
 	}
 
 	/* the table doesn't like it */
-	if (t->check && (ret = t->check(newinfo, tmp.valid_hooks)))
+	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
 		goto free_unlock;
 
-	if (tmp.num_counters && tmp.num_counters != t->private->nentries) {
+	if (repl->num_counters && repl->num_counters != t->private->nentries) {
 		BUGPRINT("Wrong nr. of counters requested\n");
 		ret = -EINVAL;
 		goto free_unlock;
@@ -1059,7 +1013,7 @@ static int do_replace(struct net *net, const void __user *user,
 		module_put(t->me);
 	/* we need an atomic snapshot of the counters */
 	write_lock_bh(&t->lock);
-	if (tmp.num_counters)
+	if (repl->num_counters)
 		get_counters(t->private->counters, counterstmp,
 		   t->private->nentries);
 
@@ -1070,10 +1024,9 @@ static int do_replace(struct net *net, const void __user *user,
 	   allocation. Only reason why this is done is because this way the lock
 	   is held only once, while this doesn't bring the kernel into a
 	   dangerous state. */
-	if (tmp.num_counters &&
-	   copy_to_user(tmp.counters, counterstmp,
-	   tmp.num_counters * sizeof(struct ebt_counter))) {
-		BUGPRINT("Couldn't copy counters to userspace\n");
+	if (repl->num_counters &&
+	   copy_to_user(repl->counters, counterstmp,
+	   repl->num_counters * sizeof(struct ebt_counter))) {
 		ret = -EFAULT;
 	}
 	else
@@ -1107,6 +1060,59 @@ free_counterstmp:
 			vfree(newinfo->chainstack[i]);
 		vfree(newinfo->chainstack);
 	}
+	return ret;
+}
+
+/* replace the table */
+static int do_replace(struct net *net, const void __user *user,
+		      unsigned int len)
+{
+	int ret, countersize;
+	struct ebt_table_info *newinfo;
+	struct ebt_replace tmp;
+
+	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+		return -EFAULT;
+
+	if (len != sizeof(tmp) + tmp.entries_size) {
+		BUGPRINT("Wrong len argument\n");
+		return -EINVAL;
+	}
+
+	if (tmp.entries_size == 0) {
+		BUGPRINT("Entries_size never zero\n");
+		return -EINVAL;
+	}
+	/* overflow check */
+	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
+			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
+		return -ENOMEM;
+	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
+		return -ENOMEM;
+
+	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
+	newinfo = vmalloc(sizeof(*newinfo) + countersize);
+	if (!newinfo)
+		return -ENOMEM;
+
+	if (countersize)
+		memset(newinfo->counters, 0, countersize);
+
+	newinfo->entries = vmalloc(tmp.entries_size);
+	if (!newinfo->entries) {
+		ret = -ENOMEM;
+		goto free_newinfo;
+	}
+	if (copy_from_user(
+	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
+		BUGPRINT("Couldn't copy entries from userspace\n");
+		ret = -EFAULT;
+		goto free_entries;
+	}
+
+	ret = do_replace_finish(net, &tmp, newinfo);
+	if (ret == 0)
+		return ret;
 free_entries:
 	vfree(newinfo->entries);
 free_newinfo: