Date:   Fri, 31 May 2019 21:50:14 +0200
From:   Albert Vaca Cintora <albertvaka@...il.com>
To:     albertvaka@...il.com, akpm@...ux-foundation.org,
        rdunlap@...radead.org, mingo@...nel.org, jack@...e.cz,
        ebiederm@...ssion.com, nsaenzjulienne@...e.de,
        linux-kernel@...r.kernel.org, corbet@....net,
        linux-doc@...r.kernel.org, mbrugger@...e.com
Subject: [PATCH v3 1/3] Move *_ucounts functions above user_table

Move the *_ucounts helpers above user_table so that they can be used from
proc_handler functions in user_table. A sketch of the intended usage follows
the '---' separator below.

Signed-off-by: Albert Vaca Cintora <albertvaka@...il.com>
---
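For context, a minimal sketch (not part of this series) of how a proc_handler
wired into user_table could use these helpers once they are defined above it.
The handler name, the use of current_euid(), and the fallback to
proc_dointvec_minmax() are illustrative assumptions, not the actual follow-up
patch:

static int sample_ucounts_dointvec(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	/* Pin the per-user ucounts entry for the calling user (illustrative). */
	struct user_namespace *ns = current_user_ns();
	struct ucounts *ucounts = get_ucounts(ns, current_euid());
	int ret;

	if (!ucounts)
		return -ENOMEM;

	/* Delegate the actual read/write to the generic integer handler. */
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	put_ucounts(ucounts);
	return ret;
}

With the helpers defined earlier in the file, such a handler can be assigned
to the .proc_handler field of a user_table entry without forward
declarations, which is what this move enables.
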
 kernel/ucount.c | 122 ++++++++++++++++++++++++------------------------
 1 file changed, 61 insertions(+), 61 deletions(-)

diff --git a/kernel/ucount.c b/kernel/ucount.c
index f48d1b6376a4..909c856e809f 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -57,6 +57,67 @@ static struct ctl_table_root set_root = {
 	.permissions = set_permissions,
 };
 
+static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
+{
+	struct ucounts *ucounts;
+
+	hlist_for_each_entry(ucounts, hashent, node) {
+		if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
+			return ucounts;
+	}
+	return NULL;
+}
+
+static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
+{
+	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
+	struct ucounts *ucounts, *new;
+
+	spin_lock_irq(&ucounts_lock);
+	ucounts = find_ucounts(ns, uid, hashent);
+	if (!ucounts) {
+		spin_unlock_irq(&ucounts_lock);
+
+		new = kzalloc(sizeof(*new), GFP_KERNEL);
+		if (!new)
+			return NULL;
+
+		new->ns = ns;
+		new->uid = uid;
+		new->count = 0;
+
+		spin_lock_irq(&ucounts_lock);
+		ucounts = find_ucounts(ns, uid, hashent);
+		if (ucounts) {
+			kfree(new);
+		} else {
+			hlist_add_head(&new->node, hashent);
+			ucounts = new;
+		}
+	}
+	if (ucounts->count == INT_MAX)
+		ucounts = NULL;
+	else
+		ucounts->count += 1;
+	spin_unlock_irq(&ucounts_lock);
+	return ucounts;
+}
+
+static void put_ucounts(struct ucounts *ucounts)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ucounts_lock, flags);
+	ucounts->count -= 1;
+	if (!ucounts->count)
+		hlist_del_init(&ucounts->node);
+	else
+		ucounts = NULL;
+	spin_unlock_irqrestore(&ucounts_lock, flags);
+
+	kfree(ucounts);
+}
+
 static int zero = 0;
 static int int_max = INT_MAX;
 #define UCOUNT_ENTRY(name)				\
@@ -118,67 +179,6 @@ void retire_userns_sysctls(struct user_namespace *ns)
 #endif
 }
 
-static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
-{
-	struct ucounts *ucounts;
-
-	hlist_for_each_entry(ucounts, hashent, node) {
-		if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
-			return ucounts;
-	}
-	return NULL;
-}
-
-static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
-{
-	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
-	struct ucounts *ucounts, *new;
-
-	spin_lock_irq(&ucounts_lock);
-	ucounts = find_ucounts(ns, uid, hashent);
-	if (!ucounts) {
-		spin_unlock_irq(&ucounts_lock);
-
-		new = kzalloc(sizeof(*new), GFP_KERNEL);
-		if (!new)
-			return NULL;
-
-		new->ns = ns;
-		new->uid = uid;
-		new->count = 0;
-
-		spin_lock_irq(&ucounts_lock);
-		ucounts = find_ucounts(ns, uid, hashent);
-		if (ucounts) {
-			kfree(new);
-		} else {
-			hlist_add_head(&new->node, hashent);
-			ucounts = new;
-		}
-	}
-	if (ucounts->count == INT_MAX)
-		ucounts = NULL;
-	else
-		ucounts->count += 1;
-	spin_unlock_irq(&ucounts_lock);
-	return ucounts;
-}
-
-static void put_ucounts(struct ucounts *ucounts)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ucounts_lock, flags);
-	ucounts->count -= 1;
-	if (!ucounts->count)
-		hlist_del_init(&ucounts->node);
-	else
-		ucounts = NULL;
-	spin_unlock_irqrestore(&ucounts_lock, flags);
-
-	kfree(ucounts);
-}
-
 static inline bool atomic_inc_below(atomic_t *v, int u)
 {
 	int c, old;
-- 
2.21.0
