Date:   Sun, 12 Feb 2023 19:00:19 -0600
From:   Mike Christie <michael.christie@...cle.com>
To:     brauner@...nel.org, ebiederm@...ssion.com,
        torvalds@...ux-foundation.org, linux-kernel@...r.kernel.org
Cc:     Mike Christie <michael.christie@...cle.com>
Subject: [PATCH 4/5] kernel: Prepare set_kthread_struct to be used for setup_thread_fn

This prepares set_kthread_struct to be used as a setup_thread_fn
callback: it is renamed to kthread_setup_struct, takes the caller's
data pointer so it can set the task's comm itself, and returns an
errno-style int instead of a bool.

Signed-off-by: Mike Christie <michael.christie@...cle.com>
---
 include/linux/kthread.h |  2 +-
 kernel/fork.c           |  2 +-
 kernel/kthread.c        | 89 ++++++++++++++++++++++++++++-------------
 kernel/sched/core.c     |  2 +-
 4 files changed, 64 insertions(+), 31 deletions(-)
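
Not part of this patch -- a minimal sketch for reviewers of how a
setup_thread_fn-style caller could consume the new errno-style return
from kthread_setup_struct(); the callback name below is illustrative
only:

static int example_setup_thread_fn(struct task_struct *p, void *data)
{
	int ret;

	/*
	 * Illustrative only. data is the kthread_create_info used to
	 * format p->comm; kthread_setup_struct() now returns 0 or a
	 * negative errno (-EINVAL, -ENOMEM or -EINTR) instead of a bool.
	 */
	ret = kthread_setup_struct(p, data);
	if (ret)
		return ret;

	/* further per-thread setup would go here */
	return 0;
}

Callers that only need the kthread struct allocated (e.g. the idle
thread set up in sched_init()) can pass NULL data, which skips the
comm setup.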

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 30e5bec81d2b..94dffdfa17df 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -34,7 +34,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 					  const char *namefmt);
 
 void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk);
-bool set_kthread_struct(struct task_struct *p);
+int kthread_setup_struct(struct task_struct *p, void *data);
 
 void kthread_set_per_cpu(struct task_struct *k, int cpu);
 bool kthread_is_per_cpu(struct task_struct *k);
diff --git a/kernel/fork.c b/kernel/fork.c
index 057274da64fb..bd60ecc6b29c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2190,7 +2190,7 @@ static __latent_entropy struct task_struct *copy_process(
 	audit_set_context(p, NULL);
 	cgroup_fork(p);
 	if (args->kthread) {
-		if (!set_kthread_struct(p))
+		if (kthread_setup_struct(p, args->fn_arg))
 			goto bad_fork_cleanup_delayacct;
 	}
 #ifdef CONFIG_NUMA
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5e7c8f3f184f..b67c2caf2ccb 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -46,6 +46,10 @@ struct kthread_create_info
 	struct task_struct *result;
 	struct completion *done;
 
+	struct mutex name_mutex;
+	const char *name_fmt;
+	va_list *name_args;
+
 	struct list_head list;
 };
 
@@ -107,23 +111,58 @@ void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
 	strscpy_pad(buf, kthread->full_name, buf_size);
 }
 
-bool set_kthread_struct(struct task_struct *p)
+static int set_kthread_comm(struct task_struct *tsk, struct kthread *kthread,
+			    struct kthread_create_info *create)
+{
+	va_list name_args;
+	int len;
+
+	mutex_lock(&create->name_mutex);
+	if (!create->name_args) {
+		mutex_unlock(&create->name_mutex);
+		return -EINTR;
+	}
+
+	va_copy(name_args, *create->name_args);
+	len = vsnprintf(tsk->comm, TASK_COMM_LEN, create->name_fmt, name_args);
+	va_end(name_args);
+	if (len >= TASK_COMM_LEN) {
+		/* leave it truncated when out of memory. */
+		kthread->full_name = kvasprintf(GFP_KERNEL, create->name_fmt,
+						*create->name_args);
+	}
+	mutex_unlock(&create->name_mutex);
+	return 0;
+}
+
+int kthread_setup_struct(struct task_struct *p, void *data)
 {
 	struct kthread *kthread;
+	int ret;
 
 	if (WARN_ON_ONCE(to_kthread(p)))
-		return false;
+		return -EINVAL;
 
 	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
 	if (!kthread)
-		return false;
+		return -ENOMEM;
+
+	if (data) {
+		ret = set_kthread_comm(p, kthread, data);
+		if (ret)
+			goto free_kthread;
+	}
 
 	init_completion(&kthread->exited);
 	init_completion(&kthread->parked);
 	p->vfork_done = &kthread->exited;
 
 	p->worker_private = kthread;
-	return true;
+	return 0;
+
+free_kthread:
+	kfree(kthread);
+	return ret;
 }
 
 void free_kthread_struct(struct task_struct *k)
@@ -131,7 +170,7 @@ void free_kthread_struct(struct task_struct *k)
 	struct kthread *kthread;
 
 	/*
-	 * Can be NULL if kmalloc() in set_kthread_struct() failed.
+	 * Can be NULL if kmalloc() in kthread_setup_struct() failed.
 	 */
 	kthread = to_kthread(k);
 	if (!kthread)
@@ -423,6 +462,8 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct task_struct *task;
+	va_list name_args;
+	int ret;
 	struct kthread_create_info *create = kmalloc(sizeof(*create),
 						     GFP_KERNEL);
 
@@ -432,6 +473,10 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 	create->data = data;
 	create->node = node;
 	create->done = &done;
+	mutex_init(&create->name_mutex);
+	create->name_fmt = namefmt;
+	va_copy(name_args, args);
+	create->name_args = &name_args;
 
 	spin_lock(&kthread_create_lock);
 	list_add_tail(&create->list, &kthread_create_list);
@@ -443,14 +488,20 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 	 * the OOM killer while kthreadd is trying to allocate memory for
 	 * new kernel thread.
 	 */
-	if (unlikely(wait_for_completion_killable(&done))) {
+	ret = wait_for_completion_killable(&done);
+	if (unlikely(ret)) {
+		mutex_lock(&create->name_mutex);
+		create->name_args = NULL;
+		mutex_unlock(&create->name_mutex);
 		/*
 		 * If I was killed by a fatal signal before kthreadd (or new
 		 * kernel thread) calls complete(), leave the cleanup of this
 		 * structure to that thread.
 		 */
-		if (xchg(&create->done, NULL))
-			return ERR_PTR(-EINTR);
+		if (xchg(&create->done, NULL)) {
+			task = ERR_PTR(-EINTR);
+			goto end_args;
+		}
 		/*
 		 * kthreadd (or new kernel thread) will call complete()
 		 * shortly.
@@ -458,27 +509,9 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
 		wait_for_completion(&done);
 	}
 	task = create->result;
-	if (!IS_ERR(task)) {
-		char name[TASK_COMM_LEN];
-		va_list aq;
-		int len;
-
-		/*
-		 * task is already visible to other tasks, so updating
-		 * COMM must be protected.
-		 */
-		va_copy(aq, args);
-		len = vsnprintf(name, sizeof(name), namefmt, aq);
-		va_end(aq);
-		if (len >= TASK_COMM_LEN) {
-			struct kthread *kthread = to_kthread(task);
-
-			/* leave it truncated when out of memory. */
-			kthread->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
-		}
-		set_task_comm(task, name);
-	}
 	kfree(create);
+end_args:
+	va_end(name_args);
 	return task;
 }
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e838feb6adc5..289b9941b58d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9925,7 +9925,7 @@ void __init sched_init(void)
 	 * if we want to avoid special-casing it in code that deals with per-CPU
 	 * kthreads.
 	 */
-	WARN_ON(!set_kthread_struct(current));
+	WARN_ON(kthread_setup_struct(current, NULL));
 
 	/*
 	 * Make us the idle thread. Technically, schedule() should not be
-- 
2.25.1
