lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Fri, 04 Oct 2013 16:02:03 +0530
From:	Janani Venkataraman <jananive@...ibm.com>
To:	linux-kernel@...r.kernel.org
Cc:	amwang@...hat.com, rdunlap@...otime.net, andi@...stfloor.org,
	aravinda@...ux.vnet.ibm.com, hch@....de, mhiramat@...hat.com,
	jeremy.fitzhardinge@...rix.com, xemul@...allels.com,
	suzuki@...ux.vnet.ibm.com, kosaki.motohiro@...fujitsu.com,
	adobriyan@...il.com, tarundsk@...ux.vnet.ibm.com,
	vapier@...too.org, roland@...k.frob.com, tj@...nel.org,
	ananth@...ux.vnet.ibm.com, gorcunov@...nvz.org, avagin@...nvz.org,
	oleg@...hat.com, eparis@...hat.com, d.hatayama@...fujitsu.com,
	james.hogan@...tec.com, akpm@...ux-foundation.org,
	torvalds@...ux-foundation.org
Subject: [PATCH 12/19] Hold the threads using task_work_add

From: Janani Venkataraman <janani@...ibm.com>

Hold the threads in a killable state, for collection of register set.
This was implemented in the past using the freezer system. The freezer functions in
the kernel essentially help start and stop sets of tasks, and this approach
exploited the existing freezer subsystem kernel interface effectively to
quiesce all the threads of the application before triggering the core dump.
This approach was not accepted due to a potential DoS attack. The community
also discussed that "freeze" is a bit dangerous because an application
which is frozen cannot be killed while it is frozen, and there is no
indication that it is frozen via usual user commands such as 'ps' or 'top'.

In this method we assign work to the threads of the task, which will make
them wait in a killable state until the operation is complete, using
'completion'. Once the dump is complete we release the threads.

The following are yet to be implemented:

1) A check to ensure all the threads have entered the work that was queued
2) Handling of threads blocked in the kernel. These will not reach the work
   queued and hence the dump may be delayed. If the process is in a non-
   running state we can probably take the dump as it would not lead to
   inconsistency. We would like the community's views on the same.

Signed-off-by: Janani Venkataraman <jananive@...ibm.com>
Signed-off-by: Suzuki K. Poulose <suzuki@...ibm.com>
---
 fs/proc/gencore.c |   42 +++++++++++++++++++++++++++++++++++++++---
 fs/proc/gencore.h |    4 ++++
 2 files changed, 43 insertions(+), 3 deletions(-)

diff --git a/fs/proc/gencore.c b/fs/proc/gencore.c
index 5f56910..d741f18 100644
--- a/fs/proc/gencore.c
+++ b/fs/proc/gencore.c
@@ -20,8 +20,10 @@
  * Authors:
  *      Ananth N.Mavinakayanahalli <ananth@...ibm.com>
  *      Suzuki K. Poulose <suzuki@...ibm.com>
+ *      Janani Venkataraman <jananive@...ibm.com>
  */
 
+#include <linux/task_work.h>
 #include <linux/elf.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -67,22 +69,45 @@ out:
 	return ret;
 }
 
+static void gencore_work(struct callback_head *open_work)
+{
+	/* TODO A method to know when all the threads have reached here */ 
+	struct core_proc *cp = container_of(open_work, struct core_proc, twork);
+	/* If a thread is exiting we could let it go */
+	if (current->flags & PF_EXITING)
+		return;
+	wait_for_completion_killable(&cp->hold);
+}
+ 
 static int release_gencore(struct inode *inode, struct file *file)
 {
 	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
 	struct core_proc *cp;
+	struct task_struct *t;
 
 	if (!task)
 		return -EIO;
 
 	mutex_lock(&core_mutex);
 	cp = get_core_proc(task);
-	if (cp) {
+	if (cp) 
 		list_del(&cp->list);
-		kfree(cp);
-	}
+	else
+		return -ENOENT;
+	
+	complete_all(&cp->hold);
 	mutex_unlock(&core_mutex);
+	/* Cancelling the work added */
+	t = task;
+	read_lock(&tasklist_lock);
+	do {
+		if (t != current)
+			task_work_cancel(t, gencore_work);
+	} while_each_thread(cp->task, t);
+
+	read_unlock(&tasklist_lock);   
 	put_task_struct(task);
+	kfree(cp);
 	return 0;
 }
 
@@ -117,6 +142,7 @@ static int open_gencore(struct inode *inode, struct file *filp)
 {
 	struct task_struct *task = get_proc_task(inode);
 	struct core_proc *cp;
+	struct task_struct *t;
 	int elf_class;
 	int ret = 0;
 	if (!task)
@@ -145,6 +171,16 @@ static int open_gencore(struct inode *inode, struct file *filp)
 	mutex_lock(&core_mutex);
 	list_add(&cp->list, &core_list);
 	mutex_unlock(&core_mutex);
+	init_completion(&cp->hold);
+	/* Adding the work for all the threads except current */
+	t = cp->task;
+	init_task_work(&cp->twork, gencore_work);
+	read_lock(&tasklist_lock);
+	do {
+		if (t != current)
+			task_work_add(t, &cp->twork, true);
+	} while_each_thread(cp->task, t);
+	read_unlock(&tasklist_lock); 
 
 out:
 	put_task_struct(task);
diff --git a/fs/proc/gencore.h b/fs/proc/gencore.h
index c98fddf..1a88e24 100644
--- a/fs/proc/gencore.h
+++ b/fs/proc/gencore.h
@@ -1,12 +1,16 @@
 #ifndef __GEN_CORE_H
 #define __GEN_CORE_H
 
+#include <linux/types.h>
 #include <linux/list.h>
 #include <linux/sched.h>
+#include <linux/completion.h>
 
 /* Per-process state for an in-flight gencore dump request. */
 struct core_proc {
 	struct list_head list;	/* entry in the global core_list */
 	struct task_struct *task;	/* process whose core is being dumped */
+	struct completion hold;	/* threads block on this until the dump finishes */
+	struct callback_head twork;	/* task_work node queued on the threads;
+					 * NOTE(review): a single node appears to be
+					 * shared by all threads — confirm task_work_add()
+					 * permits the same callback_head on multiple tasks */
 };
 
 #endif

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ