lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <572C0674.9080006@huawei.com>
Date:	Fri, 6 May 2016 10:50:28 +0800
From:	zhouchengming <zhouchengming1@...wei.com>
To:	Andrew Morton <akpm@...ux-foundation.org>
CC:	<hughd@...gle.com>, <aarcange@...hat.com>,
	<kirill.shutemov@...ux.intel.com>, <vbabka@...e.cz>,
	<geliangtang@....com>, <minchan@...nel.org>, <linux-mm@...ck.org>,
	<linux-kernel@...r.kernel.org>, <guohanjun@...wei.com>,
	<dingtianhong@...wei.com>, <huawei.libin@...wei.com>,
	<thunder.leizhen@...wei.com>, <qiuxishi@...wei.com>
Subject: Re: [PATCH] ksm: fix conflict between mmput and scan_get_next_rmap_item

On 2016/5/6 5:07, Andrew Morton wrote:
> On Thu, 5 May 2016 20:42:56 +0800 Zhou Chengming<zhouchengming1@...wei.com>  wrote:
>
>> A concurrency issue about KSM in the function scan_get_next_rmap_item.
>>
>> task A (ksmd):				|task B (the mm's task):
>> 					|
>> mm = slot->mm;				|
>> down_read(&mm->mmap_sem);		|
>> 					|
>> ...					|
>> 					|
>> spin_lock(&ksm_mmlist_lock);		|
>> 					|
>> ksm_scan.mm_slot go to the next slot;	|
>> 					|
>> spin_unlock(&ksm_mmlist_lock);		|
>> 					|mmput() ->
>> 					|	ksm_exit():
>> 					|
>> 					|spin_lock(&ksm_mmlist_lock);
>> 					|if (mm_slot&&  ksm_scan.mm_slot != mm_slot) {
>> 					|	if (!mm_slot->rmap_list) {
>> 					|		easy_to_free = 1;
>> 					|		...
>> 					|
>> 					|if (easy_to_free) {
>> 					|	mmdrop(mm);
>> 					|	...
>> 					|
>> 					|So this mm_struct will be freed successfully.
>> 					|
>> up_read(&mm->mmap_sem);			|
>>
>> As we can see above, the ksmd thread may access a mm_struct that has
>> already been freed to the kmem_cache.
>> Suppose a fork then gets this mm_struct from the kmem_cache; when the ksmd
>> thread calls up_read(&mm->mmap_sem), it will cause mmap_sem.count to become -1.
>> I changed the scan_get_next_rmap_item function, referring to the khugepaged
>> scan function.
>
> Thanks.
>
> We need to decide whether this fix should be backported into earlier
> (-stable) kernels.  Can you tell us how easily this is triggered and
> share your thoughts on this?
>
>
> .
>

I wrote a patch that can easily trigger this bug.
When ksmd goes to sleep, if a fork gets this mm_struct, the BUG_ON
will be triggered.

 From eedfdd12eb11858f69ff4a4300acad42946ca260 Mon Sep 17 00:00:00 2001
From: Zhou Chengming <zhouchengming1@...wei.com>
Date: Thu, 5 May 2016 17:49:22 +0800
Subject: [PATCH] ksm: trigger a bug

Signed-off-by: Zhou Chengming <zhouchengming1@...wei.com>
---
  mm/ksm.c |   17 +++++++++++++++++
  1 files changed, 17 insertions(+), 0 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index ca6d2a0..676368c 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1519,6 +1519,18 @@ static struct rmap_item 
*get_next_rmap_item(struct mm_slot *mm_slot,
  	return rmap_item;
  }

+static void trigger_a_bug(struct task_struct *p, struct mm_struct *mm)
+{
+	/* send KILL sig to the task, hope the mm_struct will be freed */
+	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
+	/* sleep for 5s, the mm_struct will be freed and another fork
+	 * will use this mm_struct
+	 */
+	schedule_timeout(msecs_to_jiffies(5000));
+	/* the mm_struct owned by another task */
+	BUG_ON(mm->owner != p);
+}
+
  static struct rmap_item *scan_get_next_rmap_item(struct page **page)
  {
  	struct mm_struct *mm;
@@ -1526,6 +1538,7 @@ static struct rmap_item 
*scan_get_next_rmap_item(struct page **page)
  	struct vm_area_struct *vma;
  	struct rmap_item *rmap_item;
  	int nid;
+	struct task_struct *taskp;

  	if (list_empty(&ksm_mm_head.mm_list))
  		return NULL;
@@ -1636,6 +1649,8 @@ next_mm:
  	remove_trailing_rmap_items(slot, ksm_scan.rmap_list);

  	spin_lock(&ksm_mmlist_lock);
+	/* get the mm's task now in the ksm_mmlist_lock */
+	taskp = mm->owner;
  	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
  						struct mm_slot, mm_list);
  	if (ksm_scan.address == 0) {
@@ -1651,6 +1666,7 @@ next_mm:
  		hash_del(&slot->link);
  		list_del(&slot->mm_list);
  		spin_unlock(&ksm_mmlist_lock);
+		trigger_a_bug(taskp, mm);

  		free_mm_slot(slot);
  		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
@@ -1658,6 +1674,7 @@ next_mm:
  		mmdrop(mm);
  	} else {
  		spin_unlock(&ksm_mmlist_lock);
+		trigger_a_bug(taskp, mm);
  		up_read(&mm->mmap_sem);
  	}

-- 
1.7.7


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ