Message-Id: <20100126141055.5AAD.A69D9226@jp.fujitsu.com>
Date:	Tue, 26 Jan 2010 14:19:23 +0900 (JST)
From:	KOSAKI Motohiro <kosaki.motohiro@...fujitsu.com>
To:	"Roman Jarosz" <kedgedev@...il.com>
Cc:	kosaki.motohiro@...fujitsu.com,
	lkml <linux-kernel@...r.kernel.org>,
	"A. Boulan" <arnaud.boulan@...ertysurf.fr>, michael@...nelt.co.at,
	jcnengel@...glemail.com, rientjes@...gle.com, earny@...4u.de,
	Jesse Barnes <jbarnes@...tuousgeek.org>,
	Eric Anholt <eric@...olt.net>
Subject: Re: OOM-Killer kills too much with 2.6.32.2

(cc'ing lots of related people)

> On Mon, 25 Jan 2010 02:48:08 +0100, KOSAKI Motohiro  
> <kosaki.motohiro@...fujitsu.com> wrote:
> 
> >> Hi,
> >>
> >> since kernel 2.6.32.2 (also tried 2.6.32.3) I get a lot of oom-killer
> >> kills when I do hard disk intensive tasks (mainly in VirtualBox which is
> >> running Windows XP) and IMHO it kills processes even if I have a lot of
> >> free memory.
> >>
> >> Is this a known bug? I have self compiled kernel so I can try patches.
> >
> > Can you please post your .config?

Hi all,

Strangely, all of the machines that reproduce this are x86_64 with Intel
i915, but I don't have any solid evidence yet.
Could anyone please apply the following debug patch and try to reproduce
the issue?

The patch writes some debug messages into /var/log/messages.
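
For reference, the added printk()s should produce lines roughly like the
following (the exact line numbers and the fault handler name depend on
your tree, so treat this only as a sketch):

    OOM at mm/memory.c:<line>
    ->fault OOM <fault handler function> <ret> <flags>

Grepping dmesg or /var/log/messages for "OOM" right after the next kill
should be enough to find them.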

Thanks.



---
 mm/memory.c |   45 +++++++++++++++++++++++++++++++++++++--------
 1 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 09e4b1b..5c9ebd8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2128,17 +2128,23 @@ reuse:
 gotten:
 	pte_unmap_unlock(page_table, ptl);
 
-	if (unlikely(anon_vma_prepare(vma)))
+	if (unlikely(anon_vma_prepare(vma))) {
+		printk(KERN_ERR "OOM at %s:%d\n", __FILE__, __LINE__);
 		goto oom;
+	}
 
 	if (is_zero_pfn(pte_pfn(orig_pte))) {
 		new_page = alloc_zeroed_user_highpage_movable(vma, address);
-		if (!new_page)
+		if (!new_page) {
+			printk(KERN_ERR "OOM at %s:%d\n", __FILE__, __LINE__);
 			goto oom;
+		}
 	} else {
 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-		if (!new_page)
+		if (!new_page) {
+			printk(KERN_ERR "OOM at %s:%d\n", __FILE__, __LINE__);
 			goto oom;
+		}
 		cow_user_page(new_page, old_page, address, vma);
 	}
 	__SetPageUptodate(new_page);
@@ -2153,8 +2159,10 @@ gotten:
 		unlock_page(old_page);
 	}
 
-	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
+	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) {
+		printk(KERN_ERR "OOM at %s:%d\n", __FILE__, __LINE__);
 		goto oom_free_new;
+	}
 
 	/*
 	 * Re-check the pte - we dropped the lock
@@ -2272,6 +2280,10 @@ oom:
 
 unwritable_page:
 	page_cache_release(old_page);
+
+	if (ret & VM_FAULT_OOM)
+		printk(KERN_ERR "do_wp ->page_mkwrite OOM %pf %x\n", vma->vm_ops->page_mkwrite, ret);
+
 	return ret;
 }
 
@@ -2670,15 +2682,21 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	/* Allocate our own private page. */
 	pte_unmap(page_table);
 
-	if (unlikely(anon_vma_prepare(vma)))
+	if (unlikely(anon_vma_prepare(vma))) {
+		printk(KERN_ERR "OOM at %s:%d\n", __FILE__, __LINE__);
 		goto oom;
+	}
 	page = alloc_zeroed_user_highpage_movable(vma, address);
-	if (!page)
+	if (!page) {
+		printk(KERN_ERR "OOM at %s:%d\n", __FILE__, __LINE__);
 		goto oom;
+	}
 	__SetPageUptodate(page);
 
-	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
+	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
+		printk(KERN_ERR "OOM at %s:%d\n", __FILE__, __LINE__);
 		goto oom_free_page;
+	}
 
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (vma->vm_flags & VM_WRITE)
@@ -2742,8 +2760,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	vmf.page = NULL;
 
 	ret = vma->vm_ops->fault(vma, &vmf);
-	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+		if (ret & VM_FAULT_OOM)
+			printk(KERN_ERR "->fault OOM %pf %x %x\n", vma->vm_ops->fault, ret, flags);
+
 		return ret;
+	}
 
 	if (unlikely(PageHWPoison(vmf.page))) {
 		if (ret & VM_FAULT_LOCKED)
@@ -2768,16 +2790,19 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (!(vma->vm_flags & VM_SHARED)) {
 			anon = 1;
 			if (unlikely(anon_vma_prepare(vma))) {
+				printk(KERN_ERR "OOM at %s:%d\n", __FILE__, __LINE__);
 				ret = VM_FAULT_OOM;
 				goto out;
 			}
 			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
 						vma, address);
 			if (!page) {
+				printk(KERN_ERR "OOM at %s:%d\n", __FILE__, __LINE__);
 				ret = VM_FAULT_OOM;
 				goto out;
 			}
 			if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
+				printk(KERN_ERR "OOM at %s:%d\n", __FILE__, __LINE__);
 				ret = VM_FAULT_OOM;
 				page_cache_release(page);
 				goto out;
@@ -2896,6 +2921,10 @@ out:
 
 unwritable_page:
 	page_cache_release(page);
+
+	if (ret & VM_FAULT_OOM)
+		printk(KERN_ERR "->page_mkwrite OOM %pf %x %x\n", vma->vm_ops->page_mkwrite, ret, flags);
+
 	return ret;
 }
 
-- 
1.6.5.2



