Message-Id: <20161216141556.75130-4-kirill.shutemov@linux.intel.com>
Date: Fri, 16 Dec 2016 17:15:56 +0300
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To: Michal Hocko <mhocko@...e.com>,
Peter Zijlstra <peterz@...radead.org>,
Rik van Riel <riel@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [PATCH 4/4] oom-reaper: use madvise_dontneed() instead of unmap_page_range()
The logic deciding whether we can reap pages from a VMA should match what
we have in madvise_dontneed(). In particular, we should skip VM_PFNMAP
VMAs, but currently we don't.

Let's just call madvise_dontneed() from __oom_reap_task_mm(), so we won't
need to keep the two code paths in sync in the future.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
---
mm/internal.h | 7 +++----
mm/madvise.c | 2 +-
mm/memory.c | 2 +-
mm/oom_kill.c | 15 ++-------------
4 files changed, 7 insertions(+), 19 deletions(-)
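
For reference (not part of this patch): the VMA checks the reaper now
inherits come from madvise_dontneed() in mm/madvise.c. A rough,
paraphrased sketch of that function as of this series (details and line
breaks may differ from the actual source):

	static long madvise_dontneed(struct vm_area_struct *vma,
				     struct vm_area_struct **prev,
				     unsigned long start, unsigned long end)
	{
		*prev = vma;
		/* Skip VMA types that cannot be reaped this way. */
		if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
			return -EINVAL;

		/* Zap the range; anonymous pages are simply dropped. */
		zap_page_range(vma, start, end - start, NULL);
		return 0;
	}

So by calling madvise_dontneed() the reaper gets the VM_LOCKED and
VM_HUGETLB skips it used to open-code, plus the VM_PFNMAP skip it was
missing.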
diff --git a/mm/internal.h b/mm/internal.h
index 44d68895a9b9..5c355855e4ad 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -41,10 +41,9 @@ int do_swap_page(struct vm_fault *vmf);
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling);
-void unmap_page_range(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- unsigned long addr, unsigned long end,
- struct zap_details *details);
+long madvise_dontneed(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end);
extern int __do_page_cache_readahead(struct address_space *mapping,
struct file *filp, pgoff_t offset, unsigned long nr_to_read,
diff --git a/mm/madvise.c b/mm/madvise.c
index aa4c502caecb..8c9f19b62b4a 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -468,7 +468,7 @@ static long madvise_free(struct vm_area_struct *vma,
* An interface that causes the system to free clean pages and flush
* dirty pages is already available as msync(MS_INVALIDATE).
*/
-static long madvise_dontneed(struct vm_area_struct *vma,
+long madvise_dontneed(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end)
{
diff --git a/mm/memory.c b/mm/memory.c
index eed102070dcb..f8836232a492 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1271,7 +1271,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
return addr;
}
-void unmap_page_range(struct mmu_gather *tlb,
+static void unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct zap_details *details)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 96a53ab0c9eb..59a00b1c3145 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -479,7 +479,7 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
* out_of_memory
* select_bad_process
* # no TIF_MEMDIE task selects new victim
- * unmap_page_range # frees some memory
+ * madvise_dontneed # frees some memory
*/
mutex_lock(&oom_lock);
@@ -508,16 +508,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
tlb_gather_mmu(&tlb, mm, 0, -1);
for (vma = mm->mmap ; vma; vma = vma->vm_next) {
- if (is_vm_hugetlb_page(vma))
- continue;
-
- /*
- * mlocked VMAs require explicit munlocking before unmap.
- * Let's keep it simple here and skip such VMAs.
- */
- if (vma->vm_flags & VM_LOCKED)
- continue;
-
/*
* Only anonymous pages have a good chance to be dropped
* without additional steps which we cannot afford as we
@@ -529,8 +519,7 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
* count elevated without a good reason.
*/
if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
- unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
- NULL);
+ madvise_dontneed(vma, &vma, vma->vm_start, vma->vm_end);
}
tlb_finish_mmu(&tlb, 0, -1);
pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
--
2.10.2