Message-ID: <20190625235325.2096441-3-songliubraving@fb.com>
Date: Tue, 25 Jun 2019 16:53:23 -0700
From: Song Liu <songliubraving@...com>
To: <linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>
CC: <matthew.wilcox@...cle.com>, <kirill.shutemov@...ux.intel.com>,
<peterz@...radead.org>, <oleg@...hat.com>, <rostedt@...dmis.org>,
<kernel-team@...com>, <william.kucharski@...cle.com>,
Song Liu <songliubraving@...com>
Subject: [PATCH v7 2/4] uprobe: use original page when all uprobes are removed
Currently, uprobe swaps the target page with an anonymous page in both
install_breakpoint() and remove_breakpoint(). Even after all uprobes on
a page have been removed, the given mm still maps the anonymous page
instead of the original file-backed page.

This patch allows uprobe to map the original page back in when possible,
i.e. when all uprobes on the page have been removed and the restored
contents match the backing file page.
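
In outline, the unregister path now checks whether the page with the
breakpoint removed is byte-identical to the original file page, and if
so reuses it (a simplified sketch of the change below; reference
counting and error handling elided):

	if (!is_register) {
		/* look up the original file-backed page at this offset */
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);
		if (orig_page && pages_identical(new_page, orig_page)) {
			put_page(new_page);	/* drop the anonymous copy */
			new_page = orig_page;	/* map the original back */
		}
	}

__replace_page() then skips the memcg charge and uses file rmap instead
of anon rmap when new_page is the original page.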
Acked-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Signed-off-by: Song Liu <songliubraving@...com>
---
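Note for reviewers: pages_identical() used below was moved to mm by an
earlier patch in this series; it is essentially a full-page memcmp(),
roughly:

	static inline int pages_identical(struct page *page1, struct page *page2)
	{
		return !memcmp_pages(page1, page2); /* memcmp of PAGE_SIZE bytes */
	}
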
kernel/events/uprobes.c | 45 +++++++++++++++++++++++++++++++++--------
1 file changed, 37 insertions(+), 8 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 78f61bfc6b79..f7c61a1ef720 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -160,16 +160,19 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	int err;
 	struct mmu_notifier_range range;
 	struct mem_cgroup *memcg;
+	bool orig = new_page->mapping != NULL; /* new_page == orig_page */
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
 				addr + PAGE_SIZE);
 
 	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
 
-	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
-			false);
-	if (err)
-		return err;
+	if (!orig) {
+		err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
+					    &memcg, false);
+		if (err)
+			return err;
+	}
 
 	/* For try_to_free_swap() and munlock_vma_page() below */
 	lock_page(old_page);
@@ -177,15 +180,24 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	mmu_notifier_invalidate_range_start(&range);
 	err = -EAGAIN;
 	if (!page_vma_mapped_walk(&pvmw)) {
-		mem_cgroup_cancel_charge(new_page, memcg, false);
+		if (!orig)
+			mem_cgroup_cancel_charge(new_page, memcg, false);
 		goto unlock;
 	}
 	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 
 	get_page(new_page);
-	page_add_new_anon_rmap(new_page, vma, addr, false);
-	mem_cgroup_commit_charge(new_page, memcg, false, false);
-	lru_cache_add_active_or_unevictable(new_page, vma);
+	if (orig) {
+		lock_page(new_page);	/* for page_add_file_rmap() */
+		page_add_file_rmap(new_page, false);
+		unlock_page(new_page);
+		inc_mm_counter(mm, mm_counter_file(new_page));
+		dec_mm_counter(mm, MM_ANONPAGES);
+	} else {
+		page_add_new_anon_rmap(new_page, vma, addr, false);
+		mem_cgroup_commit_charge(new_page, memcg, false, false);
+		lru_cache_add_active_or_unevictable(new_page, vma);
+	}
 
 	if (!PageAnon(old_page)) {
 		dec_mm_counter(mm, mm_counter_file(old_page));
@@ -501,6 +513,23 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	copy_highpage(new_page, old_page);
 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
+	if (!is_register) {
+		struct page *orig_page;
+		pgoff_t index;
+
+		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
+		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
+					  index);
+
+		if (orig_page) {
+			if (pages_identical(new_page, orig_page)) {
+				put_page(new_page);
+				new_page = orig_page;
+			} else
+				put_page(orig_page);
+		}
+	}
+
 	ret = __replace_page(vma, vaddr, old_page, new_page);
 	put_page(new_page);
 put_old:
--
2.17.1