Message-ID: <20141209182219.GB22569@quack.suse.cz>
Date: Tue, 9 Dec 2014 19:22:19 +0100
From: Jan Kara <jack@...e.cz>
To: Johannes Weiner <hannes@...xchg.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Tejun Heo <tj@...nel.org>, Hugh Dickins <hughd@...gle.com>,
Michel Lespinasse <walken@...gle.com>, Jan Kara <jack@...e.cz>,
"Kirill A. Shutemov" <kirill@...temov.name>, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [patch 3/3] mm: memory: merge shared-writable dirtying branches in do_wp_page()

On Fri 05-12-14 09:52:46, Johannes Weiner wrote:
> Whether there is a vm_ops->page_mkwrite or not, the page dirtying is
> pretty much the same. Make sure the page references are the same in
> both cases, then merge the two branches.
>
> It's tempting to go even further and page-lock the !page_mkwrite case,
> to get it in line with everybody else setting the page table and thus
> further simplify the model. But that's not quite compelling enough to
> justify dropping the pte lock, then relocking and verifying the entry
> for filesystems without ->page_mkwrite, which notably includes tmpfs.
> Leave it for now and lock the page late in the !page_mkwrite case.
>
> Signed-off-by: Johannes Weiner <hannes@...xchg.org>
> Acked-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
The patch looks good. You can add:
Reviewed-by: Jan Kara <jack@...e.cz>
Honza
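
For anyone skimming the thread, the dirty-page handling that results from this
patch looks roughly like the following. This is condensed from the diff below,
with the comments added here for orientation rather than taken from the patch:

	if (dirty_shared) {
		struct address_space *mapping;
		int dirtied;

		/* the ->page_mkwrite path comes back with the page already locked */
		if (!page_mkwrite)
			lock_page(old_page);

		dirtied = set_page_dirty(old_page);
		VM_BUG_ON_PAGE(PageAnon(old_page), old_page);
		mapping = old_page->mapping;
		unlock_page(old_page);
		/* drops the reference taken at the top of the shared-writable branch */
		page_cache_release(old_page);

		/* some device drivers dirty pages without setting page->mapping */
		if ((dirtied || page_mkwrite) && mapping)
			balance_dirty_pages_ratelimited(mapping);

		/* as before, only the !page_mkwrite case updates the file time here */
		if (!page_mkwrite)
			file_update_time(vma->vm_file);
	}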
> ---
> mm/memory.c | 48 +++++++++++++++++-------------------------------
> 1 file changed, 17 insertions(+), 31 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 5640a718ac58..df47fd0a4b7f 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2046,7 +2046,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
>  	pte_t entry;
>  	int ret = 0;
>  	int page_mkwrite = 0;
> -	struct page *dirty_page = NULL;
> +	bool dirty_shared = false;
>  	unsigned long mmun_start = 0;	/* For mmu_notifiers */
>  	unsigned long mmun_end = 0;	/* For mmu_notifiers */
>  	struct mem_cgroup *memcg;
> @@ -2097,6 +2097,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
>  		unlock_page(old_page);
>  	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
>  					(VM_WRITE|VM_SHARED))) {
> +		page_cache_get(old_page);
>  		/*
>  		 * Only catch write-faults on shared writable pages,
>  		 * read-only shared pages can get COWed by
> @@ -2104,7 +2105,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
>  		 */
>  		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
>  			int tmp;
> -			page_cache_get(old_page);
> +
>  			pte_unmap_unlock(page_table, ptl);
>  			tmp = do_page_mkwrite(vma, old_page, address);
>  			if (unlikely(!tmp || (tmp &
> @@ -2124,11 +2125,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
>  				unlock_page(old_page);
>  				goto unlock;
>  			}
> -
>  			page_mkwrite = 1;
>  		}
> -		dirty_page = old_page;
> -		get_page(dirty_page);
> +
> +		dirty_shared = true;
>
>  reuse:
>  		/*
> @@ -2147,43 +2147,29 @@ reuse:
>  		pte_unmap_unlock(page_table, ptl);
>  		ret |= VM_FAULT_WRITE;
>
> -		if (!dirty_page)
> -			return ret;
> -
> -		if (!page_mkwrite) {
> +		if (dirty_shared) {
>  			struct address_space *mapping;
>  			int dirtied;
>
> -			lock_page(dirty_page);
> -			dirtied = set_page_dirty(dirty_page);
> -			VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
> -			mapping = dirty_page->mapping;
> -			unlock_page(dirty_page);
> +			if (!page_mkwrite)
> +				lock_page(old_page);
>
> -			if (dirtied && mapping) {
> -				/*
> -				 * Some device drivers do not set page.mapping
> -				 * but still dirty their pages
> -				 */
> -				balance_dirty_pages_ratelimited(mapping);
> -			}
> +			dirtied = set_page_dirty(old_page);
> +			VM_BUG_ON_PAGE(PageAnon(old_page), old_page);
> +			mapping = old_page->mapping;
> +			unlock_page(old_page);
> +			page_cache_release(old_page);
>
> -			file_update_time(vma->vm_file);
> -		}
> -		put_page(dirty_page);
> -		if (page_mkwrite) {
> -			struct address_space *mapping = dirty_page->mapping;
> -
> -			set_page_dirty(dirty_page);
> -			unlock_page(dirty_page);
> -			page_cache_release(dirty_page);
> -			if (mapping) {
> +			if ((dirtied || page_mkwrite) && mapping) {
>  				/*
>  				 * Some device drivers do not set page.mapping
>  				 * but still dirty their pages
>  				 */
>  				balance_dirty_pages_ratelimited(mapping);
>  			}
> +
> +			if (!page_mkwrite)
> +				file_update_time(vma->vm_file);
>  		}
>
>  		return ret;
> --
> 2.1.3
>
--
Jan Kara <jack@...e.cz>
SUSE Labs, CR