Message-Id: <1429225378-22965-3-git-send-email-mike.kravetz@oracle.com>
Date: Thu, 16 Apr 2015 16:02:56 -0700
From: Mike Kravetz <mike.kravetz@...cle.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: Dave Hansen <dave.hansen@...ux.intel.com>,
Naoya Horiguchi <n-horiguchi@...jp.nec.com>,
David Rientjes <rientjes@...gle.com>,
Hugh Dickins <hughd@...gle.com>,
Davidlohr Bueso <dave@...olabs.net>,
Aneesh Kumar <aneesh.kumar@...ux.vnet.ibm.com>,
Mike Kravetz <mike.kravetz@...cle.com>
Subject: [RFC PATCH 2/4] hugetlbfs: New huge_add_to_page_cache helper routine
Currently, there is only a single place where hugetlbfs pages are
added to the page cache. The new fallocate code will be adding a
second one, so break the functionality out into its own helper routine.
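To illustrate (not part of this patch), a second call site such as the
upcoming fallocate support might use the helper roughly as follows; the
function name and surrounding error handling below are hypothetical:

	/*
	 * Hypothetical caller sketch: add a newly allocated huge page at
	 * index 'idx' of 'mapping'.  On success huge_add_to_page_cache()
	 * has already cleared PagePrivate and charged inode->i_blocks, so
	 * the caller only needs to drop its page reference on failure.
	 */
	static int hugetlbfs_fallocate_add_page(struct address_space *mapping,
						struct page *page, pgoff_t idx)
	{
		int err = huge_add_to_page_cache(page, mapping, idx);

		if (err) {
			put_page(page);		/* drop our reference */
			return err;
		}
		return 0;
	}
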
Signed-off-by: Dave Hansen <dave.hansen@...ux.intel.com>
Signed-off-by: Mike Kravetz <mike.kravetz@...cle.com>
---
 include/linux/hugetlb.h |  2 ++
 mm/hugetlb.c            | 27 ++++++++++++++++++---------
 2 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 7b57850..6425945 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -325,6 +325,8 @@ struct huge_bootmem_page {
 struct page *alloc_huge_page_node(struct hstate *h, int nid);
 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
 				unsigned long addr, int avoid_reserve);
+int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+			pgoff_t idx);
 
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c41b2a0..7cda328 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2959,6 +2959,23 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
 	return page != NULL;
 }
 
+int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+			   pgoff_t idx)
+{
+	struct inode *inode = mapping->host;
+	struct hstate *h = hstate_inode(inode);
+	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+
+	if (err)
+		return err;
+	ClearPagePrivate(page);
+
+	spin_lock(&inode->i_lock);
+	inode->i_blocks += blocks_per_huge_page(h);
+	spin_unlock(&inode->i_lock);
+	return 0;
+}
+
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			   struct address_space *mapping, pgoff_t idx,
 			   unsigned long address, pte_t *ptep, unsigned int flags)
@@ -3005,21 +3022,13 @@ retry:
 	__SetPageUptodate(page);
 
 	if (vma->vm_flags & VM_MAYSHARE) {
-		int err;
-		struct inode *inode = mapping->host;
-
-		err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+		int err = huge_add_to_page_cache(page, mapping, idx);
 		if (err) {
 			put_page(page);
 			if (err == -EEXIST)
 				goto retry;
 			goto out;
 		}
-		ClearPagePrivate(page);
-
-		spin_lock(&inode->i_lock);
-		inode->i_blocks += blocks_per_huge_page(h);
-		spin_unlock(&inode->i_lock);
 	} else {
 		lock_page(page);
 		if (unlikely(anon_vma_prepare(vma))) {
--
2.1.0