Message-ID: <148804251828.36605.14910389618497006945.stgit@dwillia2-desk3.amr.corp.intel.com>
Date: Sat, 25 Feb 2017 09:08:38 -0800
From: Dan Williams <dan.j.williams@...el.com>
To: akpm@...ux-foundation.org
Cc: x86@...nel.org, Dave Hansen <dave.hansen@...ux.intel.com>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>,
Thomas Gleixner <tglx@...utronix.de>,
torvalds@...ux-foundation.org,
Ross Zwisler <ross.zwisler@...ux.intel.com>
Subject: [PATCH 2/2] x86, mm: unify exit paths in gup_pte_range()
All exit paths from gup_pte_range() require a pte_unmap() of the
original pte page before returning. Refactor the code to have a single
exit point that performs the unmap.

This mirrors the flow of the generic gup_pte_range() in mm/gup.c.
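The refactor is the classic break-to-a-single-cleanup-exit pattern: early
exits break out of the loop instead of returning, so the release call
appears exactly once. A self-contained user-space sketch of the same
control flow (map_table(), unmap_table(), and scan_table() are
hypothetical stand-ins, not kernel APIs):

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins for pte_offset_map()/pte_unmap(): any
	 * acquire/release pair that must balance on every exit path. */
	static int *map_table(size_t n)
	{
		return calloc(n, sizeof(int));
	}

	static void unmap_table(int *table)
	{
		free(table);
	}

	/* Return 1 only if every entry passes, 0 on the first failure.
	 * Early exits break out of the loop rather than returning, so
	 * the release happens exactly once, as in the patched function. */
	static int scan_table(size_t n)
	{
		int *table, *cursor;	/* play the roles of ptem and ptep */
		size_t i = 0;
		int ret = 0;

		cursor = table = map_table(n);
		if (!table)
			return 0;
		do {
			if (*cursor != 0)	/* stand-in for the pte checks */
				break;		/* was: release + return 0 */
		} while (cursor++, ++i != n);

		if (i == n)			/* loop ran to completion */
			ret = 1;
		unmap_table(table);		/* the single exit-path release */
		return ret;
	}

	int main(void)
	{
		printf("%d\n", scan_table(8));	/* prints 1: calloc()ed entries pass */
		return 0;
	}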
Cc: <x86@...nel.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Ross Zwisler <ross.zwisler@...ux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@...el.com>
---
arch/x86/mm/gup.c | 39 ++++++++++++++++++++-------------------
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 1680768d392c..e703f09c1d78 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -106,36 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
 	struct dev_pagemap *pgmap = NULL;
-	int nr_start = *nr;
-	pte_t *ptep;
+	int nr_start = *nr, ret = 0;
+	pte_t *ptep, *ptem;
 
-	ptep = pte_offset_map(&pmd, addr);
+	/*
+	 * Keep the original mapped PTE value (ptem) around since we
+	 * might increment ptep off the end of the page when finishing
+	 * our loop iteration.
+	 */
+	ptem = ptep = pte_offset_map(&pmd, addr);
 	do {
 		pte_t pte = gup_get_pte(ptep);
 		struct page *page;
 
 		/* Similar to the PMD case, NUMA hinting must take slow path */
-		if (pte_protnone(pte)) {
-			pte_unmap(ptep);
-			return 0;
-		}
+		if (pte_protnone(pte))
+			break;
 
-		if (!pte_allows_gup(pte_val(pte), write)) {
-			pte_unmap(ptep);
-			return 0;
-		}
+		if (!pte_allows_gup(pte_val(pte), write))
+			break;
 
 		if (pte_devmap(pte)) {
 			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
 			if (unlikely(!pgmap)) {
 				undo_dev_pagemap(nr, nr_start, pages);
-				pte_unmap(ptep);
-				return 0;
+				break;
 			}
-		} else if (pte_special(pte)) {
-			pte_unmap(ptep);
-			return 0;
-		}
+		} else if (pte_special(pte))
+			break;
+
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
 		get_page(page);
@@ -145,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		(*nr)++;
 
 	} while (ptep++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(ptep - 1);
+	if (addr == end)
+		ret = 1;
+	pte_unmap(ptem);
 
-	return 1;
+	return ret;
 }
 
 static inline void get_head_page_multiple(struct page *page, int nr)
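
An aside on the saved ptem, since the old code unmapped ptep - 1
instead: once the early returns become breaks, ptep can stop anywhere
in the range rather than always one past the last entry, and only the
pointer saved before the loop is correct on every path. A minimal
user-space sketch (a plain array stands in for the mapped pte page):

	#include <stdio.h>

	/* After a full pass the do/while has advanced the cursor one
	 * past the last entry, while a break leaves it mid-range, so
	 * only the saved starting pointer names the mapped page on
	 * every exit. */
	int main(void)
	{
		int table[4] = { 0, 0, 7, 0 };
		int *start, *cursor;		/* ptem and ptep */
		int i = 0;

		start = cursor = table;
		do {
			if (*cursor == 7)
				break;		/* early exit: cursor mid-range */
		} while (cursor++, ++i != 4);

		/* Prints 2 here; a full pass would print 4 (one past the end). */
		printf("cursor offset at exit: %td\n", cursor - table);
		/* start - table is 0 on every path: release start, not cursor. */
		printf("start offset at exit:  %td\n", start - table);
		return 0;
	}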