[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230830184958.2333078-4-ankur.a.arora@oracle.com>
Date: Wed, 30 Aug 2023 11:49:52 -0700
From: Ankur Arora <ankur.a.arora@...cle.com>
To: linux-kernel@...r.kernel.org, linux-mm@...ck.org, x86@...nel.org
Cc: akpm@...ux-foundation.org, luto@...nel.org, bp@...en8.de,
dave.hansen@...ux.intel.com, hpa@...or.com, mingo@...hat.com,
juri.lelli@...hat.com, vincent.guittot@...aro.org,
willy@...radead.org, mgorman@...e.de, peterz@...radead.org,
rostedt@...dmis.org, tglx@...utronix.de, jon.grimm@....com,
bharata@....com, raghavendra.kt@....com,
boris.ostrovsky@...cle.com, konrad.wilk@...cle.com,
Ankur Arora <ankur.a.arora@...cle.com>
Subject: [PATCH v2 3/9] mm/huge_page: cleanup clear_/copy_subpage()
clear_subpage() and copy_subpage():
static int clear_subpage(unsigned long addr, int idx, void *arg)
static int copy_subpage(unsigned long addr, int idx, void *arg)
take as parameters an index, a post-indexing virtual address, and
an opaque argument — a struct page * for clear_subpage(), or a
struct copy_subpage_arg * for copy_subpage() — which is then
resolved using the index.
Fold clear_subpage() back into its caller and clean up the indexing
for copy_subpage().
Signed-off-by: Ankur Arora <ankur.a.arora@...cle.com>
---
mm/memory.c | 58 +++++++++++++++++------------------------------------
1 file changed, 18 insertions(+), 40 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6e005b787608..4f1ce3ad0af5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5914,24 +5914,14 @@ static void clear_gigantic_page(struct page *page,
}
}
-static int clear_subpage(unsigned long addr, int idx, void *arg)
-{
- struct page *page = arg;
-
- clear_user_highpage(page + idx, addr);
- return 0;
-}
-
/*
* Clear subpages of the specified huge page. The target subpage will be
* processed last to keep its cache lines hot.
*/
-static int __clear_huge_page(
- unsigned long addr_hint, unsigned int pages_per_huge_page,
- int (*process_subpage)(unsigned long addr, int idx, void *arg),
- void *arg)
+static void __clear_huge_page(struct page *page,
+ unsigned long addr_hint, unsigned int pages_per_huge_page)
{
- int i, n, base, l, ret;
+ int i, n, base, l;
unsigned long addr = addr_hint &
~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
@@ -5945,9 +5935,7 @@ static int __clear_huge_page(
/* Process subpages at the end of huge page */
for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
cond_resched();
- ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
- if (ret)
- return ret;
+ clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
} else {
/* If target subpage in second half of huge page */
@@ -5956,9 +5944,7 @@ static int __clear_huge_page(
/* Process subpages at the begin of huge page */
for (i = 0; i < base; i++) {
cond_resched();
- ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
- if (ret)
- return ret;
+ clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
}
/*
@@ -5970,15 +5956,11 @@ static int __clear_huge_page(
int right_idx = base + 2 * l - 1 - i;
cond_resched();
- ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
- if (ret)
- return ret;
+ clear_user_highpage(page + left_idx, addr + left_idx * PAGE_SIZE);
+
cond_resched();
- ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
- if (ret)
- return ret;
+ clear_user_highpage(page + right_idx, addr + right_idx * PAGE_SIZE);
}
- return 0;
}
__weak void clear_huge_page(struct page *page,
@@ -5993,7 +5975,7 @@ __weak void clear_huge_page(struct page *page,
return;
}
- __clear_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+ __clear_huge_page(page, addr_hint, pages_per_huge_page);
}
static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
@@ -6025,12 +6007,10 @@ struct copy_subpage_arg {
struct vm_area_struct *vma;
};
-static int copy_subpage(unsigned long addr, int idx, void *arg)
+static int copy_subpage(struct copy_subpage_arg *copy_arg, unsigned long addr, int idx)
{
- struct copy_subpage_arg *copy_arg = arg;
-
if (copy_mc_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
- addr, copy_arg->vma)) {
+ addr + idx * PAGE_SIZE, copy_arg->vma)) {
memory_failure_queue(page_to_pfn(copy_arg->src + idx), 0);
return -EHWPOISON;
}
@@ -6041,10 +6021,8 @@ static int copy_subpage(unsigned long addr, int idx, void *arg)
* Copy subpages of the specified huge page. The target subpage will be
* processed last to keep its cache lines hot.
*/
-static int __copy_huge_page(
- unsigned long addr_hint, unsigned int pages_per_huge_page,
- int (*process_subpage)(unsigned long addr, int idx, void *arg),
- void *arg)
+static int __copy_huge_page(struct copy_subpage_arg *arg,
+ unsigned long addr_hint, unsigned int pages_per_huge_page)
{
int i, n, base, l, ret;
unsigned long addr = addr_hint &
@@ -6060,7 +6038,7 @@ static int __copy_huge_page(
/* Process subpages at the end of huge page */
for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
cond_resched();
- ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+ ret = copy_subpage(arg, addr, i);
if (ret)
return ret;
}
@@ -6071,7 +6049,7 @@ static int __copy_huge_page(
/* Process subpages at the begin of huge page */
for (i = 0; i < base; i++) {
cond_resched();
- ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+ ret = copy_subpage(arg, addr, i);
if (ret)
return ret;
}
@@ -6085,11 +6063,11 @@ static int __copy_huge_page(
int right_idx = base + 2 * l - 1 - i;
cond_resched();
- ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
+ ret = copy_subpage(arg, addr, left_idx);
if (ret)
return ret;
cond_resched();
- ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
+ ret = copy_subpage(arg, addr, right_idx);
if (ret)
return ret;
}
@@ -6112,7 +6090,7 @@ int copy_user_large_folio(struct folio *dst, struct folio *src,
return copy_user_gigantic_page(dst, src, addr, vma,
pages_per_huge_page);
- return __copy_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+ return __copy_huge_page(&arg, addr_hint, pages_per_huge_page);
}
long copy_folio_from_user(struct folio *dst_folio,
--
2.31.1
Powered by blists - more mailing lists