Message-Id: <20240916110754.1236200-2-dev.jain@arm.com>
Date: Mon, 16 Sep 2024 16:37:53 +0530
From: Dev Jain <dev.jain@....com>
To: akpm@...ux-foundation.org,
david@...hat.com,
willy@...radead.org
Cc: ryan.roberts@....com,
anshuman.khandual@....com,
baohua@...nel.org,
hughd@...gle.com,
ioworker0@...il.com,
wangkefeng.wang@...wei.com,
baolin.wang@...ux.alibaba.com,
gshan@...hat.com,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
Dev Jain <dev.jain@....com>
Subject: [PATCH v2 1/2] mm: Make pte_range_none() return number of empty PTEs
In preparation for the second patch, change pte_range_none() to return
the number of contiguous empty PTEs starting at the passed pointer,
instead of a boolean, and update its callers to compare the result
against nr_pages.
Signed-off-by: Dev Jain <dev.jain@....com>
---
mm/memory.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6469ac99f2f7..8bb1236de93c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4617,16 +4617,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
return ret;
}
-static bool pte_range_none(pte_t *pte, int nr_pages)
+static int pte_range_none(pte_t *pte, int nr_pages)
{
int i;
for (i = 0; i < nr_pages; i++) {
if (!pte_none(ptep_get_lockless(pte + i)))
- return false;
+ return i;
}
- return true;
+ return nr_pages;
}
static struct folio *alloc_anon_folio(struct vm_fault *vmf)
@@ -4671,7 +4671,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
order = highest_order(orders);
while (orders) {
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
- if (pte_range_none(pte + pte_index(addr), 1 << order))
+ if (pte_range_none(pte + pte_index(addr), 1 << order) == 1 << order)
break;
order = next_order(&orders, order);
}
@@ -4787,7 +4787,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
if (nr_pages == 1 && vmf_pte_changed(vmf)) {
update_mmu_tlb(vma, addr, vmf->pte);
goto release;
- } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
+ } else if (nr_pages > 1 && pte_range_none(vmf->pte, nr_pages) != nr_pages) {
update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
goto release;
}
@@ -5121,7 +5121,7 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
update_mmu_tlb(vma, addr, vmf->pte);
ret = VM_FAULT_NOPAGE;
goto unlock;
- } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
+ } else if (nr_pages > 1 && pte_range_none(vmf->pte, nr_pages) != nr_pages) {
update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
ret = VM_FAULT_NOPAGE;
goto unlock;
--
2.30.2
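
For readers skimming the change, the following is a minimal userspace sketch
(not kernel code) that models the new pte_range_none() contract shown in the
diff above: return the index of the first populated entry, or nr_pages when
the whole range is empty, so callers test "== nr_pages" rather than a boolean.
The pte_t and pte_none() names below are stand-ins for illustration only.

#include <stdio.h>

typedef unsigned long pte_t;            /* stand-in for the kernel type */

static int pte_none_stub(pte_t pte)     /* stand-in for pte_none()      */
{
	return pte == 0;
}

static int pte_range_none(pte_t *pte, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (!pte_none_stub(pte[i]))
			return i;       /* first populated entry        */
	}
	return nr_pages;                /* whole range is empty         */
}

int main(void)
{
	pte_t ptes[4] = { 0, 0, 5, 0 };
	int nr_pages = 4;
	int nr_empty = pte_range_none(ptes, nr_pages);

	/* Callers now compare against nr_pages instead of a bool. */
	if (nr_empty == nr_pages)
		printf("range is fully empty\n");
	else
		printf("first populated entry at index %d\n", nr_empty);

	return 0;
}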