Message-ID: <0c417434-94a8-4b1c-b798-47da6ccf964f@arm.com>
Date: Thu, 5 Sep 2024 14:15:11 +0530
From: Dev Jain <dev.jain@....com>
To: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Cc: akpm@...ux-foundation.org, david@...hat.com, willy@...radead.org,
ryan.roberts@....com, anshuman.khandual@....com, catalin.marinas@....com,
cl@...two.org, vbabka@...e.cz, mhocko@...e.com, apopple@...dia.com,
dave.hansen@...ux.intel.com, will@...nel.org, baohua@...nel.org,
jack@...e.cz, mark.rutland@....com, hughd@...gle.com,
aneesh.kumar@...nel.org, yang@...amperecomputing.com, peterx@...hat.com,
ioworker0@...il.com, jglisse@...gle.com,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: Re: [PATCH v2 1/2] mm: Abstract THP allocation
On 9/5/24 13:50, Kirill A. Shutemov wrote:
> On Wed, Sep 04, 2024 at 03:39:22PM +0530, Dev Jain wrote:
>> In preparation for the second patch, abstract away the THP allocation
>> logic present in the create_huge_pmd() path, which corresponds to the
>> faulting case when no page is present.
>>
>> There should be no functional change as a result of applying
>> this patch.
>>
>> Signed-off-by: Dev Jain <dev.jain@....com>
>> ---
>> mm/huge_memory.c | 110 +++++++++++++++++++++++++++++------------------
>> 1 file changed, 67 insertions(+), 43 deletions(-)
>>
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index 67c86a5d64a6..58125fbcc532 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -943,47 +943,89 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
>> }
>> EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
>>
>> -static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
>> - struct page *page, gfp_t gfp)
>> +static vm_fault_t thp_fault_alloc(gfp_t gfp, int order, struct vm_area_struct *vma,
>> + unsigned long haddr, struct folio **foliop,
>> + unsigned long addr)
> foliop is awkward.
>
> Why not return folio? NULL would indicate to the caller to fall back.
I took inspiration from other call sites, like mfill_atomic_pte_copy(), which
pass the folio back through a double pointer. If thp_fault_alloc() returned the
folio instead, it would still have to do all of the fallback stat accounting,
and then the caller would have to check the result for NULL, set
ret = VM_FAULT_FALLBACK, and goto out/release. In other words, the stat
accounting for the fallback and the actual setting of the fallback return value
would get split between the callee and the caller. With the current shape I can
set the folio pointer and the return value in one place.
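
To make the comparison concrete, here is roughly how the call site would look
under the two shapes (an untested sketch, not code from this patch):

	/* Current shape: folio pointer and return value set together
	 * inside thp_fault_alloc() */
	ret = thp_fault_alloc(gfp, HPAGE_PMD_ORDER, vma, haddr, &folio,
			      vmf->address);
	if (ret)
		goto release;

	/* Return-folio shape: the callee still counts the fallback
	 * stats, but the caller must translate NULL into
	 * VM_FAULT_FALLBACK itself */
	folio = thp_fault_alloc(gfp, HPAGE_PMD_ORDER, vma, haddr,
				vmf->address);
	if (unlikely(!folio)) {
		ret = VM_FAULT_FALLBACK;
		goto release;
	}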
>
>> {
>> - struct vm_area_struct *vma = vmf->vma;
>> - struct folio *folio = page_folio(page);
>> - pgtable_t pgtable;
>> - unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
>> - vm_fault_t ret = 0;
>> + struct folio *folio = vma_alloc_folio(gfp, order, vma, haddr, true);
>>
>> - VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
>> + *foliop = folio;
>> + if (unlikely(!folio)) {
>> + count_vm_event(THP_FAULT_FALLBACK);
>> + count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
>> + return VM_FAULT_FALLBACK;
>> + }
>>
>> + VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
>> if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
>> folio_put(folio);
>> count_vm_event(THP_FAULT_FALLBACK);
>> count_vm_event(THP_FAULT_FALLBACK_CHARGE);
>> - count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
>> - count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
>> + count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
>> + count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
>> return VM_FAULT_FALLBACK;
>> }
>> folio_throttle_swaprate(folio, gfp);
>>
>> - pgtable = pte_alloc_one(vma->vm_mm);
>> - if (unlikely(!pgtable)) {
>> - ret = VM_FAULT_OOM;
>> - goto release;
>> - }
>> -
>> - folio_zero_user(folio, vmf->address);
>> + folio_zero_user(folio, addr);
>> /*
>> * The memory barrier inside __folio_mark_uptodate makes sure that
>> * folio_zero_user writes become visible before the set_pmd_at()
>> * write.
>> */
>> __folio_mark_uptodate(folio);
>> + return 0;
>> +}
>> +
>> +static void __thp_fault_success_stats(struct vm_area_struct *vma, int order)
>> +{
>> + count_vm_event(THP_FAULT_ALLOC);
>> + count_mthp_stat(order, MTHP_STAT_ANON_FAULT_ALLOC);
>> + count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
>> +}
>> +
>> +static void map_pmd_thp(struct folio *folio, struct vm_fault *vmf,
>> + struct vm_area_struct *vma, unsigned long haddr,
>> + pgtable_t pgtable)
>> +{
>> + pmd_t entry;
>> +
>> + entry = mk_huge_pmd(&folio->page, vma->vm_page_prot);
>> + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
>> + folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
>> + folio_add_lru_vma(folio, vma);
>> + pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
>> + set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
>> + update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
>> + add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
>> + mm_inc_nr_ptes(vma->vm_mm);
>> +}
>> +
>> +static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
>> +{
>> + struct vm_area_struct *vma = vmf->vma;
>> + struct folio *folio = NULL;
>> + pgtable_t pgtable;
>> + unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
>> + vm_fault_t ret = 0;
>> + gfp_t gfp = vma_thp_gfp_mask(vma);
>> +
>> + pgtable = pte_alloc_one(vma->vm_mm);
>> + if (unlikely(!pgtable)) {
>> + ret = VM_FAULT_OOM;
>> + goto release;
>> + }
>> +
>> + ret = thp_fault_alloc(gfp, HPAGE_PMD_ORDER, vma, haddr, &folio,
>> + vmf->address);
>> + if (ret)
>> + goto release;
> THP page allocation has higher probability to fail than pgtable allocation. It
> is better to allocate it first, before pgtable and do less work on error
> path.
Thanks, makes sense.
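So __do_huge_pmd_anonymous_page() would allocate in the opposite order,
roughly like this (untested sketch; the release path would then also need to
put the folio when pte_alloc_one() fails):

	/* Try the THP allocation first, since it is far more likely to
	 * fail than the page table allocation */
	ret = thp_fault_alloc(gfp, HPAGE_PMD_ORDER, vma, haddr, &folio,
			      vmf->address);
	if (ret)
		goto release;

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}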