Message-ID: <CAHS8izPg2W394+kNMGN1oWu1KQpcQGEw5xHR95wE=-zPeQmySA@mail.gmail.com>
Date: Wed, 7 Dec 2022 15:33:05 -0800
From: Mina Almasry <almasrymina@...gle.com>
To: James Houghton <jthoughton@...gle.com>
Cc: Mike Kravetz <mike.kravetz@...cle.com>,
Muchun Song <songmuchun@...edance.com>,
Peter Xu <peterx@...hat.com>,
David Hildenbrand <david@...hat.com>,
David Rientjes <rientjes@...gle.com>,
Axel Rasmussen <axelrasmussen@...gle.com>,
"Zach O'Keefe" <zokeefe@...gle.com>,
Manish Mishra <manish.mishra@...anix.com>,
Naoya Horiguchi <naoya.horiguchi@....com>,
"Dr . David Alan Gilbert" <dgilbert@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Yang Shi <shy828301@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [RFC PATCH v2 05/47] hugetlb: make hugetlb_vma_lock_alloc return
its failure reason
On Fri, Oct 21, 2022 at 9:37 AM James Houghton <jthoughton@...gle.com> wrote:
>
> Currently hugetlb_vma_lock_alloc doesn't return anything, as there is no
> need: if it fails, PMD sharing won't be enabled. However, HGM requires
> that the VMA lock exists, so we need to verify that
> hugetlb_vma_lock_alloc actually succeeded. If hugetlb_vma_lock_alloc
> fails, then we can pass that up to the caller that is attempting to
> enable HGM.
>
> Signed-off-by: James Houghton <jthoughton@...gle.com>
> ---
> mm/hugetlb.c | 16 +++++++++-------
> 1 file changed, 9 insertions(+), 7 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 52cec5b0789e..dc82256b89dd 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -92,7 +92,7 @@ struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
> /* Forward declaration */
> static int hugetlb_acct_memory(struct hstate *h, long delta);
> static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
> -static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
> +static int hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
> static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
>
> static inline bool subpool_is_free(struct hugepage_subpool *spool)
> @@ -7001,17 +7001,17 @@ static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
> }
> }
>
> -static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
> +static int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
> {
> struct hugetlb_vma_lock *vma_lock;
>
> /* Only establish in (flags) sharable vmas */
> if (!vma || !(vma->vm_flags & VM_MAYSHARE))
> - return;
> + return -EINVAL;
>
> - /* Should never get here with non-NULL vm_private_data */
> + /* We've already allocated the lock. */
> if (vma->vm_private_data)
> - return;
> + return 0;
I would have expected -EEXIST here.
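Something like this untested sketch is what I had in mind (only the
return value changes; whether the caller should treat -EEXIST as an
error or as "already set up" is up to you):

	/* The lock has already been allocated. */
	if (vma->vm_private_data)
		return -EEXIST;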
Also, even though the patch looks generally fine, it's hard to provide
an Acked-by now. I need to look at the call site, which is in another
patch in the series. If there is an opportunity to squash changes to
helpers and their call sites, please do.
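For reference, this is roughly the kind of call-site check I'd want to
review together with this change (purely a sketch; the real caller is
in a later patch and may look different):

	int ret = hugetlb_vma_lock_alloc(vma);

	if (ret)
		/* HGM requires the VMA lock, so propagate the failure. */
		return ret;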
>
> vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
> if (!vma_lock) {
> @@ -7026,13 +7026,14 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
> * allocation failure.
> */
> pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
> - return;
> + return -ENOMEM;
> }
>
> kref_init(&vma_lock->refs);
> init_rwsem(&vma_lock->rw_sema);
> vma_lock->vma = vma;
> vma->vm_private_data = vma_lock;
> + return 0;
> }
>
> /*
> @@ -7160,8 +7161,9 @@ static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
> {
> }
>
> -static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
> +static int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
> {
> + return 0;
> }
>
> pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
> --
> 2.38.0.135.g90850a2211-goog
>