Message-ID: <20190818175824.GA6635@bharath12345-Inspiron-5559>
Date: Sun, 18 Aug 2019 23:28:24 +0530
From: Bharath Vedartham <linux.bhar@...il.com>
To: jhubbard@...dia.com, gregkh@...uxfoundation.org, sivanich@....com,
arnd@...db.de
Cc: ira.weiny@...el.com, jglisse@...hat.com,
william.kucharski@...cle.com, hch@....de,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-kernel-mentees@...ts.linuxfoundation.org
Subject: Re: [Linux-kernel-mentees][PATCH v5 1/1] sgi-gru: Remove *pte_lookup
functions, Convert to get_user_page*()
Hi Dimitri,
Can you confirm whether this driver can call gru_vtop() from interrupt
context?

If so, I'll send you another set of patches that leaves the *pte_lookup
functions unchanged and only converts put_page() to put_user_page() and
removes the CONFIG_HUGETLB_PAGE ifdef, roughly as sketched below.
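For reference, that minimal version of non_atomic_pte_lookup() would
look roughly like this -- an untested sketch that keeps the current
structure, only swaps the release call, and assumes is_vm_hugetlb_page()
and HPAGE_SHIFT are usable without the ifdef (as the patch below also
assumes):

	static int non_atomic_pte_lookup(struct vm_area_struct *vma,
					 unsigned long vaddr, int write,
					 unsigned long *paddr, int *pageshift)
	{
		struct page *page;

		/* stubs to false when CONFIG_HUGETLB_PAGE is disabled */
		*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
		if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0,
				   &page, NULL) <= 0)
			return -EFAULT;
		*paddr = page_to_phys(page);
		put_user_page(page);	/* was: put_page(page) */
		return 0;
	}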
Thank you for your time.

Thanks,
Bharath
On Sat, Aug 10, 2019 at 01:08:17AM +0530, Bharath Vedartham wrote:
> For pages that were retained via get_user_pages*(), release those pages
> via the new put_user_page*() routines, instead of via put_page() or
> release_pages().
>
> This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
> ("mm: introduce put_user_page*(), placeholder versions").
>
> As part of this conversion, the *pte_lookup functions can be removed and
> be easily replaced with get_user_pages_fast() functions. In the case of
> atomic lookup, __get_user_pages_fast() is used, because it does not fall
> back to the slow path: get_user_pages(). get_user_pages_fast(), on the other
> hand, first calls __get_user_pages_fast(), but then falls back to the
> slow path if __get_user_pages_fast() fails.
>
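For reference, the two fast-gup variants used below differ as follows;
this is a sketch of the calling conventions as of this series, not a
substitute for the definitions in mm/gup.c:

	int nr;
	struct page *page;

	/*
	 * Atomic: never sleeps and never falls back to the slow path,
	 * so it is safe to call from interrupt context.  Returns the
	 * number of pages pinned; 0 means retry from non-atomic context.
	 */
	nr = __get_user_pages_fast(vaddr, 1, write, &page);

	/*
	 * Non-atomic: tries the fast path first, then falls back to the
	 * slow path (get_user_pages()) on failure, so it may sleep.
	 */
	nr = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);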
> Also: remove unnecessary CONFIG_HUGETLB ifdefs.
>
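The ifdefs are removable because is_vm_hugetlb_page() is defined for
both configurations; with CONFIG_HUGETLB_PAGE disabled it is a stub
that returns false. Roughly, from include/linux/hugetlb_inline.h (a
sketch from memory, worth double-checking):

	#ifdef CONFIG_HUGETLB_PAGE
	static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
	{
		return !!(vma->vm_flags & VM_HUGETLB);
	}
	#else
	static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
	{
		return false;
	}
	#endif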
> Cc: Ira Weiny <ira.weiny@...el.com>
> Cc: John Hubbard <jhubbard@...dia.com>
> Cc: Jérôme Glisse <jglisse@...hat.com>
> Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
> Cc: Dimitri Sivanich <sivanich@....com>
> Cc: Arnd Bergmann <arnd@...db.de>
> Cc: William Kucharski <william.kucharski@...cle.com>
> Cc: Christoph Hellwig <hch@....de>
> Cc: linux-kernel@...r.kernel.org
> Cc: linux-mm@...ck.org
> Cc: linux-kernel-mentees@...ts.linuxfoundation.org
> Reviewed-by: Ira Weiny <ira.weiny@...el.com>
> Reviewed-by: John Hubbard <jhubbard@...dia.com>
> Reviewed-by: William Kucharski <william.kucharski@...cle.com>
> Signed-off-by: Bharath Vedartham <linux.bhar@...il.com>
> ---
> This is a fold of the 3 patches in the v2 patch series.
> The review tags were given to the individual patches.
>
> Changes since v3
> - Used gup flags in get_user_pages_fast rather than
> boolean flags.
> Changes since v4
> - Updated changelog as suggested by John Hubbard.
> ---
> drivers/misc/sgi-gru/grufault.c | 112 +++++++++-------------------------------
> 1 file changed, 24 insertions(+), 88 deletions(-)
>
> diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
> index 4b713a8..304e9c5 100644
> --- a/drivers/misc/sgi-gru/grufault.c
> +++ b/drivers/misc/sgi-gru/grufault.c
> @@ -166,96 +166,20 @@ static void get_clear_fault_map(struct gru_state *gru,
> }
>
> /*
> - * Atomic (interrupt context) & non-atomic (user context) functions to
> - * convert a vaddr into a physical address. The size of the page
> - * is returned in pageshift.
> - * returns:
> - * 0 - successful
> - * < 0 - error code
> - * 1 - (atomic only) try again in non-atomic context
> - */
> -static int non_atomic_pte_lookup(struct vm_area_struct *vma,
> - unsigned long vaddr, int write,
> - unsigned long *paddr, int *pageshift)
> -{
> - struct page *page;
> -
> -#ifdef CONFIG_HUGETLB_PAGE
> - *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
> -#else
> - *pageshift = PAGE_SHIFT;
> -#endif
> - if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
> - return -EFAULT;
> - *paddr = page_to_phys(page);
> - put_page(page);
> - return 0;
> -}
> -
> -/*
> - * atomic_pte_lookup
> + * mmap_sem is already held on entry to this function. This guarantees
> + * existence of the page tables.
> *
> - * Convert a user virtual address to a physical address
> * Only supports Intel large pages (2MB only) on x86_64.
> - * ZZZ - hugepage support is incomplete
> - *
> - * NOTE: mmap_sem is already held on entry to this function. This
> - * guarantees existence of the page tables.
> + * ZZZ - hugepage support is incomplete.
> */
> -static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
> - int write, unsigned long *paddr, int *pageshift)
> -{
> - pgd_t *pgdp;
> - p4d_t *p4dp;
> - pud_t *pudp;
> - pmd_t *pmdp;
> - pte_t pte;
> -
> - pgdp = pgd_offset(vma->vm_mm, vaddr);
> - if (unlikely(pgd_none(*pgdp)))
> - goto err;
> -
> - p4dp = p4d_offset(pgdp, vaddr);
> - if (unlikely(p4d_none(*p4dp)))
> - goto err;
> -
> - pudp = pud_offset(p4dp, vaddr);
> - if (unlikely(pud_none(*pudp)))
> - goto err;
> -
> - pmdp = pmd_offset(pudp, vaddr);
> - if (unlikely(pmd_none(*pmdp)))
> - goto err;
> -#ifdef CONFIG_X86_64
> - if (unlikely(pmd_large(*pmdp)))
> - pte = *(pte_t *) pmdp;
> - else
> -#endif
> - pte = *pte_offset_kernel(pmdp, vaddr);
> -
> - if (unlikely(!pte_present(pte) ||
> - (write && (!pte_write(pte) || !pte_dirty(pte)))))
> - return 1;
> -
> - *paddr = pte_pfn(pte) << PAGE_SHIFT;
> -#ifdef CONFIG_HUGETLB_PAGE
> - *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
> -#else
> - *pageshift = PAGE_SHIFT;
> -#endif
> - return 0;
> -
> -err:
> - return 1;
> -}
> -
> static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> int write, int atomic, unsigned long *gpa, int *pageshift)
> {
> struct mm_struct *mm = gts->ts_mm;
> struct vm_area_struct *vma;
> unsigned long paddr;
> - int ret, ps;
> + int ret;
> + struct page *page;
>
> vma = find_vma(mm, vaddr);
> if (!vma)
> @@ -263,21 +187,33 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
>
> /*
> * Atomic lookup is faster & usually works even if called in non-atomic
> - * context.
> + * context. get_user_pages_fast() does an atomic lookup before falling
> + * back to slow gup.
> */
> rmb(); /* Must/check ms_range_active before loading PTEs */
> - ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
> - if (ret) {
> - if (atomic)
> + if (atomic) {
> + ret = __get_user_pages_fast(vaddr, 1, write, &page);
> + if (!ret)
> goto upm;
> - if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
> + } else {
> + ret = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);
> + if (!ret)
> goto inval;
> }
> +
> + paddr = page_to_phys(page);
> + put_user_page(page);
> +
> + if (unlikely(is_vm_hugetlb_page(vma)))
> + *pageshift = HPAGE_SHIFT;
> + else
> + *pageshift = PAGE_SHIFT;
> +
> if (is_gru_paddr(paddr))
> goto inval;
> - paddr = paddr & ~((1UL << ps) - 1);
> + paddr = paddr & ~((1UL << *pageshift) - 1);
> *gpa = uv_soc_phys_ram_to_gpa(paddr);
> - *pageshift = ps;
> +
> return VTOP_SUCCESS;
>
> inval:
> --
> 2.7.4
>