Message-ID: <20180622121511.00ae9d00@roar.ozlabs.ibm.com>
Date: Fri, 22 Jun 2018 12:15:11 +1000
From: Nicholas Piggin <npiggin@...il.com>
To: Matthew Wilcox <willy@...radead.org>
Cc: linux-kernel@...r.kernel.org,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Michael Ellerman <mpe@...erman.id.au>,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.vnet.ibm.com>,
Thiago Jung Bauermann <bauerman@...ux.vnet.ibm.com>,
Ram Pai <linuxram@...ibm.com>, linuxppc-dev@...ts.ozlabs.org
Subject: Re: [PATCH 13/26] ppc: Convert mmu context allocation to new IDA API
On Thu, 21 Jun 2018 14:28:22 -0700
Matthew Wilcox <willy@...radead.org> wrote:
> ida_alloc_range is the perfect fit for this use case. Eliminates
> a custom spinlock, a call to ida_pre_get and a local check for the
> allocated ID exceeding a maximum.
>
> Signed-off-by: Matthew Wilcox <willy@...radead.org>
> ---
> arch/powerpc/mm/mmu_context_book3s64.c | 44 +++-----------------------
> 1 file changed, 4 insertions(+), 40 deletions(-)
>
> diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
> index f3d4b4a0e561..5a0cf2cc8ba0 100644
> --- a/arch/powerpc/mm/mmu_context_book3s64.c
> +++ b/arch/powerpc/mm/mmu_context_book3s64.c
> @@ -26,48 +26,16 @@
> #include <asm/mmu_context.h>
> #include <asm/pgalloc.h>
>
> -static DEFINE_SPINLOCK(mmu_context_lock);
> static DEFINE_IDA(mmu_context_ida);
>
> static int alloc_context_id(int min_id, int max_id)
> {
> - int index, err;
> -
> -again:
> - if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
> - return -ENOMEM;
> -
> - spin_lock(&mmu_context_lock);
> - err = ida_get_new_above(&mmu_context_ida, min_id, &index);
> - spin_unlock(&mmu_context_lock);
> -
> - if (err == -EAGAIN)
> - goto again;
> - else if (err)
> - return err;
> -
> - if (index > max_id) {
> - spin_lock(&mmu_context_lock);
> - ida_remove(&mmu_context_ida, index);
> - spin_unlock(&mmu_context_lock);
> - return -ENOMEM;
> - }
> -
> - return index;
> + return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
> }
>
> void hash__reserve_context_id(int id)
> {
> - int rc, result = 0;
> -
> - do {
> - if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
> - break;
> -
> - spin_lock(&mmu_context_lock);
> - rc = ida_get_new_above(&mmu_context_ida, id, &result);
> - spin_unlock(&mmu_context_lock);
> - } while (rc == -EAGAIN);
> + int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
>
> WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
> }
> @@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
>
> void __destroy_context(int context_id)
> {
> - spin_lock(&mmu_context_lock);
> - ida_remove(&mmu_context_ida, context_id);
> - spin_unlock(&mmu_context_lock);
> + ida_free(&mmu_context_ida, context_id);
> }
> EXPORT_SYMBOL_GPL(__destroy_context);
>
> @@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
> {
> int index, context_id;
>
> - spin_lock(&mmu_context_lock);
> for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
> context_id = ctx->extended_id[index];
> if (context_id)
> - ida_remove(&mmu_context_ida, context_id);
> + ida_free(&mmu_context_ida, context_id);
> }
> - spin_unlock(&mmu_context_lock);
> }
>
> static void pte_frag_destroy(void *pte_frag)
This hunk should be okay because the mmu_context_lock does not protect
the extended_id array, right Aneesh?
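For context, a minimal illustrative sketch of the new API's semantics (not
part of the patch; the example_* names are made up): ida_alloc_range() and
ida_free() serialise on the IDA's own internal lock, so the converted callers
no longer need mmu_context_lock around the allocator calls, and the only
remaining question is what protects the extended_id[] array itself.

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_get_id(void)
	{
		/* Returns the smallest free ID in [1, 127], or a negative
		 * errno (-ENOMEM / -ENOSPC); no caller-side lock needed. */
		return ida_alloc_range(&example_ida, 1, 127, GFP_KERNEL);
	}

	static void example_put_id(int id)
	{
		ida_free(&example_ida, id);	/* likewise lock-free for the caller */
	}
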
Reviewed-by: Nicholas Piggin <npiggin@...il.com>
Thanks,
Nick