Message-Id: <87wourbcew.fsf@linux.vnet.ibm.com>
Date:   Fri, 22 Jun 2018 11:17:19 +0530
From:   "Aneesh Kumar K.V" <aneesh.kumar@...ux.vnet.ibm.com>
To:     Nicholas Piggin <npiggin@...il.com>,
        Matthew Wilcox <willy@...radead.org>
Cc:     Ram Pai <linuxram@...ibm.com>, linux-kernel@...r.kernel.org,
        Paul Mackerras <paulus@...ba.org>,
        Thiago Jung Bauermann <bauerman@...ux.vnet.ibm.com>,
        linuxppc-dev@...ts.ozlabs.org
Subject: Re: [PATCH 13/26] ppc: Convert mmu context allocation to new IDA API

Nicholas Piggin <npiggin@...il.com> writes:

> On Thu, 21 Jun 2018 14:28:22 -0700
> Matthew Wilcox <willy@...radead.org> wrote:
>
>> ida_alloc_range is the perfect fit for this use case.  Eliminates
>> a custom spinlock, a call to ida_pre_get and a local check for the
>> allocated ID exceeding a maximum.
>> 
>> Signed-off-by: Matthew Wilcox <willy@...radead.org>
>> ---
>>  arch/powerpc/mm/mmu_context_book3s64.c | 44 +++-----------------------
>>  1 file changed, 4 insertions(+), 40 deletions(-)
>> 
>> diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
>> index f3d4b4a0e561..5a0cf2cc8ba0 100644
>> --- a/arch/powerpc/mm/mmu_context_book3s64.c
>> +++ b/arch/powerpc/mm/mmu_context_book3s64.c
>> @@ -26,48 +26,16 @@
>>  #include <asm/mmu_context.h>
>>  #include <asm/pgalloc.h>
>>  
>> -static DEFINE_SPINLOCK(mmu_context_lock);
>>  static DEFINE_IDA(mmu_context_ida);
>>  
>>  static int alloc_context_id(int min_id, int max_id)
>>  {
>> -	int index, err;
>> -
>> -again:
>> -	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
>> -		return -ENOMEM;
>> -
>> -	spin_lock(&mmu_context_lock);
>> -	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
>> -	spin_unlock(&mmu_context_lock);
>> -
>> -	if (err == -EAGAIN)
>> -		goto again;
>> -	else if (err)
>> -		return err;
>> -
>> -	if (index > max_id) {
>> -		spin_lock(&mmu_context_lock);
>> -		ida_remove(&mmu_context_ida, index);
>> -		spin_unlock(&mmu_context_lock);
>> -		return -ENOMEM;
>> -	}
>> -
>> -	return index;
>> +	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
>>  }
>>  
>>  void hash__reserve_context_id(int id)
>>  {
>> -	int rc, result = 0;
>> -
>> -	do {
>> -		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
>> -			break;
>> -
>> -		spin_lock(&mmu_context_lock);
>> -		rc = ida_get_new_above(&mmu_context_ida, id, &result);
>> -		spin_unlock(&mmu_context_lock);
>> -	} while (rc == -EAGAIN);
>> +	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
>>  
>>  	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
>>  }
>> @@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
>>  
>>  void __destroy_context(int context_id)
>>  {
>> -	spin_lock(&mmu_context_lock);
>> -	ida_remove(&mmu_context_ida, context_id);
>> -	spin_unlock(&mmu_context_lock);
>> +	ida_free(&mmu_context_ida, context_id);
>>  }
>>  EXPORT_SYMBOL_GPL(__destroy_context);
>>  
>> @@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
>>  {
>>  	int index, context_id;
>>  
>> -	spin_lock(&mmu_context_lock);
>>  	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
>>  		context_id = ctx->extended_id[index];
>>  		if (context_id)
>> -			ida_remove(&mmu_context_ida, context_id);
>> +			ida_free(&mmu_context_ida, context_id);
>>  	}
>> -	spin_unlock(&mmu_context_lock);
>>  }
>>  
>>  static void pte_frag_destroy(void *pte_frag)
>
> This hunk should be okay because the mmu_context_lock does not protect
> the extended_id array, right Aneesh?

Yes. destroy_contexts() is called at process exit, so we should not see
parallel calls there. On the allocation side we are protected by
mmap_sem; the extended_id entries are allocated when doing mmap.
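
For reference, the new API folds both the locking and the range check
into the IDA itself, which is why the callers get so much simpler. A
minimal sketch of the new calling pattern (illustrative only, not the
actual kernel code paths):

	/*
	 * ida_alloc_range() returns the allocated ID on success, or a
	 * negative errno: -ENOMEM on allocation failure, -ENOSPC when
	 * the [min_id, max_id] range is exhausted.  Locking is internal
	 * to the IDA, so no external spinlock or retry loop is needed.
	 */
	int id = ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);

	if (id < 0)
		return id;

	/* ... use id ... */

	ida_free(&mmu_context_ida, id);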

-aneesh
