Message-ID: <c58d729a-4d8a-4a19-9313-85fa33b5d0af@gaisler.com>
Date: Wed, 8 May 2024 19:32:08 +0200
From: Andreas Larsson <andreas@...sler.com>
To: Dawei Li <dawei.li@...ngroup.cn>, davem@...emloft.net
Cc: sparclinux@...r.kernel.org, linux-kernel@...r.kernel.org, sam@...nborg.org
Subject: Re: [PATCH v4 1/5] sparc/srmmu: Remove on-stack cpumask var

On 2024-04-24 04:55, Dawei Li wrote:
> In general it's preferable to avoid placing cpumasks on the stack, as
> for large values of NR_CPUS these can consume significant amounts of
> stack space and make stack overflows more likely.
> 
> Use cpumask_any_but() to avoid the need for a temporary cpumask on
> the stack and to simplify the code.
> 
> Reviewed-by: Sam Ravnborg <sam@...nborg.org>
> Signed-off-by: Dawei Li <dawei.li@...ngroup.cn>
> ---
>  arch/sparc/mm/srmmu.c | 40 ++++++++++++----------------------------
>  1 file changed, 12 insertions(+), 28 deletions(-)
> 
> diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
> index 852085ada368..9df51a62333d 100644
> --- a/arch/sparc/mm/srmmu.c
> +++ b/arch/sparc/mm/srmmu.c
> @@ -1653,13 +1653,15 @@ static void smp_flush_tlb_all(void)
>  	local_ops->tlb_all();
>  }
>  
> +static bool any_other_mm_cpus(struct mm_struct *mm)
> +{
> +	return cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids;
> +}
> +
>  static void smp_flush_cache_mm(struct mm_struct *mm)
>  {
>  	if (mm->context != NO_CONTEXT) {
> -		cpumask_t cpu_mask;
> -		cpumask_copy(&cpu_mask, mm_cpumask(mm));
> -		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
> -		if (!cpumask_empty(&cpu_mask))
> +		if (any_other_mm_cpus(mm))
>  			xc1(local_ops->cache_mm, (unsigned long)mm);
>  		local_ops->cache_mm(mm);
>  	}
> @@ -1668,10 +1670,7 @@ static void smp_flush_cache_mm(struct mm_struct *mm)
>  static void smp_flush_tlb_mm(struct mm_struct *mm)
>  {
>  	if (mm->context != NO_CONTEXT) {
> -		cpumask_t cpu_mask;
> -		cpumask_copy(&cpu_mask, mm_cpumask(mm));
> -		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
> -		if (!cpumask_empty(&cpu_mask)) {
> +		if (any_other_mm_cpus(mm)) {
>  			xc1(local_ops->tlb_mm, (unsigned long)mm);
>  			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
>  				cpumask_copy(mm_cpumask(mm),
> @@ -1688,10 +1687,7 @@ static void smp_flush_cache_range(struct vm_area_struct *vma,
>  	struct mm_struct *mm = vma->vm_mm;
>  
>  	if (mm->context != NO_CONTEXT) {
> -		cpumask_t cpu_mask;
> -		cpumask_copy(&cpu_mask, mm_cpumask(mm));
> -		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
> -		if (!cpumask_empty(&cpu_mask))
> +		if (any_other_mm_cpus(mm))
>  			xc3(local_ops->cache_range, (unsigned long)vma, start,
>  			    end);
>  		local_ops->cache_range(vma, start, end);
> @@ -1705,10 +1701,7 @@ static void smp_flush_tlb_range(struct vm_area_struct *vma,
>  	struct mm_struct *mm = vma->vm_mm;
>  
>  	if (mm->context != NO_CONTEXT) {
> -		cpumask_t cpu_mask;
> -		cpumask_copy(&cpu_mask, mm_cpumask(mm));
> -		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
> -		if (!cpumask_empty(&cpu_mask))
> +		if (any_other_mm_cpus(mm))
>  			xc3(local_ops->tlb_range, (unsigned long)vma, start,
>  			    end);
>  		local_ops->tlb_range(vma, start, end);
> @@ -1720,10 +1713,7 @@ static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
>  	struct mm_struct *mm = vma->vm_mm;
>  
>  	if (mm->context != NO_CONTEXT) {
> -		cpumask_t cpu_mask;
> -		cpumask_copy(&cpu_mask, mm_cpumask(mm));
> -		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
> -		if (!cpumask_empty(&cpu_mask))
> +		if (any_other_mm_cpus(mm))
>  			xc2(local_ops->cache_page, (unsigned long)vma, page);
>  		local_ops->cache_page(vma, page);
>  	}
> @@ -1734,10 +1724,7 @@ static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
>  	struct mm_struct *mm = vma->vm_mm;
>  
>  	if (mm->context != NO_CONTEXT) {
> -		cpumask_t cpu_mask;
> -		cpumask_copy(&cpu_mask, mm_cpumask(mm));
> -		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
> -		if (!cpumask_empty(&cpu_mask))
> +		if (any_other_mm_cpus(mm))
>  			xc2(local_ops->tlb_page, (unsigned long)vma, page);
>  		local_ops->tlb_page(vma, page);
>  	}
> @@ -1759,10 +1746,7 @@ static void smp_flush_page_to_ram(unsigned long page)
>  
>  static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
>  {
> -	cpumask_t cpu_mask;
> -	cpumask_copy(&cpu_mask, mm_cpumask(mm));
> -	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
> -	if (!cpumask_empty(&cpu_mask))
> +	if (any_other_mm_cpus(mm))
>  		xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr);
>  	local_ops->sig_insns(mm, insn_addr);
>  }

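For the record, since the equivalence is not spelled out in the
changelog: cpumask_any_but(mask, cpu) returns some CPU set in mask
other than cpu, or a value >= nr_cpu_ids when no such CPU exists, so
the "< nr_cpu_ids" test in any_other_mm_cpus() is exactly the old
copy/clear/test sequence without the on-stack cpumask_t (NR_CPUS bits
of stack per call site). A rough userspace sketch of the same check,
with a single unsigned long standing in for cpumask_t (illustrative
only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Userspace analogue of any_other_mm_cpus(): true iff a CPU other
 * than 'cpu' is set in 'mask'. The kernel helper scans the mask
 * word by word for NR_CPUS > BITS_PER_LONG, which is why the old
 * on-stack cpumask_t copy was worth removing. */
static bool any_other_cpu(unsigned long mask, unsigned int cpu)
{
	return (mask & ~(1UL << cpu)) != 0;
}

int main(void)
{
	unsigned long mm_mask = (1UL << 0) | (1UL << 2); /* CPUs 0 and 2 */

	printf("%d\n", any_other_cpu(mm_mask, 0));  /* 1: CPU 2 remains */
	printf("%d\n", any_other_cpu(1UL << 2, 2)); /* 0: only this CPU */
	return 0;
}
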
Reviewed-by: Andreas Larsson <andreas@...sler.com>
Tested-by: Andreas Larsson <andreas@...sler.com>

Picking this up into my for-next branch.

Thanks,
Andreas