Message-ID: <aJ2fd3iD6GqZ_LWw@kernel.org>
Date: Thu, 14 Aug 2025 11:33:59 +0300
From: Mike Rapoport <rppt@...nel.org>
To: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
	Alexander Gordeev <agordeev@...ux.ibm.com>,
	Gerald Schaefer <gerald.schaefer@...ux.ibm.com>,
	Heiko Carstens <hca@...ux.ibm.com>,
	Vasily Gorbik <gor@...ux.ibm.com>,
	Christian Borntraeger <borntraeger@...ux.ibm.com>,
	Sven Schnelle <svens@...ux.ibm.com>,
	"David S . Miller" <davem@...emloft.net>,
	Andreas Larsson <andreas@...sler.com>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	Andy Lutomirski <luto@...nel.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
	"H . Peter Anvin" <hpa@...or.com>,
	Alexander Viro <viro@...iv.linux.org.uk>,
	Christian Brauner <brauner@...nel.org>, Jan Kara <jack@...e.cz>,
	Kees Cook <kees@...nel.org>, David Hildenbrand <david@...hat.com>,
	Zi Yan <ziy@...dia.com>,
	Baolin Wang <baolin.wang@...ux.alibaba.com>,
	"Liam R . Howlett" <Liam.Howlett@...cle.com>,
	Nico Pache <npache@...hat.com>, Ryan Roberts <ryan.roberts@....com>,
	Dev Jain <dev.jain@....com>, Barry Song <baohua@...nel.org>,
	Xu Xin <xu.xin16@....com.cn>,
	Chengming Zhou <chengming.zhou@...ux.dev>,
	Vlastimil Babka <vbabka@...e.cz>,
	Suren Baghdasaryan <surenb@...gle.com>,
	Michal Hocko <mhocko@...e.com>,
	David Rientjes <rientjes@...gle.com>,
	Shakeel Butt <shakeel.butt@...ux.dev>,
	Arnaldo Carvalho de Melo <acme@...nel.org>,
	Namhyung Kim <namhyung@...nel.org>,
	Mark Rutland <mark.rutland@....com>,
	Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
	Jiri Olsa <jolsa@...nel.org>, Ian Rogers <irogers@...gle.com>,
	Adrian Hunter <adrian.hunter@...el.com>,
	Kan Liang <kan.liang@...ux.intel.com>,
	Masami Hiramatsu <mhiramat@...nel.org>,
	Oleg Nesterov <oleg@...hat.com>, Juri Lelli <juri.lelli@...hat.com>,
	Vincent Guittot <vincent.guittot@...aro.org>,
	Dietmar Eggemann <dietmar.eggemann@....com>,
	Steven Rostedt <rostedt@...dmis.org>,
	Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
	Valentin Schneider <vschneid@...hat.com>,
	Jason Gunthorpe <jgg@...pe.ca>, John Hubbard <jhubbard@...dia.com>,
	Peter Xu <peterx@...hat.com>, Jann Horn <jannh@...gle.com>,
	Pedro Falcato <pfalcato@...e.de>,
	Matthew Wilcox <willy@...radead.org>,
	Mateusz Guzik <mjguzik@...il.com>, linux-s390@...r.kernel.org,
	linux-kernel@...r.kernel.org, sparclinux@...r.kernel.org,
	linux-fsdevel@...r.kernel.org, linux-mm@...ck.org,
	linux-trace-kernel@...r.kernel.org,
	linux-perf-users@...r.kernel.org
Subject: Re: [PATCH 05/10] mm: convert uprobes to mm_flags_*() accessors

On Tue, Aug 12, 2025 at 04:44:14PM +0100, Lorenzo Stoakes wrote:
> As part of the effort to convert mm->flags to a bitmap field, convert
> existing users to the mm_flags_*() accessors, which will, once the
> conversion is complete, be the only means of accessing mm_struct flags.
> 
> No functional change intended.
> 
> Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>

Reviewed-by: Mike Rapoport (Microsoft) <rppt@...nel.org>
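
For anyone following along, here is a rough userspace sketch of the accessor
pattern the series moves to: mm->flags becomes a bitmap and every access goes
through small wrappers instead of open-coded test_bit()/set_bit() on a bare
unsigned long field. This is not the kernel implementation (the real helpers
use atomic bitops and the kernel's bitmap machinery, and the flag numbers
below are placeholders); it only illustrates the shape of the API used in the
hunks quoted below.

/*
 * Non-atomic userspace sketch of mm_flags_*() accessors over a bitmap.
 * The actual kernel helpers are atomic and may differ in detail.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define MM_NR_FLAGS	64
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define FLAG_WORDS	((MM_NR_FLAGS + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct mm_struct {
	unsigned long flags[FLAG_WORDS];	/* bitmap instead of a single word */
};

static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
{
	return mm->flags[flag / BITS_PER_LONG] & (1UL << (flag % BITS_PER_LONG));
}

static inline void mm_flags_set(int flag, struct mm_struct *mm)
{
	mm->flags[flag / BITS_PER_LONG] |= 1UL << (flag % BITS_PER_LONG);
}

static inline void mm_flags_clear(int flag, struct mm_struct *mm)
{
	mm->flags[flag / BITS_PER_LONG] &= ~(1UL << (flag % BITS_PER_LONG));
}

static inline bool mm_flags_test_and_clear(int flag, struct mm_struct *mm)
{
	bool old = mm_flags_test(flag, mm);

	mm_flags_clear(flag, mm);
	return old;
}

/* placeholder flag numbers, for illustration only */
enum { MMF_HAS_UPROBES = 0, MMF_RECALC_UPROBES = 1 };

int main(void)
{
	struct mm_struct mm = { { 0 } };

	mm_flags_set(MMF_HAS_UPROBES, &mm);
	printf("has uprobes: %d\n", mm_flags_test(MMF_HAS_UPROBES, &mm));
	mm_flags_clear(MMF_HAS_UPROBES, &mm);
	printf("has uprobes: %d\n", mm_flags_test(MMF_HAS_UPROBES, &mm));
	return 0;
}

Callers never touch the underlying words directly, which is what lets the
field grow past a single unsigned long without further churn.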

> ---
>  kernel/events/uprobes.c | 32 ++++++++++++++++----------------
>  1 file changed, 16 insertions(+), 16 deletions(-)
> 
> diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
> index 7ca1940607bd..31a12b60055f 100644
> --- a/kernel/events/uprobes.c
> +++ b/kernel/events/uprobes.c
> @@ -1153,15 +1153,15 @@ static int install_breakpoint(struct uprobe *uprobe, struct vm_area_struct *vma,
>  	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
>  	 * the task can hit this breakpoint right after __replace_page().
>  	 */
> -	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
> +	first_uprobe = !mm_flags_test(MMF_HAS_UPROBES, mm);
>  	if (first_uprobe)
> -		set_bit(MMF_HAS_UPROBES, &mm->flags);
> +		mm_flags_set(MMF_HAS_UPROBES, mm);
>  
>  	ret = set_swbp(&uprobe->arch, vma, vaddr);
>  	if (!ret)
> -		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
> +		mm_flags_clear(MMF_RECALC_UPROBES, mm);
>  	else if (first_uprobe)
> -		clear_bit(MMF_HAS_UPROBES, &mm->flags);
> +		mm_flags_clear(MMF_HAS_UPROBES, mm);
>  
>  	return ret;
>  }
> @@ -1171,7 +1171,7 @@ static int remove_breakpoint(struct uprobe *uprobe, struct vm_area_struct *vma,
>  {
>  	struct mm_struct *mm = vma->vm_mm;
>  
> -	set_bit(MMF_RECALC_UPROBES, &mm->flags);
> +	mm_flags_set(MMF_RECALC_UPROBES, mm);
>  	return set_orig_insn(&uprobe->arch, vma, vaddr);
>  }
>  
> @@ -1303,7 +1303,7 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
>  			/* consult only the "caller", new consumer. */
>  			if (consumer_filter(new, mm))
>  				err = install_breakpoint(uprobe, vma, info->vaddr);
> -		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
> +		} else if (mm_flags_test(MMF_HAS_UPROBES, mm)) {
>  			if (!filter_chain(uprobe, mm))
>  				err |= remove_breakpoint(uprobe, vma, info->vaddr);
>  		}
> @@ -1595,7 +1595,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
>  
>  	if (vma->vm_file &&
>  	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
> -	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
> +	    mm_flags_test(MMF_HAS_UPROBES, vma->vm_mm))
>  		delayed_ref_ctr_inc(vma);
>  
>  	if (!valid_vma(vma, true))
> @@ -1655,12 +1655,12 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
>  	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
>  		return;
>  
> -	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
> -	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
> +	if (!mm_flags_test(MMF_HAS_UPROBES, vma->vm_mm) ||
> +	     mm_flags_test(MMF_RECALC_UPROBES, vma->vm_mm))
>  		return;
>  
>  	if (vma_has_uprobes(vma, start, end))
> -		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
> +		mm_flags_set(MMF_RECALC_UPROBES, vma->vm_mm);
>  }
>  
>  static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
> @@ -1823,10 +1823,10 @@ void uprobe_end_dup_mmap(void)
>  
>  void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
>  {
> -	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
> -		set_bit(MMF_HAS_UPROBES, &newmm->flags);
> +	if (mm_flags_test(MMF_HAS_UPROBES, oldmm)) {
> +		mm_flags_set(MMF_HAS_UPROBES, newmm);
>  		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
> -		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
> +		mm_flags_set(MMF_RECALC_UPROBES, newmm);
>  	}
>  }
>  
> @@ -2370,7 +2370,7 @@ static void mmf_recalc_uprobes(struct mm_struct *mm)
>  			return;
>  	}
>  
> -	clear_bit(MMF_HAS_UPROBES, &mm->flags);
> +	mm_flags_clear(MMF_HAS_UPROBES, mm);
>  }
>  
>  static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
> @@ -2468,7 +2468,7 @@ static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swb
>  		*is_swbp = -EFAULT;
>  	}
>  
> -	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
> +	if (!uprobe && mm_flags_test_and_clear(MMF_RECALC_UPROBES, mm))
>  		mmf_recalc_uprobes(mm);
>  	mmap_read_unlock(mm);
>  
> @@ -2818,7 +2818,7 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs)
>  	if (!current->mm)
>  		return 0;
>  
> -	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
> +	if (!mm_flags_test(MMF_HAS_UPROBES, current->mm) &&
>  	    (!current->utask || !current->utask->return_instances))
>  		return 0;
>  
> -- 
> 2.50.1
> 

-- 
Sincerely yours,
Mike.
