Message-Id: <20250211104318.7847e50326089b08222fb09d@kernel.org>
Date: Tue, 11 Feb 2025 10:43:18 +0900
From: Masami Hiramatsu (Google) <mhiramat@...nel.org>
To: Steven Rostedt <rostedt@...dmis.org>
Cc: linux-kernel@...r.kernel.org, linux-trace-kernel@...r.kernel.org, Mark
 Rutland <mark.rutland@....com>, Mathieu Desnoyers
 <mathieu.desnoyers@...icios.com>, Andrew Morton
 <akpm@...ux-foundation.org>, Mike Rapoport <rppt@...nel.org>
Subject: Re: [RFC PATCH v2 2/3] mm/memblock: Add reserved memory release
 function

On Mon, 10 Feb 2025 19:03:06 -0500
Steven Rostedt <rostedt@...dmis.org> wrote:

> On Tue, 11 Feb 2025 00:44:08 +0900
> "Masami Hiramatsu (Google)" <mhiramat@...nel.org> wrote:
> 
> > From: Masami Hiramatsu (Google) <mhiramat@...nel.org>
> > 
> > Add reserved_mem_release_by_name() to release a reserved memory region
> > with a given name. This allows us to release reserved memory which is
> > defined by kernel cmdline, after boot.
> 
> Since Mike wrote the reserve_mem code, we should Cc him on changes.
> 
> Also, I would make patches 2 and 3 a separate patchset, as it is unrelated
> to the other code.

OK, let me make a separate series and Cc the mm maintainers too.


> 
> > 
> > Signed-off-by: Masami Hiramatsu (Google) <mhiramat@...nel.org>
> > ---
> >  include/linux/mm.h |    1 +
> >  mm/memblock.c      |   72 +++++++++++++++++++++++++++++++++++++++++++---------
> >  2 files changed, 61 insertions(+), 12 deletions(-)
> > 
> > diff --git a/include/linux/mm.h b/include/linux/mm.h
> > index f02925447e59..a7201824c1fc 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -4197,6 +4197,7 @@ void vma_pgtable_walk_begin(struct vm_area_struct *vma);
> >  void vma_pgtable_walk_end(struct vm_area_struct *vma);
> >  
> >  int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
> > +int reserved_mem_release_by_name(const char *name);
> 
> Should be "reserve_mem_release_by_name()" and not "reserved_mem" to stay consistent.

Ahh, good catch. I confused reserved_mem and reserve_mem.
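
For reference, a caller of the renamed API would look roughly like this (a
minimal sketch only; the "trace" region name and the wrapper function are
hypothetical, and the error codes are just illustrative):

	#include <linux/mm.h>
	#include <linux/printk.h>

	static int example_release_named_region(void)
	{
		phys_addr_t start, size;

		/* Look up the region that was reserved via reserve_mem= on the cmdline. */
		if (!reserve_mem_find_by_name("trace", &start, &size))
			return -ENOENT;

		pr_info("releasing reserved region at %pa, size %pa\n", &start, &size);

		/*
		 * Hand the reserved pages back to the page allocator; after this the
		 * table entry's size becomes 0 and further lookups by name fail.
		 */
		if (!reserve_mem_release_by_name("trace"))
			return -ENOENT;

		return 0;
	}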

> 
> >  
> >  #ifdef CONFIG_64BIT
> >  int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
> > diff --git a/mm/memblock.c b/mm/memblock.c
> > index 095c18b5c430..9962fbb90597 100644
> > --- a/mm/memblock.c
> > +++ b/mm/memblock.c
> > @@ -16,6 +16,7 @@
> >  #include <linux/kmemleak.h>
> >  #include <linux/seq_file.h>
> >  #include <linux/memblock.h>
> > +#include <linux/mutex.h>
> >  
> >  #include <asm/sections.h>
> >  #include <linux/io.h>
> > @@ -2263,6 +2264,7 @@ struct reserve_mem_table {
> >  };
> >  static struct reserve_mem_table reserved_mem_table[RESERVE_MEM_MAX_ENTRIES];
> >  static int reserved_mem_count;
> > +static DEFINE_MUTEX(reserved_mem_lock);
> 
> "reserve_mem_lock".

OK.

Thanks!
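
(For completeness, the regions released here are the ones created from the
kernel command line via the reserve_mem= option that this file already parses,
e.g. something along the lines of:

	reserve_mem=2M:4096:trace

where the last field is the name passed to reserve_mem_find_by_name() /
reserve_mem_release_by_name(). The size and alignment values above are only an
example.)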

> 
> >  
> >  /* Add wildcard region with a lookup name */
> >  static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
> > @@ -2276,6 +2278,21 @@ static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
> >  	strscpy(map->name, name);
> >  }
> >  
> > +static struct reserve_mem_table *reserve_mem_find_by_name_nolock(const char *name)
> > +{
> > +	struct reserve_mem_table *map;
> > +	int i;
> > +
> > +	for (i = 0; i < reserved_mem_count; i++) {
> > +		map = &reserved_mem_table[i];
> > +		if (!map->size)
> > +			continue;
> > +		if (strcmp(name, map->name) == 0)
> > +			return map;
> > +	}
> > +	return NULL;
> > +}
> > +
> >  /**
> >   * reserve_mem_find_by_name - Find reserved memory region with a given name
> >   * @name: The name that is attached to a reserved memory region
> > @@ -2289,22 +2306,53 @@ static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
> >  int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size)
> >  {
> >  	struct reserve_mem_table *map;
> > -	int i;
> >  
> > -	for (i = 0; i < reserved_mem_count; i++) {
> > -		map = &reserved_mem_table[i];
> > -		if (!map->size)
> > -			continue;
> > -		if (strcmp(name, map->name) == 0) {
> > -			*start = map->start;
> > -			*size = map->size;
> > -			return 1;
> > -		}
> > -	}
> > -	return 0;
> > +	guard(mutex)(&reserved_mem_lock);
> > +	map = reserve_mem_find_by_name_nolock(name);
> > +	if (!map)
> > +		return 0;
> > +
> > +	*start = map->start;
> > +	*size = map->size;
> > +	return 1;
> >  }
> >  EXPORT_SYMBOL_GPL(reserve_mem_find_by_name);
> >  
> > +/**
> > + * reserved_mem_release_by_name - Release reserved memory region with a given name
> > + * @name: The name that is attached to a reserved memory region
> > + *
> > + * Forcibly release the pages in the reserved memory region so that the memory
> > + * can be used as free memory. After release, the reserved region size becomes 0.
> > + *
> > + * Returns: 1 if released or 0 if not found.
> > + */
> > +int reserved_mem_release_by_name(const char *name)
> > +{
> > +	struct reserve_mem_table *map;
> > +	unsigned int page_count;
> > +	phys_addr_t start;
> > +
> > +	guard(mutex)(&reserved_mem_lock);
> > +	map = reserve_mem_find_by_name_nolock(name);
> > +	if (!map)
> > +		return 0;
> > +
> > +	start = map->start;
> > +	page_count = DIV_ROUND_UP(map->size, PAGE_SIZE);
> > +
> > +	for (int i = 0; i < page_count; i++) {
> > +		phys_addr_t addr = start + i * PAGE_SIZE;
> > +		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
> > +
> > +		page->flags &= ~BIT(PG_reserved);
> > +		__free_page(page);
> > +	}
> > +	map->size = 0;
> > +
> > +	return 1;
> > +}
> > +
> >  /*
> >   * Parse reserve_mem=nn:align:name
> >   */
> 
> 
> Thanks,
> 
> -- Steve


-- 
Masami Hiramatsu (Google) <mhiramat@...nel.org>
