[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-Id: <20250212083940.8a88be7b1f9cf68a2cb8465b@kernel.org>
Date: Wed, 12 Feb 2025 08:39:40 +0900
From: Masami Hiramatsu (Google) <mhiramat@...nel.org>
To: Steven Rostedt <rostedt@...dmis.org>
Cc: linux-kernel@...r.kernel.org, linux-trace-kernel@...r.kernel.org, Mark
Rutland <mark.rutland@....com>, Mathieu Desnoyers
<mathieu.desnoyers@...icios.com>, Andrew Morton <akpm@...ux-foundation.org>
Subject: Re: [PATCH v3 1/2] mm/memblock: Add reserved memory release
function
On Tue, 11 Feb 2025 12:52:11 -0500
Steven Rostedt <rostedt@...dmis.org> wrote:
> On Tue, 11 Feb 2025 23:47:03 +0900
> "Masami Hiramatsu (Google)" <mhiramat@...nel.org> wrote:
>
> > From: Masami Hiramatsu (Google) <mhiramat@...nel.org>
> >
> > Add reserve_mem_release_by_name() to release a reserved memory region
> > with a given name. This allows us to release reserved memory which is
> > defined by kernel cmdline, after boot.
> >
> > Signed-off-by: Masami Hiramatsu (Google) <mhiramat@...nel.org>
> > Cc: Andrew Morton <akpm@...ux-foundation.org>
> > Cc: Mike Rapoport <rppt@...nel.org>
> > Cc: linux-mm@...ck.org
>
> Hi, can we get one of the Memory Management maintainers to ack this patch?
>
> We will be having devices going out with the reserve_mem option to perform
> tracing in the field. But that only happens if the user grants permission
> to do so. But the kernel command line does not change between users that
> granted permission and those that do not. We would like to free up the
> memory for those devices where the users did not grant permission to trace,
> as then the memory is just wasted.
Thanks Steve for explaining; I missed Cc'ing the background information.
Here is the cover letter of this series.
https://lore.kernel.org/all/173928521419.906035.17750338150436695675.stgit@devnote2/
Thank you,
>
> Thanks!
>
> -- Steve
>
>
> > ---
> > Changes in v2:
> > - Rename reserved_mem_* to reserve_mem_*.
> > ---
> > include/linux/mm.h | 1 +
> > mm/memblock.c | 72 +++++++++++++++++++++++++++++++++++++++++++---------
> > 2 files changed, 61 insertions(+), 12 deletions(-)
> >
> > diff --git a/include/linux/mm.h b/include/linux/mm.h
> > index f02925447e59..fe5f7711df04 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -4197,6 +4197,7 @@ void vma_pgtable_walk_begin(struct vm_area_struct *vma);
> > void vma_pgtable_walk_end(struct vm_area_struct *vma);
> >
> > int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
> > +int reserve_mem_release_by_name(const char *name);
> >
> > #ifdef CONFIG_64BIT
> > int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
> > diff --git a/mm/memblock.c b/mm/memblock.c
> > index 095c18b5c430..c8d207ebb93c 100644
> > --- a/mm/memblock.c
> > +++ b/mm/memblock.c
> > @@ -16,6 +16,7 @@
> > #include <linux/kmemleak.h>
> > #include <linux/seq_file.h>
> > #include <linux/memblock.h>
> > +#include <linux/mutex.h>
> >
> > #include <asm/sections.h>
> > #include <linux/io.h>
> > @@ -2263,6 +2264,7 @@ struct reserve_mem_table {
> > };
> > static struct reserve_mem_table reserved_mem_table[RESERVE_MEM_MAX_ENTRIES];
> > static int reserved_mem_count;
> > +static DEFINE_MUTEX(reserve_mem_lock);
> >
> > /* Add wildcard region with a lookup name */
> > static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
> > @@ -2276,6 +2278,21 @@ static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
> > strscpy(map->name, name);
> > }
> >
> > +static struct reserve_mem_table *reserve_mem_find_by_name_nolock(const char *name)
> > +{
> > + struct reserve_mem_table *map;
> > + int i;
> > +
> > + for (i = 0; i < reserved_mem_count; i++) {
> > + map = &reserved_mem_table[i];
> > + if (!map->size)
> > + continue;
> > + if (strcmp(name, map->name) == 0)
> > + return map;
> > + }
> > + return NULL;
> > +}
> > +
> > /**
> > * reserve_mem_find_by_name - Find reserved memory region with a given name
> > * @name: The name that is attached to a reserved memory region
> > @@ -2289,22 +2306,53 @@ static void __init reserved_mem_add(phys_addr_t start, phys_addr_t size,
> > int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size)
> > {
> > struct reserve_mem_table *map;
> > - int i;
> >
> > - for (i = 0; i < reserved_mem_count; i++) {
> > - map = &reserved_mem_table[i];
> > - if (!map->size)
> > - continue;
> > - if (strcmp(name, map->name) == 0) {
> > - *start = map->start;
> > - *size = map->size;
> > - return 1;
> > - }
> > - }
> > - return 0;
> > + guard(mutex)(&reserve_mem_lock);
> > + map = reserve_mem_find_by_name_nolock(name);
> > + if (!map)
> > + return 0;
> > +
> > + *start = map->start;
> > + *size = map->size;
> > + return 1;
> > }
> > EXPORT_SYMBOL_GPL(reserve_mem_find_by_name);
> >
> > +/**
> > + * reserve_mem_release_by_name - Release reserved memory region with a given name
> > + * @name: The name that is attached to a reserved memory region
> > + *
> > + * Forcibly release the pages in the reserved memory region so that the memory
> > + * can be used as free memory. After release, the reserved region size becomes 0.
> > + *
> > + * Returns: 1 if released or 0 if not found.
> > + */
> > +int reserve_mem_release_by_name(const char *name)
> > +{
> > + struct reserve_mem_table *map;
> > + unsigned int page_count;
> > + phys_addr_t start;
> > +
> > + guard(mutex)(&reserve_mem_lock);
> > + map = reserve_mem_find_by_name_nolock(name);
> > + if (!map)
> > + return 0;
> > +
> > + start = map->start;
> > + page_count = DIV_ROUND_UP(map->size, PAGE_SIZE);
> > +
> > + for (int i = 0; i < page_count; i++) {
> > + phys_addr_t addr = start + i * PAGE_SIZE;
> > + struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
> > +
> > + page->flags &= ~BIT(PG_reserved);
> > + __free_page(page);
> > + }
> > + map->size = 0;
> > +
> > + return 1;
> > +}
> > +
> > /*
> > * Parse reserve_mem=nn:align:name
> > */
>
--
Masami Hiramatsu (Google) <mhiramat@...nel.org>
Powered by blists - more mailing lists