Message-ID: <2023112041-kelp-properly-2b6e@gregkh>
Date: Mon, 20 Nov 2023 11:53:52 +0100
From: Greg KH <gregkh@...uxfoundation.org>
To: Sumanth Korikkar <sumanthk@...ux.ibm.com>
Cc: linux-mm <linux-mm@...ck.org>,
Andrew Morton <akpm@...ux-foundation.org>,
David Hildenbrand <david@...hat.com>,
Oscar Salvador <osalvador@...e.de>,
Michal Hocko <mhocko@...e.com>,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Gerald Schaefer <gerald.schaefer@...ux.ibm.com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Heiko Carstens <hca@...ux.ibm.com>,
Vasily Gorbik <gor@...ux.ibm.com>,
linux-s390 <linux-s390@...r.kernel.org>,
LKML <linux-kernel@...r.kernel.org>, stable@...r.kernel.org
Subject: Re: [PATCH v2 1/3] mm/memory_hotplug: add missing mem_hotplug_lock
On Mon, Nov 20, 2023 at 11:27:32AM +0100, Sumanth Korikkar wrote:
> From Documentation/core-api/memory-hotplug.rst:
> When adding/removing/onlining/offlining memory or adding/removing
> heterogeneous/device memory, we should always hold the mem_hotplug_lock
> in write mode to serialise memory hotplug (e.g. access to global/zone
> variables).
>
> The mhp_(de)init_memmap_on_memory() functions can change zone stats and
> struct page content, but they are currently called without holding the
> mem_hotplug_lock.
>
> When a memory block is being offlined and kmemleak goes through each
> populated zone, the following theoretical race condition could occur:
> CPU 0: | CPU 1:
> memory_offline() |
> -> offline_pages() |
> -> mem_hotplug_begin() |
> ... |
> -> mem_hotplug_done() |
> | kmemleak_scan()
> | -> get_online_mems()
> | ...
> -> mhp_deinit_memmap_on_memory() |
> [not protected by mem_hotplug_begin/done()]|
> Marks memory section as offline, | Retrieves zone_start_pfn
> poisons vmemmap struct pages and updates | and struct page members.
> the zone related data |
> | ...
> | -> put_online_mems()
>
> Fix this by ensuring mem_hotplug_lock is held in write mode around
> mhp_init_memmap_on_memory(), and likewise that
> mhp_deinit_memmap_on_memory() is called with the lock held.
>
> online/offline_pages() are currently only called from
> memory_block_online/offline(), so it is safe to move the locking there.
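>
> After this change the locking nests as follows (a sketch of the
> resulting call structure, error paths omitted; see the diff below):
>
>   memory_block_online():             memory_block_offline():
>     mem_hotplug_begin()                mem_hotplug_begin()
>     mhp_init_memmap_on_memory()        adjust_present_page_count()
>     online_pages()                     offline_pages()
>     adjust_present_page_count()        mhp_deinit_memmap_on_memory()
>     mem_hotplug_done()                 mem_hotplug_done()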
>
> Fixes: a08a2ae34613 ("mm,memory_hotplug: allocate memmap from the added memory range")
> Reviewed-by: Gerald Schaefer <gerald.schaefer@...ux.ibm.com>
> Acked-by: David Hildenbrand <david@...hat.com>
> Signed-off-by: Sumanth Korikkar <sumanthk@...ux.ibm.com>
> ---
> drivers/base/memory.c | 18 +++++++++++++++---
> mm/memory_hotplug.c | 13 ++++++-------
> 2 files changed, 21 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/base/memory.c b/drivers/base/memory.c
> index f3b9a4d0fa3b..8a13babd826c 100644
> --- a/drivers/base/memory.c
> +++ b/drivers/base/memory.c
> @@ -180,6 +180,9 @@ static inline unsigned long memblk_nr_poison(struct memory_block *mem)
> }
> #endif
>
> +/*
> + * Must acquire mem_hotplug_lock in write mode.
> + */
> static int memory_block_online(struct memory_block *mem)
> {
> unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
> @@ -204,10 +207,11 @@ static int memory_block_online(struct memory_block *mem)
> if (mem->altmap)
> nr_vmemmap_pages = mem->altmap->free;
>
> + mem_hotplug_begin();
> if (nr_vmemmap_pages) {
> ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
> if (ret)
> - return ret;
> + goto out;
> }
>
> ret = online_pages(start_pfn + nr_vmemmap_pages,
> @@ -215,7 +219,7 @@ static int memory_block_online(struct memory_block *mem)
> if (ret) {
> if (nr_vmemmap_pages)
> mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
> - return ret;
> + goto out;
> }
>
> /*
> @@ -227,9 +231,14 @@ static int memory_block_online(struct memory_block *mem)
> nr_vmemmap_pages);
>
> mem->zone = zone;
> +out:
> + mem_hotplug_done();
> return ret;
> }
>
> +/*
> + * Must acquire mem_hotplug_lock in write mode.
> + */
> static int memory_block_offline(struct memory_block *mem)
> {
> unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
> @@ -247,6 +256,7 @@ static int memory_block_offline(struct memory_block *mem)
> if (mem->altmap)
> nr_vmemmap_pages = mem->altmap->free;
>
> + mem_hotplug_begin();
> if (nr_vmemmap_pages)
> adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
> -nr_vmemmap_pages);
> @@ -258,13 +268,15 @@ static int memory_block_offline(struct memory_block *mem)
> if (nr_vmemmap_pages)
> adjust_present_page_count(pfn_to_page(start_pfn),
> mem->group, nr_vmemmap_pages);
> - return ret;
> + goto out;
> }
>
> if (nr_vmemmap_pages)
> mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
>
> mem->zone = NULL;
> +out:
> + mem_hotplug_done();
> return ret;
> }
>
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 1b03f4ec6fd2..c8238fc5edcb 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1129,6 +1129,9 @@ void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
> kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
> }
>
> +/*
> + * Must be called with mem_hotplug_lock in write mode.
> + */
> int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
> struct zone *zone, struct memory_group *group)
> {
> @@ -1149,7 +1152,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
> !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
> return -EINVAL;
>
> - mem_hotplug_begin();
>
> /* associate pfn range with the zone */
> move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
> @@ -1208,7 +1210,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
> writeback_set_ratelimit();
>
> memory_notify(MEM_ONLINE, &arg);
> - mem_hotplug_done();
> return 0;
>
> failed_addition:
> @@ -1217,7 +1218,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
> (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
> memory_notify(MEM_CANCEL_ONLINE, &arg);
> remove_pfn_range_from_zone(zone, pfn, nr_pages);
> - mem_hotplug_done();
> return ret;
> }
>
> @@ -1863,6 +1863,9 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
> return 0;
> }
>
> +/*
> + * Must be called with mem_hotplug_lock in write mode.
> + */
> int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
> struct zone *zone, struct memory_group *group)
> {
> @@ -1885,8 +1888,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
> !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
> return -EINVAL;
>
> - mem_hotplug_begin();
> -
> /*
> * Don't allow to offline memory blocks that contain holes.
> * Consequently, memory blocks with holes can never get onlined
> @@ -2027,7 +2028,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
>
> memory_notify(MEM_OFFLINE, &arg);
> remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
> - mem_hotplug_done();
> return 0;
>
> failed_removal_isolated:
> @@ -2042,7 +2042,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
> (unsigned long long) start_pfn << PAGE_SHIFT,
> ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
> reason);
> - mem_hotplug_done();
> return ret;
> }
>
> --
> 2.41.0
>
Hi,
This is the friendly patch-bot of Greg Kroah-Hartman. You have sent him
a patch that has triggered this response. He used to manually respond
to these common problems, but in order to save his sanity (he kept
writing the same thing over and over, yet to different people), I was
created. Hopefully you will not take offence and will fix the problem
in your patch and resubmit it so that it can be accepted into the Linux
kernel tree.
You are receiving this message because of the following common error(s)
as indicated below:
- You have marked a patch with a "Fixes:" tag for a commit that is in an
  older released kernel, yet you do not have a cc: stable line in the
  signed-off-by area at all, which means that the patch will not be
  applied to any older kernel releases. To properly fix this, please
  follow the documented rules in the
  Documentation/process/stable-kernel-rules.rst file; a minimal example
  is shown below.
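For example (a sketch, assuming the rest of the tag block stays as
posted), adding the documented "Cc: stable" line to the trailer area of
this patch would look like:

  Fixes: a08a2ae34613 ("mm,memory_hotplug: allocate memmap from the added memory range")
  Cc: stable@vger.kernel.org
  Reviewed-by: Gerald Schaefer <gerald.schaefer@...ux.ibm.com>
  Acked-by: David Hildenbrand <david@...hat.com>
  Signed-off-by: Sumanth Korikkar <sumanthk@...ux.ibm.com>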
If you wish to discuss this problem further, or you have questions about
how to resolve this issue, please feel free to respond to this email and
Greg will reply once he has dug out from the pending patches received
from other developers.
thanks,
greg k-h's patch email bot