[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20200220061427.GB32521@richard>
Date: Thu, 20 Feb 2020 14:14:27 +0800
From: Wei Yang <richardw.yang@...ux.intel.com>
To: Baoquan He <bhe@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
akpm@...ux-foundation.org, richardw.yang@...ux.intel.com,
david@...hat.com, osalvador@...e.de, dan.j.williams@...el.com,
mhocko@...e.com, rppt@...ux.ibm.com, robin.murphy@....com
Subject: Re: [PATCH v2 2/7] mm/sparse.c: introduce new function
fill_subsection_map()
On Thu, Feb 20, 2020 at 12:33:11PM +0800, Baoquan He wrote:
>Wrap the code that fills the subsection map from section_activate() into
>fill_subsection_map(); this makes section_activate() cleaner and
>easier to follow.
>
>Signed-off-by: Baoquan He <bhe@...hat.com>
Reviewed-by: Wei Yang <richardw.yang@...ux.intel.com>
>---
> mm/sparse.c | 45 ++++++++++++++++++++++++++++++++++-----------
> 1 file changed, 34 insertions(+), 11 deletions(-)
>
>diff --git a/mm/sparse.c b/mm/sparse.c
>index b8e52c8fed7f..977b47acd38d 100644
>--- a/mm/sparse.c
>+++ b/mm/sparse.c
>@@ -790,24 +790,28 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
> ms->section_mem_map = (unsigned long)NULL;
> }
>
>-static struct page * __meminit section_activate(int nid, unsigned long pfn,
>- unsigned long nr_pages, struct vmem_altmap *altmap)
>+/**
>+ * fill_subsection_map - fill subsection map of a memory region
>+ * @pfn: start pfn of the memory range
>+ * @nr_pages: number of pfns to add in the region
>+ *
>+ * This fills the related subsection map inside one section, and is
>+ * only intended for hotplug.
>+ *
>+ * Return:
>+ * * 0 - On success.
>+ * * -EINVAL - Invalid memory region.
>+ * * -EEXIST - Subsection map has been set.
>+ */
>+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
> {
>- DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
> struct mem_section *ms = __pfn_to_section(pfn);
>- struct mem_section_usage *usage = NULL;
>+ DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
> unsigned long *subsection_map;
>- struct page *memmap;
> int rc = 0;
>
> subsection_mask_set(map, pfn, nr_pages);
>
>- if (!ms->usage) {
>- usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
>- if (!usage)
>- return ERR_PTR(-ENOMEM);
>- ms->usage = usage;
>- }
> subsection_map = &ms->usage->subsection_map[0];
>
> if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
>@@ -818,6 +822,25 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
> bitmap_or(subsection_map, map, subsection_map,
> SUBSECTIONS_PER_SECTION);
>
>+ return rc;
>+}
>+
>+static struct page * __meminit section_activate(int nid, unsigned long pfn,
>+ unsigned long nr_pages, struct vmem_altmap *altmap)
>+{
>+ struct mem_section *ms = __pfn_to_section(pfn);
>+ struct mem_section_usage *usage = NULL;
>+ struct page *memmap;
>+ int rc = 0;
>+
>+ if (!ms->usage) {
>+ usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
>+ if (!usage)
>+ return ERR_PTR(-ENOMEM);
>+ ms->usage = usage;
>+ }
>+
>+ rc = fill_subsection_map(pfn, nr_pages);
> if (rc) {
> if (usage)
> ms->usage = NULL;
>--
>2.17.2
--
Wei Yang
Help you, Help me
Powered by blists - more mailing lists