Message-Id: <20190614100114.311-2-david@redhat.com>
Date: Fri, 14 Jun 2019 12:01:09 +0200
From: David Hildenbrand <david@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: Dan Williams <dan.j.williams@...el.com>,
Andrew Morton <akpm@...ux-foundation.org>,
linuxppc-dev@...ts.ozlabs.org, linux-acpi@...r.kernel.org,
linux-mm@...ck.org, David Hildenbrand <david@...hat.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
"Rafael J. Wysocki" <rafael@...nel.org>,
Vlastimil Babka <vbabka@...e.cz>,
Michal Hocko <mhocko@...e.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Wei Yang <richard.weiyang@...il.com>,
Johannes Weiner <hannes@...xchg.org>,
Arun KS <arunks@...eaurora.org>,
Pavel Tatashin <pasha.tatashin@...cle.com>,
Oscar Salvador <osalvador@...e.de>,
Stephen Rothwell <sfr@...b.auug.org.au>,
Mike Rapoport <rppt@...ux.vnet.ibm.com>,
Baoquan He <bhe@...hat.com>
Subject: [PATCH v1 1/6] mm: Section numbers use the type "unsigned long"
Section numbers are currently stored in a mixture of "int" and "unsigned
long". Let's make this consistent by using "unsigned long" everywhere.
We'll do the same with memory block ids next.
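
A purely illustrative, standalone sketch (not part of the patch; the values
and the program are made up for demonstration) of the kind of signed/unsigned
mix-up that consistent use of "unsigned long" avoids:

#include <stdio.h>

int main(void)
{
	unsigned long section_nr = 32;	/* hypothetical section number */
	int delta = -1;			/* signed value mixed into the math */

	/*
	 * When a signed int meets an unsigned long, the int is converted
	 * to unsigned long. Arithmetic still wraps to the expected value,
	 * but comparisons against negative values silently go wrong:
	 * -1 becomes ULONG_MAX, so the "expected" branch is not taken.
	 */
	if (delta < section_nr)
		printf("expected branch\n");
	else
		printf("surprising branch: -1 compares as ULONG_MAX\n");

	printf("section_nr + delta = %lu\n", section_nr + delta);	/* 31 */
	return 0;
}
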
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Cc: "Rafael J. Wysocki" <rafael@...nel.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Vlastimil Babka <vbabka@...e.cz>
Cc: Michal Hocko <mhocko@...e.com>
Cc: Dan Williams <dan.j.williams@...el.com>
Cc: Mel Gorman <mgorman@...hsingularity.net>
Cc: Wei Yang <richard.weiyang@...il.com>
Cc: Johannes Weiner <hannes@...xchg.org>
Cc: Arun KS <arunks@...eaurora.org>
Cc: Pavel Tatashin <pasha.tatashin@...cle.com>
Cc: Oscar Salvador <osalvador@...e.de>
Cc: Stephen Rothwell <sfr@...b.auug.org.au>
Cc: Mike Rapoport <rppt@...ux.vnet.ibm.com>
Cc: Baoquan He <bhe@...hat.com>
Signed-off-by: David Hildenbrand <david@...hat.com>
---
drivers/base/memory.c | 9 +++++----
include/linux/mmzone.h | 4 ++--
mm/sparse.c | 12 ++++++------
3 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 826dd76f662e..5b3a2fd250ba 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -34,7 +34,7 @@ static DEFINE_MUTEX(mem_sysfs_mutex);
static int sections_per_block;
-static inline int base_memory_block_id(int section_nr)
+static inline int base_memory_block_id(unsigned long section_nr)
{
return section_nr / sections_per_block;
}
@@ -691,10 +691,11 @@ static int init_memory_block(struct memory_block **memory, int block_id,
return ret;
}
-static int add_memory_block(int base_section_nr)
+static int add_memory_block(unsigned long base_section_nr)
{
+ int ret, section_count = 0;
struct memory_block *mem;
- int i, ret, section_count = 0;
+ unsigned long i;
for (i = base_section_nr;
i < base_section_nr + sections_per_block;
@@ -822,7 +823,7 @@ static const struct attribute_group *memory_root_attr_groups[] = {
*/
int __init memory_dev_init(void)
{
- unsigned int i;
+ unsigned long i;
int ret;
int err;
unsigned long block_sz;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 427b79c39b3c..83b6aae16f13 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1220,7 +1220,7 @@ static inline struct mem_section *__nr_to_section(unsigned long nr)
return NULL;
return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
-extern int __section_nr(struct mem_section* ms);
+extern unsigned long __section_nr(struct mem_section *ms);
extern unsigned long usemap_size(void);
/*
@@ -1292,7 +1292,7 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
return __nr_to_section(pfn_to_section_nr(pfn));
}
-extern int __highest_present_section_nr;
+extern unsigned long __highest_present_section_nr;
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
diff --git a/mm/sparse.c b/mm/sparse.c
index 1552c855d62a..e8c57e039be8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -102,7 +102,7 @@ static inline int sparse_index_init(unsigned long section_nr, int nid)
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
-int __section_nr(struct mem_section* ms)
+unsigned long __section_nr(struct mem_section *ms)
{
unsigned long root_nr;
struct mem_section *root = NULL;
@@ -121,9 +121,9 @@ int __section_nr(struct mem_section* ms)
return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
-int __section_nr(struct mem_section* ms)
+unsigned long __section_nr(struct mem_section *ms)
{
- return (int)(ms - mem_section[0]);
+ return (unsigned long)(ms - mem_section[0]);
}
#endif
@@ -178,10 +178,10 @@ void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
* Keeping track of this gives us an easy way to break out of
* those loops early.
*/
-int __highest_present_section_nr;
+unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
- int section_nr = __section_nr(ms);
+ unsigned long section_nr = __section_nr(ms);
if (section_nr > __highest_present_section_nr)
__highest_present_section_nr = section_nr;
@@ -189,7 +189,7 @@ static void section_mark_present(struct mem_section *ms)
ms->section_mem_map |= SECTION_MARKED_PRESENT;
}
-static inline int next_present_section_nr(int section_nr)
+static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
do {
section_nr++;
--
2.21.0