lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Thu, 12 Feb 2015 16:32:14 +0900
From:	Joonsoo Kim <iamjoonsoo.kim@....com>
To:	Andrew Morton <akpm@...ux-foundation.org>
Cc:	Rik van Riel <riel@...hat.com>,
	Johannes Weiner <hannes@...xchg.org>,
	Mel Gorman <mgorman@...e.de>,
	Laura Abbott <lauraa@...eaurora.org>,
	Minchan Kim <minchan@...nel.org>,
	Heesub Shin <heesub.shin@...sung.com>,
	Marek Szyprowski <m.szyprowski@...sung.com>,
	Michal Nazarewicz <mina86@...a86.com>,
	"Aneesh Kumar K.V" <aneesh.kumar@...ux.vnet.ibm.com>,
	linux-mm@...ck.org, linux-kernel@...r.kernel.org,
	Hui Zhu <zhuhui@...omi.com>, Gioh Kim <gioh.kim@....com>,
	Bartlomiej Zolnierkiewicz <b.zolnierkie@...sung.com>,
	Ritesh Harjani <ritesh.list@...il.com>,
	Vlastimil Babka <vbabka@...e.cz>,
	Joonsoo Kim <iamjoonsoo.kim@....com>
Subject: [RFC 10/16] mm/highmem: remove is_highmem_idx()

We can use is_highmem() at every callsite of is_highmem_idx(), so
is_highmem_idx() isn't really needed. Moreover, if we introduce a new zone
for CMA, is_highmem_idx() would need to be modified to accommodate the new
zone, which is inconvenient. Therefore, this patch removes it before
introducing the new zone.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
---
 include/linux/mmzone.h |   18 ++++--------------
 lib/show_mem.c         |    2 +-
 mm/page_alloc.c        |    6 +++---
 mm/vmscan.c            |    2 +-
 4 files changed, 9 insertions(+), 19 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ffe66e3..90237f2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -854,16 +854,6 @@ static inline int zone_movable_is_highmem(void)
 #endif
 }
 
-static inline int is_highmem_idx(enum zone_type idx)
-{
-#ifdef CONFIG_HIGHMEM
-	return (idx == ZONE_HIGHMEM ||
-		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
-#else
-	return 0;
-#endif
-}
-
 /**
  * is_highmem - helper function to quickly check if a struct zone is a 
  *              highmem zone or not.  This is an attempt to keep references
@@ -873,10 +863,10 @@ static inline int is_highmem_idx(enum zone_type idx)
 static inline int is_highmem(struct zone *zone)
 {
 #ifdef CONFIG_HIGHMEM
-	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
-	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
-	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
-		zone_movable_is_highmem());
+	int idx = zone_idx(zone);
+
+	return (idx == ZONE_HIGHMEM ||
+		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
 #else
 	return 0;
 #endif
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 5e25627..f336c5b1 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -30,7 +30,7 @@ void show_mem(unsigned int filter)
 			total += zone->present_pages;
 			reserved += zone->present_pages - zone->managed_pages;
 
-			if (is_highmem_idx(zoneid))
+			if (is_highmem(zone))
 				highmem += zone->present_pages;
 		}
 		pgdat_resize_unlock(pgdat, &flags);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7733663..416e036 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4151,7 +4151,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		INIT_LIST_HEAD(&page->lru);
 #ifdef WANT_PAGE_VIRTUAL
 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
-		if (!is_highmem_idx(zone))
+		if (!is_highmem(z))
 			set_page_address(page, __va(pfn << PAGE_SHIFT));
 #endif
 	}
@@ -4881,7 +4881,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 					zone_names[0], dma_reserve);
 		}
 
-		if (!is_highmem_idx(j))
+		if (!is_highmem(zone))
 			nr_kernel_pages += freesize;
 		/* Charge for highmem memmap if there are enough kernel pages */
 		else if (nr_kernel_pages > memmap_pages * 2)
@@ -4895,7 +4895,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		 * when the bootmem allocator frees pages into the buddy system.
 		 * And all highmem pages will be managed by the buddy system.
 		 */
-		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
+		zone->managed_pages = is_highmem(zone) ? realsize : freesize;
 #ifdef CONFIG_NUMA
 		zone->node = nid;
 		zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dcb4707..30c34dc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3074,7 +3074,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 			 * has a highmem zone, force kswapd to reclaim from
 			 * it to relieve lowmem pressure.
 			 */
-			if (buffer_heads_over_limit && is_highmem_idx(i)) {
+			if (buffer_heads_over_limit && is_highmem(zone)) {
 				end_zone = i;
 				break;
 			}
-- 
1.7.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ