lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20150723015016.GA1844@dhcp-17-102.nay.redhat.com>
Date:	Thu, 23 Jul 2015 09:50:16 +0800
From:	Baoquan He <bhe@...hat.com>
To:	tj@...nel.org, cl@...ux-foundation.org, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org
Subject: [PATCH v2 3/3] percpu: add macro PCPU_MAP_BUSY

chunk->map[] contains <offset|in-use flag> of each area. Now add a
new macro PCPU_MAP_BUSY and use it as the in-use flag to replace all
occurrences of the magic number '1'.

Signed-off-by: Baoquan He <bhe@...hat.com>
---
 mm/percpu.c | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/mm/percpu.c b/mm/percpu.c
index a63b4d8..8cf18dc 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -81,6 +81,15 @@
 #define PCPU_EMPTY_POP_PAGES_LOW	2
 #define PCPU_EMPTY_POP_PAGES_HIGH	4
 
+/* We use the int array chunk->map[] to describe each area of a chunk. Each
+ * array element is represented by one int - contains offset|1 for
+ * <offset, in use> or offset for <offset, free> (the offset needs to be
+ * guaranteed to be even). At the end there's a sentinel entry -
+ * <total size, in-use>. So the size of the N-th area would be the offset
+ * of the (N+1)-th minus the offset of the N-th, namely
+ * SIZEn = (chunk->map[N+1] & ~1) - (chunk->map[N] & ~1)
+ * For more readable code define PCPU_MAP_BUSY to represent the in-use flag. */
+#define PCPU_MAP_BUSY			1
+
 #ifdef CONFIG_SMP
 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 #ifndef __addr_to_pcpu_ptr
@@ -328,8 +337,8 @@ static void pcpu_mem_free(void *ptr, size_t size)
  */
 static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
 {
-	int off = chunk->map[i] & ~1;
-	int end = chunk->map[i + 1] & ~1;
+	int off = chunk->map[i] & ~PCPU_MAP_BUSY;
+	int end = chunk->map[i + 1] & ~PCPU_MAP_BUSY;
 
 	if (!PAGE_ALIGNED(off) && i > 0) {
 		int prev = chunk->map[i - 1];
@@ -340,7 +349,7 @@ static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
 
 	if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
 		int next = chunk->map[i + 1];
-		int nend = chunk->map[i + 2] & ~1;
+		int nend = chunk->map[i + 2] & ~PCPU_MAP_BUSY;
 
 		if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
 			end = round_up(end, PAGE_SIZE);
@@ -738,7 +747,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 
 	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
 	chunk->map[0] = 0;
-	chunk->map[1] = pcpu_unit_size | 1;
+	chunk->map[1] = pcpu_unit_size | PCPU_MAP_BUSY;
 	chunk->map_used = 1;
 
 	INIT_LIST_HEAD(&chunk->list);
@@ -1664,12 +1673,12 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	}
 	schunk->contig_hint = schunk->free_size;
 
-	schunk->map[0] = 1;
+	schunk->map[0] = PCPU_MAP_BUSY;
 	schunk->map[1] = ai->static_size;
 	schunk->map_used = 1;
 	if (schunk->free_size)
 		schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size;
-	schunk->map[schunk->map_used] |= 1;
+	schunk->map[schunk->map_used] |= PCPU_MAP_BUSY;
 
 	/* init dynamic chunk if necessary */
 	if (dyn_size) {
@@ -1684,9 +1693,10 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 		dchunk->nr_populated = pcpu_unit_pages;
 
 		dchunk->contig_hint = dchunk->free_size = dyn_size;
-		dchunk->map[0] = 1;
+		dchunk->map[0] = PCPU_MAP_BUSY;
 		dchunk->map[1] = pcpu_reserved_chunk_limit;
-		dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
+		dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size)
+					| PCPU_MAP_BUSY;
 		dchunk->map_used = 2;
 	}
 
-- 
2.4.3

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ