Message-Id: <20190725184253.21160-3-lpf.vector@gmail.com>
Date:   Fri, 26 Jul 2019 02:42:45 +0800
From:   Pengfei Li <lpf.vector@...il.com>
To:     akpm@...ux-foundation.org
Cc:     mgorman@...hsingularity.net, mhocko@...e.com, vbabka@...e.cz,
        cai@....pw, aryabinin@...tuozzo.com, osalvador@...e.de,
        rostedt@...dmis.org, mingo@...hat.com,
        pavel.tatashin@...rosoft.com, rppt@...ux.ibm.com,
        linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        Pengfei Li <lpf.vector@...il.com>
Subject: [PATCH 02/10] mm/page_alloc: use unsigned int for "order" in __rmqueue_fallback()

Because "order" will never be negative in __rmqueue_fallback(),
so just make "order" unsigned int.
And modify trace_mm_page_alloc_extfrag() accordingly.

Signed-off-by: Pengfei Li <lpf.vector@...il.com>
---
 include/trace/events/kmem.h | 6 +++---
 mm/page_alloc.c             | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
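
Note (illustration only, not part of the patch): a minimal user-space
sketch with made-up function names showing the two things this change
keeps in sync -- the type of the "order" parameter and the printk-style
format specifier used when it is printed. Passing an unsigned value
through a signed parameter, or printing an unsigned int with %d, is the
kind of mismatch the patch removes.

    #include <stdio.h>

    /* Hypothetical stand-ins for the functions touched by this patch;
     * only the parameter types matter for the illustration. */
    static void rmqueue_fallback_old(int order)           /* before */
    {
            printf("alloc_order=%d\n", order);
    }

    static void rmqueue_fallback_new(unsigned int order)  /* after */
    {
            printf("alloc_order=%u\n", order);
    }

    int main(void)
    {
            unsigned int order = 3; /* "order" is never negative */

            rmqueue_fallback_old(order); /* implicit unsigned -> int conversion */
            rmqueue_fallback_new(order); /* types and specifier match end to end */
            return 0;
    }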

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index eb57e3037deb..31f4d09aa31f 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -277,7 +277,7 @@ TRACE_EVENT(mm_page_pcpu_drain,
 TRACE_EVENT(mm_page_alloc_extfrag,
 
 	TP_PROTO(struct page *page,
-		int alloc_order, int fallback_order,
+		unsigned int alloc_order, int fallback_order,
 		int alloc_migratetype, int fallback_migratetype),
 
 	TP_ARGS(page,
@@ -286,7 +286,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 
 	TP_STRUCT__entry(
 		__field(	unsigned long,	pfn			)
-		__field(	int,		alloc_order		)
+		__field(	unsigned int,	alloc_order		)
 		__field(	int,		fallback_order		)
 		__field(	int,		alloc_migratetype	)
 		__field(	int,		fallback_migratetype	)
@@ -303,7 +303,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 					get_pageblock_migratetype(page));
 	),
 
-	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
+	TP_printk("page=%p pfn=%lu alloc_order=%u fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
 		pfn_to_page(__entry->pfn),
 		__entry->pfn,
 		__entry->alloc_order,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 75c18f4fd66a..1432cbcd87cd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2631,8 +2631,8 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
  * condition simpler.
  */
 static __always_inline bool
-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
-						unsigned int alloc_flags)
+__rmqueue_fallback(struct zone *zone, unsigned int order,
+		int start_migratetype, unsigned int alloc_flags)
 {
 	struct free_area *area;
 	int current_order;
-- 
2.21.0
