lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Date:   Mon, 10 Aug 2020 07:24:25 +0000
From:   Dmitry Monakhov <dmtrmonakhov@...dex-team.ru>
To:     linux-kernel@...r.kernel.org
Cc:     koct9i@...il.com, Dmitry Monakhov <dmtrmonakhov@...dex-team.ru>
Subject: [PATCH 1/2] lib/test_lockup.c: add measure_alloc_pages_wait option

Setting measure_alloc_pages_wait=Y makes the module measure the maximum page allocation wait time.

Signed-off-by: Dmitry Monakhov <dmtrmonakhov@...dex-team.ru>
---
 lib/test_lockup.c | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index 0f81252..867b2f4 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -77,6 +77,10 @@ static bool call_cond_resched;
 module_param(call_cond_resched, bool, 0600);
 MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");
 
+static bool measure_alloc_pages_wait;
+module_param(measure_alloc_pages_wait, bool, 0400);
+MODULE_PARM_DESC(measure_alloc_pages_wait, "measure page allocation wait time");
+
 static bool measure_lock_wait;
 module_param(measure_lock_wait, bool, 0400);
 MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");
@@ -162,6 +166,7 @@ MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");
 static atomic_t alloc_pages_failed = ATOMIC_INIT(0);
 
 static atomic64_t max_lock_wait = ATOMIC64_INIT(0);
+static atomic64_t max_alloc_pages_wait = ATOMIC64_INIT(0);
 
 static struct task_struct *main_task;
 static int master_cpu;
@@ -305,6 +310,10 @@ static void test_alloc_pages(struct list_head *pages)
 {
 	struct page *page;
 	unsigned int i;
+	u64 wait_start;
+
+	if (measure_alloc_pages_wait)
+		wait_start = local_clock();
 
 	for (i = 0; i < alloc_pages_nr; i++) {
 		page = alloc_pages(alloc_pages_gfp, alloc_pages_order);
@@ -314,6 +323,17 @@ static void test_alloc_pages(struct list_head *pages)
 		}
 		list_add(&page->lru, pages);
 	}
+	if (measure_alloc_pages_wait) {
+		s64 cur_wait = local_clock() - wait_start;
+		s64 max_wait = atomic64_read(&max_alloc_pages_wait);
+
+		do {
+			if (cur_wait < max_wait)
+				break;
+			max_wait = atomic64_cmpxchg(&max_alloc_pages_wait,
+						    max_wait, cur_wait);
+		} while (max_wait != cur_wait);
+	}
 }
 
 static void test_free_pages(struct list_head *pages)
@@ -578,10 +598,13 @@ static int __init test_lockup_init(void)
 		pr_notice("Maximum lock wait: %lld ns\n",
 			  atomic64_read(&max_lock_wait));
 
-	if (alloc_pages_nr)
+	if (alloc_pages_nr) {
 		pr_notice("Page allocation failed %u times\n",
 			  atomic_read(&alloc_pages_failed));
-
+		if (measure_alloc_pages_wait)
+			pr_notice("Maximum pages allocation wait: %lld ns\n",
+				  atomic64_read(&max_alloc_pages_wait));
+	}
 	pr_notice("FINISH in %llu ns\n", local_clock() - test_start);
 
 	if (test_file)
-- 
2.7.4

Powered by blists - more mailing lists