Message-ID: <4948CADF.6050205@cn.fujitsu.com>
Date:	Wed, 17 Dec 2008 17:48:15 +0800
From:	Lai Jiangshan <laijs@...fujitsu.com>
To:	Steven Rostedt <rostedt@...dmis.org>, Ingo Molnar <mingo@...e.hu>,
	Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: [PATCH] ring_buffer: make BUF_PAGE_SIZE changeable


Impact: make BUF_PAGE_SIZE changeable

Except for the page allocation/free paths and the code that uses
PAGE_MASK, all code already expects a buffer_page's length to be
BUF_PAGE_SIZE.

This patch makes that behavior consistent: BUF_PAGE_SIZE is now derived
from a new BUF_PAGE_ORDER, the allocation/free paths pass that order to
__get_free_pages()/free_pages(), and the PAGE_MASK users switch to
BUF_PAGE_MASK. Changing BUF_PAGE_ORDER is then enough to change the
buffer page size.

Signed-off-by: Lai Jiangshan <laijs@...fujitsu.com>
---
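For reference (not part of the patch): a small standalone C sketch of the
new macro arithmetic. PAGE_SIZE is assumed to be 4096 and BUF_PAGE_ORDER
is set to 1 purely for the example; the names mirror the patch, but the
program itself is hypothetical and runs in userspace.

/* Standalone demo of the BUF_PAGE_ORDER/SIZE/MASK arithmetic (not kernel code). */
#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumed page size for the example */
#define BUF_PAGE_ORDER	1			/* e.g. two pages per buffer page    */
#define BUF_PAGE_SIZE	(PAGE_SIZE << BUF_PAGE_ORDER)
#define BUF_PAGE_MASK	(~(BUF_PAGE_SIZE - 1))

int main(void)
{
	/* Pretend an event lives at this address inside a buffer page. */
	unsigned long addr = 0x12345678UL;

	/* Start of the (naturally aligned) buffer page holding the event. */
	unsigned long page_start = addr & BUF_PAGE_MASK;

	/* Offset of the event within that buffer page, as rb_event_index()
	 * computes it after this patch. */
	unsigned long index = addr & ~BUF_PAGE_MASK;

	printf("BUF_PAGE_SIZE = %lu\n", BUF_PAGE_SIZE);
	printf("page_start    = 0x%lx\n", page_start);
	printf("index         = %lu\n", index);

	return 0;
}

The mask arithmetic relies on __get_free_pages() returning blocks that
are naturally aligned to 1 << BUF_PAGE_ORDER pages, which the buddy
allocator guarantees.
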
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 668bbb5..0cf6caf 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -158,6 +158,10 @@ struct buffer_page {
 	void *page;			/* Actual data page */
 };
 
+#define BUF_PAGE_ORDER 0
+#define BUF_PAGE_SIZE (PAGE_SIZE << BUF_PAGE_ORDER)
+#define BUF_PAGE_MASK (~(BUF_PAGE_SIZE - 1))
+
 /*
  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
  * this issue out.
@@ -165,7 +169,7 @@ struct buffer_page {
 static inline void free_buffer_page(struct buffer_page *bpage)
 {
 	if (bpage->page)
-		free_page((unsigned long)bpage->page);
+		free_pages((unsigned long)bpage->page, BUF_PAGE_ORDER);
 	kfree(bpage);
 }
 
@@ -179,8 +183,6 @@ static inline int test_time_stamp(u64 delta)
 	return 0;
 }
 
-#define BUF_PAGE_SIZE PAGE_SIZE
-
 /*
  * head_page == tail_page && head == tail then buffer is empty.
  */
@@ -289,7 +291,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 			goto free_pages;
 		list_add(&page->list, &pages);
 
-		addr = __get_free_page(GFP_KERNEL);
+		addr = __get_free_pages(GFP_KERNEL, BUF_PAGE_ORDER);
 		if (!addr)
 			goto free_pages;
 		page->page = (void *)addr;
@@ -333,7 +335,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 		goto fail_free_buffer;
 
 	cpu_buffer->reader_page = page;
-	addr = __get_free_page(GFP_KERNEL);
+	addr = __get_free_pages(GFP_KERNEL, BUF_PAGE_ORDER);
 	if (!addr)
 		goto fail_free_reader;
 	page->page = (void *)addr;
@@ -592,7 +594,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 			if (!page)
 				goto free_pages;
 			list_add(&page->list, &pages);
-			addr = __get_free_page(GFP_KERNEL);
+			addr = __get_free_pages(GFP_KERNEL, BUF_PAGE_ORDER);
 			if (!addr)
 				goto free_pages;
 			page->page = (void *)addr;
@@ -718,7 +720,7 @@ rb_event_index(struct ring_buffer_event *event)
 {
 	unsigned long addr = (unsigned long)event;
 
-	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
+	return addr & ~BUF_PAGE_MASK;
 }
 
 static inline int
@@ -729,7 +731,7 @@ rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
 	unsigned long index;
 
 	index = rb_event_index(event);
-	addr &= PAGE_MASK;
+	addr &= BUF_PAGE_MASK;
 
 	return cpu_buffer->commit_page->page == (void *)addr &&
 		rb_commit_index(cpu_buffer) == index;
@@ -743,7 +745,7 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
 	unsigned long index;
 
 	index = rb_event_index(event);
-	addr &= PAGE_MASK;
+	addr &= BUF_PAGE_MASK;
 
 	while (cpu_buffer->commit_page->page != (void *)addr) {
 		RB_WARN_ON(cpu_buffer,


