Date:	Thu, 11 Jun 2009 09:12:58 +0800
From:	Lai Jiangshan <laijs@...fujitsu.com>
To:	Steven Rostedt <rostedt@...dmis.org>
CC:	linux-kernel@...r.kernel.org, Ingo Molnar <mingo@...e.hu>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Peter Zijlstra <peterz@...radead.org>,
	Frederic Weisbecker <fweisbec@...il.com>,
	Theodore Tso <tytso@....edu>,
	Arnaldo Carvalho de Melo <acme@...hat.com>,
	Mathieu Desnoyers <compudj@...stal.dyndns.org>,
	"Martin J. Bligh" <mbligh@...igh.org>,
	Christoph Hellwig <hch@...radead.org>,
	Li Zefan <lizf@...fujitsu.com>,
	Huang Ying <ying.huang@...el.com>,
	"H. Peter Anvin" <hpa@...or.com>,
	Hidetoshi Seto <seto.hidetoshi@...fujitsu.com>,
	Masami Hiramatsu <mhiramat@...hat.com>,
	Steven Rostedt <srostedt@...hat.com>
Subject: Re: [PATCH 1/3] ring-buffer: make the buffer a true circular link
 list

Steven Rostedt wrote:
> From: Steven Rostedt <srostedt@...hat.com>
> 
> This patch changes the ring buffer data pages from using a linked
> list head pointer to making each buffer page point to another buffer
> page and never back to a "head".
> 
> This makes the handling of the ring buffer less complex, since
> traversal of the ring buffer pages no longer needs to account for the
> head pointer.
> 
> This change is also needed to make the ring buffer lockless.
> 
> [ Impact: simplify the ring buffer to help make it lockless ]
> 
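Side note, not part of the patch: to restate the simplification the
changelog describes, here is the rb_inc_page() advance before and after,
paraphrased from the hunk further down (a sketch only, using the same
field names as ring_buffer.c):

	/* before: the embedded list head is not a page and must be skipped */
	p = (*bpage)->list.next;
	if (p == &cpu_buffer->pages)
		p = p->next;
	*bpage = list_entry(p, struct buffer_page, list);

	/* after: every node in the circle is a buffer_page */
	p = (*bpage)->list.next;
	*bpage = list_entry(p, struct buffer_page, list);
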
> Signed-off-by: Steven Rostedt <srostedt@...hat.com>
> ---
>  kernel/trace/ring_buffer.c |   54 ++++++++++++++++++++++++++++++-------------
>  1 files changed, 37 insertions(+), 17 deletions(-)
> 
> diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> index 2e642b2..d1edd63 100644
> --- a/kernel/trace/ring_buffer.c
> +++ b/kernel/trace/ring_buffer.c
> @@ -404,7 +404,7 @@ struct ring_buffer_per_cpu {
>  	spinlock_t			reader_lock; /* serialize readers */
>  	raw_spinlock_t			lock;
>  	struct lock_class_key		lock_key;
> -	struct list_head		pages;
> +	struct list_head		*pages;
>  	struct buffer_page		*head_page;	/* read from head */
>  	struct buffer_page		*tail_page;	/* write to tail */
>  	struct buffer_page		*commit_page;	/* committed pages */
> @@ -494,7 +494,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
>   */
>  static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
>  {
> -	struct list_head *head = &cpu_buffer->pages;
> +	struct list_head *head = cpu_buffer->pages;
>  	struct buffer_page *bpage, *tmp;
>  
>  	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
> @@ -517,12 +517,13 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
>  static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
>  			     unsigned nr_pages)
>  {
> -	struct list_head *head = &cpu_buffer->pages;
>  	struct buffer_page *bpage, *tmp;
>  	unsigned long addr;
>  	LIST_HEAD(pages);
>  	unsigned i;
>  
> +	WARN_ON(!nr_pages);
> +
>  	for (i = 0; i < nr_pages; i++) {
>  		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
>  				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
> @@ -537,7 +538,18 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
>  		rb_init_page(bpage->page);
>  	}
>  
> -	list_splice(&pages, head);
> +	/*
> +	 * The ring buffer page list is a circular list that does not
> +	 * start and end with a list head. All page list items point to
> +	 * other pages. Remove one of the pages, init its list head,
> +	 * and use list splice to move the rest of the pages to it.
> +	 */
> +	bpage = list_entry(pages.next, struct buffer_page, list);
> +	list_del_init(&bpage->list);
> +	cpu_buffer->pages = &bpage->list;
> +
> +	list_splice(&pages, cpu_buffer->pages);
> +

Would the following simpler version do the same thing?

	cpu_buffer->pages = pages.next;
	list_del(&pages);
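
If I read it right, this gives the same circle of nr_pages buffer pages;
the on-stack head simply drops out of the ring instead of donating the
first page's list_head as the splice target.  Below is a standalone
userspace sketch, with a minimal list_head work-alike re-implemented
locally (not the kernel's <linux/list.h>), that builds the ring this way
and walks it:

	#include <stdio.h>
	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
	}

	struct buffer_page { int id; struct list_head list; };

	int main(void)
	{
		struct list_head pages;		/* stands in for LIST_HEAD(pages) */
		struct list_head *ring, *p;
		struct buffer_page bp[4];
		int i;

		INIT_LIST_HEAD(&pages);
		for (i = 0; i < 4; i++) {
			bp[i].id = i;
			list_add_tail(&bp[i].list, &pages);
		}

		/* suggested variant: anchor at the first page, drop the stack head */
		ring = pages.next;
		list_del(&pages);

		/* walk the circle: visits all four pages, then wraps back to the start */
		p = ring;
		do {
			struct buffer_page *page = (struct buffer_page *)
				((char *)p - offsetof(struct buffer_page, list));
			printf("page %d\n", page->id);
			p = p->next;
		} while (p != ring);

		return 0;
	}

(The kernel's real list_del() also poisons the removed entry's next/prev
pointers, which is harmless here because 'pages' is the on-stack LIST_HEAD
that goes out of scope right afterwards.)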

>  
>  	rb_check_pages(cpu_buffer);
>  
> @@ -569,7 +581,6 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
>  	spin_lock_init(&cpu_buffer->reader_lock);
>  	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
>  	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
> -	INIT_LIST_HEAD(&cpu_buffer->pages);
>  
>  	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
>  			    GFP_KERNEL, cpu_to_node(cpu));
> @@ -590,7 +601,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
>  		goto fail_free_reader;
>  
>  	cpu_buffer->head_page
> -		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
> +		= list_entry(cpu_buffer->pages, struct buffer_page, list);
>  	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
>  
>  	return cpu_buffer;
> @@ -605,15 +616,20 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
>  
>  static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
>  {
> -	struct list_head *head = &cpu_buffer->pages;
> +	struct list_head *head = cpu_buffer->pages;
>  	struct buffer_page *bpage, *tmp;
>  
>  	free_buffer_page(cpu_buffer->reader_page);
>  
> -	list_for_each_entry_safe(bpage, tmp, head, list) {
> -		list_del_init(&bpage->list);
> +	if (head) {
> +		list_for_each_entry_safe(bpage, tmp, head, list) {
> +			list_del_init(&bpage->list);
> +			free_buffer_page(bpage);
> +		}
> +		bpage = list_entry(head, struct buffer_page, list);
>  		free_buffer_page(bpage);
>  	}
> +
>  	kfree(cpu_buffer);
>  }
>  
> @@ -767,14 +783,14 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
>  	synchronize_sched();
>  
>  	for (i = 0; i < nr_pages; i++) {
> -		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
> +		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
>  			return;
> -		p = cpu_buffer->pages.next;
> +		p = cpu_buffer->pages->next;
>  		bpage = list_entry(p, struct buffer_page, list);
>  		list_del_init(&bpage->list);
>  		free_buffer_page(bpage);
>  	}
> -	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
> +	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
>  		return;
>  
>  	rb_reset_cpu(cpu_buffer);
> @@ -802,7 +818,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
>  		p = pages->next;
>  		bpage = list_entry(p, struct buffer_page, list);
>  		list_del_init(&bpage->list);
> -		list_add_tail(&bpage->list, &cpu_buffer->pages);
> +		list_add_tail(&bpage->list, cpu_buffer->pages);
>  	}
>  	rb_reset_cpu(cpu_buffer);
>  
> @@ -999,9 +1015,6 @@ static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
>  {
>  	struct list_head *p = (*bpage)->list.next;
>  
> -	if (p == &cpu_buffer->pages)
> -		p = p->next;
> -
>  	*bpage = list_entry(p, struct buffer_page, list);
>  }
>  
> @@ -2212,6 +2225,13 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
>  	cpu_buffer->reader_page->list.next = reader->list.next;
>  	cpu_buffer->reader_page->list.prev = reader->list.prev;
>  
> +	/*
> +	 * cpu_buffer->pages just needs to point to the buffer; it
> +	 * has no specific buffer page to point to. Let's move it out
> +	 * of our way so we don't accidentally swap it.
> +	 */
> +	cpu_buffer->pages = reader->list.prev;
> +
>  	local_set(&cpu_buffer->reader_page->write, 0);
>  	local_set(&cpu_buffer->reader_page->entries, 0);
>  	local_set(&cpu_buffer->reader_page->page->commit, 0);
> @@ -2656,7 +2676,7 @@ static void
>  rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
>  {
>  	cpu_buffer->head_page
> -		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
> +		= list_entry(cpu_buffer->pages, struct buffer_page, list);
>  	local_set(&cpu_buffer->head_page->write, 0);
>  	local_set(&cpu_buffer->head_page->entries, 0);
>  	local_set(&cpu_buffer->head_page->page->commit, 0);


