Message-ID: <ZQRV7fAXEZNFWAx4@MiWiFi-R3L-srv>
Date:   Fri, 15 Sep 2023 21:02:37 +0800
From:   Baoquan He <bhe@...hat.com>
To:     "Uladzislau Rezki (Sony)" <urezki@...il.com>
Cc:     linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
        LKML <linux-kernel@...r.kernel.org>,
        Lorenzo Stoakes <lstoakes@...il.com>,
        Christoph Hellwig <hch@...radead.org>,
        Matthew Wilcox <willy@...radead.org>,
        "Liam R . Howlett" <Liam.Howlett@...cle.com>,
        Dave Chinner <david@...morbit.com>,
        "Paul E . McKenney" <paulmck@...nel.org>,
        Joel Fernandes <joel@...lfernandes.org>,
        Oleksiy Avramchenko <oleksiy.avramchenko@...y.com>
Subject: Re: [PATCH v2 8/9] mm: vmalloc: Support multiple nodes in vmallocinfo

On 08/29/23 at 10:11am, Uladzislau Rezki (Sony) wrote:
> Allocated areas are spread among nodes, which implies that
> the scanning has to be performed individually for each node
> in order to dump all existing VAs.
> 
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
> ---
>  mm/vmalloc.c | 120 ++++++++++++++++++++-------------------------------
>  1 file changed, 47 insertions(+), 73 deletions(-)

LGTM,

Reviewed-by: Baoquan He <bhe@...hat.com>
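
To restate the core of the change for context: vmalloc_info_show() now walks
every vmap node under that node's own busy.lock, instead of seq_file iterating
a single node's busy list. Below is a minimal userspace sketch of that
per-node walk pattern, with pthread mutexes standing in for the per-node
spinlock; the structures and helper names (struct node, struct area,
dump_all_areas()) are simplified illustrations, not the real mm/vmalloc.c
definitions.

/*
 * Sketch: each node keeps its own list of busy areas under its own lock,
 * so dumping all areas means iterating nodes one by one and holding each
 * node's lock only while that node's list is walked.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct area {
	unsigned long start;
	unsigned long end;
	struct area *next;
};

struct node {
	pthread_mutex_t lock;	/* stand-in for vn->busy.lock */
	struct area *head;	/* stand-in for vn->busy.head */
};

#define NR_NODES 2

static struct node nodes[NR_NODES];

static void add_area(int nid, unsigned long start, unsigned long end)
{
	struct area *a = malloc(sizeof(*a));

	a->start = start;
	a->end = end;
	pthread_mutex_lock(&nodes[nid].lock);
	a->next = nodes[nid].head;
	nodes[nid].head = a;
	pthread_mutex_unlock(&nodes[nid].lock);
}

static void dump_all_areas(void)
{
	int i;

	for (i = 0; i < NR_NODES; i++) {
		struct node *n = &nodes[i];
		struct area *a;

		pthread_mutex_lock(&n->lock);
		for (a = n->head; a; a = a->next)
			printf("node %d: 0x%lx-0x%lx %lu\n",
			       i, a->start, a->end, a->end - a->start);
		pthread_mutex_unlock(&n->lock);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < NR_NODES; i++)
		pthread_mutex_init(&nodes[i].lock, NULL);

	add_area(0, 0x1000, 0x3000);
	add_area(1, 0x8000, 0x9000);

	dump_all_areas();
	return 0;
}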

> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 968144c16237..9cce012aecdb 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -4636,30 +4636,6 @@ bool vmalloc_dump_obj(void *object)
>  #endif
>  
>  #ifdef CONFIG_PROC_FS
> -static void *s_start(struct seq_file *m, loff_t *pos)
> -{
> -	struct vmap_node *vn = addr_to_node(0);
> -
> -	mutex_lock(&vmap_purge_lock);
> -	spin_lock(&vn->busy.lock);
> -
> -	return seq_list_start(&vn->busy.head, *pos);
> -}
> -
> -static void *s_next(struct seq_file *m, void *p, loff_t *pos)
> -{
> -	struct vmap_node *vn = addr_to_node(0);
> -	return seq_list_next(p, &vn->busy.head, pos);
> -}
> -
> -static void s_stop(struct seq_file *m, void *p)
> -{
> -	struct vmap_node *vn = addr_to_node(0);
> -
> -	spin_unlock(&vn->busy.lock);
> -	mutex_unlock(&vmap_purge_lock);
> -}
> -
>  static void show_numa_info(struct seq_file *m, struct vm_struct *v)
>  {
>  	if (IS_ENABLED(CONFIG_NUMA)) {
> @@ -4703,84 +4679,82 @@ static void show_purge_info(struct seq_file *m)
>  	}
>  }
>  
> -static int s_show(struct seq_file *m, void *p)
> +static int vmalloc_info_show(struct seq_file *m, void *p)
>  {
>  	struct vmap_node *vn;
>  	struct vmap_area *va;
>  	struct vm_struct *v;
> +	int i;
>  
> -	vn = addr_to_node(0);
> -	va = list_entry(p, struct vmap_area, list);
> +	for (i = 0; i < nr_nodes; i++) {
> +		vn = &nodes[i];
>  
> -	if (!va->vm) {
> -		if (va->flags & VMAP_RAM)
> -			seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
> -				(void *)va->va_start, (void *)va->va_end,
> -				va->va_end - va->va_start);
> +		spin_lock(&vn->busy.lock);
> +		list_for_each_entry(va, &vn->busy.head, list) {
> +			if (!va->vm) {
> +				if (va->flags & VMAP_RAM)
> +					seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
> +						(void *)va->va_start, (void *)va->va_end,
> +						va->va_end - va->va_start);
>  
> -		goto final;
> -	}
> +				continue;
> +			}
>  
> -	v = va->vm;
> +			v = va->vm;
>  
> -	seq_printf(m, "0x%pK-0x%pK %7ld",
> -		v->addr, v->addr + v->size, v->size);
> +			seq_printf(m, "0x%pK-0x%pK %7ld",
> +				v->addr, v->addr + v->size, v->size);
>  
> -	if (v->caller)
> -		seq_printf(m, " %pS", v->caller);
> +			if (v->caller)
> +				seq_printf(m, " %pS", v->caller);
>  
> -	if (v->nr_pages)
> -		seq_printf(m, " pages=%d", v->nr_pages);
> +			if (v->nr_pages)
> +				seq_printf(m, " pages=%d", v->nr_pages);
>  
> -	if (v->phys_addr)
> -		seq_printf(m, " phys=%pa", &v->phys_addr);
> +			if (v->phys_addr)
> +				seq_printf(m, " phys=%pa", &v->phys_addr);
>  
> -	if (v->flags & VM_IOREMAP)
> -		seq_puts(m, " ioremap");
> +			if (v->flags & VM_IOREMAP)
> +				seq_puts(m, " ioremap");
>  
> -	if (v->flags & VM_ALLOC)
> -		seq_puts(m, " vmalloc");
> +			if (v->flags & VM_ALLOC)
> +				seq_puts(m, " vmalloc");
>  
> -	if (v->flags & VM_MAP)
> -		seq_puts(m, " vmap");
> +			if (v->flags & VM_MAP)
> +				seq_puts(m, " vmap");
>  
> -	if (v->flags & VM_USERMAP)
> -		seq_puts(m, " user");
> +			if (v->flags & VM_USERMAP)
> +				seq_puts(m, " user");
>  
> -	if (v->flags & VM_DMA_COHERENT)
> -		seq_puts(m, " dma-coherent");
> +			if (v->flags & VM_DMA_COHERENT)
> +				seq_puts(m, " dma-coherent");
>  
> -	if (is_vmalloc_addr(v->pages))
> -		seq_puts(m, " vpages");
> +			if (is_vmalloc_addr(v->pages))
> +				seq_puts(m, " vpages");
>  
> -	show_numa_info(m, v);
> -	seq_putc(m, '\n');
> +			show_numa_info(m, v);
> +			seq_putc(m, '\n');
> +		}
> +		spin_unlock(&vn->busy.lock);
> +	}
>  
>  	/*
>  	 * As a final step, dump "unpurged" areas.
>  	 */
> -final:
> -	if (list_is_last(&va->list, &vn->busy.head))
> -		show_purge_info(m);
> -
> +	show_purge_info(m);
>  	return 0;
>  }
>  
> -static const struct seq_operations vmalloc_op = {
> -	.start = s_start,
> -	.next = s_next,
> -	.stop = s_stop,
> -	.show = s_show,
> -};
> -
>  static int __init proc_vmalloc_init(void)
>  {
> +	void *priv_data = NULL;
> +
>  	if (IS_ENABLED(CONFIG_NUMA))
> -		proc_create_seq_private("vmallocinfo", 0400, NULL,
> -				&vmalloc_op,
> -				nr_node_ids * sizeof(unsigned int), NULL);
> -	else
> -		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
> +		priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
> +
> +	proc_create_single_data("vmallocinfo",
> +		0400, NULL, vmalloc_info_show, priv_data);
> +
>  	return 0;
>  }
>  module_init(proc_vmalloc_init);
> -- 
> 2.30.2
> 
