Message-ID: <CAD7_sbFi+KY-pH+2RZTq29qpBukvqZcC0xuB-7EJ_WNPP84bjQ@mail.gmail.com>
Date: Thu, 4 Jul 2019 17:31:04 +0800
From: Pengfei Li <lpf.vector@...il.com>
To: Uladzislau Rezki <urezki@...il.com>
Cc: akpm@...ux-foundation.org, peterz@...radead.org, rpenyaev@...e.de,
mhocko@...e.com, guro@...com, aryabinin@...tuozzo.com,
rppt@...ux.ibm.com, mingo@...nel.org, rick.p.edgecombe@...el.com,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 0/5] mm/vmalloc.c: improve readability and rewrite vmap_area
On Thu, Jul 4, 2019 at 3:30 AM Uladzislau Rezki <urezki@...il.com> wrote:
>
> Hello, Li.
>
> I do not think it is worth reducing the struct size the way this
> series does, i.e. with the union around flags/va_start. Simply put,
> if we need two variables, flags and va_start, let's have both.
> Otherwise everybody has to think about which one is valid at any
> given moment.
>
> That would make it easier to make mistakes, and the conversion looks
> strange to me. That is IMHO.
>
> If we want to reduce the size to one L1 cache line (64 bytes), I would
> propose eliminating the "flags" variable from the structure instead. We
> could do that by applying the patch below (as an example) on top of
> https://lkml.org/lkml/2019/7/3/661:
>
Hi Vlad,
Thank you for your detailed comments!
What you said inspired me: there is really no reason to stubbornly
keep "flags" in vmap_area when it can be eliminated.
I will drop "flags" from vmap_area as you suggested, and the next
version will be based on top of your commit
https://lkml.org/lkml/2019/7/3/661.
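
For reference, a minimal userspace sketch of the packing idea (the
surrounding types are only stand-ins, not the kernel definitions; the
real change is the quoted patch below): three members that are never
live at the same time share one 8-byte slot through an anonymous union.

<snip>
#include <assert.h>
#include <stdio.h>

struct llist_node_stub { struct llist_node_stub *next; };   /* stand-in */
struct vm_struct_stub  { void *addr; unsigned long size; }; /* stand-in */

struct vmap_area_model {
	unsigned long va_start;
	unsigned long va_end;

	/* exactly one of these is meaningful at any given time */
	union {
		unsigned long subtree_max_size;    /* in the "free" tree  */
		struct llist_node_stub purge_list; /* on the "purge" list */
		struct vm_struct_stub *vm;         /* in the "busy" tree  */
	};
};

int main(void)
{
	struct vm_struct_stub area = { .addr = (void *)0x1000, .size = 0x2000 };
	struct vmap_area_model va = { .va_start = 0x1000, .va_end = 0x3000 };

	/* "busy" state: a non-NULL vm pointer replaces the old VM_VM_AREA flag */
	va.vm = &area;
	assert(va.vm != NULL);

	/* once the area is freed, the same slot is reused for subtree_max_size */
	va.subtree_max_size = va.va_end - va.va_start;

	printf("sizeof(struct vmap_area_model) = %zu\n",
	       sizeof(struct vmap_area_model));
	return 0;
}
<snip>
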
--
Pengfei
> <snip>
> diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
> index 51e131245379..49bb82863d5b 100644
> --- a/include/linux/vmalloc.h
> +++ b/include/linux/vmalloc.h
> @@ -51,15 +51,22 @@ struct vmap_area {
> unsigned long va_start;
> unsigned long va_end;
>
> - /*
> - * Largest available free size in subtree.
> - */
> - unsigned long subtree_max_size;
> - unsigned long flags;
> struct rb_node rb_node; /* address sorted rbtree */
> struct list_head list; /* address sorted list */
> - struct llist_node purge_list; /* "lazy purge" list */
> - struct vm_struct *vm;
> +
> + /*
> + * Below three variables can be packed, because vmap_area
> + * object can be only in one of the three different states:
> + *
> + * - when an object is in "free" tree only;
> + * - when an object is in "purge list" only;
> + * - when an object is in "busy" tree only.
> + */
> + union {
> + unsigned long subtree_max_size;
> + struct llist_node purge_list;
> + struct vm_struct *vm;
> + };
> };
>
> /*
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 6f1b6a188227..e389a6db222b 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -329,8 +329,6 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
> #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
> #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
>
> -#define VM_VM_AREA 0x04
> -
> static DEFINE_SPINLOCK(vmap_area_lock);
> /* Export for kexec only */
> LIST_HEAD(vmap_area_list);
> @@ -1108,7 +1106,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>
> va->va_start = addr;
> va->va_end = addr + size;
> - va->flags = 0;
> + va->vm = NULL;
> insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
>
> spin_unlock(&vmap_area_lock);
> @@ -1912,7 +1910,6 @@ void __init vmalloc_init(void)
> if (WARN_ON_ONCE(!va))
> continue;
>
> - va->flags = VM_VM_AREA;
> va->va_start = (unsigned long)tmp->addr;
> va->va_end = va->va_start + tmp->size;
> va->vm = tmp;
> @@ -2010,7 +2007,6 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
> vm->size = va->va_end - va->va_start;
> vm->caller = caller;
> va->vm = vm;
> - va->flags |= VM_VM_AREA;
> spin_unlock(&vmap_area_lock);
> }
>
> @@ -2115,7 +2111,7 @@ struct vm_struct *find_vm_area(const void *addr)
> struct vmap_area *va;
>
> va = find_vmap_area((unsigned long)addr);
> - if (va && va->flags & VM_VM_AREA)
> + if (va && va->vm)
> return va->vm;
>
> return NULL;
> @@ -2139,11 +2135,10 @@ struct vm_struct *remove_vm_area(const void *addr)
>
> spin_lock(&vmap_area_lock);
> va = __find_vmap_area((unsigned long)addr);
> - if (va && va->flags & VM_VM_AREA) {
> + if (va && va->vm) {
> struct vm_struct *vm = va->vm;
>
> va->vm = NULL;
> - va->flags &= ~VM_VM_AREA;
> spin_unlock(&vmap_area_lock);
>
> kasan_free_shadow(vm);
> @@ -2854,7 +2849,7 @@ long vread(char *buf, char *addr, unsigned long count)
> if (!count)
> break;
>
> - if (!(va->flags & VM_VM_AREA))
> + if (!va->vm)
> continue;
>
> vm = va->vm;
> @@ -2934,7 +2929,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
> if (!count)
> break;
>
> - if (!(va->flags & VM_VM_AREA))
> + if (!va->vm)
> continue;
>
> vm = va->vm;
> @@ -3464,10 +3459,10 @@ static int s_show(struct seq_file *m, void *p)
> va = list_entry(p, struct vmap_area, list);
>
> /*
> - * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
> - * behalf of vmap area is being tear down or vm_map_ram allocation.
> + * s_show can encounter a race with remove_vm_area: !vm means the
> + * vmap area is being torn down or is a vm_map_ram allocation.
> */
> - if (!(va->flags & VM_VM_AREA)) {
> + if (!va->vm) {
> seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
> (void *)va->va_start, (void *)va->va_end,
> va->va_end - va->va_start);
> <snip>
>
> urezki@...36:~/data/ssd/coding/linux-stable$ pahole -C vmap_area mm/vmalloc.o
> die__process_function: tag not supported (INVALID)!
> struct vmap_area {
> long unsigned int va_start; /* 0 8 */
> long unsigned int va_end; /* 8 8 */
> struct rb_node rb_node; /* 16 24 */
> struct list_head list; /* 40 16 */
> union {
> long unsigned int subtree_max_size; /* 8 */
> struct llist_node purge_list; /* 8 */
> struct vm_struct * vm; /* 8 */
> }; /* 56 8 */
> /* --- cacheline 1 boundary (64 bytes) --- */
>
> /* size: 64, cachelines: 1, members: 5 */
> };
> urezki@...36:~/data/ssd/coding/linux-stable$
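
That output confirms the packing: on x86_64 the remaining members are
va_start (8) + va_end (8) + rb_node (24) + list (16) + the 8-byte union
= 64 bytes, down from roughly 88 with the separate subtree_max_size,
flags, purge_list and vm members. If we wanted to lock that property in,
one option (purely illustrative, not part of the patch above) would be a
build-time assertion, e.g. at the top of vmalloc_init():

<snip>
void __init vmalloc_init(void)
{
	/*
	 * Illustrative only: catch any future growth of struct vmap_area
	 * past one L1 cache line.  SMP_CACHE_BYTES is 64 on x86_64;
	 * other configurations may need a different bound, so a real
	 * patch would have to be more careful about the condition.
	 */
	BUILD_BUG_ON(sizeof(struct vmap_area) > SMP_CACHE_BYTES);

	/* existing vmalloc_init() body continues unchanged ... */
}
<snip>
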
>
> --
> Vlad Rezki