--- linux-2.6/include/linux/vmalloc.h	2006-10-23 10:09:48.000000000 +0200
+++ linux-2.6-ed/include/linux/vmalloc.h	2006-10-23 10:26:37.000000000 +0200
@@ -23,13 +23,14 @@
 #endif
 
 struct vm_struct {
+	/* keep next, addr, size together to speed up lookups */
+	struct vm_struct	*next;
 	void			*addr;
 	unsigned long		size;
 	unsigned long		flags;
 	struct page		**pages;
 	unsigned int		nr_pages;
 	unsigned long		phys_addr;
-	struct vm_struct	*next;
 };
 
 /*
--- linux-2.6/mm/vmalloc.c	2006-10-23 10:11:43.000000000 +0200
+++ linux-2.6-ed/mm/vmalloc.c	2006-10-23 11:26:47.000000000 +0200
@@ -180,15 +180,13 @@
 	addr = ALIGN(start, align);
 	size = PAGE_ALIGN(size);
 
+	if (unlikely(!size))
+		return NULL;
+
 	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
 	if (unlikely(!area))
 		return NULL;
 
-	if (unlikely(!size)) {
-		kfree (area);
-		return NULL;
-	}
-
 	/*
 	 * We always allocate a guard page.
 	 */
@@ -528,11 +526,13 @@
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		BUG_ON(!area);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_user);
@@ -601,11 +601,13 @@
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		BUG_ON(!area);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_32_user);
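
For context, a minimal sketch of the lookup the vm_struct layout change targets. The kernel's __find_vm_area() walks the singly linked vmlist reading ->next and ->addr on every node, so packing those two (together with ->size) at the front of the struct lets each step of the walk stay within one cache line. The scaffolding below (find_area(), the trimmed struct, the bare list head) is illustrative userspace code under that assumption, not the kernel's exact implementation:

#include <stddef.h>

/*
 * Hot fields first: a list walk touches ->next and ->addr on every
 * node, and range lookups also read ->size, so keeping all three at
 * the start of the struct makes each step hit a single cache line.
 */
struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	/* colder fields (flags, pages, nr_pages, ...) would follow */
};

static struct vm_struct *vmlist;	/* in the kernel, guarded by vmlist_lock */

/*
 * Same shape as the kernel's __find_vm_area(): a linear scan for an
 * exact starting-address match, returning NULL when nothing matches.
 * That NULL is why the vmalloc_user() hunks above skip the lookup
 * entirely when __vmalloc() failed, and assert the result with
 * BUG_ON() on a pointer __vmalloc() just returned successfully.
 */
static struct vm_struct *find_area(void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next)
		if (tmp->addr == addr)
			break;
	return tmp;
}

The reordering of the !size check in __get_vm_area_node() is a separate micro-fix along the same lines: testing before kmalloc_node() avoids allocating a vm_struct only to free it immediately in the degenerate zero-size case.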