Use map_vm_area() instead of vmap() in text_poke() to avoid page
allocation and delayed unmapping.

Signed-off-by: Masami Hiramatsu
---
 arch/x86/include/asm/alternative.h |    1 +
 arch/x86/kernel/alternative.c      |   25 ++++++++++++++++++++-----
 init/main.c                        |    3 +++
 3 files changed, 24 insertions(+), 5 deletions(-)

Index: linux-2.6/arch/x86/include/asm/alternative.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/alternative.h
+++ linux-2.6/arch/x86/include/asm/alternative.h
@@ -177,6 +177,7 @@ extern void add_nops(void *insns, unsign
  * The _early version expects the memory to already be RW.
  */

+extern void text_poke_init(void);
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_early(void *addr, const void *opcode, size_t len);

Index: linux-2.6/arch/x86/kernel/alternative.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/alternative.c
+++ linux-2.6/arch/x86/kernel/alternative.c
@@ -485,6 +485,16 @@ void *text_poke_early(void *addr, const
 	return addr;
 }

+static struct vm_struct *text_poke_area[2];
+static DEFINE_SPINLOCK(text_poke_lock);
+
+void __init text_poke_init(void)
+{
+	text_poke_area[0] = get_vm_area(PAGE_SIZE, VM_ALLOC);
+	text_poke_area[1] = get_vm_area(2 * PAGE_SIZE, VM_ALLOC);
+	BUG_ON(!text_poke_area[0] || !text_poke_area[1]);
+}
+
 /**
  * text_poke - Update instructions on a live kernel
  * @addr: address to modify
@@ -501,8 +511,9 @@ void *__kprobes text_poke(void *addr, co
 	unsigned long flags;
 	char *vaddr;
 	int nr_pages = 2;
-	struct page *pages[2];
-	int i;
+	struct page *pages[2], **pgp = pages;
+	int i, ret;
+	struct vm_struct *vma;

 	if (!core_kernel_text((unsigned long)addr)) {
 		pages[0] = vmalloc_to_page(addr);
@@ -515,12 +526,16 @@ void *__kprobes text_poke(void *addr, co
 	BUG_ON(!pages[0]);
 	if (!pages[1])
 		nr_pages = 1;
-	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
-	BUG_ON(!vaddr);
+	spin_lock(&text_poke_lock);
+	vma = text_poke_area[nr_pages-1];
+	ret = map_vm_area(vma, PAGE_KERNEL, &pgp);
+	BUG_ON(ret);
+	vaddr = vma->addr;
 	local_irq_save(flags);
 	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
 	local_irq_restore(flags);
-	vunmap(vaddr);
+	unmap_kernel_range((unsigned long)vma->addr, (unsigned long)vma->size);
+	spin_unlock(&text_poke_lock);
 	sync_core();
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
@@ -528,3 +543,4 @@ void *__kprobes text_poke(void *addr, co
 		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
 	return addr;
 }
+EXPORT_SYMBOL_GPL(text_poke);
Index: linux-2.6/init/main.c
===================================================================
--- linux-2.6.orig/init/main.c
+++ linux-2.6/init/main.c
@@ -676,6 +676,9 @@ asmlinkage void __init start_kernel(void
 	taskstats_init_early();
 	delayacct_init();

+#ifdef CONFIG_X86
+	text_poke_init();
+#endif
 	check_bugs();

 	acpi_early_init(); /* before LAPIC and SMP init */