diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 9e491e7..871e9e9 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -120,9 +120,13 @@ struct tnode {
 	t_key key;
 	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
 	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
+	unsigned char vmalloced;
 	unsigned int full_children;	/* KEYLENGTH bits needed */
 	unsigned int empty_children;	/* KEYLENGTH bits needed */
-	struct rcu_head rcu;
+	union {
+		struct rcu_head rcu;
+		struct tnode *next;
+	};
 	struct node *child[0];
 };
 
@@ -347,17 +351,31 @@ static inline void free_leaf_info(struct leaf_info *leaf)
 static struct tnode *tnode_alloc(size_t size)
 {
 	struct page *pages;
+	struct tnode *tn;
 
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, GFP_KERNEL);
 
-	pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
-	if (!pages)
-		return NULL;
-
-	return page_address(pages);
+	/*
+	 * Because of power of two requirements of alloc_pages(),
+	 * we prefer vmalloc() in case we waste too much memory.
+	 */
+	if (roundup_pow_of_two(size) - size <= PAGE_SIZE * 8) {
+		pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
+		if (pages)
+			return page_address(pages);
+	}
+	tn = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+	if (tn)
+		tn->vmalloced = 1;
+	return tn;
 }
 
+static void fb_worker_func(struct work_struct *work);
+static DECLARE_WORK(fb_vfree_work, fb_worker_func);
+static DEFINE_SPINLOCK(fb_vfree_lock);
+static struct tnode *fb_vfree_list;
+
 static void __tnode_free_rcu(struct rcu_head *head)
 {
 	struct tnode *tn = container_of(head, struct tnode, rcu);
@@ -366,8 +384,30 @@ static void __tnode_free_rcu(struct rcu_head *head)
 
 	if (size <= PAGE_SIZE)
 		kfree(tn);
-	else
+	else if (!tn->vmalloced)
 		free_pages((unsigned long)tn, get_order(size));
+	else {
+		spin_lock(&fb_vfree_lock);
+		tn->next = fb_vfree_list;
+		fb_vfree_list = tn;
+		schedule_work(&fb_vfree_work);
+		spin_unlock(&fb_vfree_lock);
+	}
+}
+
+static void fb_worker_func(struct work_struct *work)
+{
+	struct tnode *tn, *next;
+
+	spin_lock_bh(&fb_vfree_lock);
+	tn = fb_vfree_list;
+	fb_vfree_list = NULL;
+	spin_unlock_bh(&fb_vfree_lock);
+	while (tn) {
+		next = tn->next;
+		vfree(tn);
+		tn = next;
+	}
 }
 
 static inline void tnode_free(struct tnode *tn)
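
Two notes on the mechanisms above, plus a standalone sketch of the free
path. On the allocation side, get_order() rounds the request up to a
power-of-two number of pages, so roundup_pow_of_two(size) - size is
exactly the memory alloc_pages() would waste; past 8 pages of slack,
tnode_alloc() now falls back to __vmalloc(). On the free side,
__tnode_free_rcu() runs from the RCU softirq, where vfree() must not be
called, so vmalloc'ed tnodes are chained through the union (the
rcu_head is dead once its callback has fired, so its storage can be
reused as the next pointer) and a work item drains the chain in process
context. The module below is a minimal sketch of that deferred-vfree
pattern, not part of the patch; deferred_obj, defer_vfree() and the
other identifiers are invented for illustration:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

/* Hypothetical vmalloc'ed object; stands in for struct tnode. */
struct deferred_obj {
	struct deferred_obj *next;
};

static void drain_func(struct work_struct *work);
static DECLARE_WORK(drain_work, drain_func);
static DEFINE_SPINLOCK(drain_lock);
static struct deferred_obj *drain_list;

/*
 * Push the object on a list and let the worker vfree() it later.
 * Intended callers run in softirq context (e.g. an RCU callback),
 * which is why a plain spin_lock() is enough here.
 */
static void defer_vfree(struct deferred_obj *obj)
{
	spin_lock(&drain_lock);
	obj->next = drain_list;
	drain_list = obj;
	schedule_work(&drain_work);
	spin_unlock(&drain_lock);
}

/* Runs in process context, where vfree() is legal. */
static void drain_func(struct work_struct *work)
{
	struct deferred_obj *obj, *next;

	spin_lock_bh(&drain_lock);	/* _bh: list is shared with softirq */
	obj = drain_list;
	drain_list = NULL;
	spin_unlock_bh(&drain_lock);
	while (obj) {
		next = obj->next;
		vfree(obj);
		obj = next;
	}
}

static int __init defer_demo_init(void)
{
	struct deferred_obj *obj;

	obj = __vmalloc(2 * PAGE_SIZE, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	if (!obj)
		return -ENOMEM;
	/*
	 * Demo only: called from process context here, which is harmless
	 * because nothing else takes drain_lock from softirq in this module.
	 */
	defer_vfree(obj);	/* actually freed later, by the worker */
	return 0;
}

static void __exit defer_demo_exit(void)
{
	flush_work(&drain_work);	/* make sure the chain is drained */
}

module_init(defer_demo_init);
module_exit(defer_demo_exit);
MODULE_LICENSE("GPL");

The lock-flavor asymmetry is deliberate: the softirq-side caller can
take plain spin_lock() because BHs are already disabled there, while
the worker runs in process context and must use spin_lock_bh() so it
cannot be deadlocked by the softirq path on the same CPU.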