diff --git a/fs/inode.c b/fs/inode.c
index f94f889..dc8e72a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -556,6 +556,36 @@ repeat:
 	return node ? inode : NULL;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Each cpu owns a range of 1024 numbers.
+ * 'shared_last_ino' is dirtied only once out of 1024 allocations,
+ * to renew the exhausted range.
+ */
+static DEFINE_PER_CPU(int, last_ino);
+
+static int last_ino_get(void)
+{
+	static atomic_t shared_last_ino;
+	int *p = &get_cpu_var(last_ino);
+	int res = *p;
+
+	if (unlikely((res & 1023) == 0))
+		res = atomic_add_return(1024, &shared_last_ino) - 1024;
+
+	*p = ++res;
+	put_cpu_var(last_ino);
+	return res;
+}
+#else
+static int last_ino_get(void)
+{
+	static int last_ino;
+
+	return ++last_ino;
+}
+#endif
+
 /**
  * new_inode - obtain an inode
  * @sb: superblock
@@ -575,7 +605,6 @@ struct inode *new_inode(struct super_block *sb)
 	 * error if st_ino won't fit in target struct field. Use 32bit counter
 	 * here to attempt to avoid that.
 	 */
-	static unsigned int last_ino;
 	struct inode * inode;
 
 	spin_lock_prefetch(&inode_lock);
@@ -583,11 +612,11 @@ struct inode *new_inode(struct super_block *sb)
 	inode = alloc_inode(sb);
 	if (inode) {
 		percpu_counter_inc(&nr_inodes);
+		inode->i_state = 0;
+		inode->i_ino = last_ino_get();
 		spin_lock(&inode_lock);
 		list_add(&inode->i_list, &inode_in_use);
 		list_add(&inode->i_sb_list, &sb->s_inodes);
-		inode->i_ino = ++last_ino;
-		inode->i_state = 0;
 		spin_unlock(&inode_lock);
 	}
 	return inode;
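
Note (not part of the patch): the batching scheme used by last_ino_get() above can be illustrated with a minimal user-space sketch. Each thread keeps a private cursor and only touches the shared atomic counter when its 1024-entry batch is exhausted, mirroring the per-cpu variable plus shared_last_ino pair in the SMP branch. All names here (id_get, shared_last_id, thread_last_id, BATCH) are hypothetical and exist only for illustration.

/*
 * User-space analogue of last_ino_get(): a shared atomic counter is
 * advanced in batches of 1024, and each thread hands out ids from its
 * privately reserved range.  Illustrative sketch, not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

#define BATCH 1024

static atomic_int shared_last_id;		/* plays the role of shared_last_ino */
static _Thread_local int thread_last_id;	/* plays the role of the per-cpu last_ino */

static int id_get(void)
{
	int res = thread_last_id;

	if ((res & (BATCH - 1)) == 0)
		/* private range exhausted: reserve the next block of 1024 ids */
		res = atomic_fetch_add(&shared_last_id, BATCH);

	thread_last_id = ++res;
	return res;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("allocated id %d\n", id_get());
	return 0;
}

The effect, as the patch comment states, is that the shared counter's cache line is written only once per 1024 allocations, so concurrent allocators on different CPUs rarely bounce it between them.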