Message-Id: <9EC284DE-A66B-4369-8802-31E4A8DDA156@oracle.com>
Date: Thu, 18 Jun 2009 20:23:37 -0700
From: Andy Grover <andy.grover@...cle.com>
To: Tejun Heo <tj@...nel.org>
Cc: Linux Kernel <linux-kernel@...r.kernel.org>,
Ingo Molnar <mingo@...e.hu>,
Mike Frysinger <vapier@...too.org>,
Tony Luck <tony.luck@...el.com>
Subject: Re: [PATCH] percpu: use DEFINE_PER_CPU_SHARED_ALIGNED()
Ack, thanks! -- Andy
On Jun 18, 2009, at 7:34 PM, Tejun Heo <tj@...nel.org> wrote:
> There are a few places where ____cacheline_aligned* is used with
> DEFINE_PER_CPU(). Use DEFINE_PER_CPU_SHARED_ALIGNED() instead.
>
> DEFINE_PER_CPU_SHARED_ALIGNED() applies alignment only on SMP. While
> all the other converted places used the _in_smp variant or are only
> compiled for SMP, net/rds used the unconditional ____cacheline_aligned.
> I don't see any reason these data structures should be aligned on UP,
> so they are converted together with the rest.
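>
> For illustration, a minimal user-space sketch of the idea: pay the
> alignment (and padding) cost only when building for SMP. The names
> below are hypothetical and the 64-byte cache line is an assumption;
> the real macros live in include/linux/percpu-defs.h and
> include/linux/cache.h.
>
>	/* hypothetical demo, not the kernel's actual definitions */
>	#include <stdio.h>
>
>	#ifdef CONFIG_SMP
>	/* cacheline-align the variable on SMP builds */
>	# define ____cacheline_aligned_in_smp __attribute__((aligned(64)))
>	#else
>	/* expands to nothing: no padding cost on UP */
>	# define ____cacheline_aligned_in_smp
>	#endif
>
>	struct demo_stats { unsigned long counter; };
>
>	/* aligned only when CONFIG_SMP is defined at build time */
>	static struct demo_stats demo ____cacheline_aligned_in_smp;
>
>	int main(void)
>	{
>		printf("alignment = %zu\n", __alignof__(demo));
>		return 0;
>	}
>
> Compiling with and without -DCONFIG_SMP shows the alignment change
> from that of unsigned long to 64 bytes.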
>
> Signed-off-by: Tejun Heo <tj@...nel.org>
> Cc: Mike Frysinger <vapier@...too.org>
> Cc: Tony Luck <tony.luck@...el.com>
> Cc: Andy Grover <andy.grover@...cle.com>
> ---
> I committed the following patch to percpu#for-next git tree. Please
> let me know if something doesn't look right.
>
> git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git for-next
> http://git.kernel.org/?p=linux/kernel/git/tj/percpu.git;a=shortlog;h=for-next
>
> Thanks.
>
> arch/blackfin/mm/sram-alloc.c |    6 +++---
> arch/ia64/kernel/smp.c        |    3 ++-
> kernel/sched.c                |    4 ++--
> net/rds/ib_stats.c            |    2 +-
> net/rds/iw_stats.c            |    2 +-
> net/rds/page.c                |    2 +-
> 6 files changed, 10 insertions(+), 9 deletions(-)
>
> Index: work/arch/blackfin/mm/sram-alloc.c
> ===================================================================
> --- work.orig/arch/blackfin/mm/sram-alloc.c
> +++ work/arch/blackfin/mm/sram-alloc.c
> @@ -42,9 +42,9 @@
> #include <asm/mem_map.h>
> #include "blackfin_sram.h"
>
> -static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp;
> -static DEFINE_PER_CPU(spinlock_t, l1_data_sram_lock) ____cacheline_aligned_in_smp;
> -static DEFINE_PER_CPU(spinlock_t, l1_inst_sram_lock) ____cacheline_aligned_in_smp;
> +static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
> +static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
> +static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
> static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
>
> /* the data structure for L1 scratchpad and DATA SRAM */
> Index: work/arch/ia64/kernel/smp.c
> ===================================================================
> --- work.orig/arch/ia64/kernel/smp.c
> +++ work/arch/ia64/kernel/smp.c
> @@ -58,7 +58,8 @@ static struct local_tlb_flush_counts {
> unsigned int count;
> } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
>
> -static DEFINE_PER_CPU(unsigned short [NR_CPUS], shadow_flush_counts) ____cacheline_aligned;
> +static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
> + shadow_flush_counts);
>
> #define IPI_CALL_FUNC 0
> #define IPI_CPU_STOP 1
> Index: work/kernel/sched.c
> ===================================================================
> --- work.orig/kernel/sched.c
> +++ work/kernel/sched.c
> @@ -318,12 +318,12 @@ struct task_group root_task_group;
> /* Default task group's sched entity on each cpu */
> static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
> /* Default task group's cfs_rq on each cpu */
> -static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
> +static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_cfs_rq);
> #endif /* CONFIG_FAIR_GROUP_SCHED */
>
> #ifdef CONFIG_RT_GROUP_SCHED
> static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
> -static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
> +static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
> #endif /* CONFIG_RT_GROUP_SCHED */
> #else /* !CONFIG_USER_SCHED */
> #define root_task_group init_task_group
> Index: work/net/rds/ib_stats.c
> ===================================================================
> --- work.orig/net/rds/ib_stats.c
> +++ work/net/rds/ib_stats.c
> @@ -37,7 +37,7 @@
> #include "rds.h"
> #include "ib.h"
>
> -DEFINE_PER_CPU(struct rds_ib_statistics, rds_ib_stats) ____cacheline_aligned;
> +DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
>
> static char *rds_ib_stat_names[] = {
> "ib_connect_raced",
> Index: work/net/rds/iw_stats.c
> ===================================================================
> --- work.orig/net/rds/iw_stats.c
> +++ work/net/rds/iw_stats.c
> @@ -37,7 +37,7 @@
> #include "rds.h"
> #include "iw.h"
>
> -DEFINE_PER_CPU(struct rds_iw_statistics, rds_iw_stats) ____cacheline_aligned;
> +DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_iw_statistics, rds_iw_stats);
>
> static char *rds_iw_stat_names[] = {
> "iw_connect_raced",
> Index: work/net/rds/page.c
> ===================================================================
> --- work.orig/net/rds/page.c
> +++ work/net/rds/page.c
> @@ -39,7 +39,7 @@ struct rds_page_remainder {
> unsigned long r_offset;
> };
>
> -DEFINE_PER_CPU(struct rds_page_remainder, rds_page_remainders) ____cacheline_aligned;
> +DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders);
>
> /*
> * returns 0 on success or -errno on failure.