Message-ID: <20101127150015.GE15365@Krystal>
Date:	Sat, 27 Nov 2010 10:00:15 -0500
From:	Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
To:	Christoph Lameter <cl@...ux.com>
Cc:	akpm@...ux-foundation.org, Pekka Enberg <penberg@...helsinki.fi>,
	linux-kernel@...r.kernel.org,
	Eric Dumazet <eric.dumazet@...il.com>,
	Tejun Heo <tj@...nel.org>
Subject: Re: [thisops uV2 04/10] x86: Support for
	this_cpu_add,sub,dec,inc_return

* Christoph Lameter (cl@...ux.com) wrote:
> Supply an implementation for x86 in order to generate more efficient code.
> 
> Signed-off-by: Christoph Lameter <cl@...ux.com>
> 
> ---
>  arch/x86/include/asm/percpu.h |   50 ++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 50 insertions(+)
> 
> Index: linux-2.6/arch/x86/include/asm/percpu.h
> ===================================================================
> --- linux-2.6.orig/arch/x86/include/asm/percpu.h	2010-11-23 16:35:19.000000000 -0600
> +++ linux-2.6/arch/x86/include/asm/percpu.h	2010-11-23 16:36:05.000000000 -0600
> @@ -177,6 +177,45 @@ do {									\
>  	}								\
>  } while (0)
>  
> +
> +/*
> + * Add return operation
> + */
> +#define percpu_add_return_op(var, val)					\
> +({									\
> +	typedef typeof(var) pao_T__;					\
> +	typeof(var) pfo_ret__ = val;					\
> +	if (0) {							\
> +		pao_T__ pao_tmp__;					\
> +		pao_tmp__ = (val);					\
> +		(void)pao_tmp__;					\
> +	}								\

OK, I'm dumb: why is the above needed?
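
In case the answer is simply "compile-time type check of val against var's
type", here is a minimal standalone sketch of that idiom as I read it (the
names below are mine, not from the patch); if there is more to it than that,
that is the part I am missing:

/*
 * The dead "if (0)" branch makes the compiler diagnose an incompatible
 * assignment of val to var's type, while emitting no code at runtime.
 */
#include <stdio.h>

#define checked_add(var, val)				\
({							\
	typedef __typeof__(var) T__;			\
	if (0) {					\
		T__ tmp__;				\
		tmp__ = (val);	/* type check only */	\
		(void)tmp__;				\
	}						\
	(var) += (val);					\
})

int main(void)
{
	long hits = 0;

	checked_add(hits, 3);		/* OK */
	/* checked_add(hits, "bad");	   warns/errors at build time */
	printf("%ld\n", hits);
	return 0;
}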

> +	switch (sizeof(var)) {						\
> +	case 1:								\
> +		asm("xaddb %0, "__percpu_arg(1)				\
> +			    : "+q" (pfo_ret__), "+m" (var)		\
> +			    : : "memory");				\
> +		break;							\
> +	case 2:								\
> +		asm("xaddw %0, "__percpu_arg(1)				\
> +			    : "+r" (pfo_ret__), "+m" (var)		\
> +			    : : "memory");				\
> +		break;							\
> +	case 4:								\
> +		asm("xaddl %0, "__percpu_arg(1)				\
> +			    : "+r"(pfo_ret__), "+m" (var)		\
> +			    : : "memory");				\
> +		break;							\
> +	case 8:								\
> +		asm("xaddq %0, "__percpu_arg(1)				\
> +			    : "+re" (pfo_ret__),  "+m" (var)		\
> +			    : : "memory");				\
> +		break;							\
> +	default: __bad_percpu_size();					\
> +	}								\
> +	pfo_ret__ + (val);						\
> +})
> +
>  #define percpu_from_op(op, var, constraint)		\
>  ({							\
>  	typeof(var) pfo_ret__;				\
> @@ -300,6 +339,14 @@ do {									\
>  #define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
>  #define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)

(pcp) -> pcp: the extra parentheses around pcp at the call site are redundant,
since the callee macro already parenthesizes its parameter wherever it uses it.
Same for the other similar cases below.
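
Quick standalone illustration (macro names here are mine): because the callee
macro parenthesizes its parameter at every use, both call-site spellings
expand to the same thing, even for a compound argument:

#include <stdio.h>

#define inner_add(var, val)		((var) += (val))

#define outer_add_extra(pcp, val)	inner_add((pcp), val)	/* current  */
#define outer_add_plain(pcp, val)	inner_add(pcp, val)	/* proposed */

int main(void)
{
	int x = 1, *p = &x;

	outer_add_extra(*p, 2);
	outer_add_plain(*p, 3);
	printf("%d\n", x);	/* prints 6 */
	return 0;
}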

Thanks,

Mathieu

>  
> +#ifndef CONFIG_M386
> +#define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op((pcp), val)
> +#define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op((pcp), val)
> +#define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op((pcp), val)
> +#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op((pcp), val)
> +#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op((pcp), val)
> +#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op((pcp), val)
> +#endif
>  /*
>   * Per cpu atomic 64 bit operations are only available under 64 bit.
>   * 32 bit must fall back to generic operations.
> @@ -324,6 +371,9 @@ do {									\
>  #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
>  #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
>  
> +#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op((pcp), val)
> +#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op((pcp), val)
> +
>  #endif
>  
>  /* This is not atomic against other CPUs -- CPU preemption needs to be off */
> 
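As an aside for anyone reading along: the trailing "pfo_ret__ + (val)" works
because xadd leaves the old value of the memory operand in the source
register, so adding val back gives the post-add value. A small user-space
sketch of that behaviour (x86-64 only, names are mine, not a drop-in for the
patch):

#include <stdio.h>

/* xadd exchanges the register and memory operands and stores their sum in
 * memory, so "ret" ends up holding the old value of *ptr. */
static long add_return(long *ptr, long val)
{
	long ret = val;

	asm("xaddq %0, %1"
	    : "+r" (ret), "+m" (*ptr)
	    : : "memory");

	return ret + val;	/* old value + addend == new value */
}

int main(void)
{
	long counter = 10;

	printf("%ld\n", add_return(&counter, 5));	/* 15 */
	printf("%ld\n", counter);			/* 15 */
	return 0;
}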

-- 
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com