Message-ID: <4b5812e9-9fd9-7f5b-e0a0-9404341285c5@fb.com>
Date:   Mon, 7 Dec 2020 17:44:12 -0800
From:   Yonghong Song <yhs@...com>
To:     Brendan Jackman <jackmanb@...gle.com>, <bpf@...r.kernel.org>
CC:     Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        KP Singh <kpsingh@...omium.org>,
        Florent Revest <revest@...omium.org>,
        <linux-kernel@...r.kernel.org>, Jann Horn <jannh@...gle.com>
Subject: Re: [PATCH bpf-next v4 07/11] bpf: Add instructions for
 atomic_[cmp]xchg



On 12/7/20 8:07 AM, Brendan Jackman wrote:
> This adds two atomic opcodes, both of which include the BPF_FETCH
> flag. XCHG without the BPF_FETCH flag would naturally encode
> atomic_set. This is not supported because it would be of limited
> value to userspace (it doesn't imply any barriers). CMPXCHG without
> BPF_FETCH would be an atomic compare-and-write. We don't have such
> an operation in the kernel so it isn't provided to BPF either.
> 
> There are two significant design decisions made for the CMPXCHG
> instruction:
> 
>   - This operation fundamentally has 3 operands, but we only have
>     two register fields, so the operand we compare against (the
>     kernel's API calls it 'old') is hard-coded to be R0. x86 has a
>     similar design (and A64 doesn't have this problem).
> 
>     A potential alternative might be to encode the other operand's
>     register number in the immediate field.
> 
>   - The kernel's atomic_cmpxchg returns the old value, while the C11
>     userspace APIs return a boolean indicating the comparison
>     result. Which should BPF do? A64 returns the old value. x86 returns
>     the old value in the hard-coded register (and also sets a
>     flag). That means return-old-value is easier to JIT.
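(For anyone reading along: a purely hypothetical use of that R0 convention,
with arbitrary register choices and assuming r2 already points at a 32-bit
value, would be

	BPF_MOV32_IMM(BPF_REG_0, 0),                        /* expected old value */
	BPF_MOV32_IMM(BPF_REG_1, 1),                        /* new value          */
	BPF_ATOMIC_CMPXCHG(BPF_W, BPF_REG_2, BPF_REG_1, 0), /* r0 = cmpxchg(*(r2 + 0), r0, r1) */

after which r0 == 0 exactly when the store happened, since the returned old
value then matched the expected one.)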
> 
> Signed-off-by: Brendan Jackman <jackmanb@...gle.com>
> ---
>   arch/x86/net/bpf_jit_comp.c    |  8 ++++++++
>   include/linux/filter.h         | 22 ++++++++++++++++++++++
>   include/uapi/linux/bpf.h       |  4 +++-
>   kernel/bpf/core.c              | 20 ++++++++++++++++++++
>   kernel/bpf/disasm.c            | 15 +++++++++++++++
>   kernel/bpf/verifier.c          | 19 +++++++++++++++++--
>   tools/include/linux/filter.h   | 22 ++++++++++++++++++++++
>   tools/include/uapi/linux/bpf.h |  4 +++-
>   8 files changed, 110 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index eea7d8b0bb12..308241187582 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -815,6 +815,14 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
>   		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
>   		EMIT2(0x0F, 0xC1);
>   		break;
> +	case BPF_XCHG:
> +		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
> +		EMIT1(0x87);
> +		break;
> +	case BPF_CMPXCHG:
> +		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
> +		EMIT2(0x0F, 0xB1);
> +		break;
>   	default:
>   		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
>   		return -EFAULT;
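(Side note for readers: 0x87 is x86 XCHG r/m,r and 0x0f 0xb1 is CMPXCHG
r/m,r, whose comparand/result is implicitly in (e|r)ax -- the register the
x86-64 JIT maps BPF r0 to. Ignoring the REX/mod-rm bytes and whatever lock
prefix the rest of emit_atomic() emits, the generated code is roughly:

	xchg    %src_reg, off(%dst_reg)    /* src_reg = atomic_xchg(dst_reg + off, src_reg)      */
	cmpxchg %src_reg, off(%dst_reg)    /* rax/r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg) */
)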
> diff --git a/include/linux/filter.h b/include/linux/filter.h
> index b5258bca10d2..e1e1fc946a7c 100644
> --- a/include/linux/filter.h
> +++ b/include/linux/filter.h
> @@ -265,6 +265,8 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
>    *
>    *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
>    *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
> + *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
> + *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
>    */
>   
>   #define BPF_ATOMIC64(OP, DST, SRC, OFF)				\
> @@ -293,6 +295,26 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
>   		.off   = OFF,					\
>   		.imm   = BPF_ADD })
>   
> +/* Atomic exchange, src_reg = atomic_xchg(dst_reg + off, src_reg) */
> +
> +#define BPF_ATOMIC_XCHG(SIZE, DST, SRC, OFF)			\
> +	((struct bpf_insn) {					\
> +		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
> +		.dst_reg = DST,					\
> +		.src_reg = SRC,					\
> +		.off   = OFF,					\
> +		.imm   = BPF_XCHG  })
> +
> +/* Atomic compare-exchange, r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg) */
> +
> +#define BPF_ATOMIC_CMPXCHG(SIZE, DST, SRC, OFF)			\
> +	((struct bpf_insn) {					\
> +		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
> +		.dst_reg = DST,					\
> +		.src_reg = SRC,					\
> +		.off   = OFF,					\
> +		.imm   = BPF_CMPXCHG })

Define BPF_ATOMIC_{XCHG, CMPXCHG} based on BPF_ATOMIC macro?
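i.e. a size- and op-parameterised variant of BPF_ATOMIC64 above, something
like this (untested sketch, BPF_ATOMIC_OP is just a placeholder name):

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = OP })

#define BPF_ATOMIC_XCHG(SIZE, DST, SRC, OFF)			\
	BPF_ATOMIC_OP(SIZE, BPF_XCHG, DST, SRC, OFF)

#define BPF_ATOMIC_CMPXCHG(SIZE, DST, SRC, OFF)		\
	BPF_ATOMIC_OP(SIZE, BPF_CMPXCHG, DST, SRC, OFF)

That would avoid repeating the struct initializer for every new atomic op.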

> +
>   /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
>   
>   #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index d5389119291e..b733af50a5b9 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -45,7 +45,9 @@
>   #define BPF_EXIT	0x90	/* function return */
>   
>   /* atomic op type fields (stored in immediate) */
> -#define BPF_FETCH	0x01	/* fetch previous value into src reg */
> +#define BPF_XCHG	(0xe0 | BPF_FETCH)	/* atomic exchange */
> +#define BPF_CMPXCHG	(0xf0 | BPF_FETCH)	/* atomic compare-and-write */
> +#define BPF_FETCH	0x01	/* not an opcode on its own, used to build others */
>   
>   /* Register numbers */
>   enum {
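(Just expanding the macros, for reference -- the resulting insn->imm values
are:

	/* BPF_ADD              0x00
	 * BPF_ADD | BPF_FETCH  0x01
	 * BPF_XCHG             0xe1   (0xe0 | BPF_FETCH)
	 * BPF_CMPXCHG          0xf1   (0xf0 | BPF_FETCH)
	 */

so (imm & BPF_FETCH) is set exactly for the operations that write a result
back into a register.)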
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index 61e93eb7d363..28f960bc2e30 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -1630,6 +1630,16 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
>   				(u32) SRC,
>   				(atomic_t *)(unsigned long) (DST + insn->off));
>   			break;
> +		case BPF_XCHG:
> +			SRC = (u32) atomic_xchg(
> +				(atomic_t *)(unsigned long) (DST + insn->off),
> +				(u32) SRC);
> +			break;
> +		case BPF_CMPXCHG:
> +			BPF_R0 = (u32) atomic_cmpxchg(
> +				(atomic_t *)(unsigned long) (DST + insn->off),
> +				(u32) BPF_R0, (u32) SRC);
> +			break;
>   		default:
>   			goto default_label;
>   		}
[...]
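(Also worth spelling out for readers: atomic_cmpxchg() returns whatever was
in memory, so BPF_R0 always ends up holding the old memory value here,
whether or not the store happened -- the program has to do the comparison
itself. Open-coded, the 32-bit case above behaves like this sketch, where
ptr stands for the (DST + insn->off) address; this is illustration only,
not how the interpreter is or should be written:

	u32 old = *ptr;			/* read, atomically w.r.t. the store */
	if (old == (u32) BPF_R0)
		*ptr = (u32) SRC;	/* store only on match */
	BPF_R0 = old;
)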
