Message-ID: <159718349653.4728.6559437186853473612.stgit@john-Precision-5820-Tower>
Date: Tue, 11 Aug 2020 15:04:56 -0700
From: John Fastabend <john.fastabend@...il.com>
To: songliubraving@...com, kafai@...com, daniel@...earbox.net,
ast@...nel.org
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org,
john.fastabend@...il.com
Subject: [bpf PATCH v3 2/5] bpf: sock_ops sk access may stomp registers when
 dst_reg = src_reg

Similar to the previous patch ("bpf: sock_ops ctx access may stomp
registers"), when src_reg == dst_reg while reading the sk field of a
sock_ops struct we generate the following xlated code:

53: (61) r9 = *(u32 *)(r9 +28)
54: (15) if r9 == 0x0 goto pc+3
56: (79) r9 = *(u64 *)(r9 +0)

This stomps on the r9 register in order to do the sk_fullsock check, so
the subsequent load of the skops->sk field uses the is_fullsock value
left in r9 as its base address instead of the sock_ops pointer. To fix
this, use the pattern noted in the previous fix and use the temp field
to save/restore a register used to do the sk_fullsock check.
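
For reference, the pre-fix emission (the same lines removed in the diff
below) loads is_fullsock through si->dst_reg, which is only correct when
dst_reg and src_reg differ:

  /* Pre-fix conversion: when si->dst_reg == si->src_reg the first
   * BPF_LDX_MEM overwrites the bpf_sock_ops_kern pointer with the
   * is_fullsock value, so the final load of sk uses a stomped base.
   */
  *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
					 is_fullsock),
			si->dst_reg, si->src_reg,
			offsetof(struct bpf_sock_ops_kern, is_fullsock));
  *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
  *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern, sk),
			si->dst_reg, si->src_reg,
			offsetof(struct bpf_sock_ops_kern, sk));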

After the fix the generated xlated code reads:

52: (7b) *(u64 *)(r9 +32) = r8
53: (61) r8 = *(u32 *)(r9 +28)
54: (15) if r8 == 0x0 goto pc+3
55: (79) r8 = *(u64 *)(r9 +32)
56: (79) r9 = *(u64 *)(r9 +0)
57: (05) goto pc+1
58: (79) r8 = *(u64 *)(r9 +32)

Here the r9 register was in use, so r8 is chosen as the temporary
register. At line 52, r8 is saved into the temp variable, and at line 55
it is restored for the fullsock != 0 case. Finally, the fullsock == 0
case is handled by restoring r8 at line 58.
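
The temporary-register selection in the fix can be read in isolation:
start at r9 and step down past any register that collides with dst_reg
or src_reg; two checks suffice because at most two registers can be
excluded. A standalone sketch (plain C, mirroring SOCK_OPS_GET_SK()
below, not kernel code) of that pick:

  /* Returns r9, r8 or r7, whichever aliases neither dst nor src. */
  static int pick_temp_reg(int dst_reg, int src_reg)
  {
  	int reg = 9;	/* BPF_REG_9 */

  	if (dst_reg == reg || src_reg == reg)
  		reg--;
  	if (dst_reg == reg || src_reg == reg)
  		reg--;
  	return reg;
  }

In the dump above dst_reg == src_reg == r9, so the pick lands on r8,
matching the save into the temp slot at line 52.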

This adds a new macro, SOCK_OPS_GET_SK. It is almost possible to merge
this with SOCK_OPS_GET_FIELD, but I found the extra branch logic a bit
more confusing than just adding a new macro, despite some duplicated
code.
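
For context, a minimal sock_ops program that reads skops->sk and so
exercises this conversion might look as follows. This is an
illustrative sketch, not part of this series; whether the compiler
actually picks dst_reg == src_reg for the load is up to its register
allocator, which is what the selftests later in the series pin down:

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("sockops")
  int sockops_sk_access(struct bpf_sock_ops *skops)
  {
  	/* This ctx access is rewritten by sock_ops_convert_ctx_access() */
  	struct bpf_sock *sk = skops->sk;

  	if (!sk)	/* NULL when this is not a fullsock */
  		return 1;
  	return 1;
  }

  char _license[] SEC("license") = "GPL";
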
Fixes: 1314ef561102e ("bpf: export bpf_sock for BPF_PROG_TYPE_SOCK_OPS prog type")
Acked-by: Song Liu <songliubraving@...com>
Acked-by: Martin KaFai Lau <kafai@...com>
Signed-off-by: John Fastabend <john.fastabend@...il.com>
---
 net/core/filter.c | 49 ++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 38 insertions(+), 11 deletions(-)

diff --git a/net/core/filter.c b/net/core/filter.c
index 1baeeff..b2df520 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8358,6 +8358,43 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		} \
 	} while (0)
 
+#define SOCK_OPS_GET_SK() \
+	do { \
+		int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
+		if (si->dst_reg == reg || si->src_reg == reg) \
+			reg--; \
+		if (si->dst_reg == reg || si->src_reg == reg) \
+			reg--; \
+		if (si->dst_reg == si->src_reg) { \
+			*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
+					      offsetof(struct bpf_sock_ops_kern, \
+						       temp)); \
+			fullsock_reg = reg; \
+			jmp += 2; \
+		} \
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+						struct bpf_sock_ops_kern, \
+						is_fullsock), \
+				      fullsock_reg, si->src_reg, \
+				      offsetof(struct bpf_sock_ops_kern, \
+					       is_fullsock)); \
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
+		if (si->dst_reg == si->src_reg) \
+			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
+					      offsetof(struct bpf_sock_ops_kern, \
+						       temp)); \
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+					struct bpf_sock_ops_kern, sk), \
+				      si->dst_reg, si->src_reg, \
+				      offsetof(struct bpf_sock_ops_kern, sk)); \
+		if (si->dst_reg == si->src_reg) { \
+			*insn++ = BPF_JMP_A(1); \
+			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
+					      offsetof(struct bpf_sock_ops_kern, \
+						       temp)); \
+		} \
+	} while (0)
+
 #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
 	SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)
 
@@ -8642,17 +8679,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
 		break;
 	case offsetof(struct bpf_sock_ops, sk):
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-						struct bpf_sock_ops_kern,
-						is_fullsock),
-				      si->dst_reg, si->src_reg,
-				      offsetof(struct bpf_sock_ops_kern,
-					       is_fullsock));
-		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-					struct bpf_sock_ops_kern, sk),
-				      si->dst_reg, si->src_reg,
-				      offsetof(struct bpf_sock_ops_kern, sk));
+		SOCK_OPS_GET_SK();
 		break;
 	}
 	return insn - insn_buf;
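
As a reading aid (not part of the patch), the macro's emissions map onto
the xlated dump in the commit message above for the
dst_reg == src_reg == r9 case (temp register r8, jmp == 3) as follows:

  /*
   * BPF_STX_MEM(..., temp)         -> 52: *(u64 *)(r9 +32) = r8   save r8
   * BPF_LDX_MEM(..., is_fullsock)  -> 53: r8 = *(u32 *)(r9 +28)   fullsock_reg
   * BPF_JMP_IMM(BPF_JEQ, r8, 0, 3) -> 54: if r8 == 0x0 goto pc+3
   * BPF_LDX_MEM(..., temp)         -> 55: r8 = *(u64 *)(r9 +32)   restore r8
   * BPF_LDX_MEM(..., sk)           -> 56: r9 = *(u64 *)(r9 +0)    r9 now sk
   * BPF_JMP_A(1)                   -> 57: goto pc+1
   * BPF_LDX_MEM(..., temp)         -> 58: r8 = *(u64 *)(r9 +32)   !fullsock path
   *
   * When dst_reg != src_reg none of the conditional emissions fire and
   * jmp stays 1, so the sequence is identical to the pre-fix code.
   */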