Message-Id: <20230608114155.39367-4-gilad9366@gmail.com>
Date: Thu, 8 Jun 2023 14:41:54 +0300
From: Gilad Sever <gilad9366@...il.com>
To: dsahern@...nel.org,
martin.lau@...ux.dev,
daniel@...earbox.net,
john.fastabend@...il.com,
ast@...nel.org,
andrii@...nel.org,
song@...nel.org,
yhs@...com,
kpsingh@...nel.org,
sdf@...gle.com,
haoluo@...gle.com,
jolsa@...nel.org,
davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
mykolal@...com,
shuah@...nel.org,
hawk@...nel.org,
joe@...d.net.nz
Cc: eyal.birger@...il.com,
shmulik.ladkani@...il.com,
bpf@...r.kernel.org,
netdev@...r.kernel.org,
linux-kselftest@...r.kernel.org,
Gilad Sever <gilad9366@...il.com>
Subject: [PATCH bpf,v5 3/4] bpf: fix bpf socket lookup from tc/xdp to respect socket VRF bindings
When calling bpf_sk_lookup_tcp(), bpf_sk_lookup_udp() or
bpf_skc_lookup_tcp() from tc/xdp ingress, VRF socket bindings aren't
respected, i.e. unbound sockets are returned, and bound sockets aren't
found.

VRF binding is determined by the sdif argument to sk_lookup(), however
when called from tc the IP SKB control block isn't initialized and thus
inet{,6}_sdif() always returns 0.

Fix by calculating sdif for the tc/xdp flows by observing the device's
l3 enslaved state.

The cg/sk_skb hooking points which are expected to support
inet{,6}_sdif() pass sdif=-1 which makes __bpf_skc_lookup() use the
existing logic.
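
As an illustration (not part of this patch), a minimal tc ingress
program of the kind affected by this fix might look as follows; the
program name, address and port are placeholders:

  #include <linux/bpf.h>
  #include <linux/pkt_cls.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_endian.h>

  SEC("tc")
  int lookup_example(struct __sk_buff *skb)
  {
          struct bpf_sock_tuple tuple = {
                  .ipv4.daddr = bpf_htonl(0xc0a80101), /* 192.168.1.1 */
                  .ipv4.dport = bpf_htons(80),
          };
          struct bpf_sock *sk;

          /* Without this fix, a socket bound to a VRF device is not
           * found here: sdif is read from the uninitialized IP control
           * block and evaluates to 0.
           */
          sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                                 BPF_F_CURRENT_NETNS, 0);
          if (sk)
                  bpf_sk_release(sk);

          return TC_ACT_OK;
  }

  char _license[] SEC("license") = "GPL";
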
Fixes: 6acc9b432e67 ("bpf: Add helper to retrieve socket in BPF")
Acked-by: Stanislav Fomichev <sdf@...gle.com>
Reviewed-by: Shmulik Ladkani <shmulik.ladkani@...il.com>
Reviewed-by: Eyal Birger <eyal.birger@...il.com>
Signed-off-by: Gilad Sever <gilad9366@...il.com>
---
v5: Use reverse xmas tree indentation
v4: Move dev_sdif() to include/linux/netdevice.h as suggested by Stanislav Fomichev
v3: Rename bpf_l2_sdif() to dev_sdif() as suggested by Stanislav Fomichev
---
include/linux/netdevice.h | 9 +++++++
net/core/filter.c | 54 ++++++++++++++++++++++++---------------
2 files changed, 42 insertions(+), 21 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c2f0c6002a84..db1bfca6b8b4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -5093,6 +5093,15 @@ static inline bool netif_is_l3_slave(const struct net_device *dev)
return dev->priv_flags & IFF_L3MDEV_SLAVE;
}
+static inline int dev_sdif(const struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (netif_is_l3_slave(dev))
+ return dev->ifindex;
+#endif
+ return 0;
+}
+
static inline bool netif_is_bridge_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_EBRIDGE;
diff --git a/net/core/filter.c b/net/core/filter.c
index e06547922edc..5f665845ae45 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6555,12 +6555,11 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
static struct sock *
__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
- u64 flags)
+ u64 flags, int sdif)
{
struct sock *sk = NULL;
struct net *net;
u8 family;
- int sdif;
if (len == sizeof(tuple->ipv4))
family = AF_INET;
@@ -6572,10 +6571,12 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
if (unlikely(flags || !((s32)netns_id < 0 || netns_id <= S32_MAX)))
goto out;
- if (family == AF_INET)
- sdif = inet_sdif(skb);
- else
- sdif = inet6_sdif(skb);
+ if (sdif < 0) {
+ if (family == AF_INET)
+ sdif = inet_sdif(skb);
+ else
+ sdif = inet6_sdif(skb);
+ }
if ((s32)netns_id < 0) {
net = caller_net;
@@ -6595,10 +6596,11 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
static struct sock *
__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
- u64 flags)
+ u64 flags, int sdif)
{
struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
- ifindex, proto, netns_id, flags);
+ ifindex, proto, netns_id, flags,
+ sdif);
if (sk) {
struct sock *sk2 = sk_to_full_sk(sk);
@@ -6638,7 +6640,7 @@ bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
}
return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
- netns_id, flags);
+ netns_id, flags, -1);
}
static struct sock *
@@ -6732,10 +6734,11 @@ BPF_CALL_5(bpf_tc_skc_lookup_tcp, struct sk_buff *, skb,
{
struct net *caller_net = dev_net(skb->dev);
int ifindex = skb->dev->ifindex;
+ int sdif = dev_sdif(skb->dev);
return (unsigned long)__bpf_skc_lookup(skb, tuple, len, caller_net,
ifindex, IPPROTO_TCP, netns_id,
- flags);
+ flags, sdif);
}
static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = {
@@ -6755,10 +6758,11 @@ BPF_CALL_5(bpf_tc_sk_lookup_tcp, struct sk_buff *, skb,
{
struct net *caller_net = dev_net(skb->dev);
int ifindex = skb->dev->ifindex;
+ int sdif = dev_sdif(skb->dev);
return (unsigned long)__bpf_sk_lookup(skb, tuple, len, caller_net,
ifindex, IPPROTO_TCP, netns_id,
- flags);
+ flags, sdif);
}
static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = {
@@ -6778,10 +6782,11 @@ BPF_CALL_5(bpf_tc_sk_lookup_udp, struct sk_buff *, skb,
{
struct net *caller_net = dev_net(skb->dev);
int ifindex = skb->dev->ifindex;
+ int sdif = dev_sdif(skb->dev);
return (unsigned long)__bpf_sk_lookup(skb, tuple, len, caller_net,
ifindex, IPPROTO_UDP, netns_id,
- flags);
+ flags, sdif);
}
static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = {
@@ -6814,11 +6819,13 @@ BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
{
struct net *caller_net = dev_net(ctx->rxq->dev);
- int ifindex = ctx->rxq->dev->ifindex;
+ struct net_device *dev = ctx->rxq->dev;
+ int ifindex = dev->ifindex;
+ int sdif = dev_sdif(dev);
return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
ifindex, IPPROTO_UDP, netns_id,
- flags);
+ flags, sdif);
}
static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
@@ -6837,11 +6844,13 @@ BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
{
struct net *caller_net = dev_net(ctx->rxq->dev);
- int ifindex = ctx->rxq->dev->ifindex;
+ struct net_device *dev = ctx->rxq->dev;
+ int ifindex = dev->ifindex;
+ int sdif = dev_sdif(dev);
return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
ifindex, IPPROTO_TCP, netns_id,
- flags);
+ flags, sdif);
}
static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
@@ -6860,11 +6869,13 @@ BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
{
struct net *caller_net = dev_net(ctx->rxq->dev);
- int ifindex = ctx->rxq->dev->ifindex;
+ struct net_device *dev = ctx->rxq->dev;
+ int ifindex = dev->ifindex;
+ int sdif = dev_sdif(dev);
return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
ifindex, IPPROTO_TCP, netns_id,
- flags);
+ flags, sdif);
}
static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
@@ -6884,7 +6895,8 @@ BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
{
return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
sock_net(ctx->sk), 0,
- IPPROTO_TCP, netns_id, flags);
+ IPPROTO_TCP, netns_id, flags,
+ -1);
}
static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
@@ -6903,7 +6915,7 @@ BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
{
return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
sock_net(ctx->sk), 0, IPPROTO_TCP,
- netns_id, flags);
+ netns_id, flags, -1);
}
static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
@@ -6922,7 +6934,7 @@ BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
{
return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
sock_net(ctx->sk), 0, IPPROTO_UDP,
- netns_id, flags);
+ netns_id, flags, -1);
}
static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
--
2.34.1