Message-ID: <160216616785.882446.3058399056188507434.stgit@firesoul>
Date: Thu, 08 Oct 2020 16:09:27 +0200
From: Jesper Dangaard Brouer <brouer@...hat.com>
To: bpf@...r.kernel.org
Cc: Jesper Dangaard Brouer <brouer@...hat.com>, netdev@...r.kernel.org,
Daniel Borkmann <borkmann@...earbox.net>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
maze@...gle.com, lmb@...udflare.com, shaun@...era.io,
Lorenzo Bianconi <lorenzo@...nel.org>, marek@...udflare.com,
John Fastabend <john.fastabend@...il.com>,
Jakub Kicinski <kuba@...nel.org>, eyal.birger@...il.com
Subject: [PATCH bpf-next V3 6/6] net: inline and split up is_skb_forwardable
The BPF-helper bpf_skb_fib_lookup() uses is_skb_forwardable(), which
also checks whether the net_device is "up" (IFF_UP). That check is
unnecessary for this helper. This patch splits the size check out of
is_skb_forwardable() into is_skb_fwd_size_ok(), so the helper can use
that instead.

This change also causes is_skb_forwardable() to be inlined at the
existing call sites, most importantly in dev_forward_skb().
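
For context (not part of the patch itself), below is a minimal sketch of
a tc/BPF program that exercises this code path: it calls bpf_fib_lookup()
and drops packets when the helper reports BPF_FIB_LKUP_RET_FRAG_NEEDED,
which is the result set via the size check that now lives in
is_skb_fwd_size_ok(). The program name, section name and the way the
address fields get filled are assumptions for illustration only.

  #include <linux/bpf.h>
  #include <linux/pkt_cls.h>
  #include <bpf/bpf_helpers.h>

  #ifndef AF_INET
  #define AF_INET 2
  #endif

  SEC("classifier")
  int fib_fwd(struct __sk_buff *skb)
  {
  	struct bpf_fib_lookup params = {};
  	long rc;

  	params.family  = AF_INET;
  	params.ifindex = skb->ingress_ifindex;
  	/* ... fill ipv4_src/ipv4_dst/l4_protocol from the packet headers ... */

  	rc = bpf_fib_lookup(skb, &params, sizeof(params), 0);
  	if (rc == BPF_FIB_LKUP_RET_FRAG_NEEDED)
  		return TC_ACT_SHOT;	/* packet exceeds egress device MTU */
  	if (rc != BPF_FIB_LKUP_RET_SUCCESS)
  		return TC_ACT_OK;	/* let the normal stack handle it */

  	/* on success params.ifindex holds the egress device */
  	return bpf_redirect(params.ifindex, 0);
  }

  char _license[] SEC("license") = "GPL";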
Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
---
include/linux/netdevice.h | 27 +++++++++++++++++++++++++--
net/core/dev.c | 21 ---------------------
net/core/filter.c | 2 +-
3 files changed, 26 insertions(+), 24 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 58fb7b4869ba..4857c54590b5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3862,8 +3862,31 @@ int xdp_umem_query(struct net_device *dev, u16 queue_id);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
-bool is_skb_forwardable(const struct net_device *dev,
- const struct sk_buff *skb);
+
+static __always_inline bool is_skb_fwd_size_ok(const struct net_device *dev,
+ const struct sk_buff *skb)
+{
+ const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
+ unsigned int mtu = dev->mtu + dev->hard_header_len + vlan_hdr_len;
+
+ /* Assumes SKB length at L2 */
+ if (likely(skb->len <= mtu))
+ return true;
+
+ /* If TSO is enabled, we don't care about the length as the packet
+ * could be forwarded without being segmented before.
+ */
+ return skb_is_gso(skb);
+}
+
+static __always_inline bool is_skb_forwardable(const struct net_device *dev,
+ const struct sk_buff *skb)
+{
+ if (unlikely(!(dev->flags & IFF_UP)))
+ return false;
+
+ return is_skb_fwd_size_ok(dev, skb);
+}
static __always_inline int ____dev_forward_skb(struct net_device *dev,
struct sk_buff *skb,
diff --git a/net/core/dev.c b/net/core/dev.c
index 96b455f15872..21b62bda0ef9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2186,27 +2186,6 @@ static inline void net_timestamp_set(struct sk_buff *skb)
__net_timestamp(SKB); \
} \
-bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
-{
- unsigned int len;
-
- if (!(dev->flags & IFF_UP))
- return false;
-
- len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
- if (skb->len <= len)
- return true;
-
- /* if TSO is enabled, we don't care about the length as the packet
- * could be forwarded without being segmented before
- */
- if (skb_is_gso(skb))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL_GPL(is_skb_forwardable);
-
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
int ret = ____dev_forward_skb(dev, skb, true);
diff --git a/net/core/filter.c b/net/core/filter.c
index a8e24092e4f5..14e6b93757d4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5502,7 +5502,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
struct net_device *dev;
dev = dev_get_by_index_rcu(net, params->ifindex);
- if (!is_skb_forwardable(dev, skb))
+ if (!is_skb_fwd_size_ok(dev, skb))
rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
params->mtu = dev->mtu; /* union with tot_len */