Message-ID: <CA+FuTScee60of_g1Mg7hJnMLu=mjM7w289mj3L4TNZ6WnTkvdA@mail.gmail.com>
Date: Mon, 23 Sep 2019 08:53:40 -0400
From: Willem de Bruijn <willemdebruijn.kernel@...il.com>
To: Steffen Klassert <steffen.klassert@...unet.com>
Cc: Network Development <netdev@...r.kernel.org>,
Paolo Abeni <pabeni@...hat.com>,
Subash Abhinov Kasiviswanathan <subashab@...eaurora.org>,
Marcelo Ricardo Leitner <marcelo.leitner@...il.com>
Subject: Re: [PATCH RFC 1/5] UDP: enable GRO by default.
On Fri, Sep 20, 2019 at 12:49 AM Steffen Klassert
<steffen.klassert@...unet.com> wrote:
>
> This patch enables UDP GRO regardless if a GRO capable
> socket is present. With this GRO is done by default
> for the local input and forwarding path.
>
> Signed-off-by: Steffen Klassert <steffen.klassert@...unet.com>
> struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
> diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
> index a3908e55ed89..929b12fc7bc5 100644
> --- a/net/ipv4/udp_offload.c
> +++ b/net/ipv4/udp_offload.c
> @@ -401,36 +401,25 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
> 	return NULL;
> }
>
> -INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
> -						   __be16 sport, __be16 dport));
> struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
> -				struct udphdr *uh, udp_lookup_t lookup)
> +				struct udphdr *uh, struct sock *sk)
> {
> 	struct sk_buff *pp = NULL;
> 	struct sk_buff *p;
> 	struct udphdr *uh2;
> 	unsigned int off = skb_gro_offset(skb);
> 	int flush = 1;
> -	struct sock *sk;
>
> -	rcu_read_lock();
> -	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
> -				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
> -	if (!sk)
> -		goto out_unlock;
> -
> -	if (udp_sk(sk)->gro_enabled) {
> +	if (!sk || !udp_sk(sk)->gro_receive) {
Not critical, but the use of sk->gro_enabled and sk->gro_receive to
signal whether sockets are willing to accept large packets or are udp
tunnels, respectively, is subtle and possibly confusing.
Wrappers udp_sock_is_tunnel and udp_sock_accepts_gso could perhaps
help document the logic a bit.
static inline bool udp_sock_is_tunnel(struct udp_sock *up)
{
	return up->gro_receive;
}
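The other wrapper would be just as small (a sketch; assuming the
gro_enabled bit added earlier in this series keeps that name):

static inline bool udp_sock_accepts_gso(struct udp_sock *up)
{
	return up->gro_enabled;
}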
And perhaps only pass a non-NULL sk to udp_gro_receive if it is a
tunnel socket, so that only tunnels skip the new default path:
static inline struct sock *udp4_lookup_tunnel(struct sk_buff *skb,
					      __be16 sport, __be16 dport)
{
	struct sock *sk;

	if (!static_branch_unlikely(&udp_encap_needed_key))
		return NULL;

	rcu_read_lock();
	sk = udp4_lib_lookup_skb(skb, sport, dport);
	rcu_read_unlock();

	return sk && udp_sock_is_tunnel(udp_sk(sk)) ? sk : NULL;
}
> 		pp = call_gro_receive(udp_gro_receive_segment, head, skb);
> -		rcu_read_unlock();
> 		return pp;
> 	}
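With those wrappers the quoted test would then read

	if (!sk || !udp_sock_is_tunnel(udp_sk(sk))) {

or simply if (!sk), if only tunnel sockets are ever passed in.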
Just a suggestion. It may be too verbose as given.
> @@ -468,8 +456,10 @@ INDIRECT_CALLABLE_SCOPE
> struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
> {
> 	struct udphdr *uh = udp_gro_udphdr(skb);
> +	struct sk_buff *pp;
> +	struct sock *sk;
>
> -	if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key))
> +	if (unlikely(!uh))
> 		goto flush;
>
> 	/* Don't bother verifying checksum if we're going to flush anyway. */
> @@ -484,7 +474,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
> 					     inet_gro_compute_pseudo);
> skip:
> 	NAPI_GRO_CB(skb)->is_ipv6 = 0;
> -	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
> +	rcu_read_lock();
> +	sk = static_branch_unlikely(&udp_encap_needed_key) ? udp4_lib_lookup_skb(skb, uh->source, uh->dest) : NULL;
> +	pp = udp_gro_receive(head, skb, uh, sk);
> +	rcu_read_unlock();
> +	return pp;
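With a helper like udp4_lookup_tunnel above, the tail of
udp4_gro_receive could then shrink to something like (untested sketch,
not part of the patch; the rcu handling and the static branch check
move into the helper):

skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	return udp_gro_receive(head, skb, uh,
			       udp4_lookup_tunnel(skb, uh->source, uh->dest));

which would also drop the new pp and sk locals.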