[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20180723172707.74a8acfa@cakuba.netronome.com>
Date: Mon, 23 Jul 2018 17:27:07 -0700
From: Jakub Kicinski <jakub.kicinski@...ronome.com>
To: Toshiaki Makita <toshiaki.makita1@...il.com>
Cc: netdev@...r.kernel.org, Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Toshiaki Makita <makita.toshiaki@....ntt.co.jp>,
Jesper Dangaard Brouer <brouer@...hat.com>
Subject: Re: [PATCH v3 bpf-next 3/8] veth: Avoid drops by oversized packets
when XDP is enabled
On Mon, 23 Jul 2018 00:13:03 +0900, Toshiaki Makita wrote:
> From: Toshiaki Makita <makita.toshiaki@....ntt.co.jp>
>
> All oversized packets including GSO packets are dropped if XDP is
> enabled on receiver side, so don't send such packets from peer.
>
> Drop TSO and SCTP fragmentation features so that veth devices themselves
> segment packets with XDP enabled. Also cap MTU accordingly.
>
> Signed-off-by: Toshiaki Makita <makita.toshiaki@....ntt.co.jp>
Is there any precedent for fixing up features and MTU like this? Most
drivers just refuse to install the program if settings are incompatible.
> diff --git a/drivers/net/veth.c b/drivers/net/veth.c
> index 78fa08cb6e24..f5b72e937d9d 100644
> --- a/drivers/net/veth.c
> +++ b/drivers/net/veth.c
> @@ -542,6 +542,23 @@ static int veth_get_iflink(const struct net_device *dev)
> return iflink;
> }
>
> +static netdev_features_t veth_fix_features(struct net_device *dev,
> + netdev_features_t features)
> +{
> + struct veth_priv *priv = netdev_priv(dev);
> + struct net_device *peer;
> +
> + peer = rtnl_dereference(priv->peer);
> + if (peer) {
> + struct veth_priv *peer_priv = netdev_priv(peer);
> +
> + if (peer_priv->_xdp_prog)
> + features &= ~NETIF_F_GSO_SOFTWARE;
> + }
> +
> + return features;
> +}
> +
> static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
> {
> struct veth_priv *peer_priv, *priv = netdev_priv(dev);
> @@ -591,14 +608,33 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
> goto err;
> }
> }
> +
> + if (!old_prog) {
> + peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
> + peer->max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
> + peer->hard_header_len -
> + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
> + if (peer->mtu > peer->max_mtu)
> + dev_set_mtu(peer, peer->max_mtu);
> + }
> }
>
> if (old_prog) {
> - if (!prog && dev->flags & IFF_UP)
> - veth_disable_xdp(dev);
> + if (!prog) {
> + if (dev->flags & IFF_UP)
> + veth_disable_xdp(dev);
> +
> + if (peer) {
> + peer->hw_features |= NETIF_F_GSO_SOFTWARE;
> + peer->max_mtu = ETH_MAX_MTU;
> + }
> + }
> bpf_prog_put(old_prog);
> }
>
> + if ((!!old_prog ^ !!prog) && peer)
> + netdev_update_features(peer);
> +
> return 0;
> err:
> priv->_xdp_prog = old_prog;
> @@ -643,6 +679,7 @@ static const struct net_device_ops veth_netdev_ops = {
> .ndo_poll_controller = veth_poll_controller,
> #endif
> .ndo_get_iflink = veth_get_iflink,
> + .ndo_fix_features = veth_fix_features,
> .ndo_features_check = passthru_features_check,
> .ndo_set_rx_headroom = veth_set_rx_headroom,
> .ndo_bpf = veth_xdp,
Powered by blists - more mailing lists