Date:   Tue, 19 Nov 2019 15:39:37 -0800
From:   Jakub Kicinski <jakub.kicinski@...ronome.com>
To:     <sameehj@...zon.com>
Cc:     <davem@...emloft.net>, <netdev@...r.kernel.org>, <dwmw@...zon.com>,
        <zorik@...zon.com>, <matua@...zon.com>, <saeedb@...zon.com>,
        <msw@...zon.com>, <aliguori@...zon.com>, <nafea@...zon.com>,
        <gtzalik@...zon.com>, <netanel@...zon.com>, <alisaidi@...zon.com>,
        <benh@...zon.com>, <akiyano@...zon.com>
Subject: Re: [PATCH V2 net-next v2 1/3] net: ena: implement XDP drop support

On Tue, 19 Nov 2019 15:34:17 +0200, sameehj@...zon.com wrote:
> From: Sameeh Jubran <sameehj@...zon.com>
> 
> This commit implements the basic functionality of drop/pass logic in the
> ena driver.
> 
> Signed-off-by: Sameeh Jubran <sameehj@...zon.com>
> ---
>  drivers/net/ethernet/amazon/ena/ena_netdev.c | 148 +++++++++++++++++--
>  drivers/net/ethernet/amazon/ena/ena_netdev.h |  30 ++++
>  2 files changed, 168 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
> index d46a91200..35f766d9c 100644
> --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
> +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
> @@ -35,8 +35,8 @@
>  #ifdef CONFIG_RFS_ACCEL
>  #include <linux/cpu_rmap.h>
>  #endif /* CONFIG_RFS_ACCEL */
> +#include <linux/bpf_trace.h>
>  #include <linux/ethtool.h>
> -#include <linux/if_vlan.h>
>  #include <linux/kernel.h>
>  #include <linux/module.h>
>  #include <linux/numa.h>
> @@ -123,6 +123,80 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
>  	return ret;
>  }
>  
> +static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
> +{
> +	struct bpf_prog *xdp_prog;
> +	u32 verdict = XDP_PASS;
> +
> +	rcu_read_lock();
> +	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
> +
> +	if (!xdp_prog)
> +		goto out;
> +
> +	verdict = bpf_prog_run_xdp(xdp_prog, xdp);
> +
> +	if (unlikely(verdict == XDP_ABORTED))
> +		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
> +	else if (unlikely(verdict >= XDP_TX))
> +		bpf_warn_invalid_xdp_action(verdict);
> +out:
> +	rcu_read_unlock();
> +	return verdict;
> +}
> +
> +static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
> +{
> +	struct ena_adapter *adapter = netdev_priv(netdev);
> +	struct bpf_prog *prog = bpf->prog;
> +	struct bpf_prog *old_bpf_prog;
> +	int i, prev_mtu;
> +
> +	if (ena_xdp_allowed(adapter)) {
> +		old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
> +
> +		for (i = 0; i < adapter->num_io_queues; i++)
> +			xchg(&adapter->rx_ring[i].xdp_bpf_prog, prog);
> +
> +		if (old_bpf_prog)
> +			bpf_prog_put(old_bpf_prog);
> +
> +		prev_mtu = netdev->max_mtu;
> +		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
> +		netif_info(adapter, drv, adapter->netdev, "xdp program set, changing the max_mtu from %d to %d",
> +			   prev_mtu, netdev->max_mtu);
> +
> +	} else {
> +		netif_err(adapter, drv, adapter->netdev, "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
> +			  netdev->mtu, ENA_XDP_MAX_MTU);
> +		NL_SET_ERR_MSG_MOD(bpf->extack, "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
> + * program as well as to query the current xdp program id.
> + */
> +static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
> +{
> +	struct ena_adapter *adapter = netdev_priv(netdev);
> +
> +	switch (bpf->command) {
> +	case XDP_SETUP_PROG:
> +		return ena_xdp_set(netdev, bpf);
> +	case XDP_QUERY_PROG:
> +		bpf->prog_id = adapter->xdp_bpf_prog ?
> +			adapter->xdp_bpf_prog->aux->id : 0;
> +		break;
> +	default:
> +		NL_SET_ERR_MSG_MOD(bpf->extack, "Unsupported XDP command");

Please remove this and silently ignore unsupported commands.
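
In other words the default arm would shrink to something along these
lines (sketch only, assuming -EINVAL is still the right return value
for an unknown command, just without the extack message):

	default:
		return -EINVAL;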

> +		return -EINVAL;
> +	}
> +	return 0;
> +}
> +
>  static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
>  {
>  #ifdef CONFIG_RFS_ACCEL
> @@ -417,6 +491,9 @@ static void ena_free_rx_resources(struct ena_adapter *adapter,
>  
>  	vfree(rx_ring->free_ids);
>  	rx_ring->free_ids = NULL;
> +
> +	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
> +	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
>  }
>  
>  /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
> @@ -1037,6 +1114,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
>  	}
>  }
>  
> +int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
> +{
> +	struct ena_rx_buffer *rx_info =
> +		&rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];

Please add an empty line between the variable declarations and the code.

Also, what's the point of the inline init of this variable if you have
to break it over multiple lines anyway?
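
Something like this would read better (sketch of just the top of the
function, rest unchanged):

	int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
	{
		struct ena_rx_buffer *rx_info;

		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];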

> +	xdp->data = page_address(rx_info->page) +
> +		rx_info->page_offset;

How much space does this guarantee? From a quick grep it looks like
page_offset is always 0? We'd like to have 256 bytes of space for the
frame to grow for XDP. I see you subtract XDP_PACKET_HEADROOM from the
MTU, but I don't see it used otherwise.
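
For reference, with the headroom actually reserved in front of the
frame the xdp_buff setup would look roughly like this (illustration
only, frame_len standing in for the received length, and it of course
requires the RX buffer to be posted at that offset in the first place):

	xdp->data_hard_start = page_address(rx_info->page);
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + frame_len;

That way the program gets XDP_PACKET_HEADROOM (256) bytes between
data_hard_start and data to push headers into.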

> +	xdp->data_meta = xdp->data;
> +	xdp->data_hard_start = page_address(rx_info->page);
> +	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
> +	/* If for some reason we received a bigger packet than
> +	 * we expect, then we simply drop it
> +	 */
> +	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
> +		return XDP_DROP;
> +	else
> +		return ena_xdp_execute(rx_ring, xdp);
> +}
>  /* ena_clean_rx_irq - Cleanup RX irq
>   * @rx_ring: RX ring to clean
>   * @napi: napi handler
