Message-ID: <20191023004501.4a78c300@carbon>
Date: Wed, 23 Oct 2019 00:45:01 +0200
From: Jesper Dangaard Brouer <brouer@...hat.com>
To: Charles McLachlan <cmclachlan@...arflare.com>
Cc: <davem@...emloft.net>, <netdev@...r.kernel.org>,
<linux-net-drivers@...arflare.com>, brouer@...hat.com
Subject: Re: [PATCH net-next 2/6] sfc: perform XDP processing on received packets.
On Tue, 22 Oct 2019 16:38:27 +0100
Charles McLachlan <cmclachlan@...arflare.com> wrote:
> +/** efx_do_xdp: perform XDP processing on a received packet
> + *
> + * Returns true if packet should still be delivered.
> + */
> +static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
> +                       struct efx_rx_buffer *rx_buf, u8 **ehp)
> +{
> +        u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
> +        struct efx_rx_queue *rx_queue;
> +        struct bpf_prog *xdp_prog;
> +        struct xdp_buff xdp;
> +        u32 xdp_act;
> +        s16 offset;
> +        int rc;
> +
> +        rcu_read_lock();
> +        xdp_prog = rcu_dereference(efx->xdp_prog);
> +        if (!xdp_prog) {
> +                rcu_read_unlock();
> +                return true;
> +        }
> +
> +        rx_queue = efx_channel_get_rx_queue(channel);
> +
> +        if (unlikely(channel->rx_pkt_n_frags > 1)) {
> +                /* We can't do XDP on fragmented packets - drop. */
> +                rcu_read_unlock();
> +                efx_free_rx_buffers(rx_queue, rx_buf,
> +                                    channel->rx_pkt_n_frags);
> +                if (net_ratelimit())
> +                        netif_err(efx, rx_err, efx->net_dev,
> +                                  "XDP is not possible with multiple receive fragments (%d)\n",
> +                                  channel->rx_pkt_n_frags);
> +                return false;
> +        }
> +
> +        dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
> +                                rx_buf->len, DMA_FROM_DEVICE);
> +
> +        /* Save the rx prefix. */
> +        EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
> +        memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
> +               efx->rx_prefix_size);
> +
> +        xdp.data = *ehp;
> +        xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
> +
> +        /* No support yet for XDP metadata */
> +        xdp_set_data_meta_invalid(&xdp);
> +        xdp.data_end = xdp.data + rx_buf->len;
> +        xdp.rxq = &rx_queue->xdp_rxq_info;
> +
> +        xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
> +        rcu_read_unlock();
> +
> +        offset = (u8 *)xdp.data - *ehp;
> +
> +        switch (xdp_act) {
> +        case XDP_PASS:
> +                /* Fix up rx prefix. */
> +                if (offset) {
> +                        *ehp += offset;
> +                        rx_buf->page_offset += offset;
> +                        rx_buf->len -= offset;
> +                        memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
> +                               efx->rx_prefix_size);
> +                }
> +                break;
> +
> +        case XDP_TX:
> +                return -EOPNOTSUPP;
> +
> +        case XDP_REDIRECT:
> +                rc = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
> +                if (rc) {
Can we call the 'rc' variable 'err' instead?
And give it an unlikely()? See the (untested) sketch below the quoted hunk.
> +                        efx_free_rx_buffers(rx_queue, rx_buf, 1);
> +                        if (net_ratelimit())
> +                                netif_err(efx, rx_err, efx->net_dev,
> +                                          "XDP redirect failed (%d)\n", rc);
> +                }
> +                break;
> +
> +        default:
> +                bpf_warn_invalid_xdp_action(xdp_act);
> +                /* Fall through */
> +        case XDP_ABORTED:
> +                efx_free_rx_buffers(rx_queue, rx_buf, 1);
> +                break;
> +
> +        case XDP_DROP:
> +                efx_free_rx_buffers(rx_queue, rx_buf, 1);
> +                break;
> +        }
> +
> +        return xdp_act == XDP_PASS;
> +}
> +
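
For the XDP_REDIRECT error handling above, something along these lines is
what I had in mind (completely untested, just to illustrate the rc -> err
rename plus the unlikely() annotation; the local declaration would also
become "int err;"):

        case XDP_REDIRECT:
                err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
                if (unlikely(err)) {
                        /* Redirect failed: drop the buffer and rate-limit the log */
                        efx_free_rx_buffers(rx_queue, rx_buf, 1);
                        if (net_ratelimit())
                                netif_err(efx, rx_err, efx->net_dev,
                                          "XDP redirect failed (%d)\n", err);
                }
                break;

The unlikely() is only a branch-prediction hint; the point is to keep the
redirect-failure handling out of the RX fast path.
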
--
Best regards,
Jesper Dangaard Brouer
MSc.CS, Principal Kernel Engineer at Red Hat
LinkedIn: http://www.linkedin.com/in/brouer