Message-ID: <87v967rznw.fsf@toke.dk>
Date:   Mon, 21 Jun 2021 17:43:47 +0200
From:   Toke Høiland-Jørgensen <toke@...hat.com>
To:     Kumar Kartikeya Dwivedi <memxor@...il.com>, netdev@...r.kernel.org
Cc:     Kumar Kartikeya Dwivedi <memxor@...il.com>,
        Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Andrii Nakryiko <andrii@...nel.org>,
        Jesper Dangaard Brouer <brouer@...hat.com>,
        "David S. Miller" <davem@...emloft.net>,
        Jakub Kicinski <kuba@...nel.org>,
        John Fastabend <john.fastabend@...il.com>,
        Martin KaFai Lau <kafai@...com>, bpf@...r.kernel.org
Subject: Re: [PATCH net-next 2/4] net: implement generic cpumap

Kumar Kartikeya Dwivedi <memxor@...il.com> writes:

> This change implements CPUMAP redirect support for generic XDP programs.
> The idea is to reuse the cpu map entry's queue, which is used to push
> native xdp frames, for redirecting skbs to a different CPU. This matches
> native XDP behavior (in that RPS is invoked again for packets reinjected
> into the networking stack).
>
> To be able to determine whether the incoming skb is from the driver or
> cpumap, we reuse the skb->redirected bit, which skips generic XDP
> processing when set. To make this bit always available, the
> CONFIG_NET_REDIRECT guard on it has been lifted.
>
> From the redirect side, we add the skb to the ptr_ring with its lowest
> bit set to 1. This should be safe, as skb pointers are never 1-byte
> aligned, so the lowest bit is free to use as a tag. This allows the
> kthread to distinguish sk_buffs from xdp_frames. On consumption of the
> ptr_ring item, the lowest bit is cleared again.
>
> In the end, the skb is simply added to the list that the kthread already
> maintains for xdp_frames converted to skbs, and is then received again
> via netif_receive_skb_list.
>
> The bulking optimization for generic cpumap is left to a future patch.
>
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@...il.com>
> ---
>  include/linux/bpf.h    |   8 +++
>  include/linux/skbuff.h |  10 +--
>  kernel/bpf/cpumap.c    | 151 +++++++++++++++++++++++++++++++++++++----
>  net/core/filter.c      |   6 +-
>  4 files changed, 154 insertions(+), 21 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index f309fc1509f2..46e6587d3ee6 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -1513,6 +1513,8 @@ bool dev_map_can_have_prog(struct bpf_map *map);
>  void __cpu_map_flush(void);
>  int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
>  		    struct net_device *dev_rx);
> +int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
> +			     struct sk_buff *skb);
>  bool cpu_map_prog_allowed(struct bpf_map *map);
>  
>  /* Return map's numa specified by userspace */
> @@ -1710,6 +1712,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
>  	return 0;
>  }
>  
> +static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
> +					   struct sk_buff *skb)
> +{
> +	return -EOPNOTSUPP;
> +}
> +
>  static inline bool cpu_map_prog_allowed(struct bpf_map *map)
>  {
>  	return false;
> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> index b2db9cd9a73f..f19190820e63 100644
> --- a/include/linux/skbuff.h
> +++ b/include/linux/skbuff.h
> @@ -863,8 +863,8 @@ struct sk_buff {
>  	__u8			tc_skip_classify:1;
>  	__u8			tc_at_ingress:1;
>  #endif
> -#ifdef CONFIG_NET_REDIRECT
>  	__u8			redirected:1;
> +#ifdef CONFIG_NET_REDIRECT
>  	__u8			from_ingress:1;
>  #endif
>  #ifdef CONFIG_TLS_DEVICE
> @@ -4664,17 +4664,13 @@ static inline __wsum lco_csum(struct sk_buff *skb)
>  
>  static inline bool skb_is_redirected(const struct sk_buff *skb)
>  {
> -#ifdef CONFIG_NET_REDIRECT
>  	return skb->redirected;
> -#else
> -	return false;
> -#endif
>  }
>  
>  static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
>  {
> -#ifdef CONFIG_NET_REDIRECT
>  	skb->redirected = 1;
> +#ifdef CONFIG_NET_REDIRECT
>  	skb->from_ingress = from_ingress;
>  	if (skb->from_ingress)
>  		skb->tstamp = 0;
> @@ -4683,9 +4679,7 @@ static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
>  
>  static inline void skb_reset_redirect(struct sk_buff *skb)
>  {
> -#ifdef CONFIG_NET_REDIRECT
>  	skb->redirected = 0;
> -#endif
>  }
>  
>  static inline bool skb_csum_is_sctp(struct sk_buff *skb)
> diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
> index a1a0c4e791c6..f016daf8fdcc 100644
> --- a/kernel/bpf/cpumap.c
> +++ b/kernel/bpf/cpumap.c
> @@ -16,6 +16,7 @@
>   * netstack, and assigning dedicated CPUs for this stage.  This
>   * basically allows for 10G wirespeed pre-filtering via bpf.
>   */
> +#include <linux/bitops.h>
>  #include <linux/bpf.h>
>  #include <linux/filter.h>
>  #include <linux/ptr_ring.h>
> @@ -79,6 +80,29 @@ struct bpf_cpu_map {
>  
>  static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
>  
> +static void *__ptr_set_bit(void *ptr, int bit)
> +{
> +	unsigned long __ptr = (unsigned long)ptr;
> +
> +	__ptr |= BIT(bit);
> +	return (void *)__ptr;
> +}
> +
> +static void *__ptr_clear_bit(void *ptr, int bit)
> +{
> +	unsigned long __ptr = (unsigned long)ptr;
> +
> +	__ptr &= ~BIT(bit);
> +	return (void *)__ptr;
> +}
> +
> +static int __ptr_test_bit(void *ptr, int bit)
> +{
> +	unsigned long __ptr = (unsigned long)ptr;
> +
> +	return __ptr & BIT(bit);
> +}

Why not put these into bitops.h instead?
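Something like this (a rough, untested sketch; the placement and the
un-prefixed names are just a suggestion) could then be shared with other
users of pointer tagging:

/* Sketch: generic pointer-tagging helpers that could live in
 * include/linux/bitops.h instead of being private to cpumap.
 * Relies on BIT() from linux/bits.h, which bitops.h already pulls in.
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	return (void *)((unsigned long)ptr | BIT(bit));
}

static inline void *ptr_clear_bit(void *ptr, int bit)
{
	return (void *)((unsigned long)ptr & ~BIT(bit));
}

static inline bool ptr_test_bit(const void *ptr, int bit)
{
	return !!((unsigned long)ptr & BIT(bit));
}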

>  static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
>  {
>  	u32 value_size = attr->value_size;
> @@ -168,6 +192,64 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
>  	}
>  }
>  
> +static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
> +				    void **frames, int skb_n,
> +				    struct xdp_cpumap_stats *stats,
> +				    struct list_head *listp)
> +{
> +	struct xdp_rxq_info rxq = {};
> +	struct xdp_buff xdp;
> +	int err, i;
> +	u32 act;
> +
> +	xdp.rxq = &rxq;
> +
> +	if (!rcpu->prog)
> +		goto insert;
> +
> +	for (i = 0; i < skb_n; i++) {
> +		struct sk_buff *skb = frames[i];
> +
> +		rxq.dev = skb->dev;
> +
> +		act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog);
> +		switch (act) {
> +		case XDP_PASS:
> +			list_add_tail(&skb->list, listp);
> +			break;
> +		case XDP_REDIRECT:
> +			err = xdp_do_generic_redirect(skb->dev, skb, &xdp,
> +						      rcpu->prog);
> +			if (unlikely(err)) {
> +				kfree_skb(skb);
> +				stats->drop++;
> +			} else {
> +				stats->redirect++;
> +			}
> +			return;
> +		default:
> +			bpf_warn_invalid_xdp_action(act);
> +			fallthrough;
> +		case XDP_ABORTED:
> +			trace_xdp_exception(skb->dev, rcpu->prog, act);
> +			fallthrough;
> +		case XDP_DROP:
> +			kfree_skb(skb);
> +			stats->drop++;
> +			return;
> +		}
> +	}
> +
> +	return;
> +
> +insert:
> +	for (i = 0; i < skb_n; i++) {
> +		struct sk_buff *skb = frames[i];
> +
> +		list_add_tail(&skb->list, listp);
> +	}
> +}
> +
>  static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
>  				    void **frames, int n,
>  				    struct xdp_cpumap_stats *stats)
> @@ -179,8 +261,6 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
>  	if (!rcpu->prog)
>  		return n;
>  
> -	rcu_read_lock_bh();
> -
>  	xdp_set_return_frame_no_direct();
>  	xdp.rxq = &rxq;
>  
> @@ -227,17 +307,36 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
>  		}
>  	}
>  
> +	xdp_clear_return_frame_no_direct();
> +
> +	return nframes;
> +}
> +
> +#define CPUMAP_BATCH 8
> +
> +static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu,
> +				void **frames, int xdp_n, int skb_n,
> +				struct xdp_cpumap_stats *stats,
> +				struct list_head *list)
> +{
> +	int nframes;
> +
> +	rcu_read_lock_bh();
> +
> +	nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);
> +
>  	if (stats->redirect)
> -		xdp_do_flush_map();
> +		xdp_do_flush();
>  
> -	xdp_clear_return_frame_no_direct();
> +	if (unlikely(skb_n))
> +		cpu_map_bpf_prog_run_skb(rcpu, frames + CPUMAP_BATCH, skb_n,
> +					 stats, list);
>  
> -	rcu_read_unlock_bh(); /* resched point, may call do_softirq() */
> +	rcu_read_unlock_bh();
>  
>  	return nframes;
>  }
>  
> -#define CPUMAP_BATCH 8
>  
>  static int cpu_map_kthread_run(void *data)
>  {
> @@ -254,9 +353,9 @@ static int cpu_map_kthread_run(void *data)
>  		struct xdp_cpumap_stats stats = {}; /* zero stats */
>  		unsigned int kmem_alloc_drops = 0, sched = 0;
>  		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
> -		void *frames[CPUMAP_BATCH];
> +		int i, n, m, nframes, xdp_n, skb_n;
> +		void *frames[CPUMAP_BATCH * 2];

This double-sized array thing is clever, but it hurts readability. You'd
get basically the same code by having them as two separate arrays and
passing in two separate pointers to cpu_map_bpf_prog_run().

Or you could even just use 'list' - you're passing in that anyway, just
to have cpu_map_bpf_prog_run_skb() add the skbs to it; so why not just
add them right here in the caller, and have cpu_map_bpf_prog_run_skb()
remove them again if the rcpu prog doesn't return XDP_PASS?
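
For the first variant, a rough sketch (completely untested, just to
illustrate the calling convention) would be to declare
'void *frames[CPUMAP_BATCH]; void *skbs[CPUMAP_BATCH];' in the kthread
and have the helper take the two arrays separately:

/* Sketch: same body as in your patch, but skbs arrive in their own
 * array instead of the upper half of a double-sized frames[] array.
 */
static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu,
				void **frames, int xdp_n,
				void **skbs, int skb_n,
				struct xdp_cpumap_stats *stats,
				struct list_head *list)
{
	int nframes;

	rcu_read_lock_bh();

	nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);

	if (stats->redirect)
		xdp_do_flush();

	if (unlikely(skb_n))
		cpu_map_bpf_prog_run_skb(rcpu, skbs, skb_n, stats, list);

	rcu_read_unlock_bh();

	return nframes;
}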

-Toke
