Date:   Thu, 2 Jul 2020 01:02:26 +0200
From:   Daniel Borkmann <daniel@...earbox.net>
To:     Lorenzo Bianconi <lorenzo@...nel.org>, netdev@...r.kernel.org,
        bpf@...r.kernel.org
Cc:     davem@...emloft.net, ast@...nel.org, brouer@...hat.com,
        toke@...hat.com, lorenzo.bianconi@...hat.com, dsahern@...nel.org,
        andrii.nakryiko@...il.com
Subject: Re: [PATCH v5 bpf-next 5/9] bpf: cpumap: add the possibility to
 attach an eBPF program to cpumap

On 6/30/20 2:49 PM, Lorenzo Bianconi wrote:
[...]
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index 52d71525c2ff..0ac7b11302c2 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -226,6 +226,7 @@ enum bpf_attach_type {
>   	BPF_CGROUP_INET4_GETSOCKNAME,
>   	BPF_CGROUP_INET6_GETSOCKNAME,
>   	BPF_XDP_DEVMAP,
> +	BPF_XDP_CPUMAP,
>   	__MAX_BPF_ATTACH_TYPE
>   };
>   
> @@ -3819,6 +3820,10 @@ struct bpf_devmap_val {
>    */
>   struct bpf_cpumap_val {
>   	__u32 qsize;	/* queue size to remote target CPU */
> +	union {
> +		int   fd;	/* prog fd on map write */
> +		__u32 id;	/* prog id on map read */
> +	} bpf_prog;
>   };
>   
>   enum sk_action {
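
Side note for folks reading along: the intended user space flow would look
roughly like the sketch below. This is my illustration, not from the series;
map_fd/prog_fd stand for an already created cpumap and a loaded XDP prog,
and the includes/setup (<bpf/bpf.h> etc.) are omitted:

  struct bpf_cpumap_val val = {
          .qsize       = 192,      /* size of the per-CPU ptr_ring */
          .bpf_prog.fd = prog_fd,  /* prog loaded with expected_attach_type
                                    * BPF_XDP_CPUMAP, see below */
  };
  __u32 cpu = 2;

  /* Writes the fd; a later lookup reports the prog id instead. */
  bpf_map_update_elem(map_fd, &cpu, &val, 0);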
> diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
> index 7e8eec4f7089..32f627bfc67c 100644
> --- a/kernel/bpf/cpumap.c
> +++ b/kernel/bpf/cpumap.c
> @@ -67,6 +67,7 @@ struct bpf_cpu_map_entry {
>   	struct rcu_head rcu;
>   
>   	struct bpf_cpumap_val value;
> +	struct bpf_prog *prog;
>   };
>   
>   struct bpf_cpu_map {
> @@ -81,6 +82,7 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq);
>   
>   static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
>   {
> +	u32 value_size = attr->value_size;
>   	struct bpf_cpu_map *cmap;
>   	int err = -ENOMEM;
>   	u64 cost;
> @@ -91,7 +93,9 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
>   
>   	/* check sanity of attributes */
>   	if (attr->max_entries == 0 || attr->key_size != 4 ||
> -	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
> +	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
> +	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
> +	    attr->map_flags & ~BPF_F_NUMA_NODE)
>   		return ERR_PTR(-EINVAL);
>   
>   	cmap = kzalloc(sizeof(*cmap), GFP_USER);
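
For readers not familiar with offsetofend(): it is offsetof() plus the
member's size, roughly the below (my paraphrase, modulo the exact definition
in include/linux/stddef.h):

  #include <stddef.h>

  #define offsetofend(TYPE, MEMBER) \
          (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

  /* With struct bpf_cpumap_val from above:
   *   offsetofend(struct bpf_cpumap_val, qsize)       == 4 (legacy value)
   *   offsetofend(struct bpf_cpumap_val, bpf_prog.fd) == 8 (qsize + union)
   * so both the old and the extended value sizes pass the sanity check.
   */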
> @@ -221,6 +225,64 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
>   	}
>   }
>   
> +static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
> +				    void **frames, int n,
> +				    struct xdp_cpumap_stats *stats)
> +{
> +	struct xdp_rxq_info rxq;
> +	struct bpf_prog *prog;
> +	struct xdp_buff xdp;
> +	int i, nframes = 0;
> +
> +	if (!rcpu->prog)
> +		return n;
> +
> +	rcu_read_lock();
> +
> +	xdp_set_return_frame_no_direct();
> +	xdp.rxq = &rxq;
> +
> +	prog = READ_ONCE(rcpu->prog);

What purpose does the READ_ONCE() serve here, especially given that you don't use
it in the check above? Since upon map update you realloc, repopulate and then
xchg() the rcpu entry itself, there is never a case where you xchg() or
WRITE_ONCE() rcpu->prog, so what does READ_ONCE() buy in this context? Imho, it
should probably just be deleted and plain rcpu->prog used, to avoid confusion.

> +	for (i = 0; i < n; i++) {
> +		struct xdp_frame *xdpf = frames[i];
> +		u32 act;
> +		int err;
> +
> +		rxq.dev = xdpf->dev_rx;
> +		rxq.mem = xdpf->mem;
> +		/* TODO: report queue_index to xdp_rxq_info */
> +
> +		xdp_convert_frame_to_buff(xdpf, &xdp);
> +
> +		act = bpf_prog_run_xdp(prog, &xdp);
> +		switch (act) {
> +		case XDP_PASS:
> +			err = xdp_update_frame_from_buff(&xdp, xdpf);
> +			if (err < 0) {
> +				xdp_return_frame(xdpf);
> +				stats->drop++;
> +			} else {
> +				frames[nframes++] = xdpf;
> +				stats->pass++;
> +			}
> +			break;
> +		default:
> +			bpf_warn_invalid_xdp_action(act);
> +			/* fallthrough */
> +		case XDP_DROP:
> +			xdp_return_frame(xdpf);
> +			stats->drop++;
> +			break;
> +		}
> +	}
> +
> +	xdp_clear_return_frame_no_direct();
> +
> +	rcu_read_unlock();
> +
> +	return nframes;
> +}
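
For completeness, the BPF side this loop invokes is a regular XDP program
returning XDP_PASS/XDP_DROP; a minimal sketch could look as follows. The
SEC() string is purely illustrative here, the actual section naming is a
libbpf concern outside this patch:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("xdp_cpumap")
  int cpumap_filter(struct xdp_md *ctx)
  {
          void *data_end = (void *)(long)ctx->data_end;
          void *data = (void *)(long)ctx->data;

          /* Drop frames shorter than an Ethernet header, pass the rest
           * up the stack on this remote CPU.
           */
          if (data + 14 > data_end)
                  return XDP_DROP;

          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";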
[...]
> +bool cpu_map_prog_allowed(struct bpf_map *map)
> +{
> +	return map->map_type == BPF_MAP_TYPE_CPUMAP &&
> +	       map->value_size != offsetofend(struct bpf_cpumap_val, qsize);
> +}
> +
> +static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
> +{
> +	struct bpf_prog *prog;
> +
> +	prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, false);

Nit: why the _dev variant; just use bpf_prog_get_type()?

> +	if (IS_ERR(prog))
> +		return PTR_ERR(prog);
> +
> +	if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
> +		bpf_prog_put(prog);
> +		return -EINVAL;
> +	}
> +
> +	rcpu->value.bpf_prog.id = prog->aux->id;
> +	rcpu->prog = prog;
> +
> +	return 0;
> +}
> +
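
And one more usage note while at it: since the fd is translated into the
prog id on the update side (rcpu->value.bpf_prog.id above), a read of the
entry reports the id, along the lines of this sketch (setup omitted again):

  struct bpf_cpumap_val out = {};
  __u32 cpu = 2;

  if (!bpf_map_lookup_elem(map_fd, &cpu, &out))
          printf("cpu %u: qsize %u, prog id %u\n",
                 cpu, out.qsize, out.bpf_prog.id);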
