Message-ID: <20200424141908.GA6295@localhost.localdomain>
Date: Fri, 24 Apr 2020 16:19:08 +0200
From: Lorenzo Bianconi <lorenzo.bianconi@...hat.com>
To: Hangbin Liu <liuhangbin@...il.com>
Cc: bpf@...r.kernel.org, netdev@...r.kernel.org,
Toke Høiland-Jørgensen <toke@...hat.com>,
Jiri Benc <jbenc@...hat.com>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Eelco Chaudron <echaudro@...hat.com>, ast@...nel.org,
Daniel Borkmann <daniel@...earbox.net>
Subject: Re: [RFC PATCHv2 bpf-next 1/2] xdp: add a new helper for dev map
multicast support
> This is a prototype for XDP multicast support. In this implementation we
> add a new helper that accepts two maps: a forward map and an exclude map.
> We redirect the packet to all the interfaces in the *forward map*, but
> exclude the interfaces that are in the *exclude map*.
>
> To achieve this I add a new ex_map field to struct bpf_redirect_info.
> In the helper I set tgt_value to NULL to distinguish this case from
> bpf_xdp_redirect_map().
>
> We also add a flag *BPF_F_EXCLUDE_INGRESS* in case you don't want to
> create an exclude map for each interface and just want to exclude the
> ingress interface.
>
> The generic data path is kept in net/core/filter.c. The native data
> path is in kernel/bpf/devmap.c so we can use direct calls to
> get better performance.
>
> v2: add a new helper bpf_xdp_redirect_map_multi() which accepts
> include/exclude maps directly.
>
> Signed-off-by: Hangbin Liu <liuhangbin@...il.com>
> ---
> include/linux/bpf.h | 20 ++++++
> include/linux/filter.h | 1 +
> include/net/xdp.h | 1 +
> include/uapi/linux/bpf.h | 23 ++++++-
> kernel/bpf/devmap.c | 114 +++++++++++++++++++++++++++++++++
> kernel/bpf/verifier.c | 6 ++
> net/core/filter.c | 98 ++++++++++++++++++++++++++--
> net/core/xdp.c | 26 ++++++++
> tools/include/uapi/linux/bpf.h | 23 ++++++-
> 9 files changed, 305 insertions(+), 7 deletions(-)
>
[...]
> +{
> + switch (map->map_type) {
> + case BPF_MAP_TYPE_DEVMAP:
> + return dev_map_get_next_key(map, key, next_key);
> + case BPF_MAP_TYPE_DEVMAP_HASH:
> + return dev_map_hash_get_next_key(map, key, next_key);
> + default:
> + break;
> + }
> +
> + return -ENOENT;
> +}
> +
> +bool dev_in_exclude_map(struct bpf_dtab_netdev *obj, struct bpf_map *map,
> + int exclude_ifindex)
> +{
> + struct bpf_dtab_netdev *in_obj = NULL;
> + u32 key, next_key;
> + int err;
> +
> + if (!map)
> + return false;
Doing it this way, it seems mandatory to define an exclude map even if we just
want to avoid forwarding the packet back to the "ingress" interface.
Moreover, I was thinking we could assume we never forward to the incoming
interface; the code would be simpler that way, I guess. Is there a use case
for forwarding even to the ingress interface?
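Something like the reordering below would handle that case without a dummy
exclude map (just a sketch, not even compile tested):

	bool dev_in_exclude_map(struct bpf_dtab_netdev *obj, struct bpf_map *map,
				int exclude_ifindex)
	{
		/* check the ingress interface first, so that
		 * BPF_F_EXCLUDE_INGRESS works even when no exclude map
		 * has been provided
		 */
		if (obj->dev->ifindex == exclude_ifindex)
			return true;

		if (!map)
			return false;
		...
	}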
> +
> + if (obj->dev->ifindex == exclude_ifindex)
> + return true;
> +
> + devmap_get_next_key(map, NULL, &key);
> +
> + for (;;) {
> + switch (map->map_type) {
> + case BPF_MAP_TYPE_DEVMAP:
> + in_obj = __dev_map_lookup_elem(map, key);
> + break;
> + case BPF_MAP_TYPE_DEVMAP_HASH:
> + in_obj = __dev_map_hash_lookup_elem(map, key);
> + break;
> + default:
> + break;
> + }
> +
> + if (in_obj && in_obj->dev->ifindex == obj->dev->ifindex)
> + return true;
> +
> + err = devmap_get_next_key(map, &key, &next_key);
> +
> + if (err)
> + break;
> +
> + key = next_key;
> + }
> +
> + return false;
> +}
> +
> +int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
> + struct bpf_map *map, struct bpf_map *ex_map,
> + bool exclude_ingress)
> +{
> + struct bpf_dtab_netdev *obj = NULL;
> + struct xdp_frame *xdpf, *nxdpf;
> + struct net_device *dev;
> + u32 key, next_key;
> + int err;
> +
> + devmap_get_next_key(map, NULL, &key);
> +
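> +	/* convert the xdp_buff to an xdp_frame once, then clone the
> +	 * frame for each destination below
> +	 */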
> + xdpf = convert_to_xdp_frame(xdp);
> + if (unlikely(!xdpf))
> + return -EOVERFLOW;
> +
> + for (;;) {
> + switch (map->map_type) {
> + case BPF_MAP_TYPE_DEVMAP:
> + obj = __dev_map_lookup_elem(map, key);
> + break;
> + case BPF_MAP_TYPE_DEVMAP_HASH:
> + obj = __dev_map_hash_lookup_elem(map, key);
> + break;
> + default:
> + break;
> + }
> +
> + if (!obj || dev_in_exclude_map(obj, ex_map,
> + exclude_ingress ? dev_rx->ifindex : 0))
> + goto find_next;
> +
> + dev = obj->dev;
> +
> + if (!dev->netdev_ops->ndo_xdp_xmit)
> + return -EOPNOTSUPP;
> +
> + err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
> + if (unlikely(err))
> + return err;
> +
> + nxdpf = xdpf_clone(xdpf);
> + if (unlikely(!nxdpf))
> + return -ENOMEM;
> +
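> +		/* queue the clone on the destination's per-cpu bulk
> +		 * queue; it is flushed at the end of the NAPI poll via
> +		 * xdp_do_flush_map()
> +		 */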
> + bq_enqueue(dev, nxdpf, dev_rx);
> +
> +find_next:
> + err = devmap_get_next_key(map, &key, &next_key);
> + if (err)
> + break;
> + key = next_key;
> + }
Do we need to free the 'incoming' xdp buffer here? I think most drivers
assume the packet is owned by the stack if xdp_do_redirect() returns 0.
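E.g. something like this at the end of dev_map_enqueue_multi() (untested):

	/* the frame has been cloned to every destination above, so
	 * release the original one
	 */
	xdp_return_frame_rx_napi(xdpf);

	return 0;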
> +
> + return 0;
> +}
> +
> int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
> struct bpf_prog *xdp_prog)
> {
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 38cfcf701eeb..f77213a0e354 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -3880,6 +3880,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
> case BPF_MAP_TYPE_DEVMAP:
> case BPF_MAP_TYPE_DEVMAP_HASH:
> if (func_id != BPF_FUNC_redirect_map &&
> + func_id != BPF_FUNC_redirect_map_multi &&
> func_id != BPF_FUNC_map_lookup_elem)
> goto error;
> break;
> @@ -3970,6 +3971,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
> map->map_type != BPF_MAP_TYPE_XSKMAP)
> goto error;
> break;
> + case BPF_FUNC_redirect_map_multi:
> + if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
> + map->map_type != BPF_MAP_TYPE_DEVMAP_HASH)
> + goto error;
> + break;
> case BPF_FUNC_sk_redirect_map:
> case BPF_FUNC_msg_redirect_map:
> case BPF_FUNC_sock_map_update:
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 7d6ceaa54d21..94d1530e5ac6 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -3473,12 +3473,17 @@ static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
> };
>
> static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
> - struct bpf_map *map, struct xdp_buff *xdp)
> + struct bpf_map *map, struct xdp_buff *xdp,
> + struct bpf_map *ex_map, bool exclude_ingress)
> {
> switch (map->map_type) {
> case BPF_MAP_TYPE_DEVMAP:
> case BPF_MAP_TYPE_DEVMAP_HASH:
> - return dev_map_enqueue(fwd, xdp, dev_rx);
> + if (fwd)
> + return dev_map_enqueue(fwd, xdp, dev_rx);
> + else
> + return dev_map_enqueue_multi(xdp, dev_rx, map, ex_map,
> + exclude_ingress);
I guess it would be better not to make this the default case. Maybe you can
add a bit in the flags to mark the redirect as "multicast".
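Something along these lines, where BPF_F_MULTI_MAP is a hypothetical flag bit
(untested, the name is just an example):

	/* in bpf_xdp_redirect_map_multi() */
	ri->flags = flags | BPF_F_MULTI_MAP;

	/* in __bpf_tx_xdp_map(), with ri->flags passed down */
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
		if (flags & BPF_F_MULTI_MAP)
			return dev_map_enqueue_multi(xdp, dev_rx, map, ex_map,
						     exclude_ingress);
		return dev_map_enqueue(fwd, xdp, dev_rx);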
> case BPF_MAP_TYPE_CPUMAP:
> return cpu_map_enqueue(fwd, xdp, dev_rx);
> case BPF_MAP_TYPE_XSKMAP:
> @@ -3534,6 +3539,8 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
> struct bpf_prog *xdp_prog)
> {
> struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
> + bool exclude_ingress = !!(ri->flags & BPF_F_EXCLUDE_INGRESS);
> + struct bpf_map *ex_map = READ_ONCE(ri->ex_map);
> struct bpf_map *map = READ_ONCE(ri->map);
> u32 index = ri->tgt_index;
> void *fwd = ri->tgt_value;
> @@ -3552,7 +3559,7 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
>
> err = dev_xdp_enqueue(fwd, xdp, dev);
> } else {
> - err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
> + err = __bpf_tx_xdp_map(dev, fwd, map, xdp, ex_map, exclude_ingress);
> }
>
> if (unlikely(err))
> @@ -3566,6 +3573,49 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
> }
> EXPORT_SYMBOL_GPL(xdp_do_redirect);
>
> +static int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
> + struct bpf_prog *xdp_prog,
> + struct bpf_map *map, struct bpf_map *ex_map,
> + bool exclude_ingress)
> +{
> + struct bpf_dtab_netdev *dst;
> + struct sk_buff *nskb;
> + u32 key, next_key;
> + int err;
> + void *fwd;
> +
> + /* Get first key from forward map */
> + map->ops->map_get_next_key(map, NULL, &key);
> +
> + for (;;) {
> + fwd = __xdp_map_lookup_elem(map, key);
> + if (fwd) {
> + dst = (struct bpf_dtab_netdev *)fwd;
> + if (dev_in_exclude_map(dst, ex_map,
> + exclude_ingress ? dev->ifindex : 0))
> + goto find_next;
> +
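> +			/* every destination gets its own copy of the skb */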
> + nskb = skb_clone(skb, GFP_ATOMIC);
> + if (!nskb)
> +				return -ENOMEM;
> +
> + err = dev_map_generic_redirect(dst, nskb, xdp_prog);
> + if (unlikely(err))
> + return err;
> + }
> +
> +find_next:
> + err = map->ops->map_get_next_key(map, &key, &next_key);
> + if (err)
> + break;
> +
> + key = next_key;
> + }
> +
> + return 0;
> +}
> +
> static int xdp_do_generic_redirect_map(struct net_device *dev,
> struct sk_buff *skb,
> struct xdp_buff *xdp,
> @@ -3573,6 +3623,8 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
> struct bpf_map *map)
> {
> struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
> + bool exclude_ingress = !!(ri->flags & BPF_F_EXCLUDE_INGRESS);
> + struct bpf_map *ex_map = READ_ONCE(ri->ex_map);
> u32 index = ri->tgt_index;
> void *fwd = ri->tgt_value;
> int err = 0;
> @@ -3583,9 +3635,16 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
>
> if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
> map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
> - struct bpf_dtab_netdev *dst = fwd;
> + if (fwd) {
> + struct bpf_dtab_netdev *dst = fwd;
> +
> + err = dev_map_generic_redirect(dst, skb, xdp_prog);
> + } else {
> + /* Deal with multicast maps */
> + err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
> + ex_map, exclude_ingress);
> + }
>
> - err = dev_map_generic_redirect(dst, skb, xdp_prog);
> if (unlikely(err))
> goto err;
> } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
> @@ -3699,6 +3758,33 @@ static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
> .arg3_type = ARG_ANYTHING,
> };
>
> +BPF_CALL_3(bpf_xdp_redirect_map_multi, struct bpf_map *, map,
> + struct bpf_map *, ex_map, u64, flags)
> +{
> + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
> +
> + if (unlikely(!map || flags > BPF_F_EXCLUDE_INGRESS))
> + return XDP_ABORTED;
> +
> + ri->tgt_index = 0;
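> +	/* a NULL tgt_value makes xdp_do_redirect() take the multicast
> +	 * path in __bpf_tx_xdp_map()
> +	 */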
> + ri->tgt_value = NULL;
> + ri->flags = flags;
> +
> + WRITE_ONCE(ri->map, map);
> + WRITE_ONCE(ri->ex_map, ex_map);
> +
> + return XDP_REDIRECT;
> +}
> +
> +static const struct bpf_func_proto bpf_xdp_redirect_map_multi_proto = {
> + .func = bpf_xdp_redirect_map_multi,
> + .gpl_only = false,
> + .ret_type = RET_INTEGER,
> + .arg1_type = ARG_CONST_MAP_PTR,
> +	.arg2_type	= ARG_CONST_MAP_PTR,
> + .arg3_type = ARG_ANYTHING,
> +};
> +
> static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
> unsigned long off, unsigned long len)
> {
> @@ -6304,6 +6390,8 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> return &bpf_xdp_redirect_proto;
> case BPF_FUNC_redirect_map:
> return &bpf_xdp_redirect_map_proto;
> + case BPF_FUNC_redirect_map_multi:
> + return &bpf_xdp_redirect_map_multi_proto;
> case BPF_FUNC_xdp_adjust_tail:
> return &bpf_xdp_adjust_tail_proto;
> case BPF_FUNC_fib_lookup:
> diff --git a/net/core/xdp.c b/net/core/xdp.c
> index 4c7ea85486af..70dfb4910f84 100644
> --- a/net/core/xdp.c
> +++ b/net/core/xdp.c
> @@ -496,3 +496,29 @@ struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
> return xdpf;
> }
> EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
> +
> +struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
> +{
> + unsigned int headroom, totalsize;
> + struct xdp_frame *nxdpf;
> + struct page *page;
> + void *addr;
> +
> + headroom = xdpf->headroom + sizeof(*xdpf);
> + totalsize = headroom + xdpf->len;
> +
> + if (unlikely(totalsize > PAGE_SIZE))
> + return NULL;
> + page = dev_alloc_page();
> + if (!page)
> + return NULL;
> + addr = page_to_virt(page);
> +
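> +	/* the xdp_frame metadata sits at the head of the frame's
> +	 * headroom, so a single copy grabs the metadata, the headroom
> +	 * and the packet data
> +	 */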
> + memcpy(addr, xdpf, totalsize);
> +
> + nxdpf = addr;
> + nxdpf->data = addr + headroom;
> +
> + return nxdpf;
> +}
> +EXPORT_SYMBOL_GPL(xdpf_clone);
> diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> index 2e29a671d67e..1dbe42290223 100644
> --- a/tools/include/uapi/linux/bpf.h
> +++ b/tools/include/uapi/linux/bpf.h
> @@ -3025,6 +3025,21 @@ union bpf_attr {
> * * **-EOPNOTSUPP** Unsupported operation, for example a
> * call from outside of TC ingress.
> * * **-ESOCKTNOSUPPORT** Socket type not supported (reuseport).
> + *
> + * int bpf_redirect_map_multi(struct bpf_map *map, struct bpf_map *ex_map, u64 flags)
> + * Description
> + *		Redirect the packet to all the interfaces in *map*, and
> + *		exclude the interfaces that are in *ex_map*. The *ex_map* may
> + *		be NULL.
> + *
> + *		Currently *flags* only supports *BPF_F_EXCLUDE_INGRESS*,
> + *		which excludes redirecting back to the ingress device.
> + *
> + *		See also bpf_redirect_map(), which supports redirecting a
> + *		packet to a specific ifindex in the map.
> + * Return
> + * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
> + *
> */
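FWIW, a minimal user of the new helper would look something like the program
below (map names and sizes are arbitrary, just a sketch):

	struct {
		__uint(type, BPF_MAP_TYPE_DEVMAP);
		__uint(max_entries, 32);
		__type(key, __u32);
		__type(value, __u32);
	} forward_map SEC(".maps");

	struct {
		__uint(type, BPF_MAP_TYPE_DEVMAP);
		__uint(max_entries, 32);
		__type(key, __u32);
		__type(value, __u32);
	} exclude_map SEC(".maps");

	SEC("xdp")
	int xdp_multi(struct xdp_md *ctx)
	{
		/* flood to every device in forward_map, skipping the
		 * entries in exclude_map and the ingress interface
		 */
		return bpf_redirect_map_multi(&forward_map, &exclude_map,
					      BPF_F_EXCLUDE_INGRESS);
	}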
> #define __BPF_FUNC_MAPPER(FN) \
> FN(unspec), \
> @@ -3151,7 +3166,8 @@ union bpf_attr {
> FN(xdp_output), \
> FN(get_netns_cookie), \
> FN(get_current_ancestor_cgroup_id), \
> - FN(sk_assign),
> + FN(sk_assign), \
> + FN(redirect_map_multi),
>
> /* integer value in 'imm' field of BPF_CALL instruction selects which helper
> * function eBPF program intends to call
> @@ -3280,6 +3296,11 @@ enum bpf_lwt_encap_mode {
> BPF_LWT_ENCAP_IP,
> };
>
> +/* BPF_FUNC_redirect_map_multi flags. */
> +enum {
> + BPF_F_EXCLUDE_INGRESS = (1ULL << 0),
> +};
> +
> #define __bpf_md_ptr(type, name) \
> union { \
> type name; \
> --
> 2.19.2
>