Date:   Sun, 7 Oct 2018 22:15:18 -0500
From:   Mauricio Vasquez <mauricio.vasquez@...ito.it>
To:     Alexei Starovoitov <alexei.starovoitov@...il.com>
Cc:     Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>, netdev@...r.kernel.org
Subject: Re: [RFC PATCH bpf-next v4 4/7] bpf: add bpf queue and stack maps



On 10/04/2018 10:40 PM, Mauricio Vasquez wrote:
>
>
> On 10/04/2018 06:57 PM, Alexei Starovoitov wrote:
>> On Thu, Oct 04, 2018 at 07:12:44PM +0200, Mauricio Vasquez B wrote:
>>> Implement two new kinds of maps that support the peek, push and pop
>>> operations.
>>>
>>> A use case for this is to keep track of a pool of elements, like
>>> network ports in a SNAT.
>>>
>>> Signed-off-by: Mauricio Vasquez B <mauricio.vasquez@...ito.it>
>>> ---
>>>   include/linux/bpf.h           |    7 +
>>>   include/linux/bpf_types.h     |    2
>>>   include/uapi/linux/bpf.h      |   35 ++++-
>>>   kernel/bpf/Makefile           |    2
>>>   kernel/bpf/core.c             |    3
>>>   kernel/bpf/helpers.c          |   43 ++++++
>>>   kernel/bpf/queue_stack_maps.c |  300 +++++++++++++++++++++++++++++++++++++++++
>>>   kernel/bpf/syscall.c          |   31 +++-
>>>   kernel/bpf/verifier.c         |   14 +-
>>>   net/core/filter.c             |    6 +
>>>   10 files changed, 424 insertions(+), 19 deletions(-)
>>>   create mode 100644 kernel/bpf/queue_stack_maps.c
>>>
>>> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
>>> index 98c7eeb6d138..cad3bc5cffd1 100644
>>> --- a/include/linux/bpf.h
>>> +++ b/include/linux/bpf.h
>>> @@ -40,6 +40,9 @@ struct bpf_map_ops {
>>>       int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
>>>       int (*map_delete_elem)(struct bpf_map *map, void *key);
>>>       void *(*map_lookup_and_delete_elem)(struct bpf_map *map, void *key);
>>> +    int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
>>> +    int (*map_pop_elem)(struct bpf_map *map, void *value);
>>> +    int (*map_peek_elem)(struct bpf_map *map, void *value);
>>>         /* funcs called by prog_array and perf_event_array map */
>>>       void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
>>> @@ -139,6 +142,7 @@ enum bpf_arg_type {
>>>       ARG_CONST_MAP_PTR,    /* const argument used as pointer to bpf_map */
>>>       ARG_PTR_TO_MAP_KEY,    /* pointer to stack used as map key */
>>>       ARG_PTR_TO_MAP_VALUE,    /* pointer to stack used as map value */
>>> +    ARG_PTR_TO_UNINIT_MAP_VALUE,    /* pointer to valid memory used to store a map value */
>>>         /* the following constraints used to prototype bpf_memcmp() and other
>>>        * functions that access data on eBPF program stack
>>> @@ -825,6 +829,9 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
>>>   extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
>>>   extern const struct bpf_func_proto bpf_map_update_elem_proto;
>>>   extern const struct bpf_func_proto bpf_map_delete_elem_proto;
>>> +extern const struct bpf_func_proto bpf_map_push_elem_proto;
>>> +extern const struct bpf_func_proto bpf_map_pop_elem_proto;
>>> +extern const struct bpf_func_proto bpf_map_peek_elem_proto;
>>>     extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
>>>   extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
>>> diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
>>> index 658509daacd4..a2ec73aa1ec7 100644
>>> --- a/include/linux/bpf_types.h
>>> +++ b/include/linux/bpf_types.h
>>> @@ -69,3 +69,5 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
>>>   BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
>>>   #endif
>>>   #endif
>>> +BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
>>> +BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
>>> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
>>> index 3bb94aa2d408..bfa042273fad 100644
>>> --- a/include/uapi/linux/bpf.h
>>> +++ b/include/uapi/linux/bpf.h
>>> @@ -129,6 +129,8 @@ enum bpf_map_type {
>>>       BPF_MAP_TYPE_CGROUP_STORAGE,
>>>       BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
>>>       BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
>>> +    BPF_MAP_TYPE_QUEUE,
>>> +    BPF_MAP_TYPE_STACK,
>>>   };
>>>     enum bpf_prog_type {
>>> @@ -463,6 +465,28 @@ union bpf_attr {
>>>    *     Return
>>>    *         0 on success, or a negative error in case of failure.
>>>    *
>>> + * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
>>> + *     Description
>>> + *         Push an element *value* into *map*. *flags* is one of:
>>> + *
>>> + *         **BPF_EXIST**
>>> + *         If the queue/stack is full, the oldest element is removed to
>>> + *         make room for this.
>>> + *     Return
>>> + *         0 on success, or a negative error in case of failure.
>>> + *
>>> + * int bpf_map_pop_elem(struct bpf_map *map, void *value)
>>> + *     Description
>>> + *         Pop an element from *map*.
>>> + *     Return
>>> + *         0 on success, or a negative error in case of failure.
>>> + *
>>> + * int bpf_map_peek_elem(struct bpf_map *map, void *value)
>>> + *     Description
>>> + *         Get an element from *map* without removing it.
>>> + *     Return
>>> + *         0 on success, or a negative error in case of failure.
>>> + *
>>>    * int bpf_probe_read(void *dst, u32 size, const void *src)
>>>    *     Description
>>>    *         For tracing programs, safely attempt to read *size* bytes from
>>> @@ -790,14 +814,14 @@ union bpf_attr {
>>>    *
>>>    *             int ret;
>>>    *             struct bpf_tunnel_key key = {};
>>> - *
>>> + *
>>>    *             ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
>>>    *             if (ret < 0)
>>>    *                 return TC_ACT_SHOT;    // drop packet
>>> - *
>>> + *
>>>    *             if (key.remote_ipv4 != 0x0a000001)
>>>    *                 return TC_ACT_SHOT;    // drop packet
>>> - *
>>> + *
>>>    *             return TC_ACT_OK;        // accept packet
>>>    *
>>>    *         This interface can also be used with all encapsulation devices
>>> @@ -2304,7 +2328,10 @@ union bpf_attr {
>>>       FN(skb_ancestor_cgroup_id),    \
>>>       FN(sk_lookup_tcp),        \
>>>       FN(sk_lookup_udp),        \
>>> -    FN(sk_release),
>>> +    FN(sk_release),            \
>>> +    FN(map_push_elem),        \
>>> +    FN(map_pop_elem),        \
>>> +    FN(map_peek_elem),
>>>     /* integer value in 'imm' field of BPF_CALL instruction selects which helper
>>>    * function eBPF program intends to call
>>> diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
>>> index 0488b8258321..17afae9e65f3 100644
>>> --- a/kernel/bpf/Makefile
>>> +++ b/kernel/bpf/Makefile
>>> @@ -3,7 +3,7 @@ obj-y := core.o
>>>     obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
>>>   obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
>>> -obj-$(CONFIG_BPF_SYSCALL) += local_storage.o
>>> +obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o
>>>   obj-$(CONFIG_BPF_SYSCALL) += disasm.o
>>>   obj-$(CONFIG_BPF_SYSCALL) += btf.o
>>>   ifeq ($(CONFIG_NET),y)
>>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>>> index 3f5bf1af0826..8d2db076d123 100644
>>> --- a/kernel/bpf/core.c
>>> +++ b/kernel/bpf/core.c
>>> @@ -1783,6 +1783,9 @@ BPF_CALL_0(bpf_user_rnd_u32)
>>>   const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
>>>   const struct bpf_func_proto bpf_map_update_elem_proto __weak;
>>>   const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
>>> +const struct bpf_func_proto bpf_map_push_elem_proto __weak;
>>> +const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
>>> +const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
>>>     const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
>>>   const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
>>> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
>>> index 6502115e8f55..ab0d5e3f9892 100644
>>> --- a/kernel/bpf/helpers.c
>>> +++ b/kernel/bpf/helpers.c
>>> @@ -76,6 +76,49 @@ const struct bpf_func_proto bpf_map_delete_elem_proto = {
>>>       .arg2_type    = ARG_PTR_TO_MAP_KEY,
>>>   };
>>>   +BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
>>> +{
>>> +    return map->ops->map_push_elem(map, value, flags);
>>> +}
>>> +
>>> +const struct bpf_func_proto bpf_map_push_elem_proto = {
>>> +    .func        = bpf_map_push_elem,
>>> +    .gpl_only    = false,
>>> +    .pkt_access    = true,
>>> +    .ret_type    = RET_INTEGER,
>>> +    .arg1_type    = ARG_CONST_MAP_PTR,
>>> +    .arg2_type    = ARG_PTR_TO_MAP_VALUE,
>>> +    .arg3_type    = ARG_ANYTHING,
>>> +};
>>> +
>>> +BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
>>> +{
>>> +    return map->ops->map_pop_elem(map, value);
>>> +}
>>> +
>>> +const struct bpf_func_proto bpf_map_pop_elem_proto = {
>>> +    .func        = bpf_map_pop_elem,
>>> +    .gpl_only    = false,
>>> +    .pkt_access    = true,
>>> +    .ret_type    = RET_INTEGER,
>>> +    .arg1_type    = ARG_CONST_MAP_PTR,
>>> +    .arg2_type    = ARG_PTR_TO_UNINIT_MAP_VALUE,
>>> +};
>>> +
>>> +BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
>>> +{
>>> +    return map->ops->map_peek_elem(map, value);
>>> +}
>>> +
>>> +const struct bpf_func_proto bpf_map_peek_elem_proto = {
>>> +    .func        = bpf_map_peek_elem,
>>> +    .gpl_only    = false,
>>> +    .pkt_access    = true,
>>> +    .ret_type    = RET_INTEGER,
>>> +    .arg1_type    = ARG_CONST_MAP_PTR,
>>> +    .arg2_type    = ARG_PTR_TO_UNINIT_MAP_VALUE,
>>> +};
>>> +
>>>   const struct bpf_func_proto bpf_get_prandom_u32_proto = {
>>>       .func        = bpf_user_rnd_u32,
>>>       .gpl_only    = false,
>>> diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
>>> new file mode 100644
>>> index 000000000000..a597c5ba68f6
>>> --- /dev/null
>>> +++ b/kernel/bpf/queue_stack_maps.c
>>> @@ -0,0 +1,300 @@
>>> +// SPDX-License-Identifier: GPL-2.0
>>> +/*
>>> + * queue_stack_maps.c: BPF queue and stack maps
>>> + *
>>> + * Copyright (c) 2018 Politecnico di Torino
>>> + */
>>> +#include <linux/bpf.h>
>>> +#include <linux/list.h>
>>> +#include <linux/slab.h>
>>> +#include "percpu_freelist.h"
>>> +
>>> +#define QUEUE_STACK_CREATE_FLAG_MASK \
>>> +    (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
>>> +
>>> +
>>> +struct bpf_queue_stack {
>>> +    struct bpf_map map;
>>> +    raw_spinlock_t lock;
>>> +    u32 head, tail;
>>> +    u32 index_mask;
>>> +    u32 count;
>>> +
>>> +    char elements[0] __aligned(8);
>>> +};
>>> +
>>> +static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
>>> +{
>>> +    return container_of(map, struct bpf_queue_stack, map);
>>> +}
>>> +
>>> +static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
>>> +{
>>> +    return qs->count == 0;
>>> +}
>>> +
>>> +static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
>>> +{
>>> +    return qs->count == qs->map.max_entries;
>>> +}
>>> +
>>> +/* Called from syscall */
>>> +static int queue_stack_map_alloc_check(union bpf_attr *attr)
>>> +{
>>> +    /* check sanity of attributes */
>>> +    if (attr->max_entries == 0 || attr->key_size != 0 ||
>>> +        attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
>>> +        return -EINVAL;
>>> +
>>> +    if (attr->value_size > KMALLOC_MAX_SIZE)
>>> +        /* if value_size is bigger, the user space won't be able to
>>> +         * access the elements.
>>> +         */
>>> +        return -E2BIG;
>>> +
>>> +    return 0;
>>> +}
>>> +
>>> +static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
>>> +{
>>> +    int ret, numa_node = bpf_map_attr_numa_node(attr);
>>> +    u32 max_entries, value_size, index_mask;
>>> +    u64 queue_size, cost, mask64;
>>> +    struct bpf_queue_stack *qs;
>>> +
>>> +    max_entries = attr->max_entries;
>>> +    value_size = attr->value_size;
>>> +
>>> +    /* From arraymap.c:
>>> +     * On 32 bit archs roundup_pow_of_two() with max_entries that has
>>> +     * upper most bit set in u32 space is undefined behavior due to
>>> +     * resulting 1U << 32, so do it manually here in u64 space.
>>> +     */
>>> +    mask64 = fls_long(max_entries - 1);
>>> +    mask64 = 1ULL << mask64;
>>> +    mask64 -= 1;
>>> +
>>> +    index_mask = mask64;
>>> +
>>> +    /* Round up queue size to nearest power of 2 */
>>> +    max_entries = index_mask + 1;
>> what's the point of roundup ?
>
> If the size of the buffer is a power of two we can wrap the indexes
> with an AND operation instead of a MOD.
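>
> To make that concrete, here is a minimal sketch of the two wrapping
> styles (illustrative code, not taken from the patch):
>
>     /* When size is a power of two, mask = size - 1 and a single AND
>      * replaces the integer division hidden behind the MOD operator.
>      */
>     static unsigned int wrap_and(unsigned int i, unsigned int mask)
>     {
>             return (i + 1) & mask;  /* requires power-of-2 size */
>     }
>
>     static unsigned int wrap_mod(unsigned int i, unsigned int size)
>     {
>             return (i + 1) % size;  /* works for any size */
>     }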
>
>> The memory waste becomes quite large when max_entries is high.
> Yes, you are right, we have the different choices described below.
>
>>
>> If queue/stack is sized to exact max_entries,
>> then 'count' can be removed too, right?
>
> If we don't use 'count' and we want to use the AND operation for
> wrapping indexes, the max entries should be 2^n - 1 because a slot is
> lost to distinguish between a full and an empty queue/stack.
>
> Just to summarize, we have these options:
> 1. Allow any size, round up, use the AND operation and 'count' (current).
> 2. Allow only power-of-2 sizes, use the AND operation and 'count'.
> 3. Allow any size, no round-up, use the MOD operation and leave an
> empty slot (a sketch of this option follows below).
>
> I prefer 1 or 2, but I don't have a strong opinion; maybe allowing
> only power-of-two max entries could be too limiting.
> Another consideration: is it really that bad to waste memory when the
> user requests a size far away from the next power of 2?
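>
> For reference, a minimal user-space sketch of option 3 (illustrative
> names, not code from the patch): one slot stays unused so that
> head == tail always means "empty" and never "full".
>
>     /* Ring buffer with a sacrificial slot: holds size - 1 elements. */
>     struct ring {
>             unsigned int head, tail, size;  /* any size >= 2 works */
>     };
>
>     static int ring_empty(const struct ring *r)
>     {
>             return r->head == r->tail;
>     }
>
>     static int ring_full(const struct ring *r)
>     {
>             /* If head caught up to tail after one more push, "full"
>              * would look identical to "empty", so stop one short.
>              */
>             return (r->head + 1) % r->size == r->tail;
>     }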
>
>>> +    /* Check for overflows. */
>>> +    if (max_entries < attr->max_entries)
>>> +        return ERR_PTR(-E2BIG);
>>> +
>>> +    queue_size = sizeof(*qs) + (u64) value_size * max_entries;
>>> +
>>> +    cost = queue_size;
>>> +    if (cost >= U32_MAX - PAGE_SIZE)
>>> +        return ERR_PTR(-E2BIG);
>>> +
>>> +    cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
>>> +
>>> +    ret = bpf_map_precharge_memlock(cost);
>>> +    if (ret < 0)
>>> +        return ERR_PTR(ret);
>>> +
>>> +    qs = bpf_map_area_alloc(queue_size, numa_node);
>>> +    if (!qs)
>>> +        return ERR_PTR(-ENOMEM);
>>> +
>>> +    memset(qs, 0, sizeof(*qs));
>>> +
>>> +    bpf_map_init_from_attr(&qs->map, attr);
>>> +
>>> +    qs->map.pages = cost;
>>> +    qs->index_mask = index_mask;
>>> +
>>> +    raw_spin_lock_init(&qs->lock);
>>> +
>>> +    return &qs->map;
>>> +}
>>> +
>>> +/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
>>> +static void queue_stack_map_free(struct bpf_map *map)
>>> +{
>>> +    struct bpf_queue_stack *qs = bpf_queue_stack(map);
>>> +
>>> +    /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
>>> +     * so the programs (can be more than one that used this map) were
>>> +     * disconnected from events. Wait for outstanding critical sections in
>>> +     * these programs to complete
>>> +     */
>>> +    synchronize_rcu();
>>> +
>>> +    bpf_map_area_free(qs);
>>> +}
>>> +
>>> +static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
>>> +{
>>> +    struct bpf_queue_stack *qs = bpf_queue_stack(map);
>>> +    unsigned long flags;
>>> +    int err = 0;
>>> +    void *ptr;
>>> +
>>> +    raw_spin_lock_irqsave(&qs->lock, flags);
>>> +
>>> +    if (queue_stack_map_is_empty(qs)) {
>>> +        err = -ENOENT;
>>> +        goto out;
>>> +    }
>>> +
>>> +    ptr = &qs->elements[qs->tail * qs->map.value_size];
>>> +    memcpy(value, ptr, qs->map.value_size);
>>> +
>>> +    if (delete) {
>>> +        qs->tail = (qs->tail + 1) & qs->index_mask;
>>> +        qs->count--;
>>> +    }
>>> +
>>> +out:
>>> +    raw_spin_unlock_irqrestore(&qs->lock, flags);
>>> +    return err;
>>> +}
>>> +
>>> +
>>> +static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
>>> +{
>>> +    struct bpf_queue_stack *qs = bpf_queue_stack(map);
>>> +    unsigned long flags;
>>> +    int err = 0;
>>> +    void *ptr;
>>> +    u32 index;
>>> +
>>> +    raw_spin_lock_irqsave(&qs->lock, flags);
>>> +
>>> +    if (queue_stack_map_is_empty(qs)) {
>>> +        err = -ENOENT;
>>> +        goto out;
>>> +    }
>>> +
>>> +    index = (qs->head - 1) & qs->index_mask;
>>> +    ptr = &qs->elements[index * qs->map.value_size];
>>> +    memcpy(value, ptr, qs->map.value_size);
>>> +
>>> +    if (delete) {
>>> +        qs->head = (qs->head - 1) & qs->index_mask;
>>> +        qs->count--;
>>> +    }
>>> +
>>> +out:
>>> +    raw_spin_unlock_irqrestore(&qs->lock, flags);
>>> +    return err;
>>> +}
>>> +
>>> +/* Called from syscall or from eBPF program */
>>> +static int queue_map_peek_elem(struct bpf_map *map, void *value)
>>> +{
>>> +    return __queue_map_get(map, value, false);
>>> +}
>>> +
>>> +/* Called from syscall or from eBPF program */
>>> +static int stack_map_peek_elem(struct bpf_map *map, void *value)
>>> +{
>>> +    return __stack_map_get(map, value, false);
>>> +}
>>> +
>>> +/* Called from syscall or from eBPF program */
>>> +static int queue_map_pop_elem(struct bpf_map *map, void *value)
>>> +{
>>> +    return __queue_map_get(map, value, true);
>>> +}
>>> +
>>> +/* Called from syscall or from eBPF program */
>>> +static int stack_map_pop_elem(struct bpf_map *map, void *value)
>>> +{
>>> +    return __stack_map_get(map, value, true);
>>> +}
>>> +
>>> +/* Called from syscall or from eBPF program */
>>> +static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
>>> +                     u64 flags)
>>> +{
>>> +    struct bpf_queue_stack *qs = bpf_queue_stack(map);
>>> +    unsigned long irq_flags;
>>> +    int err = 0;
>>> +    void *dst;
>>> +
>>> +    /* BPF_EXIST is used to force making room for a new element in case the
>>> +     * map is full
>>> +     */
>>> +    bool replace = (flags & BPF_EXIST);
>>> +
>>> +    /* Check supported flags for queue and stack maps */
>>> +    if (flags & BPF_NOEXIST || flags > BPF_EXIST)
>>> +        return -EINVAL;
>>> +
>>> +    raw_spin_lock_irqsave(&qs->lock, irq_flags);
>>> +
>>> +    if (queue_stack_map_is_full(qs)) {
>>> +        if (!replace) {
>>> +            err = -E2BIG;
>> ENOSPC is probably more accurate ?
> Agree.

Well, actually I don't. I just realized that other maps return E2BIG
when max_entries is reached, so we probably want to keep this
consistent across all maps.

>>> +            goto out;
>>> +        }
>>> +        /* advance tail pointer to overwrite oldest element */
>> 'oldest' is ambiguous here.
>> For queue it's true, but for stack it's the last element.
>> Since the stack is popping from the head, push w/exist flag will keep
>> overwriting the last element.
>
> No, actually pushing with the exist flag advances the tail index, hence
> it overwrites the oldest element in a stack map.
> It is like shifting the whole stack down by one position.
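>
> A tiny user-space simulation of that index dance (hypothetical code,
> only meant to mirror the arithmetic in the quoted patch):
>
>     #include <stdio.h>
>
>     #define MASK 3                          /* 4 slots, ring is full */
>
>     static int elems[MASK + 1] = { 10, 20, 30, 40 };
>     static unsigned int head, tail;         /* both 0: full ring */
>
>     /* Push on a full stack with BPF_EXIST semantics: advance the tail
>      * first, then write at head.  The slot reused is elems[tail], the
>      * bottom (oldest) element, not the top.
>      */
>     static void push_replace(int v)
>     {
>             tail = (tail + 1) & MASK;
>             elems[head] = v;
>             head = (head + 1) & MASK;
>     }
>
>     int main(void)
>     {
>             push_replace(50);               /* 10, the oldest, is gone */
>             for (unsigned int i = 0; i <= MASK; i++)
>                     printf("%d ", elems[i]); /* prints: 50 20 30 40 */
>             return 0;
>     }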
>
>
>> Pls explain it more clearly in helper description in bpf.h
>
> Will do.
>>
>>> +        qs->tail = (qs->tail + 1) & qs->index_mask;
>>> +        qs->count--;
>>> +    }
>>> +
>>> +    dst = &qs->elements[qs->head * qs->map.value_size];
>>> +    memcpy(dst, value, qs->map.value_size);
>>> +
>>> +    qs->head = (qs->head + 1) & qs->index_mask;
>>> +    qs->count++;
>>> +
>>> +out:
>>> +    raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
>>> +    return err;
>>> +}
>>> +
>>> +/* Called from syscall or from eBPF program */
>>> +static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
>>> +{
>>> +    return NULL;
>>> +}
>>> +
>>> +/* Called from syscall or from eBPF program */
>>> +static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
>>> +                       void *value, u64 flags)
>>> +{
>>> +    return -EINVAL;
>>> +}
>>> +
>>> +/* Called from syscall or from eBPF program */
>>> +static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
>>> +{
>>> +    return -EINVAL;
>>> +}
>>> +
>>> +/* Called from syscall */
>>> +static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
>>> +                    void *next_key)
>>> +{
>>> +    return -EINVAL;
>>> +}
>>> +
>>> +const struct bpf_map_ops queue_map_ops = {
>>> +    .map_alloc_check = queue_stack_map_alloc_check,
>>> +    .map_alloc = queue_stack_map_alloc,
>>> +    .map_free = queue_stack_map_free,
>>> +    .map_lookup_elem = queue_stack_map_lookup_elem,
>>> +    .map_update_elem = queue_stack_map_update_elem,
>>> +    .map_delete_elem = queue_stack_map_delete_elem,
>>> +    .map_push_elem = queue_stack_map_push_elem,
>>> +    .map_pop_elem = queue_map_pop_elem,
>>> +    .map_peek_elem = queue_map_peek_elem,
>>> +    .map_get_next_key = queue_stack_map_get_next_key,
>>> +};
>>> +
>>> +const struct bpf_map_ops stack_map_ops = {
>>> +    .map_alloc_check = queue_stack_map_alloc_check,
>>> +    .map_alloc = queue_stack_map_alloc,
>>> +    .map_free = queue_stack_map_free,
>>> +    .map_lookup_elem = queue_stack_map_lookup_elem,
>>> +    .map_update_elem = queue_stack_map_update_elem,
>>> +    .map_delete_elem = queue_stack_map_delete_elem,
>>> +    .map_push_elem = queue_stack_map_push_elem,
>>> +    .map_pop_elem = stack_map_pop_elem,
>>> +    .map_peek_elem = stack_map_peek_elem,
>>> +    .map_get_next_key = queue_stack_map_get_next_key,
>>> +};
>>> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
>>> index 50957e243bfb..c46bf2d38be3 100644
>>> --- a/kernel/bpf/syscall.c
>>> +++ b/kernel/bpf/syscall.c
>>> @@ -727,6 +727,9 @@ static int map_lookup_elem(union bpf_attr *attr)
>>>           err = bpf_fd_htab_map_lookup_elem(map, key, value);
>>>       } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
>>>           err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
>>> +    } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
>>> +           map->map_type == BPF_MAP_TYPE_STACK) {
>>> +        err = map->ops->map_peek_elem(map, value);
>>>       } else {
>>>           rcu_read_lock();
>>>           ptr = map->ops->map_lookup_elem(map, key);
>>> @@ -841,6 +844,9 @@ static int map_update_elem(union bpf_attr *attr)
>>>           /* rcu_read_lock() is not needed */
>>>           err = bpf_fd_reuseport_array_update_elem(map, key, value,
>>>                                attr->flags);
>>> +    } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
>>> +           map->map_type == BPF_MAP_TYPE_STACK) {
>>> +        err = map->ops->map_push_elem(map, value, attr->flags);
>>>       } else {
>>>           rcu_read_lock();
>>>           err = map->ops->map_update_elem(map, key, value, attr->flags);
>>> @@ -1001,11 +1007,6 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
>>>           goto err_put;
>>>       }
>>>   -    if (!map->ops->map_lookup_and_delete_elem) {
>>> -        err = -ENOTSUPP;
>>> -        goto err_put;
>>> -    }
>>> -
>>>       key = __bpf_copy_key(ukey, map->key_size);
>>>       if (IS_ERR(key)) {
>>>           err = PTR_ERR(key);
>>> @@ -1028,12 +1029,22 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
>>>        */
>>>       preempt_disable();
>>>       __this_cpu_inc(bpf_prog_active);
>>> -    rcu_read_lock();
>>> -    ptr = map->ops->map_lookup_and_delete_elem(map, key);
>>> -    if (ptr)
>>> -        memcpy(value, ptr, value_size);
>>> -    rcu_read_unlock();
>>> +    if (map->map_type == BPF_MAP_TYPE_QUEUE ||
>>> +        map->map_type == BPF_MAP_TYPE_STACK) {
>>> +        err = map->ops->map_pop_elem(map, value);
>> please clean up the patches, so that patch 4 doesn't immediately
>> deletes the lines that were introduced in patch 3.
>> Otherwise what was the point of them in patch 3?
>
> Actually they are not deleted but moved; anyway, I will clean it up a
> little bit so the lines are moved to a closer place.
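>
> For completeness, this is how those syscall paths look from user
> space (libbpf wrapper names assumed; the key is unused for queue and
> stack maps, so NULL is passed):
>
>     __u32 val = 1234;
>
>     /* BPF_MAP_UPDATE_ELEM on a queue/stack map -> map_push_elem() */
>     bpf_map_update_elem(map_fd, NULL, &val, 0);
>
>     /* BPF_MAP_LOOKUP_ELEM -> map_peek_elem() */
>     bpf_map_lookup_elem(map_fd, NULL, &val);
>
>     /* BPF_MAP_LOOKUP_AND_DELETE_ELEM -> map_pop_elem(); assumes the
>      * wrapper added later in this series. */
>     bpf_map_lookup_and_delete_elem(map_fd, NULL, &val);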
>>
>>> +    } else {
>>> +        if (!map->ops->map_lookup_and_delete_elem) {
>>> +            err = -ENOTSUPP;
>>> +            goto free_value;
>>> +        }
>>> +        rcu_read_lock();
>>> +        ptr = map->ops->map_lookup_and_delete_elem(map, key);
>>> +        if (ptr)
>>> +            memcpy(value, ptr, value_size);
>>> +        rcu_read_unlock();
>>>           err = ptr ? 0 : -ENOENT;
>>> +    }
>>> +
>>>       __this_cpu_dec(bpf_prog_active);
>>>       preempt_enable();
>>>   diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
>>> index 73c81bef6ae8..489667f93061 100644
>>> --- a/kernel/bpf/verifier.c
>>> +++ b/kernel/bpf/verifier.c
>>> @@ -2121,7 +2121,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
>>>       }
>>>         if (arg_type == ARG_PTR_TO_MAP_KEY ||
>>> -        arg_type == ARG_PTR_TO_MAP_VALUE) {
>>> +        arg_type == ARG_PTR_TO_MAP_VALUE ||
>>> +        arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
>>>           expected_type = PTR_TO_STACK;
>>>           if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
>>>               type != expected_type)
>>> @@ -2191,7 +2192,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
>>>           err = check_helper_mem_access(env, regno,
>>>                             meta->map_ptr->key_size, false,
>>>                             NULL);
>>> -    } else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
>>> +    } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
>>> +           arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
>>>           /* bpf_map_xxx(..., map_ptr, ..., value) call:
>>>            * check [value, value + map->value_size) validity
>>>            */
>>> @@ -2200,9 +2202,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
>>>               verbose(env, "invalid map_ptr to access map->value\n");
>>>               return -EACCES;
>>>           }
>>> +        meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
>>>           err = check_helper_mem_access(env, regno,
>>>                             meta->map_ptr->value_size, false,
>>> -                          NULL);
>>> +                          meta);
>>>       } else if (arg_type_is_mem_size(arg_type)) {
>>>           bool zero_size_allowed = (arg_type == 
>>> ARG_CONST_SIZE_OR_ZERO);
>>>   @@ -2676,7 +2679,10 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
>>>       if (func_id != BPF_FUNC_tail_call &&
>>>           func_id != BPF_FUNC_map_lookup_elem &&
>>>           func_id != BPF_FUNC_map_update_elem &&
>>> -        func_id != BPF_FUNC_map_delete_elem)
>>> +        func_id != BPF_FUNC_map_delete_elem &&
>>> +        func_id != BPF_FUNC_map_push_elem &&
>>> +        func_id != BPF_FUNC_map_pop_elem &&
>>> +        func_id != BPF_FUNC_map_peek_elem)
>>>           return 0;
>>>         if (meta->map_ptr == NULL) {
>>> diff --git a/net/core/filter.c b/net/core/filter.c
>>> index 591c698bc517..40736e0d9cff 100644
>>> --- a/net/core/filter.c
>>> +++ b/net/core/filter.c
>>> @@ -4993,6 +4993,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
>>>           return &bpf_map_update_elem_proto;
>>>       case BPF_FUNC_map_delete_elem:
>>>           return &bpf_map_delete_elem_proto;
>>> +    case BPF_FUNC_map_push_elem:
>>> +        return &bpf_map_push_elem_proto;
>>> +    case BPF_FUNC_map_pop_elem:
>>> +        return &bpf_map_pop_elem_proto;
>>> +    case BPF_FUNC_map_peek_elem:
>>> +        return &bpf_map_peek_elem_proto;
>>>       case BPF_FUNC_get_prandom_u32:
>>>           return &bpf_get_prandom_u32_proto;
>>>       case BPF_FUNC_get_smp_processor_id:
>>>
>
