Date:   Wed, 25 Apr 2018 14:37:02 +0200
From:   Björn Töpel <bjorn.topel@...il.com>
To:     "Michael S. Tsirkin" <mst@...hat.com>
Cc:     "Karlsson, Magnus" <magnus.karlsson@...el.com>,
        "Duyck, Alexander H" <alexander.h.duyck@...el.com>,
        Alexander Duyck <alexander.duyck@...il.com>,
        John Fastabend <john.fastabend@...il.com>,
        Alexei Starovoitov <ast@...com>,
        Jesper Dangaard Brouer <brouer@...hat.com>,
        Willem de Bruijn <willemdebruijn.kernel@...il.com>,
        Daniel Borkmann <daniel@...earbox.net>,
        Netdev <netdev@...r.kernel.org>, michael.lundkvist@...csson.com,
        "Brandeburg, Jesse" <jesse.brandeburg@...el.com>,
        "Singhai, Anjali" <anjali.singhai@...el.com>,
        "Zhang, Qi Z" <qi.z.zhang@...el.com>
Subject: Re: [PATCH bpf-next 03/15] xsk: add umem fill queue support and mmap

2018-04-24 1:16 GMT+02:00 Michael S. Tsirkin <mst@...hat.com>:
> On Mon, Apr 23, 2018 at 03:56:07PM +0200, Björn Töpel wrote:
>> From: Magnus Karlsson <magnus.karlsson@...el.com>
>>
>> Here, we add another setsockopt for registered user memory (umem)
>> called XDP_UMEM_FILL_RING. Using this socket option, the process can
>> ask the kernel to allocate a queue (ring buffer) and also mmap it
>> into the process address space (at offset XDP_UMEM_PGOFF_FILL_RING).
>>
>> The ring is used to explicitly pass ownership of umem frames from the
>> user process to the kernel. In a later patch, the kernel will fill
>> these frames with Rx packet data.
>>
>> Signed-off-by: Magnus Karlsson <magnus.karlsson@...el.com>
>> ---
>>  include/uapi/linux/if_xdp.h | 15 +++++++++++
>>  net/xdp/Makefile            |  2 +-
>>  net/xdp/xdp_umem.c          |  5 ++++
>>  net/xdp/xdp_umem.h          |  2 ++
>>  net/xdp/xsk.c               | 62 ++++++++++++++++++++++++++++++++++++++++++++-
>>  net/xdp/xsk_queue.c         | 58 ++++++++++++++++++++++++++++++++++++++++++
>>  net/xdp/xsk_queue.h         | 38 +++++++++++++++++++++++++++
>>  7 files changed, 180 insertions(+), 2 deletions(-)
>>  create mode 100644 net/xdp/xsk_queue.c
>>  create mode 100644 net/xdp/xsk_queue.h
>>
>> diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
>> index 41252135a0fe..975661e1baca 100644
>> --- a/include/uapi/linux/if_xdp.h
>> +++ b/include/uapi/linux/if_xdp.h
>> @@ -23,6 +23,7 @@
>>
>>  /* XDP socket options */
>>  #define XDP_UMEM_REG                 3
>> +#define XDP_UMEM_FILL_RING           4
>>
>>  struct xdp_umem_reg {
>>       __u64 addr; /* Start of packet data area */
>> @@ -31,4 +32,18 @@ struct xdp_umem_reg {
>>       __u32 frame_headroom; /* Frame head room */
>>  };
>>
>> +/* Pgoff for mmaping the rings */
>> +#define XDP_UMEM_PGOFF_FILL_RING     0x100000000
>> +
>> +struct xdp_ring {
>> +     __u32 producer __attribute__((aligned(64)));
>> +     __u32 consumer __attribute__((aligned(64)));
>> +};
>> +
>> +/* Used for the fill and completion queues for buffers */
>> +struct xdp_umem_ring {
>> +     struct xdp_ring ptrs;
>> +     __u32 desc[0] __attribute__((aligned(64)));
>> +};
>> +
>>  #endif /* _LINUX_IF_XDP_H */
>> diff --git a/net/xdp/Makefile b/net/xdp/Makefile
>> index a5d736640a0f..074fb2b2d51c 100644
>> --- a/net/xdp/Makefile
>> +++ b/net/xdp/Makefile
>> @@ -1,2 +1,2 @@
>> -obj-$(CONFIG_XDP_SOCKETS) += xsk.o xdp_umem.o
>> +obj-$(CONFIG_XDP_SOCKETS) += xsk.o xdp_umem.o xsk_queue.o
>>
>> diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
>> index bff058f5a769..6fc233e03f30 100644
>> --- a/net/xdp/xdp_umem.c
>> +++ b/net/xdp/xdp_umem.c
>> @@ -62,6 +62,11 @@ static void xdp_umem_release(struct xdp_umem *umem)
>>       struct mm_struct *mm;
>>       unsigned long diff;
>>
>> +     if (umem->fq) {
>> +             xskq_destroy(umem->fq);
>> +             umem->fq = NULL;
>> +     }
>> +
>>       if (umem->pgs) {
>>               xdp_umem_unpin_pages(umem);
>>
>> diff --git a/net/xdp/xdp_umem.h b/net/xdp/xdp_umem.h
>> index 58714f4f7f25..3086091aebdd 100644
>> --- a/net/xdp/xdp_umem.h
>> +++ b/net/xdp/xdp_umem.h
>> @@ -18,9 +18,11 @@
>>  #include <linux/mm.h>
>>  #include <linux/if_xdp.h>
>>
>> +#include "xsk_queue.h"
>>  #include "xdp_umem_props.h"
>>
>>  struct xdp_umem {
>> +     struct xsk_queue *fq;
>>       struct page **pgs;
>>       struct xdp_umem_props props;
>>       u32 npgs;
>> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
>> index 19fc719cbe0d..bf6a1151df28 100644
>> --- a/net/xdp/xsk.c
>> +++ b/net/xdp/xsk.c
>> @@ -32,6 +32,7 @@
>>  #include <linux/netdevice.h>
>>  #include <net/sock.h>
>>
>> +#include "xsk_queue.h"
>>  #include "xdp_umem.h"
>>
>>  struct xdp_sock {
>> @@ -47,6 +48,21 @@ static struct xdp_sock *xdp_sk(struct sock *sk)
>>       return (struct xdp_sock *)sk;
>>  }
>>
>> +static int xsk_init_queue(u32 entries, struct xsk_queue **queue)
>> +{
>> +     struct xsk_queue *q;
>> +
>> +     if (entries == 0 || *queue || !is_power_of_2(entries))
>> +             return -EINVAL;
>> +
>> +     q = xskq_create(entries);
>> +     if (!q)
>> +             return -ENOMEM;
>> +
>> +     *queue = q;
>> +     return 0;
>> +}
>> +
>>  static int xsk_release(struct socket *sock)
>>  {
>>       struct sock *sk = sock->sk;
>> @@ -109,6 +125,23 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
>>               mutex_unlock(&xs->mutex);
>>               return 0;
>>       }
>> +     case XDP_UMEM_FILL_RING:
>> +     {
>> +             struct xsk_queue **q;
>> +             int entries;
>> +
>> +             if (!xs->umem)
>> +                     return -EINVAL;
>> +
>> +             if (copy_from_user(&entries, optval, sizeof(entries)))
>> +                     return -EFAULT;
>> +
>> +             mutex_lock(&xs->mutex);
>> +             q = &xs->umem->fq;
>> +             err = xsk_init_queue(entries, q);
>> +             mutex_unlock(&xs->mutex);
>> +             return err;
>> +     }
>>       default:
>>               break;
>>       }
>> @@ -116,6 +149,33 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
>>       return -ENOPROTOOPT;
>>  }
>>
>> +static int xsk_mmap(struct file *file, struct socket *sock,
>> +                 struct vm_area_struct *vma)
>> +{
>> +     unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
>> +     unsigned long size = vma->vm_end - vma->vm_start;
>> +     struct xdp_sock *xs = xdp_sk(sock->sk);
>> +     struct xsk_queue *q;
>> +     unsigned long pfn;
>> +     struct page *qpg;
>> +
>> +     if (!xs->umem)
>> +             return -EINVAL;
>> +
>> +     if (offset == XDP_UMEM_PGOFF_FILL_RING)
>> +             q = xs->umem->fq;
>> +     else
>> +             return -EINVAL;
>> +
>> +     qpg = virt_to_head_page(q->ring);
>> +     if (size > (PAGE_SIZE << compound_order(qpg)))
>> +             return -EINVAL;
>> +
>> +     pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
>> +     return remap_pfn_range(vma, vma->vm_start, pfn,
>> +                            size, vma->vm_page_prot);
>> +}
>> +
>>  static struct proto xsk_proto = {
>>       .name =         "XDP",
>>       .owner =        THIS_MODULE,
>> @@ -139,7 +199,7 @@ static const struct proto_ops xsk_proto_ops = {
>>       .getsockopt =   sock_no_getsockopt,
>>       .sendmsg =      sock_no_sendmsg,
>>       .recvmsg =      sock_no_recvmsg,
>> -     .mmap =         sock_no_mmap,
>> +     .mmap =         xsk_mmap,
>>       .sendpage =     sock_no_sendpage,
>>  };
>>
>> diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
>> new file mode 100644
>> index 000000000000..23da4f29d3fb
>> --- /dev/null
>> +++ b/net/xdp/xsk_queue.c
>> @@ -0,0 +1,58 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/* XDP user-space ring structure
>> + * Copyright(c) 2018 Intel Corporation.
>> + *
>> + * This program is free software; you can redistribute it and/or modify it
>> + * under the terms and conditions of the GNU General Public License,
>> + * version 2, as published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope it will be useful, but WITHOUT
>> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
>> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
>> + * more details.
>> + */
>> +
>> +#include <linux/slab.h>
>> +
>> +#include "xsk_queue.h"
>> +
>> +static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
>> +{
>> +     return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u32);
>> +}
>> +
>> +struct xsk_queue *xskq_create(u32 nentries)
>> +{
>> +     struct xsk_queue *q;
>> +     gfp_t gfp_flags;
>> +     size_t size;
>> +
>> +     q = kzalloc(sizeof(*q), GFP_KERNEL);
>> +     if (!q)
>> +             return NULL;
>> +
>> +     q->nentries = nentries;
>> +     q->ring_mask = nentries - 1;
>> +
>> +     gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
>> +                 __GFP_COMP  | __GFP_NORETRY;
>> +     size = xskq_umem_get_ring_size(q);
>> +
>> +     q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
>> +                                                   get_order(size));
>> +     if (!q->ring) {
>> +             kfree(q);
>> +             return NULL;
>> +     }
>> +
>> +     return q;
>> +}
>> +
>> +void xskq_destroy(struct xsk_queue *q)
>> +{
>> +     if (!q)
>> +             return;
>> +
>> +     page_frag_free(q->ring);
>> +     kfree(q);
>> +}
>> diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
>> new file mode 100644
>> index 000000000000..7eb556bf73be
>> --- /dev/null
>> +++ b/net/xdp/xsk_queue.h
>> @@ -0,0 +1,38 @@
>> +/* SPDX-License-Identifier: GPL-2.0
>> + * XDP user-space ring structure
>> + * Copyright(c) 2018 Intel Corporation.
>> + *
>> + * This program is free software; you can redistribute it and/or modify it
>> + * under the terms and conditions of the GNU General Public License,
>> + * version 2, as published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope it will be useful, but WITHOUT
>> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
>> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
>> + * more details.
>> + */
>> +
>> +#ifndef _LINUX_XSK_QUEUE_H
>> +#define _LINUX_XSK_QUEUE_H
>> +
>> +#include <linux/types.h>
>> +#include <linux/if_xdp.h>
>> +
>> +#include "xdp_umem_props.h"
>> +
>> +struct xsk_queue {
>> +     struct xdp_umem_props umem_props;
>> +     u32 ring_mask;
>> +     u32 nentries;
>> +     u32 prod_head;
>> +     u32 prod_tail;
>> +     u32 cons_head;
>> +     u32 cons_tail;
>> +     struct xdp_ring *ring;
>> +     u64 invalid_descs;
>> +};
>
> Any documentation on how e.g. the locking works here?
>

It's an SPSC (single-producer/single-consumer) queue. On the kernel
side, synchronization is guaranteed by the NAPI context; on the
user-space side, it is the application's responsibility to serialize
its own accesses.

Even though the xsk_queue structure has both cons_ and prod_ members,
only the prod_ *or* the cons_ side is used for a given queue. For the
kernel this means that it consumes from the fill and Tx queues, and
produces to the completion and Rx queues.
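
To spell out who produces and who consumes each ring (the completion,
Rx and Tx rings show up in later patches of this series):

  ring         producer     consumer
  fill         user space   kernel
  completion   kernel       user space
  rx           kernel       user space
  tx           user space   kernel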

Note that prod_/cons_ are the *cached* local copies; the actual
producer/consumer pointers reside in the xdp_ring that is shared
between kernel and user space.
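
To make the cached-pointer idea concrete, here is a minimal user-space
sketch of the producer side of the fill ring. Only the ring layout
mirrors the uapi additions in this patch; the fq wrapper struct, the
function name and the GCC atomic builtins used for the barriers are
illustrative, not part of the proposed API:

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the uapi layout added by this patch. */
struct xdp_ring {
	uint32_t producer __attribute__((aligned(64)));
	uint32_t consumer __attribute__((aligned(64)));
};

struct xdp_umem_ring {
	struct xdp_ring ptrs;
	uint32_t desc[] __attribute__((aligned(64)));
};

struct umem_fill_queue {
	struct xdp_umem_ring *ring;	/* mmapped at XDP_UMEM_PGOFF_FILL_RING */
	uint32_t mask;			/* nentries - 1, nentries a power of two */
	uint32_t cached_cons;		/* local copy of ring->ptrs.consumer */
};

/* Hand one umem frame index over to the kernel. Single producer only. */
static bool fq_produce(struct umem_fill_queue *fq, uint32_t frame_idx)
{
	uint32_t prod = fq->ring->ptrs.producer;	/* only we write this */

	if (prod - fq->cached_cons == fq->mask + 1) {
		/* Ring looks full: refresh the cached consumer index. */
		fq->cached_cons = __atomic_load_n(&fq->ring->ptrs.consumer,
						  __ATOMIC_ACQUIRE);
		if (prod - fq->cached_cons == fq->mask + 1)
			return false;			/* really full */
	}

	fq->ring->desc[prod & fq->mask] = frame_idx;
	/* Publish the descriptor before bumping the producer index. */
	__atomic_store_n(&fq->ring->ptrs.producer, prod + 1, __ATOMIC_RELEASE);
	return true;
}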

I'll try to make it clearer in the documentation!
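
In the meantime, here is roughly how I'd expect an application to size
and map the fill ring with this patch applied. This is only a sketch:
it assumes the AF_XDP socket and SOL_XDP level from the earlier patches
in this series, that the new uapi header is available, and it trims all
error handling:

#include <linux/if_xdp.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/socket.h>

/* Returns the mapped fill ring, or NULL on failure. */
static struct xdp_umem_ring *map_fill_ring(int xsk_fd, uint32_t nentries)
{
	size_t len;
	void *map;

	/* The umem must already be registered (XDP_UMEM_REG), and
	 * nentries must be a power of two; anything else is -EINVAL.
	 */
	if (setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING,
		       &nentries, sizeof(nentries)))
		return NULL;

	/* producer/consumer indices plus one u32 descriptor per entry */
	len = sizeof(struct xdp_umem_ring) + nentries * sizeof(uint32_t);

	map = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_POPULATE, xsk_fd,
		   XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED)
		return NULL;

	return map;
}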

>
>> +
>> +struct xsk_queue *xskq_create(u32 nentries);
>> +void xskq_destroy(struct xsk_queue *q);
>> +
>> +#endif /* _LINUX_XSK_QUEUE_H */
>> --
>> 2.14.1
