lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Fri, 23 Mar 2018 10:02:52 -0700
From:   Alexander Duyck <alexander.duyck@...il.com>
To:     Jesper Dangaard Brouer <brouer@...hat.com>
Cc:     Netdev <netdev@...r.kernel.org>,
        Björn Töpel <bjorn.topel@...el.com>,
        "Karlsson, Magnus" <magnus.karlsson@...el.com>,
        Eugenia Emantayev <eugenia@...lanox.com>,
        Jason Wang <jasowang@...hat.com>,
        John Fastabend <john.fastabend@...il.com>,
        Eran Ben Elisha <eranbe@...lanox.com>,
        Saeed Mahameed <saeedm@...lanox.com>,
        Gal Pressman <galp@...lanox.com>,
        Daniel Borkmann <borkmann@...earbox.net>,
        Alexei Starovoitov <alexei.starovoitov@...il.com>,
        Tariq Toukan <tariqt@...lanox.com>
Subject: Re: [bpf-next V5 PATCH 12/15] xdp: allow page_pool as an allocator
 type in xdp_return_frame

On Fri, Mar 23, 2018 at 5:18 AM, Jesper Dangaard Brouer
<brouer@...hat.com> wrote:
> New allocator type MEM_TYPE_PAGE_POOL for page_pool usage.
>
> The registered allocator page_pool pointer is not available directly
> from xdp_rxq_info, but it could be (if needed).  For now, the driver
> should keep separate track of the page_pool pointer, which it should
> use for RX-ring page allocation.
>
> As suggested by Saeed, to maintain a symmetric API it is the driver's
> responsibility to allocate/create and free/destroy the page_pool.
> Thus, after the driver has called xdp_rxq_info_unreg(), it is the driver's
> responsibility to free the page_pool, but with an RCU free call.  This
> is done easily via the page_pool helper page_pool_destroy_rcu() (which
> avoids touching any driver code during the RCU callback, which could
> happen after the driver has been unloaded).
>
> Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
> ---
>  include/net/xdp.h |    3 +++
>  net/core/xdp.c    |   23 ++++++++++++++++++++---
>  2 files changed, 23 insertions(+), 3 deletions(-)
>
> diff --git a/include/net/xdp.h b/include/net/xdp.h
> index 859aa9b737fe..98b55eaf8fd7 100644
> --- a/include/net/xdp.h
> +++ b/include/net/xdp.h
> @@ -36,6 +36,7 @@
>  enum mem_type {
>         MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
>         MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
> +       MEM_TYPE_PAGE_POOL,
>         MEM_TYPE_MAX,
>  };
>
> @@ -44,6 +45,8 @@ struct xdp_mem_info {
>         u32 id;
>  };
>
> +struct page_pool;
> +
>  struct xdp_rxq_info {
>         struct net_device *dev;
>         u32 queue_index;
> diff --git a/net/core/xdp.c b/net/core/xdp.c
> index 06a5b39491ad..fe8e87abc266 100644
> --- a/net/core/xdp.c
> +++ b/net/core/xdp.c
> @@ -8,6 +8,7 @@
>  #include <linux/slab.h>
>  #include <linux/idr.h>
>  #include <linux/rhashtable.h>
> +#include <net/page_pool.h>
>
>  #include <net/xdp.h>
>
> @@ -27,7 +28,10 @@ static struct rhashtable *mem_id_ht;
>
>  struct xdp_mem_allocator {
>         struct xdp_mem_info mem;
> -       void *allocator;
> +       union {
> +               void *allocator;
> +               struct page_pool *page_pool;
> +       };
>         struct rhash_head node;
>         struct rcu_head rcu;
>  };
> @@ -74,7 +78,9 @@ void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
>         /* Allow this ID to be reused */
>         ida_simple_remove(&mem_id_pool, xa->mem.id);
>
> -       /* TODO: Depending on allocator type/pointer free resources */
> +       /* Notice, driver is expected to free the *allocator,
> +        * e.g. page_pool, and MUST also use RCU free.
> +        */
>
>         /* Poison memory */
>         xa->mem.id = 0xFFFF;
> @@ -290,11 +296,21 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
>
>  void xdp_return_frame(void *data, struct xdp_mem_info *mem)
>  {
> -       struct xdp_mem_allocator *xa;
> +       struct xdp_mem_allocator *xa = NULL;
>
>         rcu_read_lock();
>         if (mem->id)
>                 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
> +
> +       if (mem->type == MEM_TYPE_PAGE_POOL) {
> +               struct page *page = virt_to_head_page(data);
> +
> +               if (xa)
> +                       page_pool_put_page(xa->page_pool, page);
> +               else
> +                       put_page(page);
> +               return;
> +       }

So at this point there need to be a few changes.

I would suggest doing a switch based on mem->type instead of this mess
of if statements.

Also the PAGE_POOL is the only one that needs the reference to the
memory allocator. You should move all of the logic for accessing the
hash table into the section specific to the page pool with a
fall-through into the other paths.

>         rcu_read_unlock();
>
>         if (mem->type == MEM_TYPE_PAGE_SHARED) {
> @@ -306,6 +322,7 @@ void xdp_return_frame(void *data, struct xdp_mem_info *mem)
>                 struct page *page = virt_to_page(data); /* Assumes order0 page*/
>
>                 put_page(page);
> +               return;

Why add a return here? This was the end of the function already wasn't it?

>         }
>  }
>  EXPORT_SYMBOL_GPL(xdp_return_frame);
>

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ