[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20190411053303.GA1416@apalos>
Date: Thu, 11 Apr 2019 08:33:03 +0300
From: Ilias Apalodimas <ilias.apalodimas@...aro.org>
To: Jesper Dangaard Brouer <brouer@...hat.com>
Cc: netdev@...r.kernel.org, Daniel Borkmann <borkmann@...earbox.net>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
"David S. Miller" <davem@...emloft.net>, bpf@...r.kernel.org,
Toke Høiland-Jørgensen <toke@...e.dk>
Subject: Re: [PATCH bpf-next 3/5] net: core: introduce build_skb_around
On Wed, Apr 10, 2019 at 01:43:47PM +0200, Jesper Dangaard Brouer wrote:
> The function build_skb() also has the responsibility to allocate and clear
> the SKB structure. Introduce a new function build_skb_around(), that moves
> the responsibility of allocation and clearing to the caller. This allows
> caller to use kmem_cache (slab/slub) bulk allocation API.
>
> The next patch uses this function combined with kmem_cache_alloc_bulk.
>
> Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
> ---
> include/linux/skbuff.h | 2 +
> net/core/skbuff.c | 71 +++++++++++++++++++++++++++++++++++-------------
> 2 files changed, 54 insertions(+), 19 deletions(-)
>
> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> index 9027a8c4219f..c40ffab8a9b0 100644
> --- a/include/linux/skbuff.h
> +++ b/include/linux/skbuff.h
> @@ -1044,6 +1044,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
> int node);
> struct sk_buff *__build_skb(void *data, unsigned int frag_size);
> struct sk_buff *build_skb(void *data, unsigned int frag_size);
> +struct sk_buff *build_skb_around(struct sk_buff *skb,
> + void *data, unsigned int frag_size);
>
> /**
> * alloc_skb - allocate a network buffer
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index 4782f9354dd1..d904b6e5fe08 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -258,6 +258,33 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
> }
> EXPORT_SYMBOL(__alloc_skb);
>
> +/* Caller must provide SKB that is memset cleared */
> +static struct sk_buff *__build_skb_around(struct sk_buff *skb,
> + void *data, unsigned int frag_size)
> +{
> + struct skb_shared_info *shinfo;
> + unsigned int size = frag_size ? : ksize(data);
> +
> + size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
> +
> + /* Assumes caller memset cleared SKB */
> + skb->truesize = SKB_TRUESIZE(size);
> + refcount_set(&skb->users, 1);
> + skb->head = data;
> + skb->data = data;
> + skb_reset_tail_pointer(skb);
> + skb->end = skb->tail + size;
> + skb->mac_header = (typeof(skb->mac_header))~0U;
> + skb->transport_header = (typeof(skb->transport_header))~0U;
> +
> + /* make sure we initialize shinfo sequentially */
> + shinfo = skb_shinfo(skb);
> + memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
> + atomic_set(&shinfo->dataref, 1);
> +
> + return skb;
> +}
> +
> /**
> * __build_skb - build a network buffer
> * @data: data buffer provided by caller
> @@ -279,32 +306,15 @@ EXPORT_SYMBOL(__alloc_skb);
> */
> struct sk_buff *__build_skb(void *data, unsigned int frag_size)
> {
> - struct skb_shared_info *shinfo;
> struct sk_buff *skb;
> - unsigned int size = frag_size ? : ksize(data);
>
> skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
> - if (!skb)
> + if (unlikely(!skb))
> return NULL;
>
> - size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
> -
> memset(skb, 0, offsetof(struct sk_buff, tail));
> - skb->truesize = SKB_TRUESIZE(size);
> - refcount_set(&skb->users, 1);
> - skb->head = data;
> - skb->data = data;
> - skb_reset_tail_pointer(skb);
> - skb->end = skb->tail + size;
> - skb->mac_header = (typeof(skb->mac_header))~0U;
> - skb->transport_header = (typeof(skb->transport_header))~0U;
>
> - /* make sure we initialize shinfo sequentially */
> - shinfo = skb_shinfo(skb);
> - memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
> - atomic_set(&shinfo->dataref, 1);
> -
> - return skb;
> + return __build_skb_around(skb, data, frag_size);
> }
>
> /* build_skb() is wrapper over __build_skb(), that specifically
> @@ -325,6 +335,29 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
> }
> EXPORT_SYMBOL(build_skb);
>
> +/**
> + * build_skb_around - build a network buffer around provided skb
> + * @skb: sk_buff provide by caller, must be memset cleared
> + * @data: data buffer provided by caller
> + * @frag_size: size of data, or 0 if head was kmalloced
> + */
> +struct sk_buff *build_skb_around(struct sk_buff *skb,
> + void *data, unsigned int frag_size)
> +{
> + if (unlikely(!skb))
Maybe add a warning here, indicating that the skb *must* be provided by the
caller before calling this function?
> + return NULL;
> +
> + skb = __build_skb_around(skb, data, frag_size);
> +
> + if (skb && frag_size) {
> + skb->head_frag = 1;
> + if (page_is_pfmemalloc(virt_to_head_page(data)))
> + skb->pfmemalloc = 1;
> + }
> + return skb;
> +}
> +EXPORT_SYMBOL(build_skb_around);
> +
> #define NAPI_SKB_CACHE_SIZE 64
>
> struct napi_alloc_cache {
>
/Ilias
Powered by blists - more mailing lists