Message-ID: <20190321033941.bygpejl2krltkdmm@ast-mbp.dhcp.thefacebook.com>
Date:   Wed, 20 Mar 2019 20:39:42 -0700
From:   Alexei Starovoitov <alexei.starovoitov@...il.com>
To:     Stanislav Fomichev <sdf@...gle.com>
Cc:     netdev@...r.kernel.org, bpf@...r.kernel.org, davem@...emloft.net,
        ast@...nel.org, daniel@...earbox.net, simon.horman@...ronome.com,
        willemb@...gle.com, peterpenkov96@...il.com
Subject: Re: [RFC bpf-next v2 1/9] net: introduce __init_skb{,_data,_shinfo}
 helpers

On Tue, Mar 19, 2019 at 03:19:40PM -0700, Stanislav Fomichev wrote:
> __init_skb is essentially a version of __build_skb which accepts skb as
> an argument (instead of doing kmem_cache_alloc to allocate it).
> 
> __init_skb_shinfo initializes shinfo.
> 
> __init_skb_data initializes skb data pointers.
> 
> No functional changes.
> 
> Signed-off-by: Stanislav Fomichev <sdf@...gle.com>
> ---
>  include/linux/skbuff.h | 14 ++++++++++
>  net/core/skbuff.c      | 60 +++++++++++++++++-------------------------
>  2 files changed, 38 insertions(+), 36 deletions(-)
> 
> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> index 9027a8c4219f..e8c1d5b97f96 100644
> --- a/include/linux/skbuff.h
> +++ b/include/linux/skbuff.h
> @@ -4399,5 +4399,19 @@ static inline __wsum lco_csum(struct sk_buff *skb)
>  	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
>  }
>  
> +static inline void __init_skb_data(struct sk_buff *skb, u8 *data,
> +				   unsigned int size)
> +{
> +	/* Account for allocated memory : skb + skb->head */
> +	skb->truesize = SKB_TRUESIZE(size);
> +	refcount_set(&skb->users, 1);
> +	skb->head = data;
> +	skb->data = data;
> +	skb_reset_tail_pointer(skb);
> +	skb->end = skb->tail + size;
> +	skb->mac_header = (typeof(skb->mac_header))~0U;
> +	skb->transport_header = (typeof(skb->transport_header))~0U;
> +}
> +
>  #endif	/* __KERNEL__ */
>  #endif	/* _LINUX_SKBUFF_H */
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index 2415d9cb9b89..b413354ee709 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -160,6 +160,26 @@ static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
>   *
>   */
>  
> +static inline void __init_skb(struct sk_buff *skb, u8 *data, unsigned int size)
> +{
> +	/* Only clear those fields we need to clear, not those that we will
> +	 * actually initialize below. Hence, don't put any more fields after
> +	 * the tail pointer in struct sk_buff!
> +	 */
> +	memset(skb, 0, offsetof(struct sk_buff, tail));
> +	__init_skb_data(skb, data, size);
> +}
> +
> +static inline void __init_skb_shinfo(struct sk_buff *skb)
> +{
> +	struct skb_shared_info *shinfo;
> +
> +	/* make sure we initialize shinfo sequentially */
> +	shinfo = skb_shinfo(skb);
> +	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
> +	atomic_set(&shinfo->dataref, 1);
> +}
> +
>  /**
>   *	__alloc_skb	-	allocate a network buffer
>   *	@size: size to allocate
> @@ -181,7 +201,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
>  			    int flags, int node)
>  {
>  	struct kmem_cache *cache;
> -	struct skb_shared_info *shinfo;
>  	struct sk_buff *skb;
>  	u8 *data;
>  	bool pfmemalloc;
> @@ -215,27 +234,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
>  	size = SKB_WITH_OVERHEAD(ksize(data));
>  	prefetchw(data + size);
>  
> -	/*
> -	 * Only clear those fields we need to clear, not those that we will
> -	 * actually initialise below. Hence, don't put any more fields after
> -	 * the tail pointer in struct sk_buff!
> -	 */
> -	memset(skb, 0, offsetof(struct sk_buff, tail));
> -	/* Account for allocated memory : skb + skb->head */
> -	skb->truesize = SKB_TRUESIZE(size);
> +	__init_skb(skb, data, size);
> +	__init_skb_shinfo(skb);
>  	skb->pfmemalloc = pfmemalloc;
> -	refcount_set(&skb->users, 1);
> -	skb->head = data;
> -	skb->data = data;
> -	skb_reset_tail_pointer(skb);
> -	skb->end = skb->tail + size;
> -	skb->mac_header = (typeof(skb->mac_header))~0U;
> -	skb->transport_header = (typeof(skb->transport_header))~0U;
> -
> -	/* make sure we initialize shinfo sequentially */
> -	shinfo = skb_shinfo(skb);
> -	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
> -	atomic_set(&shinfo->dataref, 1);
>  
>  	if (flags & SKB_ALLOC_FCLONE) {
>  		struct sk_buff_fclones *fclones;
> @@ -277,7 +278,6 @@ EXPORT_SYMBOL(__alloc_skb);
>   */
>  struct sk_buff *__build_skb(void *data, unsigned int frag_size)
>  {
> -	struct skb_shared_info *shinfo;
>  	struct sk_buff *skb;
>  	unsigned int size = frag_size ? : ksize(data);
>  
> @@ -287,20 +287,8 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
>  
>  	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
>  
> -	memset(skb, 0, offsetof(struct sk_buff, tail));
> -	skb->truesize = SKB_TRUESIZE(size);
> -	refcount_set(&skb->users, 1);
> -	skb->head = data;
> -	skb->data = data;
> -	skb_reset_tail_pointer(skb);
> -	skb->end = skb->tail + size;
> -	skb->mac_header = (typeof(skb->mac_header))~0U;
> -	skb->transport_header = (typeof(skb->transport_header))~0U;
> -
> -	/* make sure we initialize shinfo sequentially */
> -	shinfo = skb_shinfo(skb);
> -	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
> -	atomic_set(&shinfo->dataref, 1);
> +	__init_skb(skb, data, size);
> +	__init_skb_shinfo(skb);

I think you need to convince Dave and Eric that
the above surgery is necessary to do the hack in patch 6 with
+static DEFINE_PER_CPU(struct sk_buff, bpf_flow_skb);
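
For reference, a minimal sketch (not the patch 6 code, which is not
quoted here) of how such a per-CPU skb could be rebuilt around a
caller-supplied buffer with the helpers introduced above, assuming
they are made visible to the flow-dissector code:

	/* Hypothetical illustration only. The buffer must be laid out as
	 * in __build_skb(): 'size' bytes of packet data followed by room
	 * for struct skb_shared_info. The caller must keep preemption off
	 * so the per-CPU skb is not reused concurrently.
	 */
	static int flow_dissect_on_percpu_skb(u8 *data, unsigned int size)
	{
		struct sk_buff *skb = this_cpu_ptr(&bpf_flow_skb);

		__init_skb(skb, data, size);	/* memset + head/data/tail/end */
		__init_skb_shinfo(skb);		/* zero shinfo, dataref = 1 */

		/* ... run the BPF flow dissector program on skb ... */
		return 0;
	}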

I think the better option is to introduce a new prog type that works
without an skb. I think it can be pretty close in shape and form to XDP.
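
To make that concrete, an skb-less program type could take a small
XDP-style context instead of a struct sk_buff, roughly along these
lines (field names are hypothetical, sketched after struct xdp_buff):

	/* Hypothetical context for an skb-less flow dissector: direct
	 * packet pointers, no sk_buff construction per invocation.
	 */
	struct bpf_flow_dissect_ctx {
		void	*data;		/* start of packet headers */
		void	*data_end;	/* end of linear packet data */
		u16	nhoff;		/* current parse offset into data */
		__be16	protocol;	/* initial L3 protocol */
	};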
