lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Mon, 10 Aug 2020 13:02:43 -0600
From:   David Ahern <dsahern@...il.com>
To:     Harshitha Ramamurthy <harshitha.ramamurthy@...el.com>,
        bpf@...r.kernel.org, netdev@...r.kernel.org, ast@...nel.org,
        daniel@...earbox.net, davem@...emloft.net, kuba@...nel.org,
        kafai@...com, songliubraving@...com, yhs@...com, andriin@...com,
        john.fastabend@...il.com, kpsingh@...omium.org, hawk@...nel.org
Cc:     tom.herbert@...el.com, jacob.e.keller@...el.com,
        alexander.h.duyck@...el.com, carolyn.wyborny@...el.com
Subject: Re: [RFC PATCH bpf-next] bpf: add bpf_get_skb_hash helper function

On 8/10/20 12:28 PM, Harshitha Ramamurthy wrote:
> This patch adds a helper function called bpf_get_skb_hash to calculate
> the skb hash for a packet at the XDP layer. In the helper function,

Why? I.e., what is the expected use case?

Pulling this from hardware when possible is better; e.g., Saeed's
hardware-hints proposal includes it.

> a local skb is allocated and we populate the fields needed in the skb
> before calling skb_get_hash. To avoid memory allocations for each packet,
> we allocate an skb per CPU and use the same buffer for subsequent hash
> calculations on the same CPU.
> 
> Signed-off-by: Harshitha Ramamurthy <harshitha.ramamurthy@...el.com>
> ---
>  include/uapi/linux/bpf.h       |  8 ++++++
>  net/core/filter.c              | 50 ++++++++++++++++++++++++++++++++++
>  tools/include/uapi/linux/bpf.h |  8 ++++++
>  3 files changed, 66 insertions(+)
> 
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index b134e679e9db..25aa850c8a40 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -3394,6 +3394,13 @@ union bpf_attr {
>   *		A non-negative value equal to or less than *size* on success,
>   *		or a negative error in case of failure.
>   *
> + * u32 bpf_get_skb_hash(struct xdp_buff *xdp_md)
> + *	Description
> + *		Return the skb hash for the xdp context passed. This function
> + *		allocates a temporary skb and populates the fields needed. It
> + *		then calls skb_get_hash to calculate the skb hash for the packet.
> + *	Return
> + *		The 32-bit hash.
>   */
>  #define __BPF_FUNC_MAPPER(FN)		\
>  	FN(unspec),			\
> @@ -3538,6 +3545,7 @@ union bpf_attr {
>  	FN(skc_to_tcp_request_sock),	\
>  	FN(skc_to_udp6_sock),		\
>  	FN(get_task_stack),		\
> +	FN(get_skb_hash),		\
>  	/* */
>  
>  /* integer value in 'imm' field of BPF_CALL instruction selects which helper
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 7124f0fe6974..9f6ad7209b44 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -3765,6 +3765,54 @@ static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
>  	.arg3_type      = ARG_ANYTHING,
>  };
>  
> +static DEFINE_PER_CPU(struct sk_buff *, hash_skb);
> +
> +BPF_CALL_1(bpf_get_skb_hash, struct xdp_buff *, xdp)
> +{
> +	void *data_end = xdp->data_end;
> +	struct ethhdr *eth = xdp->data;
> +	void *data = xdp->data;
> +	unsigned long flags;
> +	struct sk_buff *skb;
> +	int nh_off, len;
> +	u32 ret = 0;
> +
> +	/* disable interrupts to get the correct skb pointer */
> +	local_irq_save(flags);
> +
> +	len = data_end - data;
> +	skb = this_cpu_read(hash_skb);
> +	if (!skb) {
> +		skb = alloc_skb(len, GFP_ATOMIC);
> +		if (!skb)
> +			goto out;
> +		this_cpu_write(hash_skb, skb);
> +	}
> +
> +	nh_off = sizeof(*eth);

vlans?

> +	if (data + nh_off > data_end)
> +		goto out;
> +
> +	skb->data = data;
> +	skb->head = data;
> +	skb->network_header = nh_off;
> +	skb->protocol = eth->h_proto;
> +	skb->len = len;
> +	skb->dev = xdp->rxq->dev;
> +
> +	ret = skb_get_hash(skb);

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
        if (!skb->l4_hash && !skb->sw_hash)
                __skb_get_hash(skb);

        return skb->hash;
}

__skb_get_hash -> __skb_set_sw_hash -> __skb_set_hash, which sets
sw_hash at a minimum. So it seems to me you will always return the
hash of the first packet, since you never clear the relevant fields of
the cached skb before reusing it.

Powered by blists - more mailing lists