Message-ID: <ZM1p1qU/RIQYiACP@vergenet.net>
Date:   Fri, 4 Aug 2023 23:12:54 +0200
From:   Simon Horman <horms@...nel.org>
To:     "huangjie.albert" <huangjie.albert@...edance.com>
Cc:     davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org,
        pabeni@...hat.com, Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Jesper Dangaard Brouer <hawk@...nel.org>,
        John Fastabend <john.fastabend@...il.com>,
        Björn Töpel <bjorn@...nel.org>,
        Magnus Karlsson <magnus.karlsson@...el.com>,
        Maciej Fijalkowski <maciej.fijalkowski@...el.com>,
        Jonathan Lemon <jonathan.lemon@...il.com>,
        Pavel Begunkov <asml.silence@...il.com>,
        Yunsheng Lin <linyunsheng@...wei.com>,
        Kees Cook <keescook@...omium.org>,
        Richard Gobert <richardbgobert@...il.com>,
        "open list:NETWORKING DRIVERS" <netdev@...r.kernel.org>,
        open list <linux-kernel@...r.kernel.org>,
        "open list:XDP (eXpress Data Path)" <bpf@...r.kernel.org>
Subject: Re: [RFC Optimizing veth xsk performance 10/10] veth: af_xdp tx
 batch support for ipv4 udp

On Thu, Aug 03, 2023 at 10:04:36PM +0800, huangjie.albert wrote:

...

> @@ -103,6 +104,18 @@ struct veth_xdp_tx_bq {
>  	unsigned int count;
>  };
>  
> +struct veth_gso_tuple {
> +	__u8	protocol;
> +	__be32	saddr;
> +	__be32	daddr;
> +	__be16	source;
> +	__be16	dest;
> +	__be16	gso_size;
> +	__be16	gso_segs;
> +	bool gso_enable;
> +	bool gso_flush;
> +};
> +
>  struct veth_seg_info {
>  	u32 segs;
>  	u64 desc[] ____cacheline_aligned_in_smp;

...

> +static inline bool gso_segment_match(struct veth_gso_tuple *gso_tuple, struct iphdr *iph, struct udphdr *udph)
> +{
> +	if (gso_tuple->protocol == iph->protocol &&
> +		gso_tuple->saddr == iph->saddr &&
> +		gso_tuple->daddr == iph->daddr &&
> +		gso_tuple->source == udph->source &&
> +		gso_tuple->dest == udph->dest &&
> +		gso_tuple->gso_size == ntohs(udph->len))

The type of the gso_size field is __be16,
but here it is being compared against a host byte order value.
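
If the stored value is only needed for this equality check, perhaps the
simplest option (completely untested) would be to keep the field as
__be16 and drop the ntohs() on both the store and the compare, since
equality of the raw network byte order values is enough, e.g.:

		gso_tuple->gso_size == udph->len)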

> +	{
> +		gso_tuple->gso_flush = false;
> +		return true;
> +	} else {
> +		gso_tuple->gso_flush = true;
> +		return false;
> +	}
> +}
> +
> +static inline void gso_tuple_init(struct veth_gso_tuple *gso_tuple, struct iphdr *iph, struct udphdr *udph)
> +{
> +	gso_tuple->protocol = iph->protocol;
> +	gso_tuple->saddr = iph->saddr;
> +	gso_tuple->daddr = iph->daddr;
> +	gso_tuple->source = udph->source;
> +	gso_tuple->dest = udph->dest;
> +	gso_tuple->gso_flush = false;
> +	gso_tuple->gso_size = ntohs(udph->len);


Likewise, here.

As flagged by Sparse.

  .../veth.c:721:29: warning: incorrect type in assignment (different base types)
  .../veth.c:721:29:    expected restricted __be16 [usertype] gso_size
  .../veth.c:721:29:    got unsigned short [usertype]
  .../veth.c:703:26: warning: restricted __be16 degrades to integer

> +	gso_tuple->gso_segs = 0;
> +}
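
Alternatively, assuming gso_size is meant to be consumed as a host byte
order length later in the series, the field could instead be declared
as a plain u16 so the ntohs() calls above stay as they are, e.g.:

	u16	gso_size;	/* host byte order UDP length */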

...

> +static struct sk_buff *veth_build_skb_zerocopy_gso(struct net_device *dev, struct xsk_buff_pool *pool,
> +					      struct xdp_desc *desc, struct veth_gso_tuple *gso_tuple, struct sk_buff *prev_skb)

Please consider constraining line length to 80 columns.
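
e.g. something like (untested):

static struct sk_buff *
veth_build_skb_zerocopy_gso(struct net_device *dev,
			    struct xsk_buff_pool *pool,
			    struct xdp_desc *desc,
			    struct veth_gso_tuple *gso_tuple,
			    struct sk_buff *prev_skb)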

> +{
> +	u32 hr, len, ts, index, iph_len, th_len, data_offset, data_len, tot_len;
> +	struct veth_seg_info *seg_info;
> +	void *buffer;
> +	struct udphdr *udph;
> +	struct iphdr *iph;
> +	struct sk_buff *skb;
> +	struct page *page;
> +	int hh_len = 0;
> +	u64 addr;
> +
> +	addr = desc->addr;
> +	len = desc->len;
> +
> +	/* l2 reserved len */
> +	hh_len = LL_RESERVED_SPACE(dev);
> +	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(hh_len));
> +
> +	/* data points to eth header */
> +	buffer = (unsigned char *)xsk_buff_raw_get_data(pool, addr);
> +
> +	iph = (struct iphdr *)(buffer + ETH_HLEN);
> +	iph_len = iph->ihl * 4;
> +
> +	udph = (struct udphdr *)(buffer + ETH_HLEN + iph_len);
> +	th_len = sizeof(struct udphdr);
> +
> +	if (gso_tuple->gso_flush)
> +		gso_tuple_init(gso_tuple, iph, udph);
> +
> +	ts = pool->unaligned ? len : pool->chunk_size;
> +
> +	data_offset = offset_in_page(buffer) + ETH_HLEN + iph_len + th_len;
> +	data_len = len - (ETH_HLEN + iph_len + th_len);
> +
> +	/* head is null or this is a new 5 tuple */
> +	if (NULL == prev_skb || !gso_segment_match(gso_tuple, iph, udph)) {
> +		tot_len = hr + iph_len + th_len;
> +		skb = veth_build_gso_head_skb(dev, buffer, tot_len, hr, iph_len, th_len);
> +		if (!skb) {
> +			/* to do: handle here for skb */
> +			return NULL;
> +		}
> +
> +		/* store information for gso */
> +		seg_info = (struct veth_seg_info *)kmalloc(struct_size(seg_info, desc, MAX_SKB_FRAGS), GFP_KERNEL);

No need to cast the return value of kmalloc(), its type is void *.

		seg_info = kmalloc(struct_size(seg_info, desc, MAX_SKB_FRAGS),
				   GFP_KERNEL);
> +		if (!seg_info) {
> +			/* to do */
> +			kfree_skb(skb);
> +			return NULL;
> +		}

...
