Message-Id: <20230508061417.65297-16-xuanzhuo@linux.alibaba.com>
Date: Mon, 8 May 2023 14:14:17 +0800
From: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
To: netdev@...r.kernel.org
Cc: "Michael S. Tsirkin" <mst@...hat.com>, Jason Wang <jasowang@...hat.com>,
	Xuan Zhuo <xuanzhuo@...ux.alibaba.com>, "David S. Miller" <davem@...emloft.net>,
	Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>,
	Paolo Abeni <pabeni@...hat.com>, Alexei Starovoitov <ast@...nel.org>,
	Daniel Borkmann <daniel@...earbox.net>, Jesper Dangaard Brouer <hawk@...nel.org>,
	John Fastabend <john.fastabend@...il.com>,
	virtualization@...ts.linux-foundation.org, bpf@...r.kernel.org
Subject: [PATCH net-next v5 15/15] virtio_net: introduce virtnet_build_skb()

This logic is used in multiple places; separate it into a helper.

Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
Acked-by: Jason Wang <jasowang@...hat.com>
---
 drivers/net/virtio_net.c | 34 +++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 13 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 37287ede1959..97241006d64a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -443,6 +443,22 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
 	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
 }
 
+static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
+					 unsigned int headroom,
+					 unsigned int len)
+{
+	struct sk_buff *skb;
+
+	skb = build_skb(buf, buflen);
+	if (unlikely(!skb))
+		return NULL;
+
+	skb_reserve(skb, headroom);
+	skb_put(skb, len);
+
+	return skb;
+}
+
 /* Called from bottom half context */
 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 				   struct receive_queue *rq,
@@ -476,13 +492,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 
 	/* copy small packet so we can reuse these pages */
 	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
-		skb = build_skb(buf, truesize);
+		skb = virtnet_build_skb(buf, truesize, p - buf, len);
 		if (unlikely(!skb))
 			return NULL;
 
-		skb_reserve(skb, p - buf);
-		skb_put(skb, len);
-
 		page = (struct page *)page->private;
 		if (page)
 			give_pages(rq, page);
@@ -946,13 +959,10 @@ static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
 	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-	skb = build_skb(buf, buflen);
-	if (!skb)
+	skb = virtnet_build_skb(buf, buflen, headroom, len);
+	if (unlikely(!skb))
 		return NULL;
 
-	skb_reserve(skb, headroom);
-	skb_put(skb, len);
-
 	buf += header_offset;
 	memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
 
@@ -1028,12 +1038,10 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
 		goto err_xdp;
 	}
 
-	skb = build_skb(buf, buflen);
-	if (!skb)
+	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
+	if (unlikely(!skb))
 		goto err;
 
-	skb_reserve(skb, xdp.data - buf);
-	skb_put(skb, len);
 	if (metasize)
 		skb_metadata_set(skb, metasize);
 
--
2.32.0.3.g01195cf9f
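
As a quick illustration of the refactor, the sketch below shows how a receive path inside drivers/net/virtio_net.c could use the new helper. It is a minimal sketch, not part of the patch: the caller name example_small_buf_to_skb() is hypothetical, and the buffer sizing merely mirrors the receive_small_build_skb() hunk above.

#include <linux/skbuff.h>

/*
 * Minimal sketch, not from the patch: a hypothetical caller inside
 * drivers/net/virtio_net.c. The sizing mirrors receive_small_build_skb()
 * in the diff above; only virtnet_build_skb() comes from the patch.
 */
static struct sk_buff *example_small_buf_to_skb(void *buf,
						unsigned int headroom,
						unsigned int len)
{
	unsigned int buflen;

	/* Room for headroom + payload, plus the skb_shared_info that
	 * build_skb() places at the tail of the buffer.
	 */
	buflen = SKB_DATA_ALIGN(headroom + len) +
		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* One call replaces the open-coded
	 * build_skb() + skb_reserve() + skb_put() sequence.
	 */
	return virtnet_build_skb(buf, buflen, headroom, len);
}

Callers still check the helper's return value for NULL; the helper only centralizes the build_skb()/skb_reserve()/skb_put() sequence that the three hunks above previously open-coded.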