Message-ID: <aPuANsZ6_xj8YY3D@horms.kernel.org>
Date: Fri, 24 Oct 2025 14:33:42 +0100
From: Simon Horman <horms@...nel.org>
To: Jason Xing <kerneljasonxing@...il.com>
Cc: davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org,
pabeni@...hat.com, bjorn@...nel.org, magnus.karlsson@...el.com,
maciej.fijalkowski@...el.com, jonathan.lemon@...il.com,
sdf@...ichev.me, ast@...nel.org, daniel@...earbox.net,
hawk@...nel.org, john.fastabend@...il.com, joe@...a.to,
willemdebruijn.kernel@...il.com, bpf@...r.kernel.org,
netdev@...r.kernel.org, Jason Xing <kernelxing@...cent.com>
Subject: Re: [PATCH net-next v3 3/9] xsk: add xsk_alloc_batch_skb() to build
skbs in batch
On Tue, Oct 21, 2025 at 09:12:03PM +0800, Jason Xing wrote:
...
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
...
> @@ -615,6 +617,105 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
> return obj;
> }
>
> +int xsk_alloc_batch_skb(struct xdp_sock *xs, u32 nb_pkts, u32 nb_descs, int *err)
> +{
> + struct xsk_batch *batch = &xs->batch;
> + struct xdp_desc *descs = batch->desc_cache;
> + struct sk_buff **skbs = batch->skb_cache;
> + gfp_t gfp_mask = xs->sk.sk_allocation;
> + struct net_device *dev = xs->dev;
> + int node = NUMA_NO_NODE;
> + struct sk_buff *skb;
> + u32 i = 0, j = 0;
> + bool pfmemalloc;
> + u32 base_len;
> + u8 *data;
> +
> + base_len = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
> + if (!(dev->priv_flags & IFF_TX_SKB_NO_LINEAR))
> + base_len += dev->needed_tailroom;
> +
> + if (batch->skb_count >= nb_pkts)
> + goto build;
> +
> + if (xs->skb) {
> + i = 1;
> + batch->skb_count++;
> + }
> +
> + batch->skb_count += kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
> + gfp_mask, nb_pkts - batch->skb_count,
> + (void **)&skbs[batch->skb_count]);
> + if (batch->skb_count < nb_pkts)
> + nb_pkts = batch->skb_count;
> +
> +build:
> + for (i = 0, j = 0; j < nb_descs; j++) {
> + if (!xs->skb) {
> + u32 size = base_len + descs[j].len;
> +
> + /* In case we don't have enough allocated skbs */
> + if (i >= nb_pkts) {
> + *err = -EAGAIN;
> + break;
> + }
> +
> + if (sk_wmem_alloc_get(&xs->sk) > READ_ONCE(xs->sk.sk_sndbuf)) {
> + *err = -EAGAIN;
> + break;
> + }
> +
> + skb = skbs[batch->skb_count - 1 - i];
> +
> + prefetchw(skb);
> + /* We do our best to align skb_shared_info on a separate cache
> + * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
> + * aligned memory blocks, unless SLUB/SLAB debug is enabled.
> + * Both skb->head and skb_shared_info are cache line aligned.
> + */
> + data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
> + if (unlikely(!data)) {
> + *err = -ENOBUFS;
> + break;
> + }
> + /* kmalloc_size_roundup() might give us more room than requested.
> + * Put skb_shared_info exactly at the end of allocated zone,
> + * to allow max possible filling before reallocation.
> + */
> + prefetchw(data + SKB_WITH_OVERHEAD(size));
> +
> + memset(skb, 0, offsetof(struct sk_buff, tail));
> + __build_skb_around(skb, data, size);
> + skb->pfmemalloc = pfmemalloc;
> + skb_set_owner_w(skb, &xs->sk);
> + } else if (unlikely(i == 0)) {
> + /* We have a skb in cache that is left last time */
> + kmem_cache_free(net_hotdata.skbuff_cache,
> + skbs[batch->skb_count - 1]);
> + skbs[batch->skb_count - 1] = xs->skb;
> + }
> +
> + skb = xsk_build_skb(xs, skb, &descs[j]);
Hi Jason,

Perhaps it cannot occur, but if we reach this line without the
if (!xs->skb) condition having been met on any iteration of the
loop this code sits inside, then skb will be uninitialised here.

Also, assuming the above doesn't occur, and perhaps this next
case is intentional, but if the condition is not met on a later
iteration, then skb will carry over its value from a prior
iteration.

Flagged by Smatch.
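If it would help, one way to make the data flow explicit might be
to initialise skb and to pick up the in-progress xs->skb by hand.
An untested sketch, assuming xsk_build_skb() is meant to continue
building xs->skb when it is set:

	struct sk_buff *skb = NULL;	/* no uninitialised use possible */
	...
	for (i = 0, j = 0; j < nb_descs; j++) {
		if (!xs->skb) {
			...
		} else {
			/* Continue the multi-buffer skb left over
			 * from the previous call.
			 */
			skb = xs->skb;
			if (unlikely(i == 0)) {
				kmem_cache_free(net_hotdata.skbuff_cache,
						skbs[batch->skb_count - 1]);
				skbs[batch->skb_count - 1] = xs->skb;
			}
		}

		skb = xsk_build_skb(xs, skb, &descs[j]);
	...

That way skb always has a well defined value by the time it
reaches xsk_build_skb().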
> + if (IS_ERR(skb)) {
> + *err = PTR_ERR(skb);
> + break;
> + }
> +
> + if (xp_mb_desc(&descs[j])) {
> + xs->skb = skb;
> + continue;
> + }
> +
> + xs->skb = NULL;
> + i++;
> + __skb_queue_tail(&batch->send_queue, skb);
> + }
> +
> + batch->skb_count -= i;
> +
> + return j;
> +}