[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <fceb6891-2749-496a-a6d5-db0748728e8a@wanadoo.fr>
Date: Sat, 26 Apr 2025 17:12:58 +0200
From: Christophe JAILLET <christophe.jaillet@...adoo.fr>
To: Mina Almasry <almasrymina@...gle.com>
Cc: andrew+netdev@...n.ch, asml.silence@...il.com, axboe@...nel.dk,
corbet@....net, davem@...emloft.net, donald.hunter@...il.com,
dsahern@...nel.org, dw@...idwei.uk, edumazet@...gle.com,
eperezma@...hat.com, horms@...nel.org, hramamurthy@...gle.com,
io-uring@...r.kernel.org, jasowang@...hat.com, jeroendb@...gle.com,
jhs@...atatu.com, kaiyuanz@...gle.com, kuba@...nel.org, kuniyu@...zon.com,
kvm@...r.kernel.org, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-kselftest@...r.kernel.org,
mst@...hat.com, ncardwell@...gle.com, netdev@...r.kernel.org,
pabeni@...hat.com, pctammela@...atatu.com, sdf@...ichev.me,
sgarzare@...hat.com, shuah@...nel.org, skhawaja@...gle.com,
stefanha@...hat.com, victor@...atatu.com, virtualization@...ts.linux.dev,
willemb@...gle.com, xuanzhuo@...ux.alibaba.com
Subject: Re: [PATCH net-next v12 4/9] net: devmem: Implement TX path
Le 25/04/2025 à 22:47, Mina Almasry a écrit :
> Augment dmabuf binding to be able to handle TX. Additional to all the RX
> binding, we also create tx_vec needed for the TX path.
>
> Provide API for sendmsg to be able to send dmabufs bound to this device:
>
> - Provide a new dmabuf_tx_cmsg which includes the dmabuf to send from.
> - MSG_ZEROCOPY with SCM_DEVMEM_DMABUF cmsg indicates send from dma-buf.
>
> Devmem is uncopyable, so piggyback off the existing MSG_ZEROCOPY
> implementation, while disabling instances where MSG_ZEROCOPY falls back
> to copying.
...
> @@ -270,24 +284,34 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
> niov->owner = &owner->area;
> page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
> net_devmem_get_dma_addr(niov));
> + if (direction == DMA_TO_DEVICE)
> + binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov;
> }
>
> virtual += len;
> }
>
> + err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
> + binding, xa_limit_32b, &id_alloc_next,
> + GFP_KERNEL);
> + if (err < 0)
> + goto err_free_id;
> +
> return binding;
>
> +err_free_id:
> + xa_erase(&net_devmem_dmabuf_bindings, binding->id);
Not sure this is correct now that xa_alloc_cyclic() is the last function
called: if it fails, the id was never inserted, so there is nothing to
xa_erase(). I guess the last goto should be to err_free_chunks instead.
> err_free_chunks:
> gen_pool_for_each_chunk(binding->chunk_pool,
> net_devmem_dmabuf_free_chunk_owner, NULL);
> gen_pool_destroy(binding->chunk_pool);
> +err_tx_vec:
> + kvfree(binding->tx_vec);
> err_unmap:
> dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
> DMA_FROM_DEVICE);
> err_detach:
> dma_buf_detach(dmabuf, binding->attachment);
> -err_free_id:
> - xa_erase(&net_devmem_dmabuf_bindings, binding->id);
> err_free_binding:
> kfree(binding);
> err_put_dmabuf:
...
> diff --git a/net/core/sock.c b/net/core/sock.c
> index b64df2463300b..9dd2989040357 100644
> --- a/net/core/sock.c
> +++ b/net/core/sock.c
> @@ -3017,6 +3017,12 @@ int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
> if (!sk_set_prio_allowed(sk, *(u32 *)CMSG_DATA(cmsg)))
> return -EPERM;
> sockc->priority = *(u32 *)CMSG_DATA(cmsg);
> + break;
> + case SCM_DEVMEM_DMABUF:
> + if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
> + return -EINVAL;
> + sockc->dmabuf_id = *(u32 *)CMSG_DATA(cmsg);
> +
Nitpick: unneeded blank line; removing it would be consistent with the surrounding code.
> break;
> default:
> return -EINVAL;
...
CJ
Powered by blists - more mailing lists