Message-ID: <aPvDW0o89kmtGFfH@mini-arch>
Date: Fri, 24 Oct 2025 11:20:11 -0700
From: Stanislav Fomichev <stfomichev@...il.com>
To: Daniel Borkmann <daniel@...earbox.net>
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org, kuba@...nel.org,
davem@...emloft.net, razor@...ckwall.org, pabeni@...hat.com,
willemb@...gle.com, sdf@...ichev.me, john.fastabend@...il.com,
martin.lau@...nel.org, jordan@...fe.io,
maciej.fijalkowski@...el.com, magnus.karlsson@...el.com,
dw@...idwei.uk, toke@...hat.com, yangzhenze@...edance.com,
wangdongdong.6@...edance.com
Subject: Re: [PATCH net-next v3 02/15] net: Implement
netdev_nl_bind_queue_doit
On 10/20, Daniel Borkmann wrote:
> From: David Wei <dw@...idwei.uk>
>
> Implement netdev_nl_bind_queue_doit() that creates an rx queue in a
> virtual netdev and then binds it to an rxq in a real netdev to create
> a queue pair.
>
> Example with ynl client:
>
> # ./pyynl/cli.py \
> --spec ~/netlink/specs/netdev.yaml \
> --do bind-queue \
> --json '{"src-ifindex": 4, "src-queue-id": 15, "dst-ifindex": 8, "queue-type": "rx"}'
> {'dst-queue-id': 1}
>
> Note that the netdevice locking order is always from the virtual to
> the physical device.
>
> Signed-off-by: David Wei <dw@...idwei.uk>
> Co-developed-by: Daniel Borkmann <daniel@...earbox.net>
> Signed-off-by: Daniel Borkmann <daniel@...earbox.net>
> ---
> include/net/netdev_queues.h | 5 ++
> include/net/netdev_rx_queue.h | 36 ++++++++-
> net/core/netdev-genl.c | 141 +++++++++++++++++++++++++++++++++-
> net/core/netdev_rx_queue.c | 61 +++++++++++++++
> 4 files changed, 240 insertions(+), 3 deletions(-)
>
> diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
> index cd00e0406cf4..286d5edce07d 100644
> --- a/include/net/netdev_queues.h
> +++ b/include/net/netdev_queues.h
> @@ -130,6 +130,10 @@ void netdev_stat_queue_sum(struct net_device *netdev,
> * @ndo_queue_get_dma_dev: Get dma device for zero-copy operations to be used
> * for this queue. Return NULL on error.
> *
> + * @ndo_queue_create: Create a new RX queue which can be bound to another queue.
> + * Ops on this queue are redirected to the peer queue e.g.
> + * when opening a memory provider.
> + *
> * Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
> * the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
> * be called for an interface which is open.
> @@ -149,6 +153,7 @@ struct netdev_queue_mgmt_ops {
> int idx);
> struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev,
> int idx);
> + int (*ndo_queue_create)(struct net_device *dev);
> };
>
> bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);
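As a side note, it would help to see what a driver implementation of
this op is expected to do. My mental model is something like the
sketch below (hypothetical driver, names made up by me; I'm also
assuming the op returns the new rxq index on success and a negative
errno on failure, since the doit reports a dst-queue-id):

static int mydrv_queue_create(struct net_device *dev)
{
	unsigned int new_idx = dev->real_num_rx_queues;
	int err;

	/* No room left in the statically sized rxq array. */
	if (new_idx >= dev->num_rx_queues)
		return -ENOSPC;

	/* Grow the set of active rx queues by one. The new queue
	 * owns no ring memory of its own; once peered, ops on it
	 * get redirected to the physical side.
	 */
	err = netif_set_real_num_rx_queues(dev, new_idx + 1);
	if (err)
		return err;

	return new_idx;
}

Is that roughly the contract, or is the new queue id reported some
other way?
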
> diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
> index 8cdcd138b33f..db3ef94c0744 100644
> --- a/include/net/netdev_rx_queue.h
> +++ b/include/net/netdev_rx_queue.h
> @@ -28,6 +28,7 @@ struct netdev_rx_queue {
> #endif
> struct napi_struct *napi;
> struct pp_memory_provider_params mp_params;
> + struct netdev_rx_queue *peer;
> } ____cacheline_aligned_in_smp;
>
> /*
> @@ -56,6 +57,37 @@ get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
> return index;
> }
>
> -int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
> +static inline void __netdev_rx_queue_peer(struct netdev_rx_queue *src_rxq,
> + struct netdev_rx_queue *dst_rxq)
> +{
> + src_rxq->peer = dst_rxq;
> + dst_rxq->peer = src_rxq;
> +}
>
> -#endif
> +static inline void __netdev_rx_queue_unpeer(struct netdev_rx_queue *src_rxq,
> + struct netdev_rx_queue *dst_rxq)
> +{
> + src_rxq->peer = NULL;
> + dst_rxq->peer = NULL;
> +}
> +
> +static inline bool netdev_rx_queue_peered(struct net_device *dev,
> + u16 queue_id)
> +{
> + if (queue_id < dev->real_num_rx_queues)
> + return dev->_rx[queue_id].peer;
> + return false;
> +}
> +
> +void netdev_rx_queue_peer(struct net_device *src_dev,
> + struct netdev_rx_queue *src_rxq,
> + struct netdev_rx_queue *dst_rxq);
> +void netdev_rx_queue_unpeer(struct net_device *src_dev,
> + struct netdev_rx_queue *src_rxq,
> + struct netdev_rx_queue *dst_rxq);
> +int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
> +struct netdev_rx_queue *
> +netif_get_rx_queue_peer_locked(struct net_device **dev,
> + unsigned int *rxq_idx,
> + bool *needs_unlock);
> +#endif /* _LINUX_NETDEV_RX_QUEUE_H */
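To double-check the intended calling convention for the peer lookup:
I read it as something like the (made-up) sketch below, where the
helper may rewrite *dev and *rxq_idx to the physical side and
needs_unlock says whether a second instance lock was taken. Which
device needs_unlock refers to is my guess here:

static int mp_open_on_queue(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq;
	bool needs_unlock = false;
	int err = 0;

	/* On a peered virtual rxq this resolves to the physical
	 * dev/rxq pair; from here on only the returned values may
	 * be used by the caller.
	 */
	rxq = netif_get_rx_queue_peer_locked(&dev, &rxq_idx, &needs_unlock);
	if (!rxq)
		return -EINVAL;

	/* ... operate on the (possibly physical) dev/rxq ... */

	if (needs_unlock)
		netdev_unlock(dev);
	return err;
}
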
> diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
> index ce1018ea390f..579469abac8c 100644
> --- a/net/core/netdev-genl.c
> +++ b/net/core/netdev-genl.c
> @@ -1122,7 +1122,146 @@ int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info)
>
> int netdev_nl_bind_queue_doit(struct sk_buff *skb, struct genl_info *info)
> {
> - return -EOPNOTSUPP;
> + u32 src_ifidx, src_qid, dst_ifidx, dst_qid, q_type;
> + struct netdev_rx_queue *src_rxq, *dst_rxq, *tmp_rxq;
> + struct net_device *src_dev, *dst_dev;
> + struct sk_buff *rsp;
> + int err = 0;
> + void *hdr;
> +
> + if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_PAIR_QUEUE_TYPE) ||
> + GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_PAIR_SRC_IFINDEX) ||
> + GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_PAIR_SRC_QUEUE_ID) ||
> + GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_PAIR_DST_IFINDEX))
> + return -EINVAL;
> +
> + src_ifidx = nla_get_u32(info->attrs[NETDEV_A_QUEUE_PAIR_SRC_IFINDEX]);
> + src_qid = nla_get_u32(info->attrs[NETDEV_A_QUEUE_PAIR_SRC_QUEUE_ID]);
> + dst_ifidx = nla_get_u32(info->attrs[NETDEV_A_QUEUE_PAIR_DST_IFINDEX]);
> + q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_PAIR_QUEUE_TYPE]);
> +
> + if (q_type != NETDEV_QUEUE_TYPE_RX) {
> + NL_SET_ERR_MSG(info->extack, "Only binding of RX queue supported");
> + return -EOPNOTSUPP;
> + }
> + if (dst_ifidx == src_ifidx) {
> + NL_SET_ERR_MSG(info->extack,
> + "Destination driver cannot be same as source driver");
> + return -EOPNOTSUPP;
> + }
> +
> + rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
> + if (!rsp)
> + return -ENOMEM;
> +
> + hdr = genlmsg_iput(rsp, info);
> + if (!hdr) {
> + err = -EMSGSIZE;
> + goto err_genlmsg_free;
> + }
[..]
> + /* Locking order is always from the virtual to the physical device
> + * since this is also the same order when applications open the
> + * memory provider later on.
> + */
> + dst_dev = netdev_get_by_index_lock(genl_info_net(info), dst_ifidx);
> + if (!dst_dev) {
> + err = -ENODEV;
> + goto err_genlmsg_free;
> + }
...
> + src_dev = netdev_get_by_index_lock(genl_info_net(info), src_ifidx);
> + if (!src_dev) {
> + err = -ENODEV;
> + goto err_unlock_dst_dev;
> + }
But isn't the above susceptible to an ABBA deadlock triggered from
userspace? I can issue two requests concurrently, the second one with
dst_dev and src_dev swapped. Or do we assume that the swapped case
bails out earlier based on some other condition?
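Concretely, the interleaving I have in mind:

	task A (bind D1 -> D2)          task B (bind D2 -> D1)
	lock dst_dev = D2               lock dst_dev = D1
	lock src_dev = D1 (waits on B)  lock src_dev = D2 (waits on A)

The usual remedy is a stable global lock order, e.g. by ifindex
(sketch only, and it obviously conflicts with the virtual-before-
physical order the commit message relies on, so a dedicated mutex
serializing bind/unbind might end up simpler):

static void netdev_lock_pair(struct net_device *a, struct net_device *b)
{
	/* Always take the lower-ifindex device first so that two
	 * concurrent binds with swapped roles agree on the order.
	 */
	if (a->ifindex > b->ifindex)
		swap(a, b);
	netdev_lock(a);
	netdev_lock(b);
}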