Message-ID: <b55d868c-7bc1-4e25-aef4-cdf385208400@redhat.com>
Date: Thu, 30 Oct 2025 12:19:02 +0100
From: Paolo Abeni <pabeni@...hat.com>
To: Daniel Jurgens <danielj@...dia.com>, netdev@...r.kernel.org,
mst@...hat.com, jasowang@...hat.com, alex.williamson@...hat.com
Cc: virtualization@...ts.linux.dev, parav@...dia.com, shshitrit@...dia.com,
yohadt@...dia.com, xuanzhuo@...ux.alibaba.com, eperezma@...hat.com,
shameerali.kolothum.thodi@...wei.com, jgg@...pe.ca, kevin.tian@...el.com,
kuba@...nel.org, andrew+netdev@...n.ch, edumazet@...gle.com
Subject: Re: [PATCH net-next v6 05/12] virtio_net: Query and set flow filter
caps
On 10/27/25 6:39 PM, Daniel Jurgens wrote:
> When probing a virtnet device, attempt to read the flow filter
> capabilities. In order to use the feature the caps must also
> be set. For now setting what was read is sufficient.
>
> Signed-off-by: Daniel Jurgens <danielj@...dia.com>
> Reviewed-by: Parav Pandit <parav@...dia.com>
> Reviewed-by: Shahar Shitrit <shshitrit@...dia.com>
> ---
> v4:
> - Validate the length in the selector caps
> - Removed __free usage.
> - Removed for(int.
> v5:
> - Remove unneeded () after MAX_SEL_LEN macro (test bot)
> v6:
> - Fix sparse warning "array of flexible structures" Jakub K/Simon H
> - Use new variable and validate ff_mask_size before set_cap. MST
> ---
> drivers/net/virtio_net.c | 171 +++++++++++++++++++++++++++++
> include/linux/virtio_admin.h | 1 +
> include/uapi/linux/virtio_net_ff.h | 91 +++++++++++++++
> 3 files changed, 263 insertions(+)
> create mode 100644 include/uapi/linux/virtio_net_ff.h
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index a757cbcab87f..a9fde879fdbf 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -26,6 +26,9 @@
> #include <net/netdev_rx_queue.h>
> #include <net/netdev_queues.h>
> #include <net/xdp_sock_drv.h>
> +#include <linux/virtio_admin.h>
> +#include <net/ipv6.h>
> +#include <net/ip.h>
>
> static int napi_weight = NAPI_POLL_WEIGHT;
> module_param(napi_weight, int, 0444);
> @@ -281,6 +284,14 @@ static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
> VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
> };
>
> +struct virtnet_ff {
> + struct virtio_device *vdev;
> + bool ff_supported;
> + struct virtio_net_ff_cap_data *ff_caps;
> + struct virtio_net_ff_cap_mask_data *ff_mask;
> + struct virtio_net_ff_actions *ff_actions;
> +};
> +
> #define VIRTNET_Q_TYPE_RX 0
> #define VIRTNET_Q_TYPE_TX 1
> #define VIRTNET_Q_TYPE_CQ 2
> @@ -493,6 +504,8 @@ struct virtnet_info {
> struct failover *failover;
>
> u64 device_stats_cap;
> +
> + struct virtnet_ff ff;
> };
>
> struct padded_vnet_hdr {
> @@ -6753,6 +6766,160 @@ static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
> .xmo_rx_hash = virtnet_xdp_rx_hash,
> };
>
> +static size_t get_mask_size(u16 type)
> +{
> + switch (type) {
> + case VIRTIO_NET_FF_MASK_TYPE_ETH:
> + return sizeof(struct ethhdr);
> + case VIRTIO_NET_FF_MASK_TYPE_IPV4:
> + return sizeof(struct iphdr);
> + case VIRTIO_NET_FF_MASK_TYPE_IPV6:
> + return sizeof(struct ipv6hdr);
> + case VIRTIO_NET_FF_MASK_TYPE_TCP:
> + return sizeof(struct tcphdr);
> + case VIRTIO_NET_FF_MASK_TYPE_UDP:
> + return sizeof(struct udphdr);
> + }
> +
> + return 0;
> +}
> +
> +#define MAX_SEL_LEN (sizeof(struct ipv6hdr))
> +
> +static void virtnet_ff_init(struct virtnet_ff *ff, struct virtio_device *vdev)
> +{
> + size_t ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data) +
> + sizeof(struct virtio_net_ff_selector) *
> + VIRTIO_NET_FF_MASK_TYPE_MAX;
> + struct virtio_admin_cmd_query_cap_id_result *cap_id_list;
> + struct virtio_net_ff_selector *sel;
> + size_t real_ff_mask_size;
> + int err;
> + int i;
> +
> + cap_id_list = kzalloc(sizeof(*cap_id_list), GFP_KERNEL);
> + if (!cap_id_list)
> + return;
> +
> + err = virtio_admin_cap_id_list_query(vdev, cap_id_list);
> + if (err)
> + goto err_cap_list;
> +
> + if (!(VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_RESOURCE_CAP) &&
> + VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_SELECTOR_CAP) &&
> + VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_ACTION_CAP)))
> + goto err_cap_list;
> +
> + ff->ff_caps = kzalloc(sizeof(*ff->ff_caps), GFP_KERNEL);
> + if (!ff->ff_caps)
> + goto err_cap_list;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_RESOURCE_CAP,
> + ff->ff_caps,
> + sizeof(*ff->ff_caps));
> +
> + if (err)
> + goto err_ff;
> +
> + /* VIRTIO_NET_FF_MASK_TYPE start at 1 */
> + for (i = 1; i <= VIRTIO_NET_FF_MASK_TYPE_MAX; i++)
> + ff_mask_size += get_mask_size(i);
> +
> + ff->ff_mask = kzalloc(ff_mask_size, GFP_KERNEL);
> + if (!ff->ff_mask)
> + goto err_ff;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_SELECTOR_CAP,
> + ff->ff_mask,
> + ff_mask_size);
> +
> + if (err)
> + goto err_ff_mask;
> +
> + ff->ff_actions = kzalloc(sizeof(*ff->ff_actions) +
> + VIRTIO_NET_FF_ACTION_MAX,
> + GFP_KERNEL);
> + if (!ff->ff_actions)
> + goto err_ff_mask;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_ACTION_CAP,
> + ff->ff_actions,
> + sizeof(*ff->ff_actions) + VIRTIO_NET_FF_ACTION_MAX);
> +
> + if (err)
> + goto err_ff_action;
> +
> + err = virtio_admin_cap_set(vdev,
> + VIRTIO_NET_FF_RESOURCE_CAP,
> + ff->ff_caps,
> + sizeof(*ff->ff_caps));
> + if (err)
> + goto err_ff_action;
> +
> + real_ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data);
> + sel = (void *)&ff->ff_mask->selectors[0];
> +
> + for (i = 0; i < ff->ff_mask->count; i++) {
> + if (sel->length > MAX_SEL_LEN) {
> + err = -EINVAL;
> + goto err_ff_action;
> + }
> + real_ff_mask_size += sizeof(struct virtio_net_ff_selector) + sel->length;
> + sel = (void *)sel + sizeof(*sel) + sel->length;
> + }
> +
> + if (real_ff_mask_size > ff_mask_size) {
> + err = -EINVAL;
> + goto err_ff_action;
> + }
> +
> + err = virtio_admin_cap_set(vdev,
> + VIRTIO_NET_FF_SELECTOR_CAP,
> + ff->ff_mask,
> + ff_mask_size);
> + if (err)
> + goto err_ff_action;
> +
> + err = virtio_admin_cap_set(vdev,
> + VIRTIO_NET_FF_ACTION_CAP,
> + ff->ff_actions,
> + sizeof(*ff->ff_actions) + VIRTIO_NET_FF_ACTION_MAX);
> + if (err)
> + goto err_ff_action;
> +
> + ff->vdev = vdev;
> + ff->ff_supported = true;
> +
> + kfree(cap_id_list);
> +
> + return;
> +
> +err_ff_action:
> + kfree(ff->ff_actions);
> +err_ff_mask:
> + kfree(ff->ff_mask);
> +err_ff:
> + kfree(ff->ff_caps);
> +err_cap_list:
> + kfree(cap_id_list);
Minor nit: AFAICS the ff->ff_{caps,mask,actions} pointers can be left !=
NULL even after being freed. That should not cause issues at cleanup time,
since a double free is avoided via 'ff_supported'.

Still, it could confuse kmemleak checks. I think it would be better to
either clear those fields here, or set them only on success (working with
local variables up to that point), e.g. along the lines of the sketch below.
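Something like this, completely untested and just to illustrate the first
option (clearing the pointers on the error paths, reusing the labels from
the patch above):

err_ff_action:
	kfree(ff->ff_actions);
	ff->ff_actions = NULL;
err_ff_mask:
	kfree(ff->ff_mask);
	ff->ff_mask = NULL;
err_ff:
	kfree(ff->ff_caps);
	ff->ff_caps = NULL;
err_cap_list:
	kfree(cap_id_list);

The "set only on success" variant would instead keep caps/mask/actions in
local variables and assign them to ff only right before setting
'ff_supported = true'.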
Not a blocker anyway.
/P