Message-ID: <1742347325.317026-1-xuanzhuo@linux.alibaba.com>
Date: Wed, 19 Mar 2025 09:22:05 +0800
From: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
To: Akihiko Odaki <akihiko.odaki@...nix.com>
Cc: virtualization@...ts.linux.dev,
 linux-kernel@...r.kernel.org,
 netdev@...r.kernel.org,
 devel@...nix.com,
 Akihiko Odaki <akihiko.odaki@...nix.com>,
 "Michael S. Tsirkin" <mst@...hat.com>,
 Jason Wang <jasowang@...hat.com>,
 Eugenio Pérez <eperezma@...hat.com>,
 Andrew Lunn <andrew+netdev@...n.ch>,
 "David S. Miller" <davem@...emloft.net>,
 Eric Dumazet <edumazet@...gle.com>,
 Jakub Kicinski <kuba@...nel.org>,
 Paolo Abeni <pabeni@...hat.com>,
 Andrew Melnychenko <andrew@...nix.com>,
 Joe Damato <jdamato@...tly.com>,
 Philo Lu <lulie@...ux.alibaba.com>
Subject: Re: [PATCH net-next 3/4] virtio_net: Use new RSS config structs

On Tue, 18 Mar 2025 18:56:53 +0900, Akihiko Odaki <akihiko.odaki@...nix.com> wrote:
> The new RSS configuration structures allow easily constructing data for
> VIRTIO_NET_CTRL_MQ_RSS_CONFIG as they strictly follow the order of data
> for the command.
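
To make the ordering explicit, the command layout I understand these
structs to mirror is the spec's VIRTIO_NET_CTRL_MQ_RSS_CONFIG payload
(spec-style pseudo-C, my paraphrase rather than a verbatim quote):

	struct virtio_net_rss_config {
		le32 hash_types;
		le16 indirection_table_mask;
		le16 unclassified_queue;
		le16 indirection_table[indirection_table_mask + 1];
		le16 max_tx_vq;
		u8 hash_key_length;
		u8 hash_key_data[hash_key_length];
	};

so the header struct presumably covers everything up to and including
indirection_table[], and the trailer covers the rest, which is what
lets virtnet_commit_rss_command() below get by with just two sg
entries.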
>
> Signed-off-by: Akihiko Odaki <akihiko.odaki@...nix.com>
> ---
>  drivers/net/virtio_net.c | 117 +++++++++++++++++------------------------------
>  1 file changed, 43 insertions(+), 74 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index d1ed544ba03a..4153a0a5f278 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -360,24 +360,7 @@ struct receive_queue {
>  	struct xdp_buff **xsk_buffs;
>  };
>
> -/* This structure can contain rss message with maximum settings for indirection table and keysize
> - * Note, that default structure that describes RSS configuration virtio_net_rss_config
> - * contains same info but can't handle table values.
> - * In any case, structure would be passed to virtio hw through sg_buf split by parts
> - * because table sizes may be differ according to the device configuration.
> - */
>  #define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
> -struct virtio_net_ctrl_rss {
> -	__le32 hash_types;
> -	__le16 indirection_table_mask;
> -	__le16 unclassified_queue;
> -	__le16 hash_cfg_reserved; /* for HASH_CONFIG (see virtio_net_hash_config for details) */
> -	__le16 max_tx_vq;
> -	u8 hash_key_length;
> -	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
> -
> -	__le16 *indirection_table;
> -};
>
>  /* Control VQ buffers: protected by the rtnl lock */
>  struct control_buf {
> @@ -421,7 +404,9 @@ struct virtnet_info {
>  	u16 rss_indir_table_size;
>  	u32 rss_hash_types_supported;
>  	u32 rss_hash_types_saved;
> -	struct virtio_net_ctrl_rss rss;
> +	struct virtio_net_rss_config_hdr *rss_hdr;
> +	struct virtio_net_rss_config_trailer rss_trailer;
> +	u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];

Why put rss_hash_key_data outside of virtio_net_rss_config_trailer?
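
To make sure I am reading the patch right: the struct_size() call in
virtnet_rss_trailer_size() suggests the new trailer ends in a flexible
hash_key_data[] member, roughly (my reconstruction, not copied from the
series):

	struct virtio_net_rss_config_trailer {
		__le16 max_tx_vq;
		__u8 hash_key_length;
		__u8 hash_key_data[];	/* flexible array, no storage of its own */
	};

So the separate rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE] field in
virtnet_info seems to work only because it sits immediately after
rss_trailer and acts as the backing storage for that flexible array
member.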

Thanks.


>
>  	/* Has control virtqueue */
>  	bool has_cvq;
> @@ -523,23 +508,16 @@ enum virtnet_xmit_type {
>  	VIRTNET_XMIT_TYPE_XSK,
>  };
>
> -static int rss_indirection_table_alloc(struct virtio_net_ctrl_rss *rss, u16 indir_table_size)
> +static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi)
>  {
> -	if (!indir_table_size) {
> -		rss->indirection_table = NULL;
> -		return 0;
> -	}
> +	u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1;
>
> -	rss->indirection_table = kmalloc_array(indir_table_size, sizeof(u16), GFP_KERNEL);
> -	if (!rss->indirection_table)
> -		return -ENOMEM;
> -
> -	return 0;
> +	return struct_size(vi->rss_hdr, indirection_table, indir_table_size);
>  }
>
> -static void rss_indirection_table_free(struct virtio_net_ctrl_rss *rss)
> +static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi)
>  {
> -	kfree(rss->indirection_table);
> +	return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size);
>  }
>
>  /* We use the last two bits of the pointer to distinguish the xmit type. */
> @@ -3576,15 +3554,16 @@ static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pair
>
>  	for (; i < vi->rss_indir_table_size; ++i) {
>  		indir_val = ethtool_rxfh_indir_default(i, queue_pairs);
> -		vi->rss.indirection_table[i] = cpu_to_le16(indir_val);
> +		vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val);
>  	}
> -	vi->rss.max_tx_vq = cpu_to_le16(queue_pairs);
> +	vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs);
>  }
>
>  static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
>  {
>  	struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
> -	struct virtio_net_ctrl_rss old_rss;
> +	struct virtio_net_rss_config_hdr *old_rss_hdr;
> +	struct virtio_net_rss_config_trailer old_rss_trailer;
>  	struct net_device *dev = vi->dev;
>  	struct scatterlist sg;
>
> @@ -3599,24 +3578,28 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
>  	 * update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly.
>  	 */
>  	if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
> -		memcpy(&old_rss, &vi->rss, sizeof(old_rss));
> -		if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) {
> -			vi->rss.indirection_table = old_rss.indirection_table;
> +		old_rss_hdr = vi->rss_hdr;
> +		old_rss_trailer = vi->rss_trailer;
> +		vi->rss_hdr = kmalloc(virtnet_rss_hdr_size(vi), GFP_KERNEL);
> +		if (!vi->rss_hdr) {
> +			vi->rss_hdr = old_rss_hdr;
>  			return -ENOMEM;
>  		}
>
> +		*vi->rss_hdr = *old_rss_hdr;
>  		virtnet_rss_update_by_qpairs(vi, queue_pairs);
>
>  		if (!virtnet_commit_rss_command(vi)) {
>  			/* restore ctrl_rss if commit_rss_command failed */
> -			rss_indirection_table_free(&vi->rss);
> -			memcpy(&vi->rss, &old_rss, sizeof(old_rss));
> +			kfree(vi->rss_hdr);
> +			vi->rss_hdr = old_rss_hdr;
> +			vi->rss_trailer = old_rss_trailer;
>
>  			dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n",
>  				 queue_pairs);
>  			return -EINVAL;
>  		}
> -		rss_indirection_table_free(&old_rss);
> +		kfree(old_rss_hdr);
>  		goto succ;
>  	}
>
> @@ -4059,28 +4042,12 @@ static int virtnet_set_ringparam(struct net_device *dev,
>  static bool virtnet_commit_rss_command(struct virtnet_info *vi)
>  {
>  	struct net_device *dev = vi->dev;
> -	struct scatterlist sgs[4];
> -	unsigned int sg_buf_size;
> +	struct scatterlist sgs[2];
>
>  	/* prepare sgs */
> -	sg_init_table(sgs, 4);
> -
> -	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, hash_cfg_reserved);
> -	sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
> -
> -	if (vi->has_rss) {
> -		sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size;
> -		sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
> -	} else {
> -		sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t));
> -	}
> -
> -	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
> -			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
> -	sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
> -
> -	sg_buf_size = vi->rss_key_size;
> -	sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
> +	sg_init_table(sgs, 2);
> +	sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi));
> +	sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi));
>
>  	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
>  				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
> @@ -4097,17 +4064,17 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
>
>  static void virtnet_init_default_rss(struct virtnet_info *vi)
>  {
> -	vi->rss.hash_types = cpu_to_le32(vi->rss_hash_types_supported);
> +	vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported);
>  	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
> -	vi->rss.indirection_table_mask = vi->rss_indir_table_size
> +	vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size
>  						? cpu_to_le16(vi->rss_indir_table_size - 1) : 0;
> -	vi->rss.unclassified_queue = 0;
> +	vi->rss_hdr->unclassified_queue = 0;
>
>  	virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs);
>
> -	vi->rss.hash_key_length = vi->rss_key_size;
> +	vi->rss_trailer.hash_key_length = vi->rss_key_size;
>
> -	netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
> +	netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size);
>  }
>
>  static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
> @@ -4218,7 +4185,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
>
>  	if (new_hashtypes != vi->rss_hash_types_saved) {
>  		vi->rss_hash_types_saved = new_hashtypes;
> -		vi->rss.hash_types = cpu_to_le32(vi->rss_hash_types_saved);
> +		vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
>  		if (vi->dev->features & NETIF_F_RXHASH)
>  			return virtnet_commit_rss_command(vi);
>  	}
> @@ -5398,11 +5365,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
>
>  	if (rxfh->indir) {
>  		for (i = 0; i < vi->rss_indir_table_size; ++i)
> -			rxfh->indir[i] = le16_to_cpu(vi->rss.indirection_table[i]);
> +			rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]);
>  	}
>
>  	if (rxfh->key)
> -		memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
> +		memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size);
>
>  	rxfh->hfunc = ETH_RSS_HASH_TOP;
>
> @@ -5426,7 +5393,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
>  			return -EOPNOTSUPP;
>
>  		for (i = 0; i < vi->rss_indir_table_size; ++i)
> -			vi->rss.indirection_table[i] = cpu_to_le16(rxfh->indir[i]);
> +			vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]);
>  		update = true;
>  	}
>
> @@ -5438,7 +5405,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
>  		if (!vi->has_rss && !vi->has_rss_hash_report)
>  			return -EOPNOTSUPP;
>
> -		memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
> +		memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size);
>  		update = true;
>  	}
>
> @@ -6044,9 +6011,9 @@ static int virtnet_set_features(struct net_device *dev,
>
>  	if ((dev->features ^ features) & NETIF_F_RXHASH) {
>  		if (features & NETIF_F_RXHASH)
> -			vi->rss.hash_types = cpu_to_le32(vi->rss_hash_types_saved);
> +			vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
>  		else
> -			vi->rss.hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE);
> +			vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE);
>
>  		if (!virtnet_commit_rss_command(vi))
>  			return -EINVAL;
> @@ -6735,9 +6702,11 @@ static int virtnet_probe(struct virtio_device *vdev)
>  			virtio_cread16(vdev, offsetof(struct virtio_net_config,
>  				rss_max_indirection_table_length));
>  	}
> -	err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size);
> -	if (err)
> +	vi->rss_hdr = kmalloc(virtnet_rss_hdr_size(vi), GFP_KERNEL);
> +	if (!vi->rss_hdr) {
> +		err = -ENOMEM;
>  		goto free;
> +	}
>
>  	if (vi->has_rss || vi->has_rss_hash_report) {
>  		vi->rss_key_size =
> @@ -7016,7 +6985,7 @@ static void virtnet_remove(struct virtio_device *vdev)
>
>  	remove_vq_common(vi);
>
> -	rss_indirection_table_free(&vi->rss);
> +	kfree(vi->rss_hdr);
>
>  	free_netdev(vi->dev);
>  }
>
> --
> 2.48.1
>
