Message-ID: <aB1ajDUwJy6PVGEY@fedora>
Date: Fri, 9 May 2025 09:29:48 +0800
From: Ming Lei <ming.lei@...hat.com>
To: Daniel Wagner <wagi@...nel.org>
Cc: Jens Axboe <axboe@...nel.dk>, Keith Busch <kbusch@...nel.org>,
	Christoph Hellwig <hch@....de>, Sagi Grimberg <sagi@...mberg.me>,
	"Michael S. Tsirkin" <mst@...hat.com>,
	"Martin K. Petersen" <martin.petersen@...cle.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Costa Shulyupin <costa.shul@...hat.com>,
	Juri Lelli <juri.lelli@...hat.com>,
	Valentin Schneider <vschneid@...hat.com>,
	Waiman Long <llong@...hat.com>,
	Frederic Weisbecker <frederic@...nel.org>,
	Mel Gorman <mgorman@...e.de>, Hannes Reinecke <hare@...e.de>,
	Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
	linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
	linux-nvme@...ts.infradead.org, megaraidlinux.pdl@...adcom.com,
	linux-scsi@...r.kernel.org, storagedev@...rochip.com,
	virtualization@...ts.linux.dev,
	GR-QLogic-Storage-Upstream@...vell.com
Subject: Re: [PATCH v6 1/9] lib/group_cpus: let group_cpus_evenly return
 the number of initialized masks

On Thu, Apr 24, 2025 at 08:19:40PM +0200, Daniel Wagner wrote:
> group_cpus_evenly() might allocate fewer groups than requested:
> 
> group_cpu_evenly
>   __group_cpus_evenly
>     alloc_nodes_groups
>       # the number of allocated groups may be less than numgrps when
>       # the number of active CPUs is less than numgrps
> 
> In this case, the caller will do an out-of-bounds access because it
> assumes the returned masks array has numgrps entries.
> 
> Return the number of groups created so the caller can limit the access
> range accordingly.
> 
> Reviewed-by: Hannes Reinecke <hare@...e.de>
> Signed-off-by: Daniel Wagner <wagi@...nel.org>
> ---
>  block/blk-mq-cpumap.c        |  6 +++---
>  drivers/virtio/virtio_vdpa.c |  9 +++++----
>  fs/fuse/virtio_fs.c          |  6 +++---
>  include/linux/group_cpus.h   |  3 ++-
>  kernel/irq/affinity.c        |  9 +++++----
>  lib/group_cpus.c             | 12 +++++++++---
>  6 files changed, 27 insertions(+), 18 deletions(-)
> 
> diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
> index 444798c5374f48088b661b519f2638bda8556cf2..269161252add756897fce1b65cae5b2e6aebd647 100644
> --- a/block/blk-mq-cpumap.c
> +++ b/block/blk-mq-cpumap.c
> @@ -19,9 +19,9 @@
>  void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
>  {
>  	const struct cpumask *masks;
> -	unsigned int queue, cpu;
> +	unsigned int queue, cpu, nr_masks;
>  
> -	masks = group_cpus_evenly(qmap->nr_queues);
> +	masks = group_cpus_evenly(qmap->nr_queues, &nr_masks);
>  	if (!masks) {
>  		for_each_possible_cpu(cpu)
>  			qmap->mq_map[cpu] = qmap->queue_offset;
> @@ -29,7 +29,7 @@ void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
>  	}
>  
>  	for (queue = 0; queue < qmap->nr_queues; queue++) {
> -		for_each_cpu(cpu, &masks[queue])
> +		for_each_cpu(cpu, &masks[queue % nr_masks])
>  			qmap->mq_map[cpu] = qmap->queue_offset + queue;
>  	}
>  	kfree(masks);
> diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
> index 1f60c9d5cb1810a6f208c24bb2ac640d537391a0..a7b297dae4890c9d6002744b90fc133bbedb7b44 100644
> --- a/drivers/virtio/virtio_vdpa.c
> +++ b/drivers/virtio/virtio_vdpa.c
> @@ -329,20 +329,21 @@ create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
>  
>  	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
>  		unsigned int this_vecs = affd->set_size[i];
> +		unsigned int nr_masks;
>  		int j;
> -		struct cpumask *result = group_cpus_evenly(this_vecs);
> +		struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);
>  
>  		if (!result) {
>  			kfree(masks);
>  			return NULL;
>  		}
>  
> -		for (j = 0; j < this_vecs; j++)
> +		for (j = 0; j < nr_masks; j++)
>  			cpumask_copy(&masks[curvec + j], &result[j]);
>  		kfree(result);
>  
> -		curvec += this_vecs;
> -		usedvecs += this_vecs;
> +		curvec += nr_masks;
> +		usedvecs += nr_masks;
>  	}
>  
>  	/* Fill out vectors at the end that don't need affinity */
> diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
> index 2c7b24cb67adb2cb329ed545f56f04700aca8b81..7ed43b9ea4f3f8b108f1e0d7050c27267b9941c9 100644
> --- a/fs/fuse/virtio_fs.c
> +++ b/fs/fuse/virtio_fs.c
> @@ -862,7 +862,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
>  static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
>  {
>  	const struct cpumask *mask, *masks;
> -	unsigned int q, cpu;
> +	unsigned int q, cpu, nr_masks;
>  
>  	/* First attempt to map using existing transport layer affinities
>  	 * e.g. PCIe MSI-X
> @@ -882,7 +882,7 @@ static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *f
>  	return;
>  fallback:
>  	/* Attempt to map evenly in groups over the CPUs */
> -	masks = group_cpus_evenly(fs->num_request_queues);
> +	masks = group_cpus_evenly(fs->num_request_queues, &nr_masks);
>  	/* If even this fails we default to all CPUs use first request queue */
>  	if (!masks) {
>  		for_each_possible_cpu(cpu)
> @@ -891,7 +891,7 @@ static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *f
>  	}
>  
>  	for (q = 0; q < fs->num_request_queues; q++) {
> -		for_each_cpu(cpu, &masks[q])
> +		for_each_cpu(cpu, &masks[q % nr_masks])
>  			fs->mq_map[cpu] = q + VQ_REQUEST;
>  	}
>  	kfree(masks);
> diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
> index e42807ec61f6e8cf3787af7daa0d8686edfef0a3..bd5dada6e8606fa6cf8f7babf939e39fd7475c8d 100644
> --- a/include/linux/group_cpus.h
> +++ b/include/linux/group_cpus.h
> @@ -9,6 +9,7 @@
>  #include <linux/kernel.h>
>  #include <linux/cpu.h>
>  
> -struct cpumask *group_cpus_evenly(unsigned int numgrps);
> +struct cpumask *group_cpus_evenly(unsigned int numgrps,
> +				  unsigned int *nummasks);
>  
>  #endif
> diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
> index 44a4eba80315cc098ecfa366ca1d88483641b12a..d2aefab5eb2b929877ced43f48b6268098484bd7 100644
> --- a/kernel/irq/affinity.c
> +++ b/kernel/irq/affinity.c
> @@ -70,20 +70,21 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
>  	 */
>  	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
>  		unsigned int this_vecs = affd->set_size[i];
> +		unsigned int nr_masks;
>  		int j;
> -		struct cpumask *result = group_cpus_evenly(this_vecs);
> +		struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);
>  
>  		if (!result) {
>  			kfree(masks);
>  			return NULL;
>  		}
>  
> -		for (j = 0; j < this_vecs; j++)
> +		for (j = 0; j < nr_masks; j++)
>  			cpumask_copy(&masks[curvec + j].mask, &result[j]);
>  		kfree(result);
>  
> -		curvec += this_vecs;
> -		usedvecs += this_vecs;
> +		curvec += nr_masks;
> +		usedvecs += nr_masks;
>  	}
>  
>  	/* Fill out vectors at the end that don't need affinity */
> diff --git a/lib/group_cpus.c b/lib/group_cpus.c
> index ee272c4cefcc13907ce9f211f479615d2e3c9154..016c6578a07616959470b47121459a16a1bc99e5 100644
> --- a/lib/group_cpus.c
> +++ b/lib/group_cpus.c
> @@ -332,9 +332,11 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
>  /**
>   * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
>   * @numgrps: number of groups
> + * @nummasks: number of initialized cpumasks
>   *
>   * Return: cpumask array if successful, NULL otherwise. And each element
> - * includes CPUs assigned to this group
> + * includes CPUs assigned to this group. nummasks contains the number
> + * of initialized masks which can be less than numgrps.
>   *
>   * Try to put close CPUs from viewpoint of CPU and NUMA locality into
>   * same group, and run two-stage grouping:
> @@ -344,7 +346,8 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
>   * We guarantee in the resulted grouping that all CPUs are covered, and
>   * no same CPU is assigned to multiple groups
>   */
> -struct cpumask *group_cpus_evenly(unsigned int numgrps)
> +struct cpumask *group_cpus_evenly(unsigned int numgrps,
> +				  unsigned int *nummasks)
>  {
>  	unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
>  	cpumask_var_t *node_to_cpumask;
> @@ -421,10 +424,12 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
>  		kfree(masks);
>  		return NULL;
>  	}
> +	*nummasks = nr_present + nr_others;

The WARN_ON(nr_present + nr_others < numgrps) can be removed now, since the
caller learns the actual count via *nummasks and can deal with the shortfall
itself.
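
That is, if I read the current lib/group_cpus.c correctly, the hunk to drop
would be something like:

	-	if (ret >= 0)
	-		WARN_ON(nr_present + nr_others < numgrps);
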

Other than that and with Thomas's comment addressed:

Reviewed-by: Ming Lei <ming.lei@...hat.com>


Thanks,
Ming

