Date:   Wed, 28 Aug 2019 10:30:48 +0800
From:   kbuild test robot <lkp@...el.com>
To:     Ming Lei <ming.lei@...hat.com>
Cc:     kbuild-all@...org, linux-kernel@...r.kernel.org,
        tipbuild@...or.com, Thomas Gleixner <tglx@...utronix.de>,
        Keith Busch <kbusch@...nel.org>,
        Jon Derrick <jonathan.derrick@...el.com>
Subject: [tip:irq/core 3/3] kernel/irq/affinity.c:287:31: warning: passing
 argument 2 of 'alloc_nodes_vectors' from incompatible pointer type

tree:   https://kernel.googlesource.com/pub/scm/linux/kernel/git/tip/tip.git irq/core
head:   b1a5a73e64e99faa5f4deef2ae96d7371a0fb5d0
commit: b1a5a73e64e99faa5f4deef2ae96d7371a0fb5d0 [3/3] genirq/affinity: Spread vectors on node according to nr_cpu ratio
config: x86_64-randconfig-a002-201934 (attached as .config)
compiler: gcc-4.9 (Debian 4.9.2-10+deb8u1) 4.9.2
reproduce:
        git checkout b1a5a73e64e99faa5f4deef2ae96d7371a0fb5d0
        # save the attached .config to the linux build tree
        make ARCH=x86_64 

If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot <lkp@...el.com>

All warnings (new ones prefixed by >>):

   kernel/irq/affinity.c: In function '__irq_build_affinity_masks':
>> kernel/irq/affinity.c:287:31: warning: passing argument 2 of 'alloc_nodes_vectors' from incompatible pointer type
     alloc_nodes_vectors(numvecs, node_to_cpumask, cpu_mask,
                                  ^
   kernel/irq/affinity.c:128:13: note: expected 'const struct cpumask (*)[1]' but argument is of type 'struct cpumask (*)[1]'
    static void alloc_nodes_vectors(unsigned int numvecs,
                ^
   Cyclomatic Complexity 2 arch/x86/include/asm/bitops.h:arch_set_bit
   Cyclomatic Complexity 2 arch/x86/include/asm/bitops.h:arch_clear_bit
   Cyclomatic Complexity 2 arch/x86/include/asm/bitops.h:arch_test_and_clear_bit
   Cyclomatic Complexity 1 arch/x86/include/asm/bitops.h:fls64
   Cyclomatic Complexity 1 arch/x86/include/asm/arch_hweight.h:__arch_hweight64
   Cyclomatic Complexity 1 include/asm-generic/bitops-instrumented.h:set_bit
   Cyclomatic Complexity 1 include/asm-generic/bitops-instrumented.h:clear_bit
   Cyclomatic Complexity 1 include/asm-generic/bitops-instrumented.h:test_and_clear_bit
   Cyclomatic Complexity 2 include/linux/bitops.h:hweight_long
   Cyclomatic Complexity 1 include/linux/log2.h:__ilog2_u64
   Cyclomatic Complexity 1 include/linux/bitmap.h:bitmap_zero
   Cyclomatic Complexity 1 include/linux/bitmap.h:bitmap_copy
   Cyclomatic Complexity 3 include/linux/bitmap.h:bitmap_and
   Cyclomatic Complexity 3 include/linux/bitmap.h:bitmap_or
   Cyclomatic Complexity 3 include/linux/bitmap.h:bitmap_andnot
   Cyclomatic Complexity 3 include/linux/bitmap.h:bitmap_intersects
   Cyclomatic Complexity 3 include/linux/bitmap.h:bitmap_weight
   Cyclomatic Complexity 2 include/linux/cpumask.h:cpu_max_bits_warn
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_check
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_first
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_set_cpu
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_clear_cpu
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_test_and_clear_cpu
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_clear
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_and
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_or
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_andnot
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_intersects
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_weight
   Cyclomatic Complexity 1 include/linux/cpumask.h:cpumask_copy
   Cyclomatic Complexity 1 include/linux/cpumask.h:zalloc_cpumask_var
   Cyclomatic Complexity 1 include/linux/cpumask.h:free_cpumask_var
   Cyclomatic Complexity 69 include/asm-generic/getorder.h:get_order
   Cyclomatic Complexity 1 include/linux/nodemask.h:__node_set
   Cyclomatic Complexity 1 include/linux/nodemask.h:__first_node
   Cyclomatic Complexity 1 include/linux/nodemask.h:__next_node
   Cyclomatic Complexity 1 include/linux/slab.h:kmalloc_type
   Cyclomatic Complexity 28 include/linux/slab.h:kmalloc_index
   Cyclomatic Complexity 1 include/linux/slab.h:kmalloc_large
   Cyclomatic Complexity 4 include/linux/slab.h:kmalloc
   Cyclomatic Complexity 9 include/linux/slab.h:kmalloc_array
   Cyclomatic Complexity 1 include/linux/slab.h:kcalloc
   Cyclomatic Complexity 1 include/linux/cpu.h:get_online_cpus
   Cyclomatic Complexity 1 include/linux/cpu.h:put_online_cpus
   Cyclomatic Complexity 3 kernel/irq/affinity.c:get_nodes_in_cpumask
   Cyclomatic Complexity 1 kernel/irq/affinity.c:ncpus_cmp_func
   Cyclomatic Complexity 1 kernel/irq/affinity.c:default_calc_sets
   Cyclomatic Complexity 5 kernel/irq/affinity.c:alloc_node_to_cpumask
   Cyclomatic Complexity 2 kernel/irq/affinity.c:free_node_to_cpumask
   Cyclomatic Complexity 2 kernel/irq/affinity.c:build_node_to_cpumask
   Cyclomatic Complexity 6 kernel/irq/affinity.c:irq_spread_init_one
   Cyclomatic Complexity 8 kernel/irq/affinity.c:alloc_nodes_vectors
   Cyclomatic Complexity 13 kernel/irq/affinity.c:__irq_build_affinity_masks
   Cyclomatic Complexity 9 kernel/irq/affinity.c:irq_build_affinity_masks
   Cyclomatic Complexity 13 kernel/irq/affinity.c:irq_create_affinity_masks
   Cyclomatic Complexity 3 kernel/irq/affinity.c:irq_calc_affinity_vectors
   Cyclomatic Complexity 1 kernel/irq/affinity.c:_GLOBAL__sub_I_65535_0_irq_create_affinity_masks
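
The warning at the top of the log concerns the call to alloc_nodes_vectors() at
kernel/irq/affinity.c:287. The "(*)[1]" in the note shows that on this randconfig
cpumask_var_t expands to the array type struct cpumask[1], so the caller's
node_to_cpumask has type 'struct cpumask (*)[1]' while the callee's parameter is
declared via 'const cpumask_var_t *', i.e. 'const struct cpumask (*)[1]'. Older
compilers such as the gcc-4.9 used here do not accept adding 'const' to the array
element type through a pointer-to-array conversion and report it as an incompatible
pointer type. A minimal stand-alone sketch of the same mismatch, with hypothetical
names unrelated to the kernel code:

/*
 * Hypothetical example, not the kernel code: reproduces the same class of
 * diagnostic with gcc-4.9 -Wall.  'mask_t' plays the role of cpumask_var_t
 * (an array type), so 'm' decays to 'int (*)[1]' while the parameter is
 * 'const int (*)[1]'.
 */
typedef int mask_t[1];

static int sum(const mask_t *masks, int n)
{
	int i, s = 0;

	for (i = 0; i < n; i++)
		s += masks[i][0];
	return s;
}

int main(void)
{
	mask_t m[2] = { { 1 }, { 2 } };

	/* gcc-4.9: warning: passing argument 1 of 'sum' from incompatible pointer type */
	return sum(m, 2);
}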

vim +/alloc_nodes_vectors +287 kernel/irq/affinity.c

   246	
   247	static int __irq_build_affinity_masks(unsigned int startvec,
   248					      unsigned int numvecs,
   249					      unsigned int firstvec,
   250					      cpumask_var_t *node_to_cpumask,
   251					      const struct cpumask *cpu_mask,
   252					      struct cpumask *nmsk,
   253					      struct irq_affinity_desc *masks)
   254	{
   255		unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0;
   256		unsigned int last_affv = firstvec + numvecs;
   257		unsigned int curvec = startvec;
   258		nodemask_t nodemsk = NODE_MASK_NONE;
   259		struct node_vectors *node_vectors;
   260	
   261		if (!cpumask_weight(cpu_mask))
   262			return 0;
   263	
   264		nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
   265	
   266		/*
   267		 * If the number of nodes in the mask is greater than or equal the
   268		 * number of vectors we just spread the vectors across the nodes.
   269		 */
   270		if (numvecs <= nodes) {
   271			for_each_node_mask(n, nodemsk) {
   272				cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
   273					   node_to_cpumask[n]);
   274				if (++curvec == last_affv)
   275					curvec = firstvec;
   276			}
   277			return numvecs;
   278		}
   279	
   280		node_vectors = kcalloc(nr_node_ids,
   281				       sizeof(struct node_vectors),
   282				       GFP_KERNEL);
   283		if (!node_vectors)
   284			return -ENOMEM;
   285	
   286		/* allocate vector number for each node */
 > 287		alloc_nodes_vectors(numvecs, node_to_cpumask, cpu_mask,
   288				    nodemsk, nmsk, node_vectors);
   289	
   290		for (i = 0; i < nr_node_ids; i++) {
   291			unsigned int ncpus, v;
   292			struct node_vectors *nv = &node_vectors[i];
   293	
   294			if (nv->nvectors == UINT_MAX)
   295				continue;
   296	
   297			/* Get the cpus on this node which are in the mask */
   298			cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
   299			ncpus = cpumask_weight(nmsk);
   300			if (!ncpus)
   301				continue;
   302	
   303			WARN_ON_ONCE(nv->nvectors > ncpus);
   304	
   305			/* Account for rounding errors */
   306			extra_vecs = ncpus - nv->nvectors * (ncpus / nv->nvectors);
   307	
   308			/* Spread allocated vectors on CPUs of the current node */
   309			for (v = 0; v < nv->nvectors; v++, curvec++) {
   310				cpus_per_vec = ncpus / nv->nvectors;
   311	
   312				/* Account for extra vectors to compensate rounding errors */
   313				if (extra_vecs) {
   314					cpus_per_vec++;
   315					--extra_vecs;
   316				}
   317	
   318				/*
   319				 * wrapping has to be considered given 'startvec'
   320				 * may start anywhere
   321				 */
   322				if (curvec >= last_affv)
   323					curvec = firstvec;
   324				irq_spread_init_one(&masks[curvec].mask, nmsk,
   325							cpus_per_vec);
   326			}
   327			done += nv->nvectors;
   328		}
   329		kfree(node_vectors);
   330		return done;
   331	}
   332	
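
Going by the compiler note alone, the mismatch is purely a const-qualifier
difference on the array element type, so it can be silenced either by making
alloc_nodes_vectors() take the same non-const pointer type its caller already
holds, or by handing the callee a pointer whose type carries the const. Which
of these (if either) was applied upstream is not part of this report; the
stand-alone sketch below, reusing the hypothetical names from the example
further up, only illustrates the first option:

/*
 * Illustration only, reusing the hypothetical mask_t example above; this is
 * not necessarily the fix applied to kernel/irq/affinity.c.  Matching the
 * callee's parameter type to what the caller actually holds removes the
 * diagnostic under gcc-4.9.
 */
typedef int mask_t[1];

static int sum(mask_t *masks, int n)	/* was: const mask_t *masks */
{
	int i, s = 0;

	for (i = 0; i < n; i++)
		s += masks[i][0];
	return s;
}

int main(void)
{
	mask_t m[2] = { { 3 }, { 4 } };

	return sum(m, 2);	/* no warning now */
}

The cost of that route is losing the const annotation on the callee; the
alternative keeps the const-qualified parameter and instead gives the caller a
pointer of the matching qualified type, at the price of touching the call sites.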

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

