Date: Fri, 5 Jan 2024 13:40:42 +0530
From: Sandipan Das <sandipan.das@....com>
To: kernel test robot <lkp@...el.com>
Cc: oe-kbuild-all@...ts.linux.dev, linux-kernel@...r.kernel.org,
 Peter Zijlstra <peterz@...radead.org>
Subject: Re: arch/x86/events/amd/uncore.c:941:52: error: '%d' directive output
 may be truncated writing between 1 and 10 bytes into a region of size 8

Hi,

On 1/5/2024 1:20 PM, kernel test robot wrote:
> Hi Sandipan,
> 
> FYI, the error/warning still remains.
> 

The active UMCs in a socket are represented by a 32-bit mask coming from CPUID leaf 0x80000022 ECX.
Since there are at most 2 sockets in a system, the maximum number of UMCs will not exceed 64, i.e.
the index needs at most two digits. This fits in pmu->name, which is 16 bytes: "amd_umc_" takes 8,
leaving room for up to a 7-digit index plus the terminating NUL. While future SKUs may have more
UMCs, the number is not expected to exceed a 7-digit integer.
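
For illustration, here is a minimal userspace sketch of that size arithmetic (not kernel code; the
NAME_LEN and UMC_INDEX_MAX constants are assumptions that mirror the 16-byte pmu->name from the
compiler note and the 64-UMC bound described above):

#include <assert.h>
#include <stdio.h>

#define NAME_LEN      16   /* mirrors "destination of size 16" from the compiler note */
#define UMC_INDEX_MAX 63   /* 2 sockets x 32-bit UMC mask => at most 64 UMCs */

int main(void)
{
	char name[NAME_LEN];
	int index, len = 0;

	for (index = 0; index <= UMC_INDEX_MAX; index++) {
		len = snprintf(name, sizeof(name), "amd_umc_%d", index);
		/* "amd_umc_" is 8 chars; a 2-digit index plus NUL easily fits in 16 bytes. */
		assert(len < (int)sizeof(name));
	}

	printf("largest name: \"%s\" (%d chars in a %d-byte buffer)\n",
	       name, len, NAME_LEN);
	return 0;
}

The compiler only warns because it cannot see the bound on 'index' and therefore assumes the full
range of a signed int (up to 10 digits), which would indeed overflow the 16-byte buffer.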

- Sandipan

> tree:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
> head:   1f874787ed9a2d78ed59cb21d0d90ac0178eceb0
> commit: 25e56847821f7375bdee7dae1027c7917d07ce4b perf/x86/amd/uncore: Add memory controller support
> date:   3 months ago
> config: x86_64-sof-customedconfig-avs-defconfig (https://download.01.org/0day-ci/archive/20240105/202401051554.teOdw8yt-lkp@intel.com/config)
> compiler: gcc-7 (Ubuntu 7.5.0-6ubuntu2) 7.5.0
> reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240105/202401051554.teOdw8yt-lkp@intel.com/reproduce)
> 
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
>  the same patch/commit), kindly add the following tags
> | Reported-by: kernel test robot <lkp@...el.com>
> | Closes: https://lore.kernel.org/oe-kbuild-all/202401051554.teOdw8yt-lkp@intel.com/
> 
> All errors (new ones prefixed by >>):
> 
>    arch/x86/events/amd/uncore.c: In function 'amd_uncore_umc_ctx_init':
>>> arch/x86/events/amd/uncore.c:941:52: error: '%d' directive output may be truncated writing between 1 and 10 bytes into a region of size 8 [-Werror=format-truncation=]
>        snprintf(pmu->name, sizeof(pmu->name), "amd_umc_%d", index);
>                                                        ^~
>    arch/x86/events/amd/uncore.c:941:43: note: directive argument in the range [0, 2147483647]
>        snprintf(pmu->name, sizeof(pmu->name), "amd_umc_%d", index);
>                                               ^~~~~~~~~~~~
>    arch/x86/events/amd/uncore.c:941:4: note: 'snprintf' output between 10 and 19 bytes into a destination of size 16
>        snprintf(pmu->name, sizeof(pmu->name), "amd_umc_%d", index);
>        ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>    cc1: all warnings being treated as errors
> 
> 
> vim +941 arch/x86/events/amd/uncore.c
> 
>    900	
>    901	static
>    902	int amd_uncore_umc_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
>    903	{
>    904		DECLARE_BITMAP(gmask, UNCORE_GROUP_MAX) = { 0 };
>    905		u8 group_num_pmus[UNCORE_GROUP_MAX] = { 0 };
>    906		u8 group_num_pmcs[UNCORE_GROUP_MAX] = { 0 };
>    907		union amd_uncore_info info;
>    908		struct amd_uncore_pmu *pmu;
>    909		int index = 0, gid, i;
>    910	
>    911		if (pmu_version < 2)
>    912			return 0;
>    913	
>    914		/* Run just once */
>    915		if (uncore->init_done)
>    916			return amd_uncore_ctx_init(uncore, cpu);
>    917	
>    918		/* Find unique groups */
>    919		for_each_online_cpu(i) {
>    920			info = *per_cpu_ptr(uncore->info, i);
>    921			gid = info.split.gid;
>    922			if (test_bit(gid, gmask))
>    923				continue;
>    924	
>    925			__set_bit(gid, gmask);
>    926			group_num_pmus[gid] = hweight32(info.split.aux_data);
>    927			group_num_pmcs[gid] = info.split.num_pmcs;
>    928			uncore->num_pmus += group_num_pmus[gid];
>    929		}
>    930	
>    931		uncore->pmus = kzalloc(sizeof(*uncore->pmus) * uncore->num_pmus,
>    932				       GFP_KERNEL);
>    933		if (!uncore->pmus) {
>    934			uncore->num_pmus = 0;
>    935			goto done;
>    936		}
>    937	
>    938		for_each_set_bit(gid, gmask, UNCORE_GROUP_MAX) {
>    939			for (i = 0; i < group_num_pmus[gid]; i++) {
>    940				pmu = &uncore->pmus[index];
>  > 941				snprintf(pmu->name, sizeof(pmu->name), "amd_umc_%d", index);
>    942				pmu->num_counters = group_num_pmcs[gid] / group_num_pmus[gid];
>    943				pmu->msr_base = MSR_F19H_UMC_PERF_CTL + i * pmu->num_counters * 2;
>    944				pmu->rdpmc_base = -1;
>    945				pmu->group = gid;
>    946	
>    947				pmu->ctx = alloc_percpu(struct amd_uncore_ctx *);
>    948				if (!pmu->ctx)
>    949					goto done;
>    950	
>    951				pmu->pmu = (struct pmu) {
>    952					.task_ctx_nr	= perf_invalid_context,
>    953					.attr_groups	= amd_uncore_umc_attr_groups,
>    954					.name		= pmu->name,
>    955					.event_init	= amd_uncore_umc_event_init,
>    956					.add		= amd_uncore_add,
>    957					.del		= amd_uncore_del,
>    958					.start		= amd_uncore_umc_start,
>    959					.stop		= amd_uncore_stop,
>    960					.read		= amd_uncore_read,
>    961					.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
>    962					.module		= THIS_MODULE,
>    963				};
>    964	
>    965				if (perf_pmu_register(&pmu->pmu, pmu->pmu.name, -1)) {
>    966					free_percpu(pmu->ctx);
>    967					pmu->ctx = NULL;
>    968					goto done;
>    969				}
>    970	
>    971				pr_info("%d %s counters detected\n", pmu->num_counters,
>    972					pmu->pmu.name);
>    973	
>    974				index++;
>    975			}
>    976		}
>    977	
>    978	done:
>    979		uncore->num_pmus = index;
>    980		uncore->init_done = true;
>    981	
>    982		return amd_uncore_ctx_init(uncore, cpu);
>    983	}
>    984	
> 

