[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <d414ba25-c541-4036-b442-9e53f16ea02a@roeck-us.net>
Date: Sun, 4 Feb 2024 06:47:24 -0800
From: Guenter Roeck <linux@...ck-us.net>
To: Zhang Rui <rui.zhang@...el.com>
Cc: jdelvare@...e.com, fenghua.yu@...el.com, linux-hwmon@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH V2 07/11] hwmon: (coretemp) Remove redundant
pdata->cpu_map[]
On Fri, Feb 02, 2024 at 05:21:40PM +0800, Zhang Rui wrote:
> pdata->cpu_map[] saves the mapping between cpu core id and the index in
> pdata->core_data[]. This is used to find the temp_data structure using
> cpu_core_id, by traversing the pdata->cpu_map[] array. But the same goal
> can be achieved by traversing the pdata->core_data[] array directly.
>
> Remove redundant pdata->cpu_map[].
>
> No functional change.
>
> Signed-off-by: Zhang Rui <rui.zhang@...el.com>
Applied.
Thanks,
Guenter
> ---
> drivers/hwmon/coretemp.c | 16 ++++++----------
> 1 file changed, 6 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
> index cdd1e069d5c1..29ee8e0c0fe9 100644
> --- a/drivers/hwmon/coretemp.c
> +++ b/drivers/hwmon/coretemp.c
> @@ -96,7 +96,6 @@ struct temp_data {
> struct platform_data {
> struct device *hwmon_dev;
> u16 pkg_id;
> - u16 cpu_map[NUM_REAL_CORES];
> struct ida ida;
> struct cpumask cpumask;
> struct temp_data *core_data[MAX_CORE_DATA];
> @@ -517,7 +516,6 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
> if (index < 0)
> return index;
>
> - pdata->cpu_map[index] = topology_core_id(cpu);
> index += BASE_SYSFS_ATTR_NO;
> }
>
> @@ -696,7 +694,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
> struct platform_device *pdev = coretemp_get_pdev(cpu);
> struct platform_data *pd;
> struct temp_data *tdata;
> - int i, indx = -1, target;
> + int i, target;
>
> /* No need to tear down any interfaces for suspend */
> if (cpuhp_tasks_frozen)
> @@ -707,18 +705,16 @@ static int coretemp_cpu_offline(unsigned int cpu)
> if (!pd->hwmon_dev)
> return 0;
>
> - for (i = 0; i < NUM_REAL_CORES; i++) {
> - if (pd->cpu_map[i] == topology_core_id(cpu)) {
> - indx = i + BASE_SYSFS_ATTR_NO;
> + for (i = BASE_SYSFS_ATTR_NO; i < MAX_CORE_DATA; i++) {
> + if (pd->core_data[i] && pd->core_data[i]->cpu_core_id == topology_core_id(cpu))
> break;
> - }
> }
>
> /* Too many cores and this core is not populated, just return */
> - if (indx < 0)
> + if (i == MAX_CORE_DATA)
> return 0;
>
> - tdata = pd->core_data[indx];
> + tdata = pd->core_data[i];
>
> cpumask_clear_cpu(cpu, &pd->cpumask);
>
> @@ -729,7 +725,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
> */
> target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
> if (target >= nr_cpu_ids) {
> - coretemp_remove_core(pd, indx);
> + coretemp_remove_core(pd, i);
> } else if (tdata && tdata->cpu == cpu) {
> mutex_lock(&tdata->update_lock);
> tdata->cpu = target;
Powered by blists - more mailing lists