Message-ID: <20191018055720.za3a5zeqdzcupc4h@vireshk-i7>
Date:   Fri, 18 Oct 2019 11:27:20 +0530
From:   Viresh Kumar <viresh.kumar@...aro.org>
To:     Sudeep Holla <sudeep.holla@....com>
Cc:     "Rafael J . Wysocki" <rjw@...ysocki.net>, linux-pm@...r.kernel.org,
        linux-kernel@...r.kernel.org, nico@...xnic.net
Subject: Re: [PATCH v2 4/5] cpufreq: vexpress-spc: remove lots of debug
 messages

On 17-10-19, 13:35, Sudeep Holla wrote:
> This driver has been used and tested for years now, and the extensive
> debug/log messages in the driver are not really required anymore.
> Get rid of those unnecessary log messages.
> 
> Signed-off-by: Sudeep Holla <sudeep.holla@....com>
> ---
>  drivers/cpufreq/vexpress-spc-cpufreq.c | 72 +++++---------------------
>  1 file changed, 13 insertions(+), 59 deletions(-)
> 
> diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
>  static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
> @@ -324,11 +296,9 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
>  
>  	for_each_present_cpu(i) {
>  		struct device *cdev = get_cpu_device(i);
> -		if (!cdev) {
> -			pr_err("%s: failed to get cpu%d device\n", __func__, i);
> -			return;
> -		}
>  
> +		if (!cdev)
> +			return;

We had a blank line after this, which isn't there in your version
anymore. Please keep it here and in a few more places below.
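
i.e. something like this (a sketch of the same hunk with the blank
line kept after the if-block, as in the old code):

	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev)
			return;

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}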

>  		_put_cluster_clk_and_freq_table(cdev, cpumask);
>  	}
>  
> @@ -354,19 +324,12 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
>  		goto out;
>  
>  	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
> -	if (ret) {
> -		dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
> -				__func__, cpu_dev->id, ret);
> +	if (ret)
>  		goto out;
> -	}
>  
>  	clk[cluster] = clk_get(cpu_dev, NULL);
> -	if (!IS_ERR(clk[cluster])) {
> -		dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
> -				__func__, clk[cluster], freq_table[cluster],
> -				cluster);
> +	if (!IS_ERR(clk[cluster]))
>  		return 0;
> -	}
>  
>  	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
>  			__func__, cpu_dev->id, cluster);
> @@ -401,11 +364,9 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
>  	 */
>  	for_each_present_cpu(i) {
>  		struct device *cdev = get_cpu_device(i);
> -		if (!cdev) {
> -			pr_err("%s: failed to get cpu%d device\n", __func__, i);
> -			return -ENODEV;
> -		}
>  
> +		if (!cdev)
> +			return -ENODEV;
>  		ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
>  		if (ret)
>  			goto put_clusters;
> @@ -419,19 +380,14 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
>  	clk_big_min = get_table_min(freq_table[0]);
>  	clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
>  
> -	pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
> -			__func__, cluster, clk_big_min, clk_little_max);
> -
>  	return 0;
>  
>  put_clusters:
>  	for_each_present_cpu(i) {
>  		struct device *cdev = get_cpu_device(i);
> -		if (!cdev) {
> -			pr_err("%s: failed to get cpu%d device\n", __func__, i);
> -			return -ENODEV;
> -		}
>  
> +		if (!cdev)
> +			return -ENODEV;
>  		_put_cluster_clk_and_freq_table(cdev, cpumask);
>  	}
>  
> @@ -500,8 +456,6 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
>  	}
>  
>  	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
> -	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
> -
>  	return 0;
>  }
>  
> -- 
> 2.17.1

-- 
viresh
