Message-ID: <20160215061710.GC6334@vireshk-i7>
Date:	Mon, 15 Feb 2016 11:47:10 +0530
From:	Viresh Kumar <viresh.kumar@...aro.org>
To:	"Rafael J. Wysocki" <rjw@...ysocki.net>
Cc:	Linux PM list <linux-pm@...r.kernel.org>,
	Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
	Juri Lelli <juri.lelli@....com>
Subject: Re: [PATCH 2/9] cpufreq: governor: Avoid atomic operations in hot
 paths

On 15-02-16, 02:13, Rafael J. Wysocki wrote:
>  static void dbs_irq_work(struct irq_work *irq_work)
> @@ -357,6 +360,7 @@ static void dbs_update_util_handler(stru
>  {
>  	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
>  	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
> +	u64 delta_ns;
>  
>  	/*
>  	 * The work may not be allowed to be queued up right now.
> @@ -364,17 +368,30 @@ static void dbs_update_util_handler(stru
>  	 * - Work has already been queued up or is in progress.
>  	 * - It is too early (too little time from the previous sample).
>  	 */
> -	if (atomic_inc_return(&policy_dbs->work_count) == 1) {
> -		u64 delta_ns;
> +	if (policy_dbs->work_in_progress)
> +		return;
>  
> -		delta_ns = time - policy_dbs->last_sample_time;
> -		if ((s64)delta_ns >= policy_dbs->sample_delay_ns) {
> -			policy_dbs->last_sample_time = time;
> -			gov_queue_irq_work(policy_dbs);
> -			return;
> -		}
> -	}
> -	atomic_dec(&policy_dbs->work_count);
> +	/*
> +	 * If the reads below are reordered before the check above, the value
> +	 * of sample_delay_ns used in the computation may be stale.
> +	 */
> +	smp_rmb();
> +	delta_ns = time - policy_dbs->last_sample_time;
> +	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
> +		return;
> +
> +	/*
> +	 * If the policy is not shared, the irq_work may be queued up right away
> +	 * at this point.  Otherwise, we need to ensure that only one of the
> +	 * CPUs sharing the policy will do that.
> +	 */
> +	if (policy_dbs->is_shared &&
> +	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
> +		return;
> +
> +	policy_dbs->last_sample_time = time;
> +	policy_dbs->work_in_progress = true;
> +	gov_queue_irq_work(policy_dbs);
>  }
>  
>  static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
> @@ -551,6 +568,8 @@ static int cpufreq_governor_start(struct
>  	if (!policy->cur)
>  		return -EINVAL;
>  
> +	policy_dbs->is_shared = policy_is_shared(policy);
> +
>  	sampling_rate = dbs_data->sampling_rate;
>  	ignore_nice = dbs_data->ignore_nice_load;
>  
> Index: linux-pm/drivers/cpufreq/cpufreq_governor.h
> ===================================================================
> --- linux-pm.orig/drivers/cpufreq/cpufreq_governor.h
> +++ linux-pm/drivers/cpufreq/cpufreq_governor.h
> @@ -130,6 +130,9 @@ struct policy_dbs_info {
>  	/* dbs_data may be shared between multiple policy objects */
>  	struct dbs_data *dbs_data;
>  	struct list_head list;
> +	/* Status indicators */
> +	bool is_shared;		/* This object is used by multiple CPUs */
> +	bool work_in_progress;	/* Work is being queued up or in progress */
>  };
>  
>  static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
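
For anyone skimming the hunks above, the fast-path gating they introduce boils down to the pattern sketched below. This is only a userspace-style illustration with hypothetical names, using C11 atomics in place of the kernel's smp_rmb()/atomic_add_unless(); it mirrors the control flow of the patch, not the actual driver code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Userspace analogue of struct policy_dbs_info (hypothetical names). */
struct policy_info {
	atomic_bool work_in_progress;	/* set once work has been queued */
	atomic_int  work_count;		/* gate used only for shared policies */
	bool        is_shared;		/* policy is used by multiple CPUs */
	uint64_t    last_sample_time;
	int64_t     sample_delay_ns;
};

/* Stand-in for gov_queue_irq_work(). */
static void queue_work(struct policy_info *p) { (void)p; }

static void update_handler(struct policy_info *p, uint64_t time)
{
	uint64_t delta_ns;

	/* Bail out early if work has already been queued or is running. */
	if (atomic_load_explicit(&p->work_in_progress, memory_order_acquire))
		return;

	/* The acquire load above orders the reads below, standing in for smp_rmb(). */
	delta_ns = time - p->last_sample_time;
	if ((int64_t)delta_ns < p->sample_delay_ns)
		return;

	/*
	 * For shared policies, let only one CPU win the 0 -> 1 transition,
	 * like atomic_add_unless(&work_count, 1, 1) in the patch.
	 */
	if (p->is_shared) {
		int expected = 0;

		if (!atomic_compare_exchange_strong(&p->work_count, &expected, 1))
			return;
	}

	p->last_sample_time = time;
	atomic_store_explicit(&p->work_in_progress, true, memory_order_release);
	queue_work(p);
}

int main(void)
{
	static struct policy_info p;	/* zero-initialized */

	p.is_shared = true;
	p.sample_delay_ns = 1000000;	/* 1 ms, arbitrary */

	update_handler(&p, 2000000);	/* 2 ms after last_sample_time: queues work */
	return 0;
}

The point of the patch, as the subject says, is that on the fast path a non-shared policy now only reads work_in_progress and the timing fields; the atomic read-modify-write is confined to the shared-policy case.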

-- 
viresh
