Date:   Thu, 5 Oct 2023 16:32:55 +0530
From:   Viresh Kumar <viresh.kumar@...aro.org>
To:     Liao Chang <liaochang1@...wei.com>
Cc:     rafael@...nel.org, linux-pm@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/2] cpufreq: userspace: Use fine-grained mutex in
 userspace governor

On 12-09-23, 06:10, Liao Chang wrote:
> The userspace governor currently uses a single global mutex to avoid
> races on the governor_data field of the cpufreq_policy structure.
> This limits concurrency when multiple userspace applications try to
> set the speed of different policies at the same time. This patch
> introduces a per-policy mutex so that different policies can be
> updated concurrently.
> 
> Signed-off-by: Liao Chang <liaochang1@...wei.com>
> ---
>  drivers/cpufreq/cpufreq_userspace.c | 69 +++++++++++++++++------------
>  1 file changed, 40 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
> index 50a4d7846580..442e31060d62 100644
> --- a/drivers/cpufreq/cpufreq_userspace.c
> +++ b/drivers/cpufreq/cpufreq_userspace.c
> @@ -16,7 +16,11 @@
>  #include <linux/slab.h>
>  
>  static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
> -static DEFINE_MUTEX(userspace_mutex);
> +
> +struct userspace_policy {
> +	unsigned int setspeed;
> +	struct mutex mutex;
> +};
>  
>  /**
>   * cpufreq_set - set the CPU frequency
> @@ -28,19 +32,19 @@ static DEFINE_MUTEX(userspace_mutex);
>  static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
>  {
>  	int ret = -EINVAL;
> -	unsigned int *setspeed = policy->governor_data;
> +	struct userspace_policy *userspace = policy->governor_data;
>  
>  	pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
>  
> -	mutex_lock(&userspace_mutex);
> +	mutex_lock(&userspace->mutex);
>  	if (!per_cpu(cpu_is_managed, policy->cpu))
>  		goto err;
>  
> -	*setspeed = freq;
> +	userspace->setspeed = freq;
>  
>  	ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
>   err:
> -	mutex_unlock(&userspace_mutex);
> +	mutex_unlock(&userspace->mutex);
>  	return ret;
>  }
>  
> @@ -51,67 +55,74 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
>  
>  static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
>  {
> -	unsigned int *setspeed;
> +	struct userspace_policy *userspace;
>  
> -	setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
> -	if (!setspeed)
> +	userspace = kzalloc(sizeof(*userspace), GFP_KERNEL);
> +	if (!userspace)
>  		return -ENOMEM;
>  
> -	policy->governor_data = setspeed;
> +	mutex_init(&userspace->mutex);
> +
> +	policy->governor_data = userspace;
>  	return 0;
>  }
>  
> +/*
> + * Any routine that writes to the policy struct holds the policy's
> + * "rwsem", which means it is safe to free "governor_data" here.
> + */
>  static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
>  {
> -	mutex_lock(&userspace_mutex);
>  	kfree(policy->governor_data);
>  	policy->governor_data = NULL;
> -	mutex_unlock(&userspace_mutex);
>  }
>  
>  static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
>  {
> -	unsigned int *setspeed = policy->governor_data;
> +	struct userspace_policy *userspace = policy->governor_data;
>  
>  	BUG_ON(!policy->cur);
>  	pr_debug("started managing cpu %u\n", policy->cpu);
>  
> -	mutex_lock(&userspace_mutex);
> +	mutex_lock(&userspace->mutex);
>  	per_cpu(cpu_is_managed, policy->cpu) = 1;
> -	*setspeed = policy->cur;
> -	mutex_unlock(&userspace_mutex);
> +	userspace->setspeed = policy->cur;
> +	mutex_unlock(&userspace->mutex);
>  	return 0;
>  }
>  
>  static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
>  {
> -	unsigned int *setspeed = policy->governor_data;
> +	struct userspace_policy *userspace = policy->governor_data;
>  
>  	pr_debug("managing cpu %u stopped\n", policy->cpu);
>  
> -	mutex_lock(&userspace_mutex);
> +	mutex_lock(&userspace->mutex);
>  	per_cpu(cpu_is_managed, policy->cpu) = 0;
> -	*setspeed = 0;
> -	mutex_unlock(&userspace_mutex);
> +	userspace->setspeed = 0;
> +	mutex_unlock(&userspace->mutex);
>  }
>  
>  static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
>  {
> -	unsigned int *setspeed = policy->governor_data;
> +	struct userspace_policy *userspace = policy->governor_data;
>  
> -	mutex_lock(&userspace_mutex);
> +	mutex_lock(&userspace->mutex);
>  
>  	pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
> -		 policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
> -
> -	if (policy->max < *setspeed)
> -		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
> -	else if (policy->min > *setspeed)
> -		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
> +		 policy->cpu, policy->min, policy->max, policy->cur, userspace->setspeed);
> +
> +	if (policy->max < userspace->setspeed)
> +		__cpufreq_driver_target(policy, policy->max,
> +					CPUFREQ_RELATION_H);
> +	else if (policy->min > userspace->setspeed)
> +		__cpufreq_driver_target(policy, policy->min,
> +					CPUFREQ_RELATION_L);
>  	else
> -		__cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L);
> +		__cpufreq_driver_target(policy, userspace->setspeed,
> +					CPUFREQ_RELATION_L);
>  
> -	mutex_unlock(&userspace_mutex);
> +	mutex_unlock(&userspace->mutex);
>  }
>  
>  static struct cpufreq_governor cpufreq_gov_userspace = {
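
For readers who want to see the effect from user space: the governor is
driven by writes to each policy's scaling_setspeed file, and with the
per-policy mutex, writes for different policies no longer serialize
against each other inside the governor. A minimal sketch, not part of
the patch (it assumes the standard cpufreq sysfs layout, that cpu0 and
cpu4 belong to different policies, that both policies already run the
userspace governor, and a placeholder frequency in kHz):

/* Build with: cc -pthread demo.c -o demo */
#include <pthread.h>
#include <stdio.h>

static void *set_speed(void *arg)
{
	const char *cpu = arg;
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/%s/cpufreq/scaling_setspeed", cpu);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return NULL;
	}
	/* With the per-policy mutex, this write contends only with
	 * other writers of the same policy. */
	fprintf(f, "1200000\n");
	fclose(f);
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, set_speed, "cpu0");
	pthread_create(&t1, NULL, set_speed, "cpu4");
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	return 0;
}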

Acked-by: Viresh Kumar <viresh.kumar@...aro.org>

-- 
viresh
