Date: Tue, 20 Feb 2024 13:35:07 +0200
From: Georgi Djakov <djakov@...nel.org>
To: Rumeng Wang <wangrumeng@...omi.corp-partner.google.com>
Cc: linux-pm@...r.kernel.org, linux-kernel@...r.kernel.org,
 fengqi@...omi.com, xuyingfeng@...omi.com
Subject: Re: [PATCH] interconnect: Replace mutex with rt_mutex for icc_bw_lock

Hello,

On 20.02.24 9:43, Rumeng Wang wrote:
> From: wangrumeng <wangrumeng@...omi.corp-partner.google.com>
> 
> Replace the existing mutex with an rt_mutex to prevent priority inversion
> between clients: with a plain mutex, a low-priority client holding the lock
> can stall a higher-priority one, causing unacceptable delays in some cases.
> 
> Signed-off-by: wangrumeng <wangrumeng@...omi.corp-partner.google.com>

A similar patch [1] was posted some time ago. Please check the review
comments.
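
For reference, the inversion this patch targets is easy to demonstrate in
userspace. The sketch below is a hypothetical analogy, not kernel code and
not part of this patch: PTHREAD_PRIO_INHERIT gives a pthread mutex the same
priority-inheritance behavior that rt_mutex provides in the kernel, so the
lock holder is temporarily boosted to the priority of the highest waiter.

/*
 * Minimal userspace sketch of priority inheritance; build with
 * gcc -o pi_demo pi_demo.c -lpthread. The lock stands in for
 * icc_bw_lock and set_bw() for icc_set_bw() (names illustrative).
 */
#include <pthread.h>

static pthread_mutex_t bw_lock;

static void *set_bw(void *arg)
{
	/* Low- and high-priority clients contend on the same lock. */
	pthread_mutex_lock(&bw_lock);
	/* ... aggregate and apply bandwidth requests ... */
	pthread_mutex_unlock(&bw_lock);
	return arg;
}

int main(void)
{
	pthread_mutexattr_t attr;
	pthread_t low, high;

	pthread_mutexattr_init(&attr);
	/*
	 * Priority inheritance: if a higher-priority thread blocks on
	 * the lock, the current holder inherits that priority until it
	 * unlocks -- the behavior DEFINE_RT_MUTEX() enables in-kernel.
	 */
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&bw_lock, &attr);

	pthread_create(&low, NULL, set_bw, NULL);
	pthread_create(&high, NULL, set_bw, NULL);
	pthread_join(low, NULL);
	pthread_join(high, NULL);

	pthread_mutex_destroy(&bw_lock);
	pthread_mutexattr_destroy(&attr);
	return 0;
}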

Thanks,
Georgi

[1] https://lore.kernel.org/all/20220906191423.30109-1-quic_mdtipton@quicinc.com/

> ---
>   drivers/interconnect/core.c | 15 ++++++++-------
>   1 file changed, 8 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
> index 50bac2d79d9b..467d42cc7e49 100644
> --- a/drivers/interconnect/core.c
> +++ b/drivers/interconnect/core.c
> @@ -14,6 +14,7 @@
>   #include <linux/interconnect-provider.h>
>   #include <linux/list.h>
>   #include <linux/mutex.h>
> +#include <linux/rtmutex.h>
>   #include <linux/slab.h>
>   #include <linux/of.h>
>   #include <linux/overflow.h>
> @@ -28,7 +29,7 @@ static LIST_HEAD(icc_providers);
>   static int providers_count;
>   static bool synced_state;
>   static DEFINE_MUTEX(icc_lock);
> -static DEFINE_MUTEX(icc_bw_lock);
> +static DEFINE_RT_MUTEX(icc_bw_lock);
>   static struct dentry *icc_debugfs_dir;
>   
>   static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
> @@ -698,7 +699,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
>   	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
>   		return -EINVAL;
>   
> -	mutex_lock(&icc_bw_lock);
> +	rt_mutex_lock(&icc_bw_lock);
>   
>   	old_avg = path->reqs[0].avg_bw;
>   	old_peak = path->reqs[0].peak_bw;
> @@ -730,7 +731,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
>   		apply_constraints(path);
>   	}
>   
> -	mutex_unlock(&icc_bw_lock);
> +	rt_mutex_unlock(&icc_bw_lock);
>   
>   	trace_icc_set_bw_end(path, ret);
>   
> @@ -939,7 +940,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
>   		return;
>   
>   	mutex_lock(&icc_lock);
> -	mutex_lock(&icc_bw_lock);
> +	rt_mutex_lock(&icc_bw_lock);
>   
>   	node->provider = provider;
>   	list_add_tail(&node->node_list, &provider->nodes);
> @@ -968,7 +969,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
>   	node->avg_bw = 0;
>   	node->peak_bw = 0;
>   
> -	mutex_unlock(&icc_bw_lock);
> +	rt_mutex_unlock(&icc_bw_lock);
>   	mutex_unlock(&icc_lock);
>   }
>   EXPORT_SYMBOL_GPL(icc_node_add);
> @@ -1094,7 +1095,7 @@ void icc_sync_state(struct device *dev)
>   		return;
>   
>   	mutex_lock(&icc_lock);
> -	mutex_lock(&icc_bw_lock);
> +	rt_mutex_lock(&icc_bw_lock);
>   	synced_state = true;
>   	list_for_each_entry(p, &icc_providers, provider_list) {
>   		dev_dbg(p->dev, "interconnect provider is in synced state\n");
> @@ -1107,7 +1108,7 @@ void icc_sync_state(struct device *dev)
>   			}
>   		}
>   	}
> -	mutex_unlock(&icc_bw_lock);
> +	rt_mutex_unlock(&icc_bw_lock);
>   	mutex_unlock(&icc_lock);
>   }
>   EXPORT_SYMBOL_GPL(icc_sync_state);

