Message-ID: <20220906191423.30109-1-quic_mdtipton@quicinc.com>
Date: Tue, 6 Sep 2022 12:14:23 -0700
From: Mike Tipton <quic_mdtipton@...cinc.com>
To: <djakov@...nel.org>
CC: <linux-pm@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<quic_okukatla@...cinc.com>,
Mike Tipton <quic_mdtipton@...cinc.com>
Subject: [PATCH] interconnect: Replace mutex with rt_mutex

Replace the global icc_lock mutex with an rt_mutex to prevent priority
inversion between interconnect clients, which can cause unacceptable
delays in some cases.

Signed-off-by: Mike Tipton <quic_mdtipton@...cinc.com>
---
We've run into a number of cases, both internally and from customers,
where high-priority RT clients (typically display) become blocked for
long periods on lower-priority, non-RT clients. Switching to rt_mutex,
which supports priority inheritance, has proven to help performance in
these cases.
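
To illustrate (a minimal sketch only, not part of the patch; the two
client functions below are hypothetical, while DEFINE_RT_MUTEX(),
rt_mutex_lock() and rt_mutex_unlock() are the real <linux/rtmutex.h>
APIs the patch switches to):

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(icc_lock);

/* Low-priority SCHED_OTHER client updating its bandwidth vote. */
static void low_prio_client(void)
{
	rt_mutex_lock(&icc_lock);
	/*
	 * With a plain mutex, a medium-priority CPU hog can preempt
	 * this task indefinitely while it holds the lock, starving any
	 * RT waiter. With an rt_mutex, an RT waiter boosts this holder
	 * to the waiter's priority until it unlocks.
	 */
	/* ... aggregate and apply bandwidth requests ... */
	rt_mutex_unlock(&icc_lock);
}

/* High-priority SCHED_FIFO display client contending on the same lock. */
static void rt_display_client(void)
{
	rt_mutex_lock(&icc_lock);	/* priority-boosts the current holder */
	/* ... set the display path's bandwidth ... */
	rt_mutex_unlock(&icc_lock);
}

The key difference from a plain mutex is that priority-inheritance
boost: the sleeping RT waiter lends its priority to whichever task
currently holds icc_lock, so the holder can no longer be starved by
medium-priority tasks and the RT client unblocks promptly.
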
drivers/interconnect/core.c | 80 ++++++++++++++++++-------------------
1 file changed, 40 insertions(+), 40 deletions(-)

diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 25debded65a8..a536c013d9ca 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -14,7 +14,7 @@
#include <linux/interconnect-provider.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/rtmutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/overflow.h>
@@ -28,7 +28,7 @@ static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static int providers_count;
static bool synced_state;
-static DEFINE_MUTEX(icc_lock);
+static DEFINE_RT_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;
static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
@@ -47,7 +47,7 @@ static int icc_summary_show(struct seq_file *s, void *data)
seq_puts(s, " node tag avg peak\n");
seq_puts(s, "--------------------------------------------------------------------\n");
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
list_for_each_entry(provider, &icc_providers, provider_list) {
struct icc_node *n;
@@ -73,7 +73,7 @@ static int icc_summary_show(struct seq_file *s, void *data)
}
}
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
return 0;
}
@@ -104,7 +104,7 @@ static int icc_graph_show(struct seq_file *s, void *data)
int i;
seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
/* draw providers as cluster subgraphs */
cluster_index = 0;
@@ -136,7 +136,7 @@ static int icc_graph_show(struct seq_file *s, void *data)
icc_graph_show_link(s, 1, n,
n->links[i]);
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
seq_puts(s, "}");
return 0;
@@ -362,7 +362,7 @@ struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
if (!spec)
return ERR_PTR(-EINVAL);
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
list_for_each_entry(provider, &icc_providers, provider_list) {
if (provider->dev->of_node == spec->np) {
if (provider->xlate_extended) {
@@ -378,7 +378,7 @@ struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
}
}
}
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
if (IS_ERR(node))
return ERR_CAST(node);
@@ -490,9 +490,9 @@ struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
return ERR_CAST(dst_data);
}
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
path = path_find(dev, src_data->node, dst_data->node);
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
if (IS_ERR(path)) {
dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
goto free_icc_data;
@@ -577,12 +577,12 @@ void icc_set_tag(struct icc_path *path, u32 tag)
if (!path)
return;
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
for (i = 0; i < path->num_nodes; i++)
path->reqs[i].tag = tag;
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_set_tag);
@@ -632,7 +632,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
if (WARN_ON(IS_ERR(path) || !path->num_nodes))
return -EINVAL;
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
old_avg = path->reqs[0].avg_bw;
old_peak = path->reqs[0].peak_bw;
@@ -664,7 +664,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
apply_constraints(path);
}
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
trace_icc_set_bw_end(path, ret);
@@ -682,12 +682,12 @@ static int __icc_enable(struct icc_path *path, bool enable)
if (WARN_ON(IS_ERR(path) || !path->num_nodes))
return -EINVAL;
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
for (i = 0; i < path->num_nodes; i++)
path->reqs[i].enabled = enable;
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
return icc_set_bw(path, path->reqs[0].avg_bw,
path->reqs[0].peak_bw);
@@ -726,7 +726,7 @@ struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
struct icc_node *src, *dst;
struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
src = node_find(src_id);
if (!src)
@@ -748,7 +748,7 @@ struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
path = ERR_PTR(-ENOMEM);
}
out:
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
return path;
}
EXPORT_SYMBOL_GPL(icc_get);
@@ -773,14 +773,14 @@ void icc_put(struct icc_path *path)
if (ret)
pr_err("%s: error (%d)\n", __func__, ret);
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node;
hlist_del(&path->reqs[i].req_node);
if (!WARN_ON(!node->provider->users))
node->provider->users--;
}
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
kfree_const(path->name);
kfree(path);
@@ -822,11 +822,11 @@ struct icc_node *icc_node_create(int id)
{
struct icc_node *node;
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
node = icc_node_create_nolock(id);
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
return node;
}
@@ -840,7 +840,7 @@ void icc_node_destroy(int id)
{
struct icc_node *node;
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
node = node_find(id);
if (node) {
@@ -848,7 +848,7 @@ void icc_node_destroy(int id)
WARN_ON(!hlist_empty(&node->req_list));
}
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
kfree(node);
}
@@ -876,7 +876,7 @@ int icc_link_create(struct icc_node *node, const int dst_id)
if (!node->provider)
return -EINVAL;
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
dst = node_find(dst_id);
if (!dst) {
@@ -900,7 +900,7 @@ int icc_link_create(struct icc_node *node, const int dst_id)
node->links[node->num_links++] = dst;
out:
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
return ret;
}
@@ -925,7 +925,7 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
if (IS_ERR_OR_NULL(dst))
return -EINVAL;
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
for (slot = 0; slot < src->num_links; slot++)
if (src->links[slot] == dst)
@@ -946,7 +946,7 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
ret = -ENOMEM;
out:
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
return ret;
}
@@ -962,7 +962,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
if (WARN_ON(node->provider))
return;
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
node->provider = provider;
list_add_tail(&node->node_list, &provider->nodes);
@@ -988,7 +988,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
node->avg_bw = 0;
node->peak_bw = 0;
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);
@@ -998,11 +998,11 @@ EXPORT_SYMBOL_GPL(icc_node_add);
*/
void icc_node_del(struct icc_node *node)
{
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
list_del(&node->node_list);
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);
@@ -1041,12 +1041,12 @@ int icc_provider_add(struct icc_provider *provider)
if (WARN_ON(!provider->xlate && !provider->xlate_extended))
return -EINVAL;
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
INIT_LIST_HEAD(&provider->nodes);
list_add_tail(&provider->provider_list, &icc_providers);
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
dev_dbg(provider->dev, "interconnect provider added to topology\n");
@@ -1060,22 +1060,22 @@ EXPORT_SYMBOL_GPL(icc_provider_add);
*/
void icc_provider_del(struct icc_provider *provider)
{
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
if (provider->users) {
pr_warn("interconnect provider still has %d users\n",
provider->users);
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
return;
}
if (!list_empty(&provider->nodes)) {
pr_warn("interconnect provider still has nodes\n");
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
return;
}
list_del(&provider->provider_list);
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_provider_del);
@@ -1110,7 +1110,7 @@ void icc_sync_state(struct device *dev)
if (count < providers_count)
return;
- mutex_lock(&icc_lock);
+ rt_mutex_lock(&icc_lock);
synced_state = true;
list_for_each_entry(p, &icc_providers, provider_list) {
dev_dbg(p->dev, "interconnect provider is in synced state\n");
@@ -1123,7 +1123,7 @@ void icc_sync_state(struct device *dev)
}
}
}
- mutex_unlock(&icc_lock);
+ rt_mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_sync_state);
--
2.17.1