[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210813091118.17571-1-zhipeng.wang_1@nxp.com>
Date: Fri, 13 Aug 2021 09:11:18 +0000
From: Zhipeng Wang <zhipeng.wang_1@....com>
To: mturquette@...libre.com
Cc: sboyd@...nel.org, linux-clk@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH] clk: add sys node to disable unused clk
The normal sequence is that the clock provider registers clk to
the clock framework, and the clock framework manages the clock resources
of the system. Clk consumers obtain clk through the clock framework API,
enable and disable clk.
Not all clks registered through the clock provider will be used by
clock consumers, so the clock framework provides
late_initcall_sync(clk_disable_unused) to disable the unused clks.
Now that the clock provider and some consumers are built as modules,
they may load after clk_disable_unused() has already run, so it no
longer works properly. Add a sysfs node to trigger it from userspace.
Signed-off-by: Zhipeng Wang <zhipeng.wang_1@....com>
---
drivers/clk/clk.c | 74 ++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 70 insertions(+), 4 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 65508eb89ec9..2bd496c87f80 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -32,6 +32,7 @@ static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;
+static bool enable_clk_disable_unused;
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
@@ -1206,7 +1207,7 @@ static void clk_core_disable_unprepare(struct clk_core *core)
clk_core_unprepare_lock(core);
}
-static void __init clk_unprepare_unused_subtree(struct clk_core *core)
+static void clk_unprepare_unused_subtree(struct clk_core *core)
{
struct clk_core *child;
@@ -1236,7 +1237,7 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
clk_pm_runtime_put(core);
}
-static void __init clk_disable_unused_subtree(struct clk_core *core)
+static void clk_disable_unused_subtree(struct clk_core *core)
{
struct clk_core *child;
unsigned long flags;
@@ -1282,7 +1283,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
clk_core_disable_unprepare(core->parent);
}
-static bool clk_ignore_unused __initdata;
+static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
clk_ignore_unused = true;
@@ -1290,7 +1291,7 @@ static int __init clk_ignore_unused_setup(char *__unused)
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
-static int __init clk_disable_unused(void)
+static int clk_disable_unused(void)
{
struct clk_core *core;
@@ -1319,6 +1320,71 @@ static int __init clk_disable_unused(void)
}
late_initcall_sync(clk_disable_unused);
+static void clk_disable_unused_enable(bool enable)
+{
+	if (!enable) {
+		enable_clk_disable_unused = false;
+		pr_info("clk_disable_unused: disabled\n");
+		return;
+	}
+	clk_disable_unused();
+	enable_clk_disable_unused = true;
+	pr_info("clk_disable_unused: enabled\n");
+}
+
+static bool clk_disable_unused_status(void)
+{
+	return enable_clk_disable_unused;
+}
+
+static ssize_t clk_disable_unused_show(struct kobject *kobj, struct kobj_attribute *attr,
+				       char *buf)
+{
+	/* sysfs_emit() bounds the write to PAGE_SIZE, unlike raw sprintf() */
+	return sysfs_emit(buf, "%u\n", clk_disable_unused_status());
+}
+
+static ssize_t clk_disable_unused_store(struct kobject *kobj, struct kobj_attribute *attr,
+				       const char *buf, size_t n)
+{
+	bool val;
+
+	/*
+	 * kstrtobool() accepts "0"/"1" as before, plus the usual y/n/on/off.
+	 */
+	if (kstrtobool(buf, &val))
+		return -EINVAL;
+
+	clk_disable_unused_enable(val);
+	return n;
+}
+
+static struct kobj_attribute clk_ctrl_attr =
+	__ATTR(enable_clk_disable_unused, 0644, clk_disable_unused_show, clk_disable_unused_store);
+
+static struct attribute *clk_ctrl_attributes[] = {
+	&clk_ctrl_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group clk_ctrl_attr_group = {
+	.attrs = clk_ctrl_attributes,
+};
+
+static const struct attribute_group *clk_ctrl_attr_groups[] = {
+	&clk_ctrl_attr_group,
+	NULL,
+};
+
+static int __init create_sys_clk_unused(void)
+{
+	struct kobject *clk_ctrl_kobj = kobject_create_and_add("clk_ctrl", NULL);
+
+	/* Check the kobject and propagate sysfs errors instead of ignoring them */
+	if (!clk_ctrl_kobj)
+		return -ENOMEM;
+	return sysfs_create_groups(clk_ctrl_kobj, clk_ctrl_attr_groups);
+}
+late_initcall_sync(create_sys_clk_unused);
+
static int clk_core_determine_round_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
--
2.17.1
Powered by blists - more mailing lists