Message-ID: <153504874119.28926.4197673432816461627@swboyd.mtv.corp.google.com>
Date: Thu, 23 Aug 2018 11:25:41 -0700
From: Stephen Boyd <sboyd@...nel.org>
To: Michael Turquette <mturquette@...libre.com>,
Taniya Das <tdas@...eaurora.org>
Cc: Andy Gross <andy.gross@...aro.org>,
David Brown <david.brown@...aro.org>,
Rajendra Nayak <rnayak@...eaurora.org>,
Amit Nischal <anischal@...eaurora.org>,
linux-arm-msm@...r.kernel.org, linux-soc@...r.kernel.org,
linux-clk@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 0/2] clk: qcom: Add support for RCG to register for DFS
Quoting Taniya Das (2018-08-22 03:28:31)
>
> >
> > Hmmmm. Ok. That won't work then. recalc_rate() better not try to
> > populate the frequency table then or it will not work. So I suppose it
> > needs to fall back to reading the registers and assuming the parent_rate
> > coming in is the actual frequency of its parent until the frequency
> > table pointer is non-NULL. Would that work?
> >
> Yes that would work.
Ok.
>
> > BTW, does DFS switch parents without software knowing about it?
> DFS would not switch until a HW request is sent, but SW would be unaware
> of the switch, apart from the current_perf_state being updated with the
> requested level.
>
> > What happens in that case? Does the QUP driver make sure that the new parent
> > of this RCG is properly enabled so that it can switch to it when needed?
>
> I am not sure if they poll for any of their QUP HW state to make sure
> the switch is complete.
>
> > I'm still trying to understand this whole design. Who takes care of the
> > voltage requirements in this case? The QUP driver as well?
> >
>
> When the QUP driver needs to switch to a new performance level, the
> first request would be set_rate() (the QUP driver would get the list of
> supported frequencies using clk_round_rate()), which in the QCOM clock
> driver would take care of setting the required voltage for the new
> parent switch.
The set_rate() call would also make sure that the new parent is enabled
if the QUP clk is enabled. That's another concern. Does the PLL turn on
automatically when the RCG switches to it?
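
For reference, I read the consumer side of what you're describing as
roughly the following (hypothetical snippet, not actual QUP driver code;
the "se" con_id, the device pointer and the target rate are all made up
for illustration):

#include <linux/clk.h>
#include <linux/err.h>

static int qup_switch_perf_level(struct device *dev, unsigned long want_hz)
{
	struct clk *se_clk;
	long rate;
	int ret;

	se_clk = devm_clk_get(dev, "se");	/* assumed con_id */
	if (IS_ERR(se_clk))
		return PTR_ERR(se_clk);

	/* Discover the closest supported frequency from the DFS table */
	rate = clk_round_rate(se_clk, want_hz);
	if (rate <= 0)
		return -EINVAL;

	/*
	 * set_rate() is where the clk driver gets to raise the voltage
	 * and deal with the new parent before the QUP HW actually
	 * requests the DFS switch.
	 */
	ret = clk_set_rate(se_clk, rate);
	if (ret)
		return ret;

	return clk_prepare_enable(se_clk);
}
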
> Then the QUP driver would request the HW for a new perf switch, which
> would result in a DFS switch for the QUP clocks.
It sounds like the QUP driver does half of the work via the clk APIs and
then the other half through the DFS register. Maybe the QUP driver
should be registering a clk as well for its DFS register so it can all
be clk API calls here. Something to consider; a rough sketch of what I
mean is below.
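
Roughly what I have in mind is the QUP driver wrapping its DFS request
register in a clk so the perf switch goes through clk_set_rate() as
well. This is purely a sketch with invented names (qup_dfs_clk, dfs_reg,
freqs), not a real proposal for the QUP driver:

#include <linux/clk-provider.h>
#include <linux/io.h>

struct qup_dfs_clk {
	struct clk_hw hw;
	void __iomem *dfs_reg;		/* QUP's perf/DFS request register */
	const unsigned long *freqs;	/* frequency of each perf level, ascending */
	unsigned int num_levels;
	unsigned int cur_level;
};

#define to_qup_dfs_clk(_hw) container_of(_hw, struct qup_dfs_clk, hw)

/* Lowest perf level that satisfies the requested rate */
static unsigned int qup_dfs_find_level(struct qup_dfs_clk *dfs,
				       unsigned long rate)
{
	unsigned int i;

	for (i = 0; i < dfs->num_levels - 1; i++)
		if (dfs->freqs[i] >= rate)
			break;

	return i;
}

static long qup_dfs_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	struct qup_dfs_clk *dfs = to_qup_dfs_clk(hw);

	return dfs->freqs[qup_dfs_find_level(dfs, rate)];
}

static int qup_dfs_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct qup_dfs_clk *dfs = to_qup_dfs_clk(hw);
	unsigned int level = qup_dfs_find_level(dfs, rate);

	/* The HW performs the actual RCG source/divider switch for us */
	writel(level, dfs->dfs_reg);
	dfs->cur_level = level;

	return 0;
}

static unsigned long qup_dfs_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct qup_dfs_clk *dfs = to_qup_dfs_clk(hw);

	return dfs->freqs[dfs->cur_level];
}

static const struct clk_ops qup_dfs_clk_ops = {
	.round_rate = qup_dfs_round_rate,
	.set_rate = qup_dfs_set_rate,
	.recalc_rate = qup_dfs_recalc_rate,
};

That way the rate changes the QUP driver triggers would be visible to
the clk framework instead of happening behind its back.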
Anyway, that's not important to this patch, so here's the updated patch.
---8<----
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index e6300e05d5df..e5eca8a1abe4 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -163,6 +163,15 @@ extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
extern const struct clk_ops clk_rcg2_shared_ops;
+struct clk_rcg_dfs_data {
+ struct clk_rcg2 *rcg;
+ struct clk_init_data *init;
+};
+
+#define DEFINE_RCG_DFS(r) \
+ { .rcg = &r##_src, .init = &r##_init }
+
extern int qcom_cc_register_rcg_dfs(struct regmap *regmap,
- struct clk_rcg2 **rcgs, int num_clks);
+ const struct clk_rcg_dfs_data *rcgs,
+ size_t len);
#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 55a5b58cbb15..d5d77f9ad170 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -940,25 +940,21 @@ const struct clk_ops clk_rcg2_shared_ops = {
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
/* Common APIs to be used for DFS based RCGR */
-static unsigned long clk_rcg2_calculate_freq(struct clk_hw *hw,
- int level, struct freq_tbl *f)
+static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
+ struct freq_tbl *f)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
struct clk_hw *p;
unsigned long prate = 0;
- u32 val, mask, cfg, m_off, n_off, offset, mode;
- int i, ret, num_parents;
+ u32 val, mask, cfg, mode;
+ int i, num_parents;
- offset = SE_PERF_DFSR(level);
- ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + offset, &cfg);
- if (ret)
- return ret;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);
mask = BIT(rcg->hid_width) - 1;
- f->pre_div = cfg & mask ? (cfg & mask) : 1;
-
- mode = cfg & CFG_MODE_MASK;
- mode >>= CFG_MODE_SHIFT;
+ f->pre_div = 1;
+ if (cfg & mask)
+ f->pre_div = cfg & mask;
cfg &= CFG_SRC_SEL_MASK;
cfg >>= CFG_SRC_SEL_SHIFT;
@@ -972,60 +968,39 @@ static unsigned long clk_rcg2_calculate_freq(struct clk_hw *hw,
}
}
+ mode = cfg & CFG_MODE_MASK;
+ mode >>= CFG_MODE_SHIFT;
if (mode) {
- /* Calculate M & N values */
- m_off = SE_PERF_M_DFSR(level);
- n_off = SE_PERF_N_DFSR(level);
-
mask = BIT(rcg->mnd_width) - 1;
- ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + m_off,
- &val);
- if (ret) {
- pr_err("Failed to read M offset register\n");
- return ret;
- }
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
+ &val);
val &= mask;
f->m = val;
- ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + n_off,
- &val);
- if (ret) {
- pr_err("Failed to read N offset register\n");
- return ret;
- }
- /* val ~(N-M) */
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
+ &val);
val = ~val;
val &= mask;
val += f->m;
f->n = val;
}
- return calc_rate(prate, f->m, f->n, mode, f->pre_div);
+ f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}
static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
struct freq_tbl *freq_tbl;
- unsigned long calc_freq;
int i;
- freq_tbl = kcalloc(MAX_PERF_LEVEL, sizeof(*freq_tbl),
- GFP_KERNEL);
+ freq_tbl = kcalloc(MAX_PERF_LEVEL, sizeof(*freq_tbl), GFP_KERNEL);
if (!freq_tbl)
return -ENOMEM;
-
- for (i = 0; i < MAX_PERF_LEVEL; i++) {
- calc_freq = clk_rcg2_calculate_freq(&rcg->clkr.hw,
- i, &freq_tbl[i]);
- if (calc_freq < 0) {
- kfree(freq_tbl);
- return calc_freq;
- }
-
- freq_tbl[i].freq = calc_freq;
- }
rcg->freq_tbl = freq_tbl;
+ for (i = 0; i < MAX_PERF_LEVEL; i++)
+ clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);
+
return 0;
}
@@ -1044,55 +1019,54 @@ static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
}
}
- return clk_rcg2_shared_ops.determine_rate(hw, req);
+ return clk_rcg2_determine_rate(hw, req);
}
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask, level;
- int num_parents, i;
- unsigned long prate;
+ u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;
- regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr +
- SE_CMD_DFSR_OFFSET, &cfg);
- level = (GENMASK(4, 1) & cfg) >> 1;
+ regmap_read(rcg->clkr.regmap,
+ rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
+ level &= GENMASK(4, 1);
+ level >>= 1;
- regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr +
- SE_PERF_DFSR(level), &cfg);
- if (rcg->mnd_width) {
+ if (rcg->freq_tbl)
+ return rcg->freq_tbl[level].freq;
+
+ /*
+ * Assume that parent_rate is actually the parent because
+ * we can't do any better at figuring it out when the table
+ * hasn't been populated yet. We only populate the table
+ * in determine_rate because we can't guarantee the parents
+ * will be registered with the framework until then.
+ */
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
+ &cfg);
+
+ mask = BIT(rcg->hid_width) - 1;
+ pre_div = 1;
+ if (cfg & mask)
+ pre_div = cfg & mask;
+
+ mode = cfg & CFG_MODE_MASK;
+ mode >>= CFG_MODE_SHIFT;
+ if (mode) {
mask = BIT(rcg->mnd_width) - 1;
- regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr +
- SE_PERF_M_DFSR(level), &m);
+ regmap_read(rcg->clkr.regmap,
+ rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
m &= mask;
- regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr +
- SE_PERF_N_DFSR(level), &n);
- n = ~n;
+
+ regmap_read(rcg->clkr.regmap,
+ rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
+ n = ~n;
n &= mask;
n += m;
- mode = cfg & CFG_MODE_MASK;
- mode >>= CFG_MODE_SHIFT;
}
- mask = BIT(rcg->hid_width) - 1;
- hid_div = cfg >> CFG_SRC_DIV_SHIFT;
- hid_div &= mask;
- cfg &= CFG_SRC_SEL_MASK;
- cfg >>= CFG_SRC_SEL_SHIFT;
-
- num_parents = clk_hw_get_num_parents(hw);
- for (i = 0; i < num_parents; i++) {
- if (cfg == rcg->parent_map[i].cfg) {
- prate = clk_hw_get_rate(
- clk_hw_get_parent_by_index(&rcg->clkr.hw, i));
- if (parent_rate != prate)
- parent_rate = prate;
- }
- }
-
-
- return calc_rate(parent_rate, m, n, mode, hid_div);
+ return calc_rate(parent_rate, m, n, mode, pre_div);
}
static const struct clk_ops clk_rcg2_dfs_ops = {
@@ -1102,32 +1076,28 @@ static const struct clk_ops clk_rcg2_dfs_ops = {
.recalc_rate = clk_rcg2_dfs_recalc_rate,
};
-static int clk_rcg2_enable_dfs(struct clk_rcg2 *rcg, struct regmap *regmap)
+static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
+ struct regmap *regmap)
{
- struct clk_init_data *init;
+ struct clk_rcg2 *rcg = data->rcg;
+ struct clk_init_data *init = data->init;
u32 val;
int ret;
- ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET,
- &val);
+ ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
if (ret)
return -EINVAL;
if (!(val & SE_CMD_DFS_EN))
return 0;
- init = kzalloc(sizeof(*init), GFP_KERNEL);
- if (!init)
- return -ENOMEM;
-
- init->name = rcg->clkr.hw.init->name;
- init->flags = rcg->clkr.hw.init->flags;
- init->parent_names = rcg->clkr.hw.init->parent_names;
- init->num_parents = rcg->clkr.hw.init->num_parents;
- init->flags = CLK_GET_RATE_NOCACHE;
+ /*
+ * Rate changes with consumer writing a register in
+ * their own I/O region
+ */
+ init->flags |= CLK_GET_RATE_NOCACHE;
init->ops = &clk_rcg2_dfs_ops;
- rcg->clkr.hw.init = init;
rcg->freq_tbl = NULL;
pr_debug("DFS registered for clk %s\n", init->name);
@@ -1136,14 +1106,14 @@ static int clk_rcg2_enable_dfs(struct clk_rcg2 *rcg, struct regmap *regmap)
}
int qcom_cc_register_rcg_dfs(struct regmap *regmap,
- struct clk_rcg2 **rcgs, int num_clks)
+ const struct clk_rcg_dfs_data *rcgs, size_t len)
{
int i, ret;
- for (i = 0; i < num_clks; i++) {
- ret = clk_rcg2_enable_dfs(rcgs[i], regmap);
+ for (i = 0; i < len; i++) {
+ ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
if (ret) {
- const char *name = rcgs[i]->clkr.hw.init->name;
+ const char *name = rcgs[i].init->name;
pr_err("DFS register failed for clk %s\n", name);
return ret;
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index fef6732bd7d8..42ab01d33b52 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -396,18 +396,27 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
{ }
};
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
+};
+
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
.cmd_rcgr = 0x17034,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s0_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -416,12 +425,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s1_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -430,12 +441,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s2_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -444,12 +457,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s3_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -458,12 +473,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s4_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -472,12 +489,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s5_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@@ -486,12 +505,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s6_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@@ -500,12 +521,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap0_s7_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -514,12 +537,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s0_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -528,12 +553,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s1_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -542,12 +569,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s2_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -556,12 +585,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s3_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -570,12 +601,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s4_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -584,12 +617,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s5_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -598,12 +633,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s6_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_shared_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@@ -612,12 +649,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_qupv3_wrap1_s7_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_shared_ops,
- },
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_init,
};
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
@@ -3458,23 +3490,23 @@ static const struct of_device_id gcc_sdm845_match_table[] = {
};
MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
-static struct clk_rcg2 *gcc_dfs_clocks[] = {
- &gcc_qupv3_wrap0_s0_clk_src,
- &gcc_qupv3_wrap0_s1_clk_src,
- &gcc_qupv3_wrap0_s2_clk_src,
- &gcc_qupv3_wrap0_s3_clk_src,
- &gcc_qupv3_wrap0_s4_clk_src,
- &gcc_qupv3_wrap0_s5_clk_src,
- &gcc_qupv3_wrap0_s6_clk_src,
- &gcc_qupv3_wrap0_s7_clk_src,
- &gcc_qupv3_wrap1_s0_clk_src,
- &gcc_qupv3_wrap1_s1_clk_src,
- &gcc_qupv3_wrap1_s2_clk_src,
- &gcc_qupv3_wrap1_s3_clk_src,
- &gcc_qupv3_wrap1_s4_clk_src,
- &gcc_qupv3_wrap1_s5_clk_src,
- &gcc_qupv3_wrap1_s6_clk_src,
- &gcc_qupv3_wrap1_s7_clk_src,
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk),
};
static int gcc_sdm845_probe(struct platform_device *pdev)