[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-Id: <20220412185644.2EEC9C385A5@smtp.kernel.org>
Date: Tue, 12 Apr 2022 11:56:42 -0700
From: Stephen Boyd <sboyd@...nel.org>
To: Amit Nischal <quic_anischal@...cinc.com>,
Bjorn Andersson <bjorn.andersson@...aro.org>,
Michael Turquette <mturquette@...libre.com>,
Taniya Das <quic_tdas@...cinc.com>
Cc: dmitry.baryshkov@...aro.org, linux-arm-msm@...r.kernel.org,
linux-clk@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2] clk: qcom: rcg2: Cache CFG register updates for parked RCGs
I "quicified" the codeaurora emails, let's hope it worked.
Quoting Bjorn Andersson (2022-03-07 20:03:48)
> As GDSCs are turned on and off some associated clocks are momentarily
> enabled for housekeeping purposes. For this, and similar, purposes the
> "shared RCGs" will park the RCG on a source clock which is known to be
> available.
> When the RCG is parked, a safe clock source will be selected and
> committed, then the original source would be written back and upon enable
> the change back to the unparked source would be committed.
>
> But starting with SM8350 this fails, as the value in CFG is committed by
> the GDSC handshake and without a valid parent the GDSC enablement will
> fail.
Does this lead to boot problems? Or some driver failing to work? More
details on severity here please.
>
> To avoid this problem, the software needs to cache the CFG register
> content while the shared RCG is parked.
>
> Writes to M, N and D registers are committed as they are requested. New
> helpers for get_parent() and recalc_rate() are extracted from their
> previous implementations and __clk_rcg2_configure() is modified to allow
> it to operate on the cached value.
>
> Fixes: 7ef6f11887bd ("clk: qcom: Configure the RCGs to a safe source as needed")
> Signed-off-by: Bjorn Andersson <bjorn.andersson@...aro.org>
> ---
Minor nits mostly. Thanks for taking the cfg caching approach. I think
we want to take this for clk-fixes if it is serious enough so I can
merge the next version directly.
>
> Changes since v1:
> - Rather than caching the last requested frequency, cache and update the CFG
> register value while the shared RCG is disabled.
> - Use/modify the cached RCG value in get_parent(), set_parent() and
> recalc_rate() for parked shared RCGs as well.
> - Rewrote the commit message.
>
> drivers/clk/qcom/clk-rcg.h | 2 +
> drivers/clk/qcom/clk-rcg2.c | 134 +++++++++++++++++++++++++++---------
> 2 files changed, 104 insertions(+), 32 deletions(-)
>
> diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
> index 99efcc7f8d88..7bcbda8e4f17 100644
> --- a/drivers/clk/qcom/clk-rcg.h
> +++ b/drivers/clk/qcom/clk-rcg.h
> @@ -139,6 +139,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
> * @freq_tbl: frequency table
> * @clkr: regmap clock handle
> * @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG
> + * @parked_cfg: cached value of the CFG register for parked RCGs
> */
> struct clk_rcg2 {
> u32 cmd_rcgr;
> @@ -149,6 +150,7 @@ struct clk_rcg2 {
> const struct freq_tbl *freq_tbl;
> struct clk_regmap clkr;
> u8 cfg_off;
> + u32 parked_cfg;
> };
>
> #define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
> diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
> index e1b1b426fae4..8e3aebb72191 100644
> --- a/drivers/clk/qcom/clk-rcg2.c
> +++ b/drivers/clk/qcom/clk-rcg2.c
> @@ -73,16 +73,11 @@ static int clk_rcg2_is_enabled(struct clk_hw *hw)
> return (cmd & CMD_ROOT_OFF) == 0;
> }
>
> -static u8 clk_rcg2_get_parent(struct clk_hw *hw)
> +static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
> {
> struct clk_rcg2 *rcg = to_clk_rcg2(hw);
> int num_parents = clk_hw_get_num_parents(hw);
> - u32 cfg;
> - int i, ret;
> -
> - ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
> - if (ret)
> - goto err;
> + int i;
>
> cfg &= CFG_SRC_SEL_MASK;
> cfg >>= CFG_SRC_SEL_SHIFT;
> @@ -91,12 +86,27 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw)
> if (cfg == rcg->parent_map[i].cfg)
> return i;
>
> -err:
> pr_debug("%s: Clock %s has invalid parent, using default.\n",
> __func__, clk_hw_get_name(hw));
> return 0;
> }
>
> +static u8 clk_rcg2_get_parent(struct clk_hw *hw)
> +{
> + struct clk_rcg2 *rcg = to_clk_rcg2(hw);
> + u32 cfg;
> + int ret;
> +
> + ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
> + if (ret) {
> + pr_err("%s: Unable to read CFG register for %s\n",
This used to be the pr_debug() above. Can it still be a pr_debug()?
> + __func__, clk_hw_get_name(hw));
> + return 0;
> + }
> +
> + return __clk_rcg2_get_parent(hw, cfg);
> +}
> +
> static int update_config(struct clk_rcg2 *rcg)
> {
> int count, ret;
> @@ -163,12 +173,10 @@ calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
> }
>
> static unsigned long
> -clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
> +__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
> {
> struct clk_rcg2 *rcg = to_clk_rcg2(hw);
> - u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
> -
> - regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
> + u32 hid_div, m = 0, n = 0, mode = 0, mask;
>
> if (rcg->mnd_width) {
> mask = BIT(rcg->mnd_width) - 1;
> @@ -189,6 +197,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
> return calc_rate(parent_rate, m, n, mode, hid_div);
> }
>
> +static unsigned long
> +clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
> +{
> + struct clk_rcg2 *rcg = to_clk_rcg2(hw);
> + u32 cfg;
> +
> + regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
> +
> + return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
> +}
> +
> static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
> struct clk_rate_request *req,
> enum freq_policy policy)
> @@ -262,9 +281,10 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
> return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
> }
>
> -static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
> +static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
> + u32 *cfg)
> {
> - u32 cfg, mask;
> + u32 mask;
> struct clk_hw *hw = &rcg->clkr.hw;
> int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
>
> @@ -289,21 +309,31 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
> return ret;
> }
>
> - mask = BIT(rcg->hid_width) - 1;
> - mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
> - cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
> - cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
> + *cfg &= ~GENMASK(rcg->hid_width - 1, 0);
I'd prefer to not change this in this patch. Name the parameter _cfg and
then assign it at the end? I had to look closely here and things aren't
the same.
> + *cfg &= ~(CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK);
> +
> + *cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
> + *cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
> if (rcg->mnd_width && f->n && (f->m != f->n))
> - cfg |= CFG_MODE_DUAL_EDGE;
> - return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
> - mask, cfg);
> + *cfg |= CFG_MODE_DUAL_EDGE;
> +
*_cfg = cfg;
> + return 0;
> }
>
> static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
> {
> + u32 cfg;
> int ret;
>
> - ret = __clk_rcg2_configure(rcg, f);
> + ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
> + if (ret)
> + return ret;
> +
> + ret = __clk_rcg2_configure(rcg, f, &cfg);
> + if (ret)
> + return ret;
> +
> + ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
> if (ret)
> return ret;
>
> @@ -969,11 +999,12 @@ static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
> return -EINVAL;
>
> /*
> - * In case clock is disabled, update the CFG, M, N and D registers
> - * and don't hit the update bit of CMD register.
> + * In case clock is disabled, update the M, N and D registers and cache
> + * the CFG value in parked_cfg.
We still don't hit the update bit, right?
> */
> +
> if (!__clk_is_enabled(hw->clk))
> - return __clk_rcg2_configure(rcg, f);
> + return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);
>
> return clk_rcg2_shared_force_enable_clear(hw, f);
> }
> @@ -997,6 +1028,11 @@ static int clk_rcg2_shared_enable(struct clk_hw *hw)
> if (ret)
> return ret;
>
> + /* Write back the stored configuration corresponding to current rate */
> + ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
> + if (ret)
> + return ret;
> +
> ret = update_config(rcg);
> if (ret)
> return ret;
> @@ -1007,13 +1043,12 @@ static int clk_rcg2_shared_enable(struct clk_hw *hw)
> static void clk_rcg2_shared_disable(struct clk_hw *hw)
> {
> struct clk_rcg2 *rcg = to_clk_rcg2(hw);
> - u32 cfg;
>
> /*
> * Store current configuration as switching to safe source would clear
> * the SRC and DIV of CFG register
> */
> - regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
> + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);
>
> /*
> * Park the RCG at a safe configuration - sourced off of safe source.
> @@ -1031,17 +1066,52 @@ static void clk_rcg2_shared_disable(struct clk_hw *hw)
> update_config(rcg);
>
> clk_rcg2_clear_force_enable(hw);
> +}
>
> - /* Write back the stored configuration corresponding to current rate */
> - regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
> +static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
> +{
> + struct clk_rcg2 *rcg = to_clk_rcg2(hw);
> +
> + /* If the shared rcg is parked used the cached cfg instead */
> + if (!__clk_is_enabled(hw->clk))
> + return __clk_rcg2_get_parent(hw, rcg->parked_cfg);
> +
> + return clk_rcg2_get_parent(hw);
> +}
> +
> +static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
> +{
> + struct clk_rcg2 *rcg = to_clk_rcg2(hw);
> +
> + /* If the shared rcg is parked only update the cached cfg */
> + if (!__clk_is_enabled(hw->clk)) {
Use clk_hw_is_enabled() please
> + rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
> + rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
> +
> + return 0;
> + }
> +
> + return clk_rcg2_set_parent(hw, index);
> +}
Powered by blists - more mailing lists