Message-ID: <CAPDyKFoQ4ugMsy+ZRvg9AzPwQa3LC73sYMw7gNfYqe+pCfncUw@mail.gmail.com>
Date:   Wed, 8 Jun 2022 11:36:14 +0200
From:   Ulf Hansson <ulf.hansson@...aro.org>
To:     Maulik Shah <quic_mkshah@...cinc.com>
Cc:     bjorn.andersson@...aro.org, linux-arm-msm@...r.kernel.org,
        linux-pm@...r.kernel.org, linux-kernel@...r.kernel.org,
        rafael@...nel.org, daniel.lezcano@...aro.org,
        quic_lsrao@...cinc.com, quic_rjendra@...cinc.com
Subject: Re: [PATCH v2 6/6] soc: qcom: rpmh-rsc: Write CONTROL_TCS with next
 timer wakeup

On Fri, 20 May 2022 at 11:39, Ulf Hansson <ulf.hansson@...aro.org> wrote:
>
> On Wed, 11 May 2022 at 15:17, Maulik Shah <quic_mkshah@...cinc.com> wrote:
> >
> > The next wakeup timer value needs to be set in the always-on domain
> > timer, because the arch timer interrupt cannot wake up the SoC when,
> > after the deepest CPUidle states, the SoC also enters its deepest low
> > power state.
> >
> > To wake up the SoC in such scenarios, the earliest wakeup time is
> > written to CONTROL_TCS, and the firmware takes care of arming its own
> > timer in the always-on domain with that next wakeup time. The timer
> > wakes up the RSC and sets the resources back to the wake state.
> >
> > Signed-off-by: Maulik Shah <quic_mkshah@...cinc.com>
>
> I didn't forget to review this, but please allow me a few more days to
> think a little bit more about this.

Apologies for the delay; this looks good to me!

Reviewed-by: Ulf Hansson <ulf.hansson@...aro.org>

Kind regards
Uffe

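For readers skimming the quoted patch below: the new
rpmh_rsc_write_next_wakeup() takes the earliest pending hrtimer expiry,
makes it relative to the current kernel time, converts it to arch timer
cycles and rebases it onto the current counter value; during system
suspend it writes the maximum value instead. A standalone sketch of that
decision in plain C, where every input is a made-up stand-in for the
kernel API named in the comment, not the API itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t next_wakeup_cycles(bool suspending,
                                   uint64_t next_hrtimer_us, /* stand-in: dev_pm_genpd_get_next_hrtimer() */
                                   uint64_t now_us,          /* stand-in: ktime_get() */
                                   uint64_t counter_now,     /* stand-in: arch_timer_read_counter() */
                                   uint64_t ticks_per_us)    /* ~19 for a 19.2 MHz arch timer */
{
        if (suspending)
                return ~0ULL; /* timekeeping is off: park the timer at "never" */

        /* relative wakeup in kernel time, rebased onto the arch timer */
        return counter_now + (next_hrtimer_us - now_us) * ticks_per_us;
}

int main(void)
{
        /* idle path: wake 500 us from now, counter currently at 1000000 */
        printf("0x%llx\n", (unsigned long long)
               next_wakeup_cycles(false, 1500, 1000, 1000000, 19));
        return 0;
}
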
>
> > ---
> >  drivers/soc/qcom/rpmh-internal.h |  3 ++
> >  drivers/soc/qcom/rpmh-rsc.c      | 61 ++++++++++++++++++++++++++++++++++++++++
> >  drivers/soc/qcom/rpmh.c          |  4 ++-
> >  3 files changed, 67 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
> > index 7866bb1..39f5358 100644
> > --- a/drivers/soc/qcom/rpmh-internal.h
> > +++ b/drivers/soc/qcom/rpmh-internal.h
> > @@ -112,6 +112,7 @@ struct rpmh_ctrlr {
> >   * @tcs_wait:           Wait queue used to wait for @tcs_in_use to free up a
> >   *                      slot
> >   * @client:             Handle to the DRV's client.
> > + * @dev:                RSC device.
> >   */
> >  struct rsc_drv {
> >         const char *name;
> > @@ -127,12 +128,14 @@ struct rsc_drv {
> >         spinlock_t lock;
> >         wait_queue_head_t tcs_wait;
> >         struct rpmh_ctrlr client;
> > +       struct device *dev;
> >  };
> >
> >  int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
> >  int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
> >                              const struct tcs_request *msg);
> >  void rpmh_rsc_invalidate(struct rsc_drv *drv);
> > +void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv);
> >
> >  void rpmh_tx_done(const struct tcs_request *msg, int r);
> >  int rpmh_flush(struct rpmh_ctrlr *ctrlr);
> > diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
> > index 8e01697..25b838b 100644
> > --- a/drivers/soc/qcom/rpmh-rsc.c
> > +++ b/drivers/soc/qcom/rpmh-rsc.c
> > @@ -12,6 +12,7 @@
> >  #include <linux/io.h>
> >  #include <linux/iopoll.h>
> >  #include <linux/kernel.h>
> > +#include <linux/ktime.h>
> >  #include <linux/list.h>
> >  #include <linux/module.h>
> >  #include <linux/notifier.h>
> > @@ -25,6 +26,7 @@
> >  #include <linux/spinlock.h>
> >  #include <linux/wait.h>
> >
> > +#include <clocksource/arm_arch_timer.h>
> >  #include <soc/qcom/cmd-db.h>
> >  #include <soc/qcom/tcs.h>
> >  #include <dt-bindings/soc/qcom,rpmh-rsc.h>
> > @@ -49,6 +51,14 @@
> >  #define DRV_NCPT_MASK                  0x1F
> >  #define DRV_NCPT_SHIFT                 27
> >
> > +/* Offsets for CONTROL TCS Registers */
> > +#define RSC_DRV_CTL_TCS_DATA_HI                0x38
> > +#define RSC_DRV_CTL_TCS_DATA_HI_MASK   0xFFFFFF
> > +#define RSC_DRV_CTL_TCS_DATA_HI_VALID  BIT(31)
> > +#define RSC_DRV_CTL_TCS_DATA_LO                0x40
> > +#define RSC_DRV_CTL_TCS_DATA_LO_MASK   0xFFFFFFFF
> > +#define RSC_DRV_CTL_TCS_DATA_SIZE      32
> > +
> >  /* Offsets for common TCS Registers, one bit per TCS */
> >  #define RSC_DRV_IRQ_ENABLE             0x00
> >  #define RSC_DRV_IRQ_STATUS             0x04
> > @@ -142,6 +152,14 @@
> >   *  +---------------------------------------------------+
> >   */
> >
> > +#define USECS_TO_CYCLES(time_usecs)                    \
> > +       xloops_to_cycles((time_usecs) * 0x10C7UL)
> > +
> > +static inline unsigned long xloops_to_cycles(unsigned long xloops)
> > +{
> > +       return (xloops * loops_per_jiffy * HZ) >> 32;
> > +}
> > +
> >  static inline void __iomem *
> >  tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
> >  {
> > @@ -757,6 +775,48 @@ static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
> >  }
> >
> >  /**
> > + * rpmh_rsc_write_next_wakeup() - Write next wakeup in CONTROL_TCS.
> > + * @drv: The controller
> > + *
> > + * Writes maximum wakeup cycles when called from suspend.
> > + * Writes earliest hrtimer wakeup when called from idle.
> > + */
> > +void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv)
> > +{
> > +       ktime_t now, wakeup;
> > +       u64 wakeup_us, wakeup_cycles = ~0;
> > +       u32 lo, hi;
> > +
> > +       if (!drv->tcs[CONTROL_TCS].num_tcs || !drv->genpd_nb.notifier_call)
> > +               return;
> > +
> > +       /* Set highest time when system (timekeeping) is suspended */
> > +       if (system_state == SYSTEM_SUSPEND)
> > +               goto exit;
> > +
> > +       /* Find the earliest hrtimer wakeup from online cpus */
> > +       wakeup = dev_pm_genpd_get_next_hrtimer(drv->dev);
> > +
> > +       /* Find the relative wakeup in kernel time scale */
> > +       now = ktime_get();
> > +       wakeup = ktime_sub(wakeup, now);
> > +       wakeup_us = ktime_to_us(wakeup);
> > +
> > +       /* Convert the wakeup to arch timer scale */
> > +       wakeup_cycles = USECS_TO_CYCLES(wakeup_us);
> > +       wakeup_cycles += arch_timer_read_counter();
> > +
> > +exit:
> > +       lo = wakeup_cycles & RSC_DRV_CTL_TCS_DATA_LO_MASK;
> > +       hi = wakeup_cycles >> RSC_DRV_CTL_TCS_DATA_SIZE;
> > +       hi &= RSC_DRV_CTL_TCS_DATA_HI_MASK;
> > +       hi |= RSC_DRV_CTL_TCS_DATA_HI_VALID;
> > +
> > +       writel_relaxed(lo, drv->base + RSC_DRV_CTL_TCS_DATA_LO);
> > +       writel_relaxed(hi, drv->base + RSC_DRV_CTL_TCS_DATA_HI);
> > +}
> > +
> > +/**
> >   * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
> >   * @nfb:    Pointer to the notifier block in struct rsc_drv.
> >   * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
> > @@ -1035,6 +1095,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
> >         INIT_LIST_HEAD(&drv->client.batch_cache);
> >
> >         dev_set_drvdata(&pdev->dev, drv);
> > +       drv->dev = &pdev->dev;
> >
> >         ret = devm_of_platform_populate(&pdev->dev);
> >         if (ret && pdev->dev.pm_domain) {
> > diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
> > index 01765ee..3a53ed9 100644
> > --- a/drivers/soc/qcom/rpmh.c
> > +++ b/drivers/soc/qcom/rpmh.c
> > @@ -450,7 +450,7 @@ int rpmh_flush(struct rpmh_ctrlr *ctrlr)
> >
> >         if (!ctrlr->dirty) {
> >                 pr_debug("Skipping flush, TCS has latest data.\n");
> > -               goto exit;
> > +               goto write_next_wakeup;
> >         }
> >
> >         /* Invalidate the TCSes first to avoid stale data */
> > @@ -479,6 +479,8 @@ int rpmh_flush(struct rpmh_ctrlr *ctrlr)
> >
> >         ctrlr->dirty = false;
> >
> > +write_next_wakeup:
> > +       rpmh_rsc_write_next_wakeup(ctrlr_to_drv(ctrlr));
> >  exit:
> >         spin_unlock(&ctrlr->cache_lock);
> >         return ret;
> > --
> > 2.7.4
> >
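
A note on the USECS_TO_CYCLES() constant above, for anyone wondering:
0x10C7 is 4295, i.e. 2^32 / 10^6 rounded up, the same scaling trick the
kernel's __udelay() path uses. Multiplying microseconds by it and then
shifting the loops_per_jiffy * HZ product right by 32 is an integer
approximation of usecs * loops_per_second / 10^6, and on arm64 the
delay loop is calibrated from the arch timer, so loops per second
matches timer cycles per second. A quick standalone check, with an
assumed (illustrative) 19.2 MHz rate standing in for
loops_per_jiffy * HZ:

#include <stdint.h>
#include <stdio.h>

#define TICKS_PER_SEC 19200000ULL /* assumed arch timer rate, not from the patch */

static uint64_t usecs_to_cycles(uint64_t usecs)
{
        /* 0x10C7 == ceil(2^32 / 10^6); the final >> 32 cancels the 2^32 */
        return (usecs * 0x10C7ULL * TICKS_PER_SEC) >> 32;
}

int main(void)
{
        /* 1000 us at 19.2 MHz should come out at ~19200 cycles */
        printf("%llu\n", (unsigned long long)usecs_to_cycles(1000));
        return 0;
}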

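The tail of rpmh_rsc_write_next_wakeup() then splits the 64-bit cycle
deadline across the two CONTROL TCS data registers: the low 32 bits go
to DATA_LO, and the next 24 bits go to DATA_HI together with a valid
flag in bit 31. A minimal illustration of that packing, with the masks
copied from the patch and the writel_relaxed() calls replaced by a
print:

#include <stdint.h>
#include <stdio.h>

#define DATA_HI_MASK  0xFFFFFFu   /* only 24 bits of the high word are kept */
#define DATA_HI_VALID (1u << 31)  /* firmware honours the value only when set */

int main(void)
{
        uint64_t wakeup_cycles = 0x0123456789ABCDEFULL; /* arbitrary example */

        uint32_t lo = wakeup_cycles & 0xFFFFFFFFu;
        uint32_t hi = (wakeup_cycles >> 32) & DATA_HI_MASK;

        hi |= DATA_HI_VALID;

        /* the driver writel_relaxed()s these to RSC_DRV_CTL_TCS_DATA_{LO,HI} */
        printf("lo=0x%08x hi=0x%08x\n", lo, hi);
        return 0;
}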