Message-ID: <20210729061713.kzhka7nymo53zw2o@vireshk-i7>
Date: Thu, 29 Jul 2021 11:47:13 +0530
From: Viresh Kumar <viresh.kumar@...aro.org>
To: Thara Gopinath <thara.gopinath@...aro.org>
Cc: agross@...nel.org, bjorn.andersson@...aro.org, rui.zhang@...el.com,
daniel.lezcano@...aro.org, rjw@...ysocki.net, robh+dt@...nel.org,
steev@...i.org, linux-arm-msm@...r.kernel.org,
linux-pm@...r.kernel.org, linux-kernel@...r.kernel.org,
devicetree@...r.kernel.org
Subject: Re: [Patch v4 3/6] cpufreq: qcom-cpufreq-hw: Add dcvs interrupt
support
On 28-07-21, 18:19, Thara Gopinath wrote:
> Ha! I was too lazy to write this down! So how about I make this a mutex and
A mutex may not work, as you come here from irq context.
> put mod_delayed_work() inside the lock. So it will be something like below
>
> qcom_lmh_dcvs_notify()                      qcom_cpufreq_hw_lmh_exit()
>
> mutex_lock()                                mutex_lock()
> if (data->cancel_throttle) {                cancel_throttle = true
>         mutex_unlock()                      mutex_unlock()
>         return                              cancel_delayed_work_sync()
> }                                           free_irq()
> enable_irq() / mod_delayed_work()
> mutex_unlock()
>
> I will let you break it!
I can't break it :)
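To spell the pattern out, here is a minimal kernel-style sketch of how the
locking ends up looking with a spinlock (which, unlike a mutex, is fine from
irq context). throttling_over() is a made-up placeholder for the real
frequency comparison; the rest mirrors what the diff below does:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct lmh_data {
	/* Protects cancel_throttle against the de-init path. */
	spinlock_t lock;
	bool cancel_throttle;
	struct delayed_work work;
	int irq;
};

/* Placeholder: does the hardware report we are out of throttling? */
static bool throttling_over(struct lmh_data *d);

/* Runs from the threaded irq handler and from the polling work. */
static void lmh_notify(struct lmh_data *d)
{
	spin_lock(&d->lock);
	if (d->cancel_throttle)
		goto out;	/* de-init in progress, don't re-arm */

	/* Re-arm exactly one of the two mechanisms, under the lock. */
	if (throttling_over(d))
		enable_irq(d->irq);
	else
		mod_delayed_work(system_highpri_wq, &d->work,
				 msecs_to_jiffies(10));
out:
	spin_unlock(&d->lock);
}

static void lmh_exit(struct lmh_data *d)
{
	spin_lock(&d->lock);
	d->cancel_throttle = true;
	spin_unlock(&d->lock);

	/*
	 * Any notify racing with us either re-armed before we set the
	 * flag (cancel_delayed_work_sync() below catches the work) or
	 * saw the flag and bailed out, so nothing re-arms after this.
	 */
	cancel_delayed_work_sync(&d->work);
	free_irq(d->irq, d);
}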
Consider merging the below into this patch; it fixes several other minor
issues I see in the code.
--
viresh
-------------------------8<-------------------------
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 3cc27d9e2ed1..4261299fa9e5 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -38,13 +38,17 @@ struct qcom_cpufreq_soc_data {
struct qcom_cpufreq_data {
void __iomem *base;
struct resource *res;
- struct delayed_work throttle_work;
const struct qcom_cpufreq_soc_data *soc_data;
- struct cpufreq_policy *policy;
- /* Lock to synchronize between de-init sequence and re-starting LMh polling/interrupts */
+
+ /*
+ * Lock to synchronize between de-init sequence and re-starting LMh
+ * polling/interrupts.
+ */
spinlock_t throttle_lock;
int throttle_irq;
bool cancel_throttle;
+ struct delayed_work throttle_work;
+ struct cpufreq_policy *policy;
};
static unsigned long cpu_hw_rate, xo_rate;
@@ -271,10 +275,11 @@ static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
+ unsigned long max_capacity, capacity, freq_hz, throttled_freq;
struct cpufreq_policy *policy = data->policy;
+ int cpu = cpumask_first(policy->cpus);
+ struct device *dev = get_cpu_device(cpu);
struct dev_pm_opp *opp;
- struct device *dev;
- unsigned long max_capacity, capacity, freq_hz, throttled_freq;
unsigned int freq;
/*
@@ -284,7 +289,6 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
freq = qcom_lmh_get_throttle_freq(data);
freq_hz = freq * HZ_PER_KHZ;
- dev = get_cpu_device(cpumask_first(policy->cpus));
opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);
@@ -293,34 +297,37 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
/* Update thermal pressure */
- max_capacity = arch_scale_cpu_capacity(cpumask_first(policy->cpus));
+ max_capacity = arch_scale_cpu_capacity(cpu);
capacity = mult_frac(max_capacity, throttled_freq, policy->cpuinfo.max_freq);
+
/* Don't pass boost capacity to scheduler */
if (capacity > max_capacity)
capacity = max_capacity;
arch_set_thermal_pressure(policy->cpus, max_capacity - capacity);
- /* In the unlikely case cpufreq is de-registered do not enable polling or h/w interrupt */
-
+ /*
+ * In the unlikely case where the policy is going away, do not enable
+ * polling or the h/w interrupt.
+ */
spin_lock(&data->throttle_lock);
- if (data->cancel_throttle) {
- spin_unlock(&data->throttle_lock);
- return;
- }
- spin_unlock(&data->throttle_lock);
+
+ if (data->cancel_throttle)
+ goto out;
/*
- * If h/w throttled frequency is higher than what cpufreq has requested for, stop
- * polling and switch back to interrupt mechanism
+ * If the h/w throttled frequency is higher than what cpufreq has
+ * requested, stop polling and switch back to the interrupt mechanism.
*/
- if (throttled_freq >= qcom_cpufreq_hw_get(cpumask_first(policy->cpus)))
- /* Clear the existing interrupts and enable it back */
+ if (throttled_freq >= qcom_cpufreq_hw_get(cpu)) {
enable_irq(data->throttle_irq);
- else
+ } else {
mod_delayed_work(system_highpri_wq, &data->throttle_work,
msecs_to_jiffies(10));
+ }
+out:
+ spin_unlock(&data->throttle_lock);
}
static void qcom_lmh_dcvs_poll(struct work_struct *work)
@@ -328,7 +335,6 @@ static void qcom_lmh_dcvs_poll(struct work_struct *work)
struct qcom_cpufreq_data *data;
data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
-
qcom_lmh_dcvs_notify(data);
}
@@ -407,6 +413,7 @@ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
spin_lock(&data->throttle_lock);
data->cancel_throttle = true;
spin_unlock(&data->throttle_lock);
+
cancel_delayed_work_sync(&data->throttle_work);
free_irq(data->throttle_irq, data);
}