Message-ID: <CAKohpok0iDffNM31W2r97Wtq4DR+TdPwYWTAdJH7AyLqHm0UPA@mail.gmail.com>
Date: Wed, 30 Jan 2013 21:21:41 +0530
From: Viresh Kumar <viresh.kumar@...aro.org>
To: Fabio Baltieri <fabio.baltieri@...aro.org>
Cc: "Rafael J. Wysocki" <rjw@...k.pl>, cpufreq@...r.kernel.org,
linux-pm@...r.kernel.org, Linus Walleij <linus.walleij@...aro.org>,
swarren@...dotorg.org, linaro-dev@...ts.linaro.org,
Nicolas Pitre <nicolas.pitre@...aro.org>,
mathieu.poirier@...aro.org, Joseph Lo <josephl@...dia.com>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v7 2/4] cpufreq: ondemand: call dbs_check_cpu only when necessary
On 30 January 2013 18:30, Fabio Baltieri <fabio.baltieri@...aro.org> wrote:
> Modify ondemand timer to not resample CPU utilization if recently
> sampled from another SW coordinated core.
>
> Signed-off-by: Fabio Baltieri <fabio.baltieri@...aro.org>
I might be wrong, but I have some real concerns about this patch.
This is what I believe your idea is:
- because a CPU can sleep, create a timer per CPU
- then check the load again only when no other CPU in policy->cpus has
  checked the load within the sampling interval.
Correct?
> ---
> drivers/cpufreq/cpufreq_governor.c | 3 ++
> drivers/cpufreq/cpufreq_governor.h | 1 +
> drivers/cpufreq/cpufreq_ondemand.c | 58 +++++++++++++++++++++++++++++++-------
> 3 files changed, 52 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
> index 8393d6e..46f96a4 100644
> --- a/drivers/cpufreq/cpufreq_governor.c
> +++ b/drivers/cpufreq/cpufreq_governor.c
> @@ -289,6 +289,9 @@ second_time:
> }
> mutex_unlock(&dbs_data->mutex);
>
> + /* Initiate timer time stamp */
> + cpu_cdbs->time_stamp = ktime_get();
Here we update time_stamp only for policy->cpu's cdbs, not for the other
CPUs in policy->cpus.
> for_each_cpu(j, policy->cpus)
> dbs_timer_init(dbs_data, j, *sampling_rate);
> break;
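If each CPU's timer is meant to compare against its own time stamp, the
initialization needs to move into this loop. A minimal (untested) sketch of
what I mean, assuming the get_cpu_cdbs() callback from cpufreq_governor.h:

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_common_info *j_cdbs = dbs_data->get_cpu_cdbs(j);

		/* Initialize the time stamp for every CPU in the policy */
		j_cdbs->time_stamp = ktime_get();

		dbs_timer_init(dbs_data, j, *sampling_rate);
	}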
> diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
> index 93bb56d..13ceb3c 100644
> --- a/drivers/cpufreq/cpufreq_ondemand.c
> +++ b/drivers/cpufreq/cpufreq_ondemand.c
> @@ -216,23 +216,23 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
> }
> }
>
> -static void od_dbs_timer(struct work_struct *work)
> +static void od_timer_update(struct od_cpu_dbs_info_s *dbs_info, bool sample,
> + struct delayed_work *dw)
> {
> - struct od_cpu_dbs_info_s *dbs_info =
> - container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
> unsigned int cpu = dbs_info->cdbs.cpu;
> int delay, sample_type = dbs_info->sample_type;
>
> - mutex_lock(&dbs_info->cdbs.timer_mutex);
> -
> /* Common NORMAL_SAMPLE setup */
> dbs_info->sample_type = OD_NORMAL_SAMPLE;
> if (sample_type == OD_SUB_SAMPLE) {
> delay = dbs_info->freq_lo_jiffies;
> - __cpufreq_driver_target(dbs_info->cdbs.cur_policy,
> - dbs_info->freq_lo, CPUFREQ_RELATION_H);
> + if (sample)
> + __cpufreq_driver_target(dbs_info->cdbs.cur_policy,
> + dbs_info->freq_lo,
> + CPUFREQ_RELATION_H);
> } else {
> - dbs_check_cpu(&od_dbs_data, cpu);
> + if (sample)
> + dbs_check_cpu(&od_dbs_data, cpu);
> if (dbs_info->freq_lo) {
> /* Setup timer for SUB_SAMPLE */
> dbs_info->sample_type = OD_SUB_SAMPLE;
> @@ -243,11 +243,49 @@ static void od_dbs_timer(struct work_struct *work)
> }
> }
>
> - schedule_delayed_work_on(smp_processor_id(), &dbs_info->cdbs.work,
> - delay);
> + schedule_delayed_work_on(smp_processor_id(), dw, delay);
> +}
All good until now.
> +
> +static void od_timer_coordinated(struct od_cpu_dbs_info_s *dbs_info_local,
> + struct delayed_work *dw)
> +{
> + struct od_cpu_dbs_info_s *dbs_info;
> + ktime_t time_now;
> + s64 delta_us;
> + bool sample = true;
> +
--start--
> + /* use leader CPU's dbs_info */
> + dbs_info = &per_cpu(od_cpu_dbs_info, dbs_info_local->cdbs.cpu);
> + mutex_lock(&dbs_info->cdbs.timer_mutex);
> +
> + time_now = ktime_get();
> + delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
> +
> + /* Do nothing if we recently have sampled */
> + if (delta_us < (s64)(od_tuners.sampling_rate / 2))
> + sample = false;
> + else
> + dbs_info->cdbs.time_stamp = time_now;
> +
--end--
Instead of two routines (this one and od_dbs_timer below), we could have a
single routine and put the code marked above inside an "if (coordinated
CPUs)" check; that would save some redundant code (see the sketch after my
questions below).
Another issue I see: the current routine will be called from every CPU's
timer handler, so the above calculation happens for every CPU. Here you are
taking the difference between CPU "x"'s current time and CPU "x"'s own last
timestamp, but what you really want is the difference between the current
time and the timestamp of the last sample taken on any CPU in policy->cpus.
On top of that, the timestamp isn't initialized for all CPUs early in the
code, at GOV_START.
Am I correct?
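Something like this is what I have in mind: a single timer handler with the
coordination check folded in, always comparing against (and updating) the
time stamp of policy->cpu's dbs_info so it works as a per-policy stamp. This
is only an untested sketch, assuming cdbs.cur_policy is valid for every CPU
in the policy and reusing dbs_sw_coordinated_cpus() from patch 1/4:

static void od_dbs_timer(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	/* policy->cpu's dbs_info, so the time stamp is shared per policy */
	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
			dbs_info->cdbs.cur_policy->cpu);
	bool sample = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);

	if (dbs_sw_coordinated_cpus(&dbs_info->cdbs)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now,
				core_dbs_info->cdbs.time_stamp);

		/* Skip sampling if any CPU in the policy sampled recently */
		if (delta_us < (s64)(od_tuners.sampling_rate / 2))
			sample = false;
		else
			core_dbs_info->cdbs.time_stamp = time_now;
	}

	od_timer_update(core_dbs_info, sample, dw);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}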
> + od_timer_update(dbs_info, sample, dw);
> mutex_unlock(&dbs_info->cdbs.timer_mutex);
> }
>
> +static void od_dbs_timer(struct work_struct *work)
> +{
> + struct delayed_work *dw = to_delayed_work(work);
> + struct od_cpu_dbs_info_s *dbs_info =
> + container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
> +
> + if (dbs_sw_coordinated_cpus(&dbs_info->cdbs)) {
> + od_timer_coordinated(dbs_info, dw);
> + } else {
> + mutex_lock(&dbs_info->cdbs.timer_mutex);
> + od_timer_update(dbs_info, true, dw);
> + mutex_unlock(&dbs_info->cdbs.timer_mutex);
> + }
> +}
> +