Message-ID: <20180907145404.GB11088@cmpxchg.org>
Date: Fri, 7 Sep 2018 10:54:04 -0400
From: Johannes Weiner <hannes@...xchg.org>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Tejun Heo <tj@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Daniel Drake <drake@...lessm.com>,
Vinayak Menon <vinmenon@...eaurora.org>,
Christopher Lameter <cl@...ux.com>,
Peter Enderborg <peter.enderborg@...y.com>,
Shakeel Butt <shakeelb@...gle.com>,
Mike Galbraith <efault@....de>, linux-mm@...ck.org,
cgroups@...r.kernel.org, linux-kernel@...r.kernel.org,
kernel-team@...com
Subject: Re: [PATCH 8/9] psi: pressure stall information for CPU, memory, and
IO
On Fri, Sep 07, 2018 at 12:24:58PM +0200, Peter Zijlstra wrote:
> On Tue, Aug 28, 2018 at 01:22:57PM -0400, Johannes Weiner wrote:
> > +static void psi_clock(struct work_struct *work)
> > +{
> > +	struct delayed_work *dwork;
> > +	struct psi_group *group;
> > +	bool nonidle;
> > +
> > +	dwork = to_delayed_work(work);
> > +	group = container_of(dwork, struct psi_group, clock_work);
> > +
> > +	/*
> > +	 * If there is task activity, periodically fold the per-cpu
> > +	 * times and feed samples into the running averages. If things
> > +	 * are idle and there is no data to process, stop the clock.
> > +	 * Once restarted, we'll catch up the running averages in one
> > +	 * go - see calc_avgs() and missed_periods.
> > +	 */
> > +
> > +	nonidle = update_stats(group);
> > +
> > +	if (nonidle) {
> > +		unsigned long delay = 0;
> > +		u64 now;
> > +
> > +		now = sched_clock();
> > +		if (group->next_update > now)
> > +			delay = nsecs_to_jiffies(group->next_update - now) + 1;
> > +		schedule_delayed_work(dwork, delay);
> > +	}
> > +}
>
> Just a little nit; I would expect a function called *clock() to return a
> time.
Fair enough, let's rename this. How about this on top?

diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 92489e66840b..0f07749b60a4 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -154,7 +154,7 @@ static struct psi_group psi_system = {
 	.pcpu = &system_group_pcpu,
 };
 
-static void psi_clock(struct work_struct *work);
+static void psi_update_work(struct work_struct *work);
 
 static void group_init(struct psi_group *group)
 {
@@ -163,7 +163,7 @@ static void group_init(struct psi_group *group)
 	for_each_possible_cpu(cpu)
 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
 	group->next_update = sched_clock() + psi_period;
-	INIT_DELAYED_WORK(&group->clock_work, psi_clock);
+	INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
 	mutex_init(&group->stat_lock);
 }
 
@@ -347,7 +347,7 @@ static bool update_stats(struct psi_group *group)
 	return nonidle_total;
 }
 
-static void psi_clock(struct work_struct *work)
+static void psi_update_work(struct work_struct *work)
 {
 	struct delayed_work *dwork;
 	struct psi_group *group;
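
An aside on the comment in the function above, since it points at
calc_avgs() and missed_periods without quoting them: the "catch up in
one go" part just means the running average is decayed through the
whole idle gap on restart, instead of the work having run once per
period with a zero sample. A standalone sketch of that idea - the
names, decay factor and structure here are made up for illustration,
this is not the calc_avgs() from the series:

/*
 * Illustrative sketch only -- not the calc_avgs() from this series.
 * When the periodic work was stopped for `missed` idle periods, the
 * average can be decayed for all of them in one call on restart,
 * rather than running the work once per period with a zero sample.
 */
#include <stdio.h>

#define EXP_WEIGHT 0.8	/* made-up per-period decay factor */

static double fold_sample(double avg, double sample)
{
	return avg * EXP_WEIGHT + sample * (1.0 - EXP_WEIGHT);
}

static double catch_up(double avg, unsigned long missed, double sample)
{
	/* the idle gap contributed no pressure: decay with zero samples */
	while (missed--)
		avg = fold_sample(avg, 0.0);

	/* then fold in the current period normally */
	return fold_sample(avg, sample);
}

int main(void)
{
	/* avg was 30%, clock stopped for 4 periods, now a 10% sample */
	printf("%.4f\n", catch_up(0.30, 4, 0.10));
	return 0;
}

The point being that stopping the clock while idle loses nothing: the
same average falls out whether the idle periods were sampled as zeroes
one by one or folded in at restart.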