Message-Id: <1502845243-20454-2-git-send-email-vikas.shivappa@linux.intel.com>
Date: Tue, 15 Aug 2017 18:00:42 -0700
From: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
To: vikas.shivappa@...el.com
Cc: vikas.shivappa@...ux.intel.com, x86@...nel.org,
linux-kernel@...r.kernel.org, hpa@...or.com, tglx@...utronix.de,
peterz@...radead.org, ravi.v.shankar@...el.com,
tony.luck@...el.com, fenghua.yu@...el.com, ak@...ux.intel.com,
eranian@...gle.com, davidcc@...gle.com
Subject: [PATCH 1/2] x86/intel_rdt/mbm: Fix MBM overflow handler during CPU hotplug

When a CPU is dying, we cancel the worker and schedule a new worker on a
different CPU in the same domain. But if the timer is already about to
expire (say in 0.99s), rescheduling it with the full interval essentially
doubles the interval, which can delay detection of an MBM counter
overflow.

We modify the hot CPU handling to cancel the delayed work on the dying
CPU and run the worker immediately on a different CPU in the same domain.
We do not flush the worker because the MBM overflow worker reschedules
itself on the same CPU and scans the domain->cpu_mask to get the domain
pointer; running it on the dying CPU, which is being removed from that
mask, would therefore be wrong.

Reported-by: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
---
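With the new delay_ms parameter, callers pass MBM_OVERFLOW_INTERVAL for
the regular ~1s cadence and 0 on the hotplug path so the worker fires
immediately on the newly chosen CPU. For reference, a minimal sketch of
how the reworked function ends up looking; only the changed lines appear
in the hunks below, the rest of the body is assumed unchanged:

void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
	unsigned long delay = msecs_to_jiffies(delay_ms);
	int cpu;

	if (!static_branch_likely(&rdt_enable_key))
		return;
	/* Pick a CPU from those still present in the domain's mask ... */
	cpu = cpumask_any(&dom->cpu_mask);
	dom->mbm_work_cpu = cpu;
	/* ... and (re)arm the worker there; a delay of 0 fires it at once */
	schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
}

This is safe on the hotplug path because domain_remove_cpu() has already
cleared the dying CPU from d->cpu_mask by this point, so cpumask_any()
can only pick one of the surviving CPUs.
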
 arch/x86/kernel/cpu/intel_rdt.c          | 4 ++--
 arch/x86/kernel/cpu/intel_rdt.h          | 2 +-
 arch/x86/kernel/cpu/intel_rdt_monitor.c  | 4 ++--
 arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 97c8d83..b8dc141 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -447,7 +447,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
 
 	if (is_mbm_enabled()) {
 		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
-		mbm_setup_overflow_handler(d);
+		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
 	}
 
 	return 0;
@@ -540,7 +540,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 	} else if (r == &rdt_resources_all[RDT_RESOURCE_L3] &&
 		   cpu == d->mbm_work_cpu && is_mbm_enabled()) {
 		cancel_delayed_work(&d->mbm_over);
-		mbm_setup_overflow_handler(d);
+		mbm_setup_overflow_handler(d, 0);
 	}
 }
 
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 4040bf1..3e48693 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -422,7 +422,7 @@ void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
				    struct rdt_domain *d);
 void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
		    struct rdtgroup *rdtgrp, int evtid, int first);
-void mbm_setup_overflow_handler(struct rdt_domain *dom);
+void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms);
 void mbm_handle_overflow(struct work_struct *work);
 
 #endif /* _ASM_X86_INTEL_RDT_H */
diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
index d6bfdfd..8378785 100644
--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
+++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
@@ -417,9 +417,9 @@ void mbm_handle_overflow(struct work_struct *work)
 	mutex_unlock(&rdtgroup_mutex);
 }
 
-void mbm_setup_overflow_handler(struct rdt_domain *dom)
+void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
 {
-	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
+	unsigned long delay = msecs_to_jiffies(delay_ms);
 	int cpu;
 
 	if (!static_branch_likely(&rdt_enable_key))
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 86a6979..b529f93 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -1140,7 +1140,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
 	if (is_mbm_enabled()) {
 		r = &rdt_resources_all[RDT_RESOURCE_L3];
 		list_for_each_entry(dom, &r->domains, list)
-			mbm_setup_overflow_handler(dom);
+			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
 	}
 
 	goto out;
--
1.9.1