lists.openwall.net | lists / announce owl-users owl-dev john-users john-dev passwdqc-users yescrypt popa3d-users / oss-security kernel-hardening musl sabotage tlsify passwords / crypt-dev xvendor / Bugtraq Full-Disclosure linux-kernel linux-netdev linux-ext4 linux-hardening linux-cve-announce PHC | |
Open Source and information security mailing list archives
| ||
|
Date: Wed, 4 Nov 2015 18:19:29 -0800 From: David Carrillo-Cisneros <davidcc@...gle.com> To: Fenghua Yu <fenghua.yu@...el.com> Cc: Stephane Eranian <eranian@...gle.com>, Paul Turner <pjt@...gle.com>, linux-kernel@...r.kernel.org, David Carrillo-Cisneros <davidcc@...gle.com> Subject: [PATCH 2/2] x86/intel_rdt: Fix bug in initialization, locks and write cbm mask. Fix bugs in patch series "x86:Intel Cache Allocation Technology Support" patches by Fenghua Yu. Changes are: 1) Instruct task_css_check not to print a warning for unnecessary lockdeps when calling from __rdt_intel_sched_in since all callers are already synchronized by task_rq_lock(). 2) Add missing mutex_locks surrounding accesses to clos_cbm_table. 3) Properly initialize online cpus in intel_rdt_late_init by using intel_rdt_cpu_start() instead of rdt_cpumask_update(). 4) Make cbm_validate_rdt_cgroup to actually use the children's mask when validating children's masks (as it should). Signed-off-by: David Carrillo-Cisneros <davidcc@...gle.com> --- arch/x86/include/asm/intel_rdt.h | 12 +++++++++--- arch/x86/kernel/cpu/intel_rdt.c | 24 ++++++++++++++++++------ 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h index fbe1e00..f487a93 100644 --- a/arch/x86/include/asm/intel_rdt.h +++ b/arch/x86/include/asm/intel_rdt.h @@ -37,11 +37,17 @@ static inline struct intel_rdt *parent_rdt(struct intel_rdt *ir) } /* - * Return rdt group to which this task belongs. + * Return rdt group to which this task belongs without checking for lockdep. */ -static inline struct intel_rdt *task_rdt(struct task_struct *task) +static inline struct intel_rdt *task_rdt_nocheck(struct task_struct *task) { - return css_rdt(task_css(task, intel_rdt_cgrp_id)); + /* + * The checks for lockdep performed by task_subsys_state are not + * necessary when callers are properly synchronized by other locks. 
+ * If the caller for this function is not properly synchronized + * use task_css instead. + */ + return css_rdt(task_css_check(task, intel_rdt_cgrp_id, true)); } /* diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index cb4d2ef..d5fa76f 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c @@ -115,7 +115,13 @@ static inline bool cache_alloc_supported(struct cpuinfo_x86 *c) void __intel_rdt_sched_in(void *dummy) { struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); - struct intel_rdt *ir = task_rdt(current); + + /* + * All callers are synchronized by task_rq_lock(); we do not use RCU + * which is pointless here. Thus, we call task_rdt_nocheck that avoids + * the lockdep checks. + */ + struct intel_rdt *ir = task_rdt_nocheck(current); if (ir->closid == state->closid) return; @@ -403,7 +409,9 @@ static int intel_cache_alloc_cbm_read(struct seq_file *m, void *v) struct intel_rdt *ir = css_rdt(seq_css(m)); unsigned long l3_cbm = 0; + mutex_lock(&rdt_group_mutex); clos_cbm_table_read(ir->closid, &l3_cbm); + mutex_unlock(&rdt_group_mutex); seq_printf(m, "%08lx\n", l3_cbm); return 0; @@ -431,7 +439,7 @@ static int cbm_validate_rdt_cgroup(struct intel_rdt *ir, unsigned long cbmvalue) rcu_read_lock(); rdt_for_each_child(css, ir) { c = css_rdt(css); - clos_cbm_table_read(par->closid, &cbm_tmp); + clos_cbm_table_read(c->closid, &cbm_tmp); if (!bitmap_subset(&cbm_tmp, &cbmvalue, MAX_CBM_LENGTH)) { rcu_read_unlock(); err = -EINVAL; @@ -504,7 +512,6 @@ static int intel_cache_alloc_cbm_write(struct cgroup_subsys_state *css, closcbm_map_dump(); out: mutex_unlock(&rdt_group_mutex); - return err; } @@ -513,12 +520,16 @@ static void rdt_cgroup_init(void) int max_cbm_len = boot_cpu_data.x86_cache_max_cbm_len; u32 closid; + mutex_lock(&rdt_group_mutex); + closid_alloc(&closid); WARN_ON(closid != 0); rdt_root_group.closid = closid; clos_cbm_table_update(closid, (1ULL << max_cbm_len) - 1); + + 
mutex_unlock(&rdt_group_mutex); } static int __init intel_rdt_late_init(void) @@ -552,15 +563,16 @@ static int __init intel_rdt_late_init(void) cpu_notifier_register_begin(); for_each_online_cpu(i) - rdt_cpumask_update(i); - + intel_rdt_cpu_start(i); __hotcpu_notifier(intel_rdt_cpu_notifier, 0); cpu_notifier_register_done(); + rdt_cgroup_init(); static_key_slow_inc(&rdt_enable_key); - pr_info("Intel cache allocation enabled\n"); + pr_info("Intel cache allocation enabled\n" + "max_closid:%u, max_cbm_len:%u\n", maxid, max_cbm_len); out_err: return err; -- 2.6.0.rc2.230.g3dd15c0 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@...r.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists