Date:	Mon, 4 May 2015 10:30:15 -0700 (PDT)
From:	Vikas Shivappa <vikas.shivappa@...el.com>
To:	Peter Zijlstra <peterz@...radead.org>
cc:	Vikas Shivappa <vikas.shivappa@...ux.intel.com>,
	vikas.shivappa@...el.com, linux-kernel@...r.kernel.org,
	x86@...nel.org, hpa@...or.com, tglx@...utronix.de,
	mingo@...nel.org, tj@...nel.org,
	Matt Fleming <matt.fleming@...el.com>,
	"Auld, Will" <will.auld@...el.com>, peter.zijlstra@...el.com,
	h.peter.anvin@...el.com,
	"Juvva, Kanaka D" <kanaka.d.juvva@...el.com>
Subject: Re: [PATCH 3/7] x86/intel_rdt: Support cache bit mask for Intel
 CAT



On Sat, 2 May 2015, Peter Zijlstra wrote:

>
> There's CAT in your subject, make up your minds already on what you want
> to call this stuff.

We don't have control over the names. Patch 0/7 explains that RDT is the
umbrella term, that CAT is a part of it, and that this patch series covers
only CAT. It also points to the exact section of the Intel manual this
refers to. Is there still some lack of clarity here? If it's just that you
dislike the term, that's already known.

Otherwise we would gladly have taken suggestions for fancier names like
vanilla, ice cream or burger and spent time choosing between them, but we
can't do that if we want to stay consistent with the rest of the naming.

>
> On Fri, May 01, 2015 at 06:36:37PM -0700, Vikas Shivappa wrote:
>> +static void rdt_free_closid(unsigned int clos)
>> +{
>> +
>
> superfluous whitespace

Will fix the whitespace issues (including the ones before return
statements) and any other coding convention issues.

It would make more sense to have checks like this in checkpatch rather
than having to point them out manually; if you want to have fun with
that, go for it though. An automated check would take far less of the
reviewer's time, and of the submitter's time as well. The patches are
all run against checkpatch already.

>
>> +	lockdep_assert_held(&rdt_group_mutex);
>> +
>> +	clear_bit(clos, rdtss_info.closmap);
>> +}
>
>> +static inline bool cbm_is_contiguous(unsigned long var)
>> +{
>> +	unsigned long first_bit, zero_bit;
>> +	unsigned long maxcbm = MAX_CBM_LENGTH;
>
> flip these two lines
>
>> +
>> +	if (!var)
>> +		return false;
>> +
>> +	first_bit = find_next_bit(&var, maxcbm, 0);
>
> What was wrong with find_first_bit() ?
>
>> +	zero_bit = find_next_zero_bit(&var, maxcbm, first_bit);
>> +
>> +	if (find_next_bit(&var, maxcbm, zero_bit) < maxcbm)
>> +		return false;
>> +
>> +	return true;
>> +}
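
If I'm reading the two comments right, that would become something like
the below (untested):

static inline bool cbm_is_contiguous(unsigned long var)
{
	/* Longest declaration line first, per the comment above. */
	unsigned long maxcbm = MAX_CBM_LENGTH;
	unsigned long first_bit, zero_bit;

	if (!var)
		return false;

	/* find_first_bit() instead of find_next_bit(.., 0). */
	first_bit = find_first_bit(&var, maxcbm);
	zero_bit = find_next_zero_bit(&var, maxcbm, first_bit);

	/* Any set bit past the first zero means the mask is not contiguous. */
	if (find_next_bit(&var, maxcbm, zero_bit) < maxcbm)
		return false;

	return true;
}
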
>> +
>> +static int cat_cbm_read(struct seq_file *m, void *v)
>> +{
>> +	struct intel_rdt *ir = css_rdt(seq_css(m));
>> +
>> +	seq_printf(m, "%08lx\n", ccmap[ir->clos].cache_mask);
>
> inconsistent spacing, you mostly have whitespace before the return
> statement, but here you have not.
>
>> +	return 0;
>> +}
>> +
>> +static int validate_cbm(struct intel_rdt *ir, unsigned long cbmvalue)
>> +{
>> +	struct intel_rdt *par, *c;
>> +	struct cgroup_subsys_state *css;
>> +	unsigned long *cbm_tmp;
>
> No reason not to order these on line length, is there?
>
>> +
>> +	if (!cbm_is_contiguous(cbmvalue)) {
>> +		pr_err("bitmask should have >= 1 bits and be contiguous\n");
>> +		return -EINVAL;
>> +	}
>> +
>> +	par = parent_rdt(ir);
>> +	cbm_tmp = &ccmap[par->clos].cache_mask;
>> +	if (!bitmap_subset(&cbmvalue, cbm_tmp, MAX_CBM_LENGTH))
>> +		return -EINVAL;
>> +
>> +	rcu_read_lock();
>> +	rdt_for_each_child(css, ir) {
>> +		c = css_rdt(css);
>> +		cbm_tmp = &ccmap[c->clos].cache_mask;
>> +		if (!bitmap_subset(cbm_tmp, &cbmvalue, MAX_CBM_LENGTH)) {
>> +			rcu_read_unlock();
>> +			pr_err("Children's mask not a subset\n");
>> +			return -EINVAL;
>> +		}
>> +	}
>> +
>> +	rcu_read_unlock();
>
> Daft whitespace again.
>
>> +	return 0;
>> +}
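
If the preference is a single unlock/exit path here too, the child loop
could break out and funnel through one rcu_read_unlock(), roughly as
below (untested, and assuming rdt_for_each_child() is a plain for loop
so break works):

static int validate_cbm(struct intel_rdt *ir, unsigned long cbmvalue)
{
	struct cgroup_subsys_state *css;
	struct intel_rdt *par, *c;
	unsigned long *cbm_tmp;
	int err = 0;

	if (!cbm_is_contiguous(cbmvalue)) {
		pr_err("bitmask should have >= 1 bits and be contiguous\n");
		return -EINVAL;
	}

	/* The new mask must be a subset of the parent's mask. */
	par = parent_rdt(ir);
	cbm_tmp = &ccmap[par->clos].cache_mask;
	if (!bitmap_subset(&cbmvalue, cbm_tmp, MAX_CBM_LENGTH))
		return -EINVAL;

	/* Every child's mask must remain a subset of the new mask. */
	rcu_read_lock();
	rdt_for_each_child(css, ir) {
		c = css_rdt(css);
		cbm_tmp = &ccmap[c->clos].cache_mask;
		if (!bitmap_subset(cbm_tmp, &cbmvalue, MAX_CBM_LENGTH)) {
			pr_err("Children's mask not a subset\n");
			err = -EINVAL;
			break;
		}
	}
	rcu_read_unlock();

	return err;
}
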
>> +
>> +static bool cbm_search(unsigned long cbm, int *closid)
>> +{
>> +	int maxid = boot_cpu_data.x86_cat_closs;
>> +	unsigned int i;
>> +
>> +	for (i = 0; i < maxid; i++) {
>> +		if (bitmap_equal(&cbm, &ccmap[i].cache_mask, MAX_CBM_LENGTH)) {
>> +			*closid = i;
>> +			return true;
>> +		}
>> +	}
>
> and again
>
>> +	return false;
>> +}
>> +
>> +static void cbmmap_dump(void)
>> +{
>> +	int i;
>> +
>> +	pr_debug("CBMMAP\n");
>> +	for (i = 0; i < boot_cpu_data.x86_cat_closs; i++)
>> +		pr_debug("cache_mask: 0x%x,clos_refcnt: %u\n",
>> +		 (unsigned int)ccmap[i].cache_mask, ccmap[i].clos_refcnt);
>
> This is missing {}
>
>> +}
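
Will add braces around the multi-line loop body, i.e. something like:

static void cbmmap_dump(void)
{
	int i;

	pr_debug("CBMMAP\n");
	for (i = 0; i < boot_cpu_data.x86_cat_closs; i++) {
		pr_debug("cache_mask: 0x%x,clos_refcnt: %u\n",
			 (unsigned int)ccmap[i].cache_mask,
			 ccmap[i].clos_refcnt);
	}
}
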
>> +
>> +static void __cpu_cbm_update(void *info)
>> +{
>> +	unsigned int closid = *((unsigned int *)info);
>> +
>> +	wrmsrl(CBM_FROM_INDEX(closid), ccmap[closid].cache_mask);
>> +}
>
>> +static int cat_cbm_write(struct cgroup_subsys_state *css,
>> +				 struct cftype *cft, u64 cbmvalue)
>> +{
>> +	struct intel_rdt *ir = css_rdt(css);
>> +	ssize_t err = 0;
>> +	unsigned long cache_mask, max_mask;
>> +	unsigned long *cbm_tmp;
>> +	unsigned int closid;
>> +	u32 max_cbm = boot_cpu_data.x86_cat_cbmlength;
>
> That's just a right mess isn't it?
>
>> +
>> +	if (ir == &rdt_root_group)
>> +		return -EPERM;
>> +	bitmap_set(&max_mask, 0, max_cbm);
>> +
>> +	/*
>> +	 * Need global mutex as cbm write may allocate a closid.
>> +	 */
>> +	mutex_lock(&rdt_group_mutex);
>> +	bitmap_and(&cache_mask, (unsigned long *)&cbmvalue, &max_mask, max_cbm);
>> +	cbm_tmp = &ccmap[ir->clos].cache_mask;
>> +
>> +	if (bitmap_equal(&cache_mask, cbm_tmp, MAX_CBM_LENGTH))
>> +		goto out;
>> +
>> +	err = validate_cbm(ir, cache_mask);
>> +	if (err)
>> +		goto out;
>> +
>> +	/*
>> +	 * At this point we are sure to change the cache_mask.Hence release the
>> +	 * reference to the current CLOSid and try to get a reference for
>> +	 * a different CLOSid.
>> +	 */
>> +	__clos_put(ir->clos);
>> +
>> +	if (cbm_search(cache_mask, &closid)) {
>> +		ir->clos = closid;
>> +		__clos_get(closid);
>> +	} else {
>> +		err = rdt_alloc_closid(ir);
>> +		if (err)
>> +			goto out;
>> +
>> +		ccmap[ir->clos].cache_mask = cache_mask;
>> +		cbm_update_all(ir->clos);
>> +	}
>> +
>> +	cbmmap_dump();
>> +out:
>> +
>
> Daft whitespace again.. Also inconsistent return paradigm, here you use
> an out label, where in validate_cbm() you did rcu_read_unlock() and
> return from the middle.
>
>> +	mutex_unlock(&rdt_group_mutex);
>> +	return err;
>> +}
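
For the declaration block, I take that as wanting the lines ordered
longest first; roughly:

	u32 max_cbm = boot_cpu_data.x86_cat_cbmlength;
	struct intel_rdt *ir = css_rdt(css);
	unsigned long cache_mask, max_mask;
	unsigned long *cbm_tmp;
	unsigned int closid;
	ssize_t err = 0;
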
>> +
>> +static inline bool rdt_update_cpumask(int cpu)
>> +{
>> +	int phys_id = topology_physical_package_id(cpu);
>> +	struct cpumask *mask = &rdt_cpumask;
>> +	int i;
>> +
>> +	for_each_cpu(i, mask) {
>> +		if (phys_id == topology_physical_package_id(i))
>> +			return false;
>> +	}
>> +
>> +	cpumask_set_cpu(cpu, mask);
>
> More daft whitespace
>
>> +	return true;
>> +}
>> +
>> +/*
>> + * rdt_cpu_start() - If a new package has come up, update all
>> + * the Cache bitmasks on the package.
>> + */
>> +static inline void rdt_cpu_start(int cpu)
>> +{
>> +	mutex_lock(&rdt_group_mutex);
>> +	if (rdt_update_cpumask(cpu))
>> +		cbm_update_msrs(cpu);
>> +	mutex_unlock(&rdt_group_mutex);
>> +}
>> +
>> +static void rdt_cpu_exit(unsigned int cpu)
>> +{
>> +	int phys_id = topology_physical_package_id(cpu);
>> +	int i;
>> +
>> +	mutex_lock(&rdt_group_mutex);
>> +	if (!cpumask_test_and_clear_cpu(cpu, &rdt_cpumask)) {
>> +		mutex_unlock(&rdt_group_mutex);
>> +		return;
>
> And here we return from the middle again..
>
>> +	}
>> +
>> +	for_each_online_cpu(i) {
>> +		if (i == cpu)
>> +			continue;
>> +
>> +		if (phys_id == topology_physical_package_id(i)) {
>> +			cpumask_set_cpu(i, &rdt_cpumask);
>> +			break;
>> +		}
>> +	}
>> +	mutex_unlock(&rdt_group_mutex);
>> +}
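
To keep this consistent with the out-label pattern in cat_cbm_write()
instead of unlocking and returning from the middle, it could be
restructured along these lines (untested):

static void rdt_cpu_exit(unsigned int cpu)
{
	int phys_id = topology_physical_package_id(cpu);
	int i;

	mutex_lock(&rdt_group_mutex);
	if (!cpumask_test_and_clear_cpu(cpu, &rdt_cpumask))
		goto out;

	/* Pick another online CPU on the same package to represent it. */
	for_each_online_cpu(i) {
		if (i == cpu)
			continue;

		if (phys_id == topology_physical_package_id(i)) {
			cpumask_set_cpu(i, &rdt_cpumask);
			break;
		}
	}
out:
	mutex_unlock(&rdt_group_mutex);
}
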
>
> /me tired and gives up..
>
