Message-ID: <20141107123431.GE3337@twins.programming.kicks-ass.net>
Date:	Fri, 7 Nov 2014 13:34:31 +0100
From:	Peter Zijlstra <peterz@...radead.org>
To:	Matt Fleming <matt@...sole-pimps.org>
Cc:	Ingo Molnar <mingo@...nel.org>, Jiri Olsa <jolsa@...hat.com>,
	Arnaldo Carvalho de Melo <acme@...nel.org>,
	Andi Kleen <andi@...stfloor.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	linux-kernel@...r.kernel.org, "H. Peter Anvin" <hpa@...or.com>,
	Kanaka Juvva <kanaka.d.juvva@...el.com>,
	Matt Fleming <matt.fleming@...el.com>
Subject: Re: [PATCH v3 10/11] perf/x86/intel: Perform rotation on Intel CQM
 RMIDs

On Thu, Nov 06, 2014 at 12:23:21PM +0000, Matt Fleming wrote:
> +/*
> + * Test whether an RMID has a zero occupancy value on this cpu.
> + */
> +static void intel_cqm_stable(void *arg)
> +{
> +	unsigned int nr_bits;
> +	int i = -1;
> +
> +	nr_bits = cqm_max_rmid + 1;
> +
> +	for (; i = find_next_bit(cqm_limbo_bitmap, nr_bits, i+1),
> +		i < nr_bits;) {
> +		if (__rmid_read(i) > __intel_cqm_threshold)
> +			clear_bit(i, cqm_free_bitmap);
> +	}
> +}
> +
> +static unsigned int __rotation_period = 250; /* ms */
> +
> +/*
> + * intel_cqm_rmid_stabilize - move RMIDs from limbo to free list
> + * @available: are there freeable RMIDs on the limbo list?
> + *
> + * Quiescent state; wait for all 'freed' RMIDs to become unused, i.e. no
> + * cachelines are tagged with those RMIDs. After this we can reuse them
> + * and know that the current set of active RMIDs is stable.
> + *
> + * Return %true or %false depending on whether we were able to stabilize
> + * an RMID for intel_cqm_rotation_rmid.
> + *
> + * If we return %false then @available is updated to indicate the reason
> + * we couldn't stabilize any RMIDs. @available is %false if no suitable
> + * RMIDs were found on the limbo list to recycle, i.e. no RMIDs had been
> + * on the list for the minimum queue time. If @available is %true then,
> + * we found suitable RMIDs to recycle but none had an associated
> + * occupancy value below __intel_cqm_threshold and the threshold should
> + * be increased and stabilization reattempted.
> + */
> +static bool intel_cqm_rmid_stabilize(bool *available)
> +{
> +	struct cqm_rmid_entry *entry;
> +	unsigned int nr_bits;
> +	struct perf_event *event;
> +
> +	lockdep_assert_held(&cache_mutex);
> +
> +	nr_bits = cqm_max_rmid + 1;
> +
> +	bitmap_zero(cqm_limbo_bitmap, nr_bits);
> +	bitmap_zero(cqm_free_bitmap, nr_bits);
> +
> +	list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
> +		unsigned long min_queue_time;
> +		unsigned long now = jiffies;
> +
> +		/*
> +		 * We hold RMIDs placed into limbo for a minimum queue
> +		 * time. Before the minimum queue time has elapsed we do
> +		 * not recycle RMIDs.
> +		 *
> +		 * The reasoning is that until a sufficient time has
> +		 * passed since we stopped using an RMID, any RMID
> +		 * placed onto the limbo list will likely still have
> +		 * data tagged in the cache, which means we'll probably
> +		 * fail to recycle it anyway.
> +		 *
> +		 * We can save ourselves an expensive IPI by skipping
> +		 * any RMIDs that have not been queued for the minimum
> +		 * time.
> +		 */
> +		min_queue_time = entry->queue_time +
> +			msecs_to_jiffies(__rotation_period);
> +
> +		if (time_after(min_queue_time, now))
> +			continue;

Why continue? This LRU is time ordered, so later entries cannot have been
queued earlier; once we hit an entry that is too young we can stop walking
the list entirely, right?
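
IOW, the first too-young entry terminates the walk; something like this
(same names as in your patch, untested):

	list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
		unsigned long min_queue_time = entry->queue_time +
				msecs_to_jiffies(__rotation_period);

		/* Time ordered list; the first young entry ends the walk. */
		if (time_after(min_queue_time, jiffies))
			break;

		set_bit(entry->rmid, cqm_limbo_bitmap);
		set_bit(entry->rmid, cqm_free_bitmap);
	}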

> +		set_bit(entry->rmid, cqm_limbo_bitmap);
> +		set_bit(entry->rmid, cqm_free_bitmap);
> +	}
> +
> +	/*
> +	 * Fast return if none of the RMIDs on the limbo list have been
> +	 * sitting on the queue for the minimum queue time.
> +	 */
> +	*available = !bitmap_empty(cqm_limbo_bitmap, nr_bits);
> +	if (!*available)
> +		return false;
> +
> +	/*
> +	 * Test whether an RMID is free for each package.
> +	 */
> +	preempt_disable();
> +	smp_call_function_many(&cqm_cpumask, intel_cqm_stable, NULL, true);
> +	preempt_enable();

I don't get the whole list -> bitmap -> list juggle; why not keep the
recycle state in the entry itself and walk the limbo list directly?
Something like:

enum rmid_cycle_state {
	RMID_AVAILABLE = 0,
	RMID_LIMBO,
	RMID_YOUNG,
};

struct cqm_rmid_entry {
	...
	enum rmid_cycle_state state;
};

static void __intel_cqm_stable(void *arg)
{
	struct cqm_rmid_entry *entry;

	list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
		if (entry->state == RMID_YOUNG)
			break;

		if (__rmid_read(entry->rmid) > __intel_cqm_threshold)
			entry->state = RMID_LIMBO;
	}
}

static void intel_cqm_rmid_stabilize(void)
{
	unsigned long cutoff = jiffies - msecs_to_jiffies(__rotation_period);
	unsigned int nr_limbo = 0;
	...

	list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
		/* Time ordered list; stop at the first entry that is too young. */
		if (time_after(entry->queue_time, cutoff))
			break;

		entry->state = RMID_AVAILABLE;
		nr_limbo++;
	}

	if (!nr_limbo)
		return;

	on_each_cpu_mask(&cqm_cpumask, __intel_cqm_stable, NULL, true);

	list_for_each_entry_safe(entry, tmp, &cqm_rmid_limbo_lru, list) {
		if (entry->state == RMID_YOUNG)
			break;

		if (entry->state == RMID_AVAILABLE)
			list_move(&entry->list, &cqm_rmid_free_list);
	}
}
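
(This assumes whoever retires an RMID marks the entry young when queueing
it onto the limbo list; cqm_rmid_retire below is a made-up name for
whatever that path is in your series:)

static void cqm_rmid_retire(struct cqm_rmid_entry *entry)
{
	/* stop using the RMID and queue it for recycling */
	entry->queue_time = jiffies;
	entry->state = RMID_YOUNG;
	list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
}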


Would not something like that work?