Date:	Wed, 18 May 2016 22:36:03 +0200 (CEST)
From:	Thomas Gleixner <tglx@...utronix.de>
To:	David Carrillo-Cisneros <davidcc@...gle.com>
cc:	Peter Zijlstra <peterz@...radead.org>,
	Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
	Arnaldo Carvalho de Melo <acme@...nel.org>,
	Ingo Molnar <mingo@...hat.com>,
	Vikas Shivappa <vikas.shivappa@...ux.intel.com>,
	Matt Fleming <matt@...eblueprint.co.uk>,
	Tony Luck <tony.luck@...el.com>,
	Stephane Eranian <eranian@...gle.com>,
	Paul Turner <pjt@...gle.com>, x86@...nel.org,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 10/32] perf/x86/intel/cqm: introduce (I)state and
 limbo prmids

On Wed, 11 May 2016, David Carrillo-Cisneros wrote:
>  static inline bool __pmonr__in_astate(struct pmonr *pmonr)
>  {
>  	lockdep_assert_held(&__pkg_data(pmonr, pkg_data_lock));
> -	return pmonr->prmid;
> +	return pmonr->prmid && !pmonr->ancestor_pmonr;
>  }
>  
>  static inline bool __pmonr__in_ustate(struct pmonr *pmonr)
>  {
>  	lockdep_assert_held(&__pkg_data(pmonr, pkg_data_lock));
> -	return !pmonr->prmid;
> +	return !pmonr->prmid && !pmonr->ancestor_pmonr;
> +}
> +
> +static inline bool __pmonr__in_istate(struct pmonr *pmonr)
> +{
> +	lockdep_assert_held(&__pkg_data(pmonr, pkg_data_lock));
> +	return pmonr->ancestor_pmonr;
> +}
> +
> +static inline bool __pmonr__in_ilstate(struct pmonr *pmonr)
> +{
> +	lockdep_assert_held(&__pkg_data(pmonr, pkg_data_lock));
> +	return __pmonr__in_istate(pmonr) && pmonr->limbo_prmid;
> +}

So this does the lockdep assert several times in a row. And looking at the
call sites, you do that a dozen times in the same function on the same pmonr.

Throwing enough asserts into the code makes it better, right?

> +static inline bool __pmonr__in_instate(struct pmonr *pmonr)
> +{
> +	lockdep_assert_held(&__pkg_data(pmonr, pkg_data_lock));
> +	return __pmonr__in_istate(pmonr) && !__pmonr__in_ilstate(pmonr);
>  }

This state tracking sucks. It's completely non-obvious which combinations of
members denote which state.

What's wrong with having:

       pmonr->state

and an enum

enum pmonr_state {
     PMONR_UNUSED,
     PMONR_ACTIVE,
     PMONR_LIMBO,
     PMONR_INHERITED,
};

That would make all this horror readable and understandable. I bet you can't
remember the meaning of all this state stuff 3 months from now. That's going
to be a hell of a ride to track down a problem in this code.
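
As a sketch, assuming struct pmonr grows a 'state' member of that enum type
(untested, names made up):

static inline bool __pmonr__in_astate(struct pmonr *pmonr)
{
	lockdep_assert_held(&__pkg_data(pmonr, pkg_data_lock));
	return pmonr->state == PMONR_ACTIVE;
}

static inline bool __pmonr__in_istate(struct pmonr *pmonr)
{
	lockdep_assert_held(&__pkg_data(pmonr, pkg_data_lock));
	return pmonr->state == PMONR_INHERITED;
}

One member, one comparison, and the state shows up as-is in a debugger.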

>  static inline bool monr__is_root(struct monr *monr)
> @@ -191,9 +209,12 @@ static int pkg_data_init_cpu(int cpu)
>  
>  	INIT_LIST_HEAD(&pkg_data->free_prmids_pool);
>  	INIT_LIST_HEAD(&pkg_data->active_prmids_pool);
> +	INIT_LIST_HEAD(&pkg_data->pmonr_limbo_prmids_pool);
>  	INIT_LIST_HEAD(&pkg_data->nopmonr_limbo_prmids_pool);
>  
>  	INIT_LIST_HEAD(&pkg_data->astate_pmonrs_lru);
> +	INIT_LIST_HEAD(&pkg_data->istate_pmonrs_lru);
> +	INIT_LIST_HEAD(&pkg_data->ilstate_pmonrs_lru);
>  
>  	mutex_init(&pkg_data->pkg_data_mutex);
>  	raw_spin_lock_init(&pkg_data->pkg_data_lock);
> @@ -242,7 +263,15 @@ static struct pmonr *pmonr_alloc(int cpu)
>  	if (!pmonr)
>  		return ERR_PTR(-ENOMEM);
>  
> +	pmonr->ancestor_pmonr = NULL;
> +
> +	/*
> +	 * Since (A)state and (I)state have union in members,
> +	 * initialize one of them only.

Which one of them? The list heads or some other random stuff?

> +	 */
> +	INIT_LIST_HEAD(&pmonr->pmonr_deps_head);
>  	pmonr->prmid = NULL;
> +	INIT_LIST_HEAD(&pmonr->limbo_rotation_entry);
>  
>  	pmonr->monr = NULL;
>  	INIT_LIST_HEAD(&pmonr->rotation_entry);
> @@ -308,6 +337,44 @@ __pmonr__finish_to_astate(struct pmonr *pmonr, struct prmid *prmid)
>  	atomic64_set(&pmonr->prmid_summary_atomic, summary.value);
>  }
>  
> +/*
> + * Transition to (A)state from (IN)state, given a valid prmid.
> + * Cannot fail. Updates ancestor dependants to use this pmonr as new ancestor.
> + */
> +static inline void
> +__pmonr__instate_to_astate(struct pmonr *pmonr, struct prmid *prmid)

And this function is used where?

> +{
> +	struct pmonr *pos, *tmp, *ancestor;
> +	union prmid_summary old_summary, summary;
> +
> +	lockdep_assert_held(&__pkg_data(pmonr, pkg_data_lock));
> +
> +	/* If in (I) state, cannot have limbo_prmid, otherwise prmid
> +	 * in function's argument is superfluous.
> +	 */
> +	WARN_ON_ONCE(pmonr->limbo_prmid);

And what happens if that WARN_ON triggers? Can the code below be executed
without damaging anything? I'm just asking because so far your WARN_ONs have
been superfluous in most places.

> +	/* Do not depend on ancestor_pmonr anymore. Make it (A)state. */
> +	ancestor = pmonr->ancestor_pmonr;
> +	list_del_init(&pmonr->pmonr_deps_entry);
> +	pmonr->ancestor_pmonr = NULL;
> +	__pmonr__finish_to_astate(pmonr, prmid);
> +
> +	/* Update ex ancestor's dependants that are pmonr descendants. */
> +	list_for_each_entry_safe(pos, tmp, &ancestor->pmonr_deps_head,
> +				 pmonr_deps_entry) {
> +		if (!__monr_hrchy_is_ancestor(monr_hrchy_root,
> +					      pmonr->monr, pos->monr))
> +			continue;
> +		list_move_tail(&pos->pmonr_deps_entry, &pmonr->pmonr_deps_head);
> +		pos->ancestor_pmonr = pmonr;
> +		old_summary.value = atomic64_read(&pos->prmid_summary_atomic);
> +		summary.sched_rmid = prmid->rmid;
> +		summary.read_rmid = old_summary.read_rmid;
> +		atomic64_set(&pos->prmid_summary_atomic, summary.value);
> +	}
> +}
> +
>  static inline void
>  __pmonr__ustate_to_astate(struct pmonr *pmonr, struct prmid *prmid)
>  {
> @@ -315,9 +382,59 @@ __pmonr__ustate_to_astate(struct pmonr *pmonr, struct prmid *prmid)
>  	__pmonr__finish_to_astate(pmonr, prmid);
>  }
>  
> +/*
> + * Find lowest active ancestor.
> + * Always successful since monr_hrchy_root is always in (A)state.
> + */
> +static struct monr *
> +__monr_hrchy__find_laa(struct monr *monr, u16 pkg_id)

Just waiting for the next function to be named __monr_hrchy__find_loo()

> +{
> +	lockdep_assert_held(&cqm_pkgs_data[pkg_id]->pkg_data_lock);
> +
> +	while ((monr = monr->parent)) {
> +		if (__pmonr__in_astate(monr->pmonrs[pkg_id]))
> +			return monr;
> +	}
> +	/* Should have hitted monr_hrchy_root */
> +	WARN_ON_ONCE(true);

So you have a WARN_ON here and you have one at the call site checking the
return value.

> +	return NULL;
> +}
> +
> +/*
> + * __pmnor__move_dependants: Move dependants from one ancestor to another.
> + * @old: Old ancestor.
> + * @new: New ancestor.
> + *
> + * To be called on valid pmonrs. @new must be ancestor of @old.
> + */
> +static inline void
> +__pmonr__move_dependants(struct pmonr *old, struct pmonr *new)
> +{
> +	struct pmonr *dep;
> +	union prmid_summary old_summary, summary;
> +
> +	WARN_ON_ONCE(old->pkg_id != new->pkg_id);

Great, another warning which will lead to explosions later on simply because
you access @new unlocked.

> +	lockdep_assert_held(&__pkg_data(old, pkg_data_lock));
> +
> +	/* Update this pmonr dependencies to use new ancestor. */
> +	list_for_each_entry(dep, &old->pmonr_deps_head, pmonr_deps_entry) {
> +		/* Set next summary for dependent pmonrs. */
> +		dep->ancestor_pmonr = new;
> +
> +		old_summary.value = atomic64_read(&dep->prmid_summary_atomic);
> +		summary.sched_rmid = new->prmid->rmid;
> +		summary.read_rmid = old_summary.read_rmid;
> +		atomic64_set(&dep->prmid_summary_atomic, summary.value);
> +	}
> +	list_splice_tail_init(&old->pmonr_deps_head,
> +			      &new->pmonr_deps_head);
> +}
> +
>  static inline void
>  __pmonr__to_ustate(struct pmonr *pmonr)
>  {
> +	struct pmonr *ancestor;
> +	u16 pkg_id = pmonr->pkg_id;
>  	union prmid_summary summary;
>  
>  	lockdep_assert_held(&__pkg_data(pmonr, pkg_data_lock));
> @@ -331,9 +448,27 @@ __pmonr__to_ustate(struct pmonr *pmonr)
>  	if (__pmonr__in_astate(pmonr)) {
>  		WARN_ON_ONCE(!pmonr->prmid);
>  
> +		ancestor = __monr_hrchy__find_laa(
> +			pmonr->monr, pkg_id)->pmonrs[pkg_id];

Using intermediate variables instead of these line breaks would make this
much easier to read.
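
E.g. (untested, the local variable name is only for illustration):

	struct monr *laa = __monr_hrchy__find_laa(pmonr->monr, pkg_id);

	ancestor = laa->pmonrs[pkg_id];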

> +		WARN_ON_ONCE(!ancestor);

Brilliant error handling. But at least it's consistently as useless as the
one 5 lines above.

> +		__pmonr__move_dependants(pmonr, ancestor);
>  		list_move_tail(&pmonr->prmid->pool_entry,
>  			       &__pkg_data(pmonr, nopmonr_limbo_prmids_pool));
>  		pmonr->prmid =  NULL;
> +	} else if (__pmonr__in_istate(pmonr)) {
> +		list_del_init(&pmonr->pmonr_deps_entry);
> +		/* limbo_prmid is already in limbo pool */
> +		if (__pmonr__in_ilstate(pmonr)) {
> +			WARN_ON(!pmonr->limbo_prmid);

Sigh.

> +			list_move_tail(
> +				&pmonr->limbo_prmid->pool_entry,
> +				&__pkg_data(pmonr, nopmonr_limbo_prmids_pool));

I really appreciate all the useful comments in that code. This shuffles stuff
around from one pool to the next w/o any explanation whatsoever.

And just for the record, I just had to look up __pkg_data() once again, which
is annoying enough.

You do:

    cqm_pkgs_data[pmonr->pkg_id]->xxxx

over and over. Instead of that you can simply store the pointer to the pkg
data in pmonr and do:

     pmonr->pdata->xxxx

Hmm?
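
I.e. something like this, assuming the array elements are 'struct pkg_data *'
(sketch only, the member name is made up):

struct pmonr {
	...
	/* Cached pointer to this pmonr's package data. */
	struct pkg_data			*pdata;
	...
};

and set it once where pkg_id is assigned:

	pmonr->pdata = cqm_pkgs_data[pmonr->pkg_id];

Then the __pkg_data() indirection goes away completely.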

> +
> +			pmonr->limbo_prmid = NULL;
> +			list_del_init(&pmonr->limbo_rotation_entry);
> +		} else {
> +		}
> +		pmonr->ancestor_pmonr = NULL;
>  	} else {
>  		WARN_ON_ONCE(true);
>  		return;
> @@ -348,6 +483,62 @@ __pmonr__to_ustate(struct pmonr *pmonr)
>  	WARN_ON_ONCE(!__pmonr__in_ustate(pmonr));
>  }
>  
> +static inline void __pmonr__set_istate_summary(struct pmonr *pmonr)
> +{
> +	union prmid_summary summary;
> +
> +	summary.sched_rmid = pmonr->ancestor_pmonr->prmid->rmid;
> +	summary.read_rmid =
> +		pmonr->limbo_prmid ? pmonr->limbo_prmid->rmid : INVALID_RMID;
> +	atomic64_set(
> +		&pmonr->prmid_summary_atomic, summary.value);

Your name choices are interesting. You either chose cryptic names which
cannot be decoded or pointlessly long descriptive names which contain
redundant information.

	pmonr->prmid_summary

Should be enough, right? The compiler will tell you if you access that
directly.
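
I.e. a sketch:

	/* union prmid_summary bits, updated via atomic64_read()/atomic64_set(). */
	atomic64_t				prmid_summary;

	...
	summary.value = atomic64_read(&pmonr->prmid_summary);

The atomic64_t type already tells the reader (and the compiler) everything
the '_atomic' suffix is trying to say.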

> +/*
> + * Transition to (I)state from no (I)state..
> + * Finds a valid ancestor transversing monr_hrchy. Cannot fail.
> + */
> +static inline void
> +__pmonr__to_istate(struct pmonr *pmonr)
> +{
> +	struct pmonr *ancestor;
> +	u16 pkg_id = pmonr->pkg_id;
> +
> +	lockdep_assert_held(&__pkg_data(pmonr, pkg_data_lock));
> +
> +	if (!(__pmonr__in_ustate(pmonr) || __pmonr__in_astate(pmonr))) {
> +		/* Invalid initial state. */
> +		WARN_ON_ONCE(true);

According to the comment above, it cannot fail. What's this?

> +		return;
> +	}
> +
> +	ancestor = __monr_hrchy__find_laa(pmonr->monr, pkg_id)->pmonrs[pkg_id];
> +	WARN_ON_ONCE(!ancestor);

Lalalalala.

> +/* A pmonr in (I)state (either (IN)state or (IL)state. */
> +inline bool prmid_summary__is_istate(union prmid_summary summ)
> +{
> +	return summ.sched_rmid != INVALID_RMID &&
> +	       summ.sched_rmid != summ.read_rmid;
> +}

Yet more undecodable state tracking.

>  struct pmonr {
>  
> -	struct prmid				*prmid;
> +	/* If set, pmonr is in (I)state. */
> +	struct pmonr				*ancestor_pmonr;
> +
> +	union{
> +		struct { /* (A)state variables. */
> +			struct list_head	pmonr_deps_head;
> +			struct prmid		*prmid;
> +		};
> +		struct { /* (I)state variables. */
> +			struct list_head	pmonr_deps_entry;
> +			struct prmid		*limbo_prmid;
> +			struct list_head	limbo_rotation_entry;

What's the value of this union? I can't see one, really. Please explain.

Thanks,

	tglx
