Message-Id: <7f35e628-1331-84df-5e1d-8c46233dbc6c@linux.vnet.ibm.com>
Date:   Tue, 23 May 2017 17:12:45 +0200
From:   Laurent Dufour <ldufour@...ux.vnet.ibm.com>
To:     Davidlohr Bueso <dave@...olabs.net>, mingo@...nel.org,
        peterz@...radead.org, akpm@...ux-foundation.org
Cc:     jack@...e.cz, kirill.shutemov@...ux.intel.com, mhocko@...e.com,
        mgorman@...hsingularity.net, linux-kernel@...r.kernel.org,
        Davidlohr Bueso <dbueso@...e.de>
Subject: Re: [PATCH 2/6] locking: Introduce range reader/writer lock

On 15/05/2017 11:07, Davidlohr Bueso wrote:
> --- /dev/null
> +++ b/include/linux/range_lock.h
> @@ -0,0 +1,181 @@
> +/*
> + * Range/interval rw-locking
> + * -------------------------
> + *
> + * Interval-tree based range locking is about controlling tasks' forward
> + * progress when adding an arbitrary interval (node) to the tree, depending
> + * on any overlapping ranges. A task can only continue (wakeup) if there are
> + * no intersecting ranges, thus achieving mutual exclusion. To this end, a
> + * reference counter is kept for each intersecting range in the tree
> + * (_before_ adding itself to it). To enable shared locking semantics,
> + * the reader to-be-locked will not take reference if an intersecting node
> + * is also a reader, therefore ignoring the node altogether.
> + *
> + * Fairness and freedom from starvation are guaranteed by the lack of lock
> + * stealing, thus range locks depend directly on interval tree semantics.
> + * This is particularly important for iterations, where the key for the
> + * rbtree is given by the interval's low endpoint, and duplicates are
> + * walked as in an inorder traversal of the tree.
> + *
> + * The cost of lock and unlock of a range is O((1+R_int)log(R_all)) where
> + * R_all is total number of ranges and R_int is the number of ranges
> + * intersecting the operated range.
> + */
> +#ifndef _LINUX_RANGE_LOCK_H
> +#define _LINUX_RANGE_LOCK_H
> +
> +#include <linux/rbtree.h>
> +#include <linux/interval_tree.h>
> +#include <linux/list.h>
> +#include <linux/spinlock.h>
> +
> +/*
> + * The largest range will span [0,RANGE_LOCK_FULL].
> + */
> +#define RANGE_LOCK_FULL  ~0UL
> +
> +struct range_lock {
> +	struct interval_tree_node node;
> +	struct task_struct *tsk;
> +	/* Number of ranges which are blocking acquisition of the lock */
> +	unsigned int blocking_ranges;
> +	u64 seqnum;
> +};
> +
> +struct range_lock_tree {
> +	struct rb_root root;
> +	spinlock_t lock;
> +	struct interval_tree_node *leftmost; /* compute smallest 'start' */
> +	u64 seqnum; /* track order of incoming ranges, avoid overflows */
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +	struct lockdep_map dep_map;
> +#endif
> +};
> +
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +# define __RANGE_LOCK_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
> +#else
> +# define __RANGE_LOCK_DEP_MAP_INIT(lockname)
> +#endif
> +
> +#define __RANGE_LOCK_TREE_INITIALIZER(name)		\
> +	{ .leftmost = NULL                              \
> +	, .root = RB_ROOT				\
> +	, .seqnum = 0					\
> +	, .lock = __SPIN_LOCK_UNLOCKED(name.lock)       \
> +	__RANGE_LOCK_DEP_MAP_INIT(name) }		\
> +
> +#define DEFINE_RANGE_LOCK_TREE(name) \
> +	struct range_lock_tree name = __RANGE_LOCK_TREE_INITIALIZER(name)
> +
> +#define __RANGE_LOCK_INITIALIZER(__start, __last) {	\
> +		.node = {				\
> +			.start = (__start)		\
> +			,.last = (__last)		\
> +		}					\
> +		, .task = NULL				\
                    ^ should be .tsk -- there is no 'task' field in struct range_lock

> +		, .blocking_ranges = 0			\
> +		, .reader = false			\
                   ^ this field doesn't exist anymore

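With those two fixed, I'd expect the initializer to end up looking roughly
like this (untested sketch on my side, using only the fields from the
struct range_lock definition quoted above):

#define __RANGE_LOCK_INITIALIZER(__start, __last) {	\
		.node = {				\
			.start = (__start)		\
			,.last = (__last)		\
		}					\
		, .tsk = NULL				\
		, .blocking_ranges = 0			\
		, .seqnum = 0				\
	}

so that a whole-address-space lock can still be set up statically, e.g.
(name made up for the example):

	struct range_lock full = __RANGE_LOCK_INITIALIZER(0, RANGE_LOCK_FULL);
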
Cheers,
Laurent.
