Date:   Mon, 15 May 2017 15:59:16 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     Davidlohr Bueso <dave@...olabs.net>
Cc:     mingo@...nel.org, akpm@...ux-foundation.org, jack@...e.cz,
        kirill.shutemov@...ux.intel.com, ldufour@...ux.vnet.ibm.com,
        mhocko@...e.com, mgorman@...hsingularity.net,
        linux-kernel@...r.kernel.org, Davidlohr Bueso <dbueso@...e.de>
Subject: Re: [PATCH 2/6] locking: Introduce range reader/writer lock

On Mon, May 15, 2017 at 02:07:21AM -0700, Davidlohr Bueso wrote:
> +static inline int wait_for_ranges(struct range_lock_tree *tree,
> +				  struct range_lock *lock, long state)
> +{
> +	int ret = 0;
> +
> +	while (true) {
> +		set_current_state(state);
> +
> +		/* do we need to go to sleep? */
> +		if (!lock->blocking_ranges)
> +			break;
> +
> +		if (unlikely(signal_pending_state(state, current))) {
> +			struct interval_tree_node *node;
> +			unsigned long flags;
> +			DEFINE_WAKE_Q(wake_q);
> +
> +			ret = -EINTR;
> +			/*
> +			 * We're not taking the lock after all, cleanup
> +			 * after ourselves.
> +			 */
> +			spin_lock_irqsave(&tree->lock, flags);
> +
> +			range_lock_clear_reader(lock);
> +			__range_tree_remove(tree, lock);
> +
> +			if (!__range_intersects_intree(tree, lock))
> +				goto unlock;
> +
> +			range_interval_tree_foreach(node, &tree->root,
> +						    lock->node.start,
> +						    lock->node.last) {
> +				struct range_lock *blked;
> +				blked = to_range_lock(node);
> +
> +				if (range_lock_is_reader(lock) &&
> +				    range_lock_is_reader(blked))
> +					continue;
> +
> +				/* unaccount for threads _we_ are blocking */
> +				if (lock->seqnum < blked->seqnum)
> +					range_lock_put(blked, &wake_q);
> +			}
> +
> +		unlock:
> +			spin_unlock_irqrestore(&tree->lock, flags);
> +			wake_up_q(&wake_q);
> +			break;
> +		}
> +
> +		schedule();
> +	}
> +
> +	__set_current_state(TASK_RUNNING);
> +	return ret;
> +}


> +void range_read_unlock(struct range_lock_tree *tree, struct range_lock *lock)
> +{
> +	struct interval_tree_node *node;
> +	unsigned long flags;
> +	DEFINE_WAKE_Q(wake_q);
> +
> +	spin_lock_irqsave(&tree->lock, flags);
> +
> +	range_lock_clear_reader(lock);
> +	__range_tree_remove(tree, lock);
> +
> +	range_lock_release(&tree->dep_map, 1, _RET_IP_);
> +
> +	if (!__range_intersects_intree(tree, lock)) {
> +		/* nobody to wakeup, we're done */
> +		spin_unlock_irqrestore(&tree->lock, flags);
> +		return;
> +	}
> +
> +	range_interval_tree_foreach(node, &tree->root,
> +				    lock->node.start, lock->node.last) {
> +		struct range_lock *blocked_lock;
> +		blocked_lock = to_range_lock(node);
> +
> +		if (!range_lock_is_reader(blocked_lock))
> +			range_lock_put(blocked_lock, &wake_q);
> +	}
> +
> +	spin_unlock_irqrestore(&tree->lock, flags);
> +	wake_up_q(&wake_q);
> +}
> +EXPORT_SYMBOL_GPL(range_read_unlock);

> +void range_write_unlock(struct range_lock_tree *tree, struct range_lock *lock)
> +{
> +	struct interval_tree_node *node;
> +	unsigned long flags;
> +	DEFINE_WAKE_Q(wake_q);
> +
> +	spin_lock_irqsave(&tree->lock, flags);
> +
> +	range_lock_clear_reader(lock);
> +	__range_tree_remove(tree, lock);
> +
> +	range_lock_release(&tree->dep_map, 1, _RET_IP_);
> +
> +	if (!__range_intersects_intree(tree, lock)) {
> +		/* nobody to wakeup, we're done */
> +		spin_unlock_irqrestore(&tree->lock, flags);
> +		return;
> +	}
> +
> +	range_interval_tree_foreach(node, &tree->root,
> +				    lock->node.start, lock->node.last) {
> +		struct range_lock *blocked_lock;
> +		blocked_lock = to_range_lock(node);
> +
> +		range_lock_put(blocked_lock, &wake_q);
> +	}
> +
> +	spin_unlock_irqrestore(&tree->lock, flags);
> +	wake_up_q(&wake_q);
> +}
> +EXPORT_SYMBOL_GPL(range_write_unlock);


There is significant duplication here. Can't we have a
__range_unlock_common() and use that 3 times?
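
Something along these lines perhaps -- completely untested sketch against the
quoted code, the @put_filter callback and its name are made up here; the
range_lock_release() call and the -EINTR specific bits would stay with the
callers:

	/*
	 * Sketch only: the common "drop our range and wake the intersecting
	 * waiters we were blocking" sequence, with the caller deciding which
	 * blocked ranges actually get put via @put_filter.
	 */
	static void __range_unlock_common(struct range_lock_tree *tree,
					  struct range_lock *lock,
					  bool (*put_filter)(struct range_lock *lock,
							     struct range_lock *blocked))
	{
		struct interval_tree_node *node;
		unsigned long flags;
		DEFINE_WAKE_Q(wake_q);

		spin_lock_irqsave(&tree->lock, flags);

		range_lock_clear_reader(lock);
		__range_tree_remove(tree, lock);

		/* nobody to wake up, we're done */
		if (!__range_intersects_intree(tree, lock))
			goto unlock;

		range_interval_tree_foreach(node, &tree->root,
					    lock->node.start, lock->node.last) {
			struct range_lock *blocked = to_range_lock(node);

			if (put_filter(lock, blocked))
				range_lock_put(blocked, &wake_q);
		}
	unlock:
		spin_unlock_irqrestore(&tree->lock, flags);
		wake_up_q(&wake_q);
	}

The three filters would then just encode the differences: the read unlock puts
only writers (!range_lock_is_reader(blocked)), the write unlock puts
everything, and the signal path puts only the later waiters we were blocking
(lock->seqnum < blocked->seqnum, skipping reader vs reader).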

