Message-Id: <201805222020.FEJ82897.OFtJMFHOVLQOSF@I-love.SAKURA.ne.jp>
Date:   Tue, 22 May 2018 20:20:06 +0900
From:   Tetsuo Handa <penguin-kernel@...ove.SAKURA.ne.jp>
To:     Bart.VanAssche@....com, dvyukov@...gle.com
Cc:     linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
        jthumshirn@...e.de, alan.christopher.jenkins@...il.com,
        syzbot+c4f9cebf9d651f6e54de@...kaller.appspotmail.com,
        martin.petersen@...cle.com, axboe@...nel.dk,
        dan.j.williams@...el.com, hch@....de, oleksandr@...alenko.name,
        ming.lei@...hat.com, martin@...htvoll.de, hare@...e.com,
        syzkaller-bugs@...glegroups.com, ross.zwisler@...ux.intel.com,
        keith.busch@...el.com, linux-ext4@...r.kernel.org
Subject: Re: INFO: task hung in blk_queue_enter

I checked the counter values using the debug printk() patch shown below,
and found that q->q_usage_counter.count == 1 when this deadlock occurs.

Since the sum of the percpu counts did not change after percpu_ref_kill(),
this is not a race condition while folding the percpu counter values into
the atomic counter value. That is, for some reason, whoever is responsible
for calling percpu_ref_put(&q->q_usage_counter) (presumably via
blk_queue_exit()) is unable to call percpu_ref_put().
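
For reference, while a percpu_ref is in percpu mode, ref->count holds the
initial reference plus PERCPU_COUNT_BIAS, and gets/puts touch only the
per-CPU counters, so the effective reference count is the atomic count
plus the percpu sum minus the bias. The following is a minimal userspace
model of that arithmetic (illustration only, with made-up counter values;
it mirrors the computation the debug patches below perform):

#include <stdio.h>

#define PERCPU_COUNT_BIAS	(1LU << (8 * sizeof(long) - 1))
#define NR_CPUS			4

int main(void)
{
	/* Atomic part: the bias plus the initial reference. */
	unsigned long atomic_count = PERCPU_COUNT_BIAS + 1;
	/* Per-CPU parts: individually negative is fine, only the sum
	 * matters (modular arithmetic makes the wraparound harmless). */
	long percpu_count[NR_CPUS] = { 3, -2, 1, -2 };
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += percpu_count[cpu];

	/* Prints 1: only the initial reference remains, which is what
	 * q->q_usage_counter shows when the hang occurs. */
	printf("effective count = %ld\n",
	       (long)(atomic_count + sum - PERCPU_COUNT_BIAS));
	return 0;
}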

diff --git a/block/blk-core.c b/block/blk-core.c
index 85909b4..6933020 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -908,6 +908,12 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
+{
+	return (unsigned long __percpu *)
+		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
+}
+
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -950,10 +956,22 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		 */
 		smp_rmb();
 
-		wait_event(q->mq_freeze_wq,
-			   (atomic_read(&q->mq_freeze_depth) == 0 &&
-			    (preempt || !blk_queue_preempt_only(q))) ||
-			   blk_queue_dying(q));
+		while (wait_event_timeout(q->mq_freeze_wq,
+					  (atomic_read(&q->mq_freeze_depth) == 0 &&
+					   (preempt || !blk_queue_preempt_only(q))) ||
+					  blk_queue_dying(q), 3 * HZ) == 0) {
+			struct percpu_ref *ref = &q->q_usage_counter;
+			unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+			unsigned long count = 0;
+			int cpu;
+
+			for_each_possible_cpu(cpu)
+				count += *per_cpu_ptr(percpu_count, cpu);
+
+			printk("%s(%d): %px %ld %ld\n", current->comm, current->pid,
+			       ref, atomic_long_read(&ref->count), count);
+		}
+
 		if (blk_queue_dying(q))
 			return -ENODEV;
 	}
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 9f96fa7..72773ce 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -133,8 +133,8 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(percpu_count, cpu);
 
-	pr_debug("global %ld percpu %ld",
-		 atomic_long_read(&ref->count), (long)count);
+	printk("%px global %ld percpu %ld\n", ref,
+	       atomic_long_read(&ref->count), (long)count);
 
 	/*
 	 * It's crucial that we sum the percpu counters _before_ adding the sum
@@ -150,6 +150,8 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
 	 */
 	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
 
+	printk("%px global %ld\n", ref, atomic_long_read(&ref->count));
+
 	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
 		  "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
 		  ref->release, atomic_long_read(&ref->count));

If I change blk_queue_enter() so that it does not wait at wait_event()
when q->mq_freeze_depth != 0, this deadlock does not occur.
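
For reference, that experiment was along the lines of the following
fragment (hypothetical sketch only; the actual change is not shown here).
The extra mq_freeze_depth != 0 clause makes the wait condition true while
a freeze is in progress, so the caller no longer sleeps on q->mq_freeze_wq:

	/* Hypothetical sketch, not a proposed fix: also let waiters
	 * through when a freeze is in progress. */
	wait_event(q->mq_freeze_wq,
		   (atomic_read(&q->mq_freeze_depth) == 0 &&
		    (preempt || !blk_queue_preempt_only(q))) ||
		   atomic_read(&q->mq_freeze_depth) != 0 ||
		   blk_queue_dying(q));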

Also, I found that if blk_freeze_queue_start() tries to wait for the
effective counter value to drop to 1 before calling percpu_ref_kill()
(as shown below), this deadlock does not occur.

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9ce9cac..4bff534 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -134,12 +134,36 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
 }
 
+#define PERCPU_COUNT_BIAS       (1LU << (BITS_PER_LONG - 1))
+
+static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
+{
+	return (unsigned long __percpu *)
+		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
+}
+
 void blk_freeze_queue_start(struct request_queue *q)
 {
 	int freeze_depth;
 
 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
 	if (freeze_depth == 1) {
+		int i;
+		for (i = 0; i < 10; i++) {
+			struct percpu_ref *ref = &q->q_usage_counter;
+			unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+			unsigned long count = 0;
+			int cpu;
+
+			for_each_possible_cpu(cpu)
+				count += *per_cpu_ptr(percpu_count, cpu);
+
+			if (atomic_long_read(&ref->count) + count - PERCPU_COUNT_BIAS == 1)
+				break;
+			printk("%s(%d):! %px %ld %ld\n", current->comm, current->pid,
+			       ref, atomic_long_read(&ref->count), count);
+			schedule_timeout_uninterruptible(HZ / 10);
+		}
 		percpu_ref_kill(&q->q_usage_counter);
 		if (q->mq_ops)
 			blk_mq_run_hw_queues(q, false);

But I don't know how to find whoever is failing to call percpu_ref_put()...
