Message-ID: <f1855145-9562-4bef-800f-43bcacff6fc8@xs4all.nl>
Date: Tue, 30 Jan 2024 15:35:40 +0100
From: Hans Verkuil <hverkuil-cisco@...all.nl>
To: "Yang, Chenyuan" <cy54@...inois.edu>,
 "linux-media@...r.kernel.org" <linux-media@...r.kernel.org>,
 "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Cc: "jani.nikula@...el.com" <jani.nikula@...el.com>,
 "syzkaller@...glegroups.com" <syzkaller@...glegroups.com>,
 "mchehab@...nel.org" <mchehab@...nel.org>, "Zhao, Zijie"
 <zijie4@...inois.edu>, "Zhang, Lingming" <lingming@...inois.edu>
Subject: Re: [Linux Kernel Bugs] KASAN: slab-use-after-free Read in
 cec_queue_msg_fh and 4 other crashes in the cec device (`cec_ioctl`)

On 29/01/2024 04:03, Yang, Chenyuan wrote:
> Hi Hans,
> 
> Thanks a lot for this new patch!
> 
> After applying this new patch to the latest kernel (hash: ecb1b8288dc7ccbdcb3b9df005fa1c0e0c0388a7) and fuzzing with Syzkaller, it seems that the hang still exists.
> To help you better debug it, I have attached the line coverage from the fuzz testing and the output of `git diff`. I hope this helps.
> 
> By the way, the syscall descriptions for CEC have been merged into the Syzkaller mainstream: https://github.com/google/syzkaller/blob/master/sys/linux/dev_cec.txt.
> 
> Let me know if you need further information.
> 
> Best,
> Chenyuan

Here is another patch. This one now times out on all wait_for_completion calls,
reports a WARN_ON, and prints additional debug info. Hopefully this will give me
better insight into what is going on.
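
For reference, the core of the change is the pattern below (a minimal
illustrative sketch, not part of the patch itself; the helper name and the
pr_warn message are placeholders): each unbounded wait is replaced by a
killable, timed wait wrapped in WARN_ON, so a hang produces a backtrace and
a state dump instead of blocking forever.

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

/* Illustrative only: wait for a completion, but warn instead of hanging. */
static void wait_with_diagnostics(struct completion *done, unsigned int timeout_ms)
{
	/* <= 0 means the wait was interrupted (-ERESTARTSYS) or timed out (0) */
	long ret = wait_for_completion_killable_timeout(done,
							msecs_to_jiffies(timeout_ms));

	if (WARN_ON(ret <= 0))
		pr_warn("completion not signalled within %u ms (ret=%ld)\n",
			timeout_ms, ret);
}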

Unfortunately I was unable to reproduce this issue on my VM, so I have to
rely on you to run the test.

Regards,

	Hans

[PATCH] Test

Signed-off-by: Hans Verkuil <hverkuil-cisco@...all.nl>
---
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 5741adf09a2e..b1951eb7f5bd 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -935,9 +935,12 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
 	 * Release the lock and wait, retake the lock afterwards.
 	 */
 	mutex_unlock(&adap->lock);
-	wait_for_completion_killable(&data->c);
-	if (!data->completed)
-		cancel_delayed_work_sync(&data->work);
+	if (WARN_ON(wait_for_completion_killable_timeout(&data->c, msecs_to_jiffies(adap->xfer_timeout_ms + 1000)) <= 0)) {
+		dprintk(0, "wfc1: %px %d%d%d%d %x\n", adap->kthread_config,
+			adap->is_configuring, adap->is_configured,
+			adap->is_enabled, adap->must_reconfigure, adap->phys_addr);
+	}
+	cancel_delayed_work_sync(&data->work);
 	mutex_lock(&adap->lock);

 	/* Cancel the transmit if it was interrupted */
@@ -1563,10 +1566,12 @@ static int cec_config_thread_func(void *arg)
 			cec_transmit_msg_fh(adap, &msg, NULL, false);
 		}
 	}
+	mutex_unlock(&adap->lock);
+	call_void_op(adap, configured);
+	mutex_lock(&adap->lock);
 	adap->kthread_config = NULL;
 	complete(&adap->config_completion);
 	mutex_unlock(&adap->lock);
-	call_void_op(adap, configured);
 	return 0;

 unconfigure:
@@ -1592,6 +1597,17 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
 	if (WARN_ON(adap->is_configuring || adap->is_configured))
 		return;

+	if (adap->kthread_config) {
+		mutex_unlock(&adap->lock);
+//		wait_for_completion(&adap->config_completion);
+		if (WARN_ON(wait_for_completion_killable_timeout(&adap->config_completion, msecs_to_jiffies(10000)) <= 0)) {
+			dprintk(0, "wfc2: %px %d%d%d%d %x\n", adap->kthread_config,
+				adap->is_configuring, adap->is_configured,
+				adap->is_enabled, adap->must_reconfigure, adap->phys_addr);
+		}
+		mutex_lock(&adap->lock);
+	}
+
 	init_completion(&adap->config_completion);

 	/* Ready to kick off the thread */
@@ -1599,11 +1615,17 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
 	adap->kthread_config = kthread_run(cec_config_thread_func, adap,
 					   "ceccfg-%s", adap->name);
 	if (IS_ERR(adap->kthread_config)) {
-		adap->kthread_config = NULL;
 		adap->is_configuring = false;
+		adap->kthread_config = NULL;
 	} else if (block) {
 		mutex_unlock(&adap->lock);
-		wait_for_completion(&adap->config_completion);
+		//wait_for_completion(&adap->config_completion);
+		if (WARN_ON(wait_for_completion_killable_timeout(&adap->config_completion, msecs_to_jiffies(10000)) <= 0)) {
+			dprintk(0, "wfc3: %px %d%d%d%d %x\n", adap->kthread_config,
+				adap->is_configuring, adap->is_configured,
+				adap->is_enabled, adap->must_reconfigure, adap->phys_addr);
+
+		}
 		mutex_lock(&adap->lock);
 	}
 }
diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
index 67dc79ef1705..d64bb716f9c6 100644
--- a/drivers/media/cec/core/cec-api.c
+++ b/drivers/media/cec/core/cec-api.c
@@ -664,6 +664,8 @@ static int cec_release(struct inode *inode, struct file *filp)
 		list_del_init(&data->xfer_list);
 	}
 	mutex_unlock(&adap->lock);
+
+	mutex_lock(&fh->lock);
 	while (!list_empty(&fh->msgs)) {
 		struct cec_msg_entry *entry =
 			list_first_entry(&fh->msgs, struct cec_msg_entry, list);
@@ -681,6 +683,7 @@ static int cec_release(struct inode *inode, struct file *filp)
 			kfree(entry);
 		}
 	}
+	mutex_unlock(&fh->lock);
 	kfree(fh);

 	cec_put_device(devnode);

