Date:   Sun, 25 Aug 2019 14:49:57 +0900
From:   Tetsuo Handa <penguin-kernel@...ove.sakura.ne.jp>
To:     Ingo Molnar <mingo@...nel.org>,
        Linus Torvalds <torvalds@...ux-foundation.org>
Cc:     Arnd Bergmann <arnd@...db.de>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        Linux List Kernel Mailing <linux-kernel@...r.kernel.org>,
        syzbot <syzbot+8ab2d0f39fb79fe6ca40@...kaller.appspotmail.com>
Subject: Re: [PATCH] /dev/mem: Bail out upon SIGKILL when reading memory.

On 2019/08/25 5:22, Ingo Molnar wrote:
>> So I'd be willing to try that (and then if somebody reports a
>> regression we can make it use "fatal_signal_pending()" instead)
> 
> Ok, will post a changelogged patch (unless Tetsuo beats me to it?).

Here is a patch. This patch also tries to fix the return-code handling when
a partial read/write has happened (if we stop due to a pending signal after
some bytes were processed, we should return the number of bytes processed
rather than -EINTR). But the resulting asymmetry between the read and write
functions looks messy. Maybe we should just make /dev/{mem,kmem} killable
for now, and defer making /dev/{mem,kmem} interruptible until the read/write
functions are rewritten.
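
For reference, the caller-visible contract after this patch is the usual
short-read one: a read()/write() interrupted after making progress returns
the byte count, and only one interrupted before any progress returns -EINTR.
Below is a minimal userspace sketch (not part of this patch; the physical
offset is an arbitrary illustration) of a caller that honors that contract
by retrying on EINTR and accumulating short reads:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Retry on EINTR and accumulate short reads until len bytes or EOF. */
static ssize_t read_full(int fd, void *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = read(fd, (char *)buf + done, len - done);

		if (n < 0) {
			if (errno == EINTR)
				continue;	/* interrupted before progress: retry */
			return -1;		/* real error */
		}
		if (n == 0)
			break;			/* nothing more readable */
		done += n;			/* partial read: keep reading */
	}
	return done;
}

int main(void)
{
	char buf[4096];
	int fd = open("/dev/mem", O_RDONLY);	/* needs root/CAP_SYS_RAWIO */

	if (fd < 0) {
		perror("open /dev/mem");
		return EXIT_FAILURE;
	}
	/* 0x90000 is an arbitrary, illustrative physical offset. */
	if (lseek(fd, 0x90000, SEEK_SET) < 0 ||
	    read_full(fd, buf, sizeof(buf)) < 0) {
		perror("read /dev/mem");
		close(fd);
		return EXIT_FAILURE;
	}
	close(fd);
	return EXIT_SUCCESS;
}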

 drivers/char/mem.c | 89 ++++++++++++++++++++++++++++++------------------------
 1 file changed, 50 insertions(+), 39 deletions(-)

diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index cb8e653..3c6a3c2 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -108,7 +108,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 	ssize_t read, sz;
 	void *ptr;
 	char *bounce;
-	int err;
+	int err = 0;
 
 	if (p != *ppos)
 		return 0;
@@ -132,8 +132,10 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 #endif
 
 	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!bounce)
-		return -ENOMEM;
+	if (!bounce) {
+		err = -ENOMEM;
+		goto failed;
+	}
 
 	while (count > 0) {
 		unsigned long remaining;
@@ -142,7 +144,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 		sz = size_inside_page(p, count);
 		cond_resched();
 		err = -EINTR;
-		if (fatal_signal_pending(current))
+		if (signal_pending(current))
 			goto failed;
 
 		err = -EPERM;
@@ -180,14 +182,11 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 		count -= sz;
 		read += sz;
 	}
+failed:
 	kfree(bounce);
 
 	*ppos += read;
-	return read;
-
-failed:
-	kfree(bounce);
-	return err;
+	return read ? read : err;
 }
 
 static ssize_t write_mem(struct file *file, const char __user *buf,
@@ -197,6 +196,7 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 	ssize_t written, sz;
 	unsigned long copied;
 	void *ptr;
+	int err = 0;
 
 	if (p != *ppos)
 		return -EFBIG;
@@ -223,13 +223,16 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 
 		sz = size_inside_page(p, count);
 		cond_resched();
-		if (fatal_signal_pending(current))
-			return -EINTR;
+		err = -EINTR;
+		if (signal_pending(current))
+			break;
 
+		err = -EPERM;
 		allowed = page_is_allowed(p >> PAGE_SHIFT);
 		if (!allowed)
-			return -EPERM;
+			break;
 
+		err = -EFAULT;
 		/* Skip actual writing when a page is marked as restricted. */
 		if (allowed == 1) {
 			/*
@@ -238,19 +241,14 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 			 * by the kernel or data corruption may occur.
 			 */
 			ptr = xlate_dev_mem_ptr(p);
-			if (!ptr) {
-				if (written)
-					break;
-				return -EFAULT;
-			}
+			if (!ptr)
+				break;
 
 			copied = copy_from_user(ptr, buf, sz);
 			unxlate_dev_mem_ptr(p, ptr);
 			if (copied) {
 				written += sz - copied;
-				if (written)
-					break;
-				return -EFAULT;
+				break;
 			}
 		}
 
@@ -261,7 +259,7 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 	}
 
 	*ppos += written;
-	return written;
+	return written ? written : err;
 }
 
 int __weak phys_mem_access_prot_allowed(struct file *file,
@@ -459,8 +457,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 		while (low_count > 0) {
 			sz = size_inside_page(p, low_count);
 			cond_resched();
-			if (fatal_signal_pending(current))
-				return -EINTR;
+			if (signal_pending(current)) {
+				err = -EINTR;
+				goto failed;
+			}
 
 			/*
 			 * On ia64 if a page has been mapped somewhere as
@@ -468,11 +468,15 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 			 * by the kernel or data corruption may occur
 			 */
 			kbuf = xlate_dev_kmem_ptr((void *)p);
-			if (!virt_addr_valid(kbuf))
-				return -ENXIO;
+			if (!virt_addr_valid(kbuf)) {
+				err = -ENXIO;
+				goto failed;
+			}
 
-			if (copy_to_user(buf, kbuf, sz))
-				return -EFAULT;
+			if (copy_to_user(buf, kbuf, sz)) {
+				err = -EFAULT;
+				goto failed;
+			}
 			buf += sz;
 			p += sz;
 			read += sz;
@@ -483,12 +487,14 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 
 	if (count > 0) {
 		kbuf = (char *)__get_free_page(GFP_KERNEL);
-		if (!kbuf)
-			return -ENOMEM;
+		if (!kbuf) {
+			err = -ENOMEM;
+			goto failed;
+		}
 		while (count > 0) {
 			sz = size_inside_page(p, count);
 			cond_resched();
-			if (fatal_signal_pending(current)) {
+			if (signal_pending(current)) {
 				err = -EINTR;
 				break;
 			}
@@ -510,6 +516,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 		}
 		free_page((unsigned long)kbuf);
 	}
+ failed:
 	*ppos = p;
 	return read ? read : err;
 }
@@ -520,6 +527,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
 {
 	ssize_t written, sz;
 	unsigned long copied;
+	int err = 0;
 
 	written = 0;
 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
@@ -539,8 +547,10 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
 
 		sz = size_inside_page(p, count);
 		cond_resched();
-		if (fatal_signal_pending(current))
-			return -EINTR;
+		if (signal_pending(current)) {
+			err = -EINTR;
+			break;
+		}
 
 		/*
 		 * On ia64 if a page has been mapped somewhere as uncached, then
@@ -548,15 +558,16 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
 		 * corruption may occur.
 		 */
 		ptr = xlate_dev_kmem_ptr((void *)p);
-		if (!virt_addr_valid(ptr))
-			return -ENXIO;
+		if (!virt_addr_valid(ptr)) {
+			err = -ENXIO;
+			break;
+		}
 
 		copied = copy_from_user(ptr, buf, sz);
 		if (copied) {
 			written += sz - copied;
-			if (written)
-				break;
-			return -EFAULT;
+			err = -EFAULT;
+			break;
 		}
 		buf += sz;
 		p += sz;
@@ -565,7 +576,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
 	}
 
 	*ppos += written;
-	return written;
+	return written ? written : err;
 }
 
 /*
@@ -600,7 +611,7 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
 			unsigned long n;
 
 			cond_resched();
-			if (fatal_signal_pending(current)) {
+			if (signal_pending(current)) {
 				err = -EINTR;
 				break;
 			}
-- 
1.8.3.1
