lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <m3ljzmdrqh.fsf@gravicappa.englab.brq.redhat.com>
Date:	Mon, 28 Jul 2008 16:10:30 +0200
From:	Vitaly Mayatskikh <v.mayatskih@...il.com>
To:	linux-kernel@...r.kernel.org
Cc:	Linus Torvalds <torvalds@...ux-foundation.org>,
	Andi Kleen <andi@...stfloor.org>, Ingo Molnar <mingo@...e.hu>
Subject: [PATCH] x86: Optimize tail handling for copy_user

Reduce protection faults count in copy_user_handle_tail routine by
limiting clear length to the end of page as was suggested by Linus.

Linus, should I add your "Signed-off-by:" to patches that implement your
ideas? The clear-length calculation was changed a bit for correct
handling of page-aligned addresses.

I'm using this systemtap script for sanity testing:
http://people.redhat.com/vmayatsk/copy_user_x8664/
It looks very ugly, but works.

Signed-off-by: Vitaly Mayatskikh <v.mayatskih@...il.com>

diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index f4df6e7..42baeca 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -161,23 +161,33 @@ EXPORT_SYMBOL(copy_in_user);
 /*
  * Try to copy last bytes and clear the rest if needed.
  * Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
+ * it is not necessary to do low level optimization of tail handling.
  */
 unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
+copy_user_handle_tail(char *to, char *from, unsigned len, unsigned clear_rest)
 {
 	char c;
-	unsigned zero_len;
+	unsigned clear_len;
 
-	for (; len; --len) {
-		if (__get_user_nocheck(c, from++, sizeof(char)))
-			break;
-		if (__put_user_nocheck(c, to++, sizeof(char)))
+	while (len) {
+		if (__get_user_nocheck(c, from, 1))
 			break;
+		from++;
+		if (__put_user_nocheck(c, to, 1))
+			/* Fault in destination, nothing to clear */
+			goto out;
+		to++;
+		len--;
 	}
 
-	for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
-		if (__put_user_nocheck(c, to++, sizeof(char)))
-			break;
+	if (clear_rest) {
+		unsigned long addr = (unsigned long)to;
+		clear_len = len;
+		/* Limit clear_len to the rest of the page */
+		if (((addr + len) & PAGE_MASK) != (addr & PAGE_MASK))
+			clear_len = len - ((addr + len) & ~PAGE_MASK);
+		memset(to, 0, clear_len);
+	}
+out:
 	return len;
 }
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 5cfd295..c5c5af0 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -196,6 +196,6 @@ static inline int __copy_from_user_inatomic_nocache(void *dst,
 }
 
 unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+copy_user_handle_tail(char *to, char *from, unsigned len, unsigned clear_rest);
 
 #endif /* ASM_X86__UACCESS_64_H */

-- 
wbr, Vitaly
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ