Message-ID: <lsq.1496331795.184883283@decadent.org.uk>
Date:   Thu, 01 Jun 2017 16:43:15 +0100
From:   Ben Hutchings <ben@...adent.org.uk>
To:     linux-kernel@...r.kernel.org, stable@...r.kernel.org
CC:     akpm@...ux-foundation.org,
        "Stephen Röttger" <sroettger@...gle.com>,
        "Mathias Svensson" <idolf@...gle.com>,
        "Kees Cook" <keescook@...omium.org>,
        "James Morris" <james.l.morris@...cle.com>
Subject: [PATCH 3.16 018/212] samples/seccomp: fix 64-bit comparison macros

3.16.44-rc1 review patch.  If anyone has any objections, please let me know.

------------------

From: Mathias Svensson <idolf@...gle.com>

commit 916cafdc95843fb9af5fd5f83ca499d75473d107 upstream.

There were some bugs in the JNE64 and JLT64 comparison macros. This fixes
them, improves comments, and cleans up the file while we are at it.
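
For readers unfamiliar with these helpers, here is a minimal sketch of how the
64-bit macros are typically combined, modelled on samples/seccomp/bpf-fancy.c.
It is illustrative only and not part of this patch: the choice of write(2), the
argument index, and the 4096 limit are arbitrary, and a 64-bit arch is assumed
so that ARG_64/JLT64 apply.

	/*
	 * Illustrative sketch only (not part of this patch), in the style of
	 * samples/seccomp/bpf-fancy.c: kill everything except write(2), and
	 * only allow write(2) when its count argument (arg 2) is below 4096.
	 */
	#include <linux/filter.h>
	#include <linux/seccomp.h>
	#include <linux/unistd.h>
	#include <sys/prctl.h>
	#include <unistd.h>

	#include "bpf-helper.h"

	static int install_filter(void)
	{
		struct bpf_labels l = { 0 };
		struct sock_filter filter[] = {
			LOAD_SYSCALL_NR,
			SYSCALL(__NR_write, JUMP(&l, check_count)),
			DENY,				/* any other syscall: kill */

			LABEL(&l, check_count),
			ARG_64(2),			/* count: lo -> M[0], hi -> M[1] and A */
			JLT64(4096, 0, JUMP(&l, allow)),/* match when count < 4096 */
			DENY,

			LABEL(&l, allow),
			ALLOW,
		};
		struct sock_fprog prog = {
			.filter = filter,
			.len = (unsigned short)(sizeof(filter)/sizeof(*filter)),
		};

		/* Turn the JUMP/LABEL pseudo-instructions into real offsets. */
		if (bpf_resolve_jumps(&l, filter, sizeof(filter)/sizeof(*filter)))
			return -1;
		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
			return -1;
		return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	}

With the old macros the low-word comparison in JLT64 and the jump targets in
JNE64 did not follow the invariant described below, which is what this change
corrects.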

Reported-by: Stephen Röttger <sroettger@...gle.com>
Signed-off-by: Mathias Svensson <idolf@...gle.com>
Signed-off-by: Kees Cook <keescook@...omium.org>
Signed-off-by: James Morris <james.l.morris@...cle.com>
Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
 samples/seccomp/bpf-helper.h | 125 +++++++++++++++++++++++++------------------
 1 file changed, 72 insertions(+), 53 deletions(-)

--- a/samples/seccomp/bpf-helper.h
+++ b/samples/seccomp/bpf-helper.h
@@ -138,7 +138,7 @@ union arg64 {
 #define ARG_32(idx) \
 	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))
 
-/* Loads hi into A and lo in X */
+/* Loads lo into M[0] and hi into M[1] and A */
 #define ARG_64(idx) \
 	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
 	BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
@@ -153,88 +153,107 @@ union arg64 {
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
 	jt
 
-/* Checks the lo, then swaps to check the hi. A=lo,X=hi */
+#define JA32(value, jt) \
+	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
+	jt
+
+#define JGE32(value, jt) \
+	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
+	jt
+
+#define JGT32(value, jt) \
+	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
+	jt
+
+#define JLE32(value, jt) \
+	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
+	jt
+
+#define JLT32(value, jt) \
+	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
+	jt
+
+/*
+ * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
+ * A and M[1]. This invariant is kept by restoring A if necessary.
+ */
 #define JEQ64(lo, hi, jt) \
+	/* if (hi != arg.hi) goto NOMATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
 	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	/* if (lo != arg.lo) goto NOMATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
 #define JNE64(lo, hi, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 5, 0), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	/* if (hi != arg.hi) goto MATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo != arg.lo) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
-
-#define JA32(value, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
-	jt
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
 #define JA64(lo, hi, jt) \
+	/* if (hi & arg.hi) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo & arg.lo) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
-
-#define JGE32(value, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
-	jt
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
-#define JLT32(value, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
-	jt
-
-/* Shortcut checking if hi > arg.hi. */
 #define JGE64(lo, hi, jt) \
+	/* if (hi > arg.hi) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
+	/* if (hi != arg.hi) goto NOMATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo >= arg.lo) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
-
-#define JLT64(lo, hi, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
-	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
-	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
-
-#define JGT32(value, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
-	jt
-
-#define JLE32(value, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
-	jt
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
-/* Check hi > args.hi first, then do the GE checking */
 #define JGT64(lo, hi, jt) \
+	/* if (hi > arg.hi) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
+	/* if (hi != arg.hi) goto NOMATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo > arg.lo) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
 #define JLE64(lo, hi, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 6, 0), \
-	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	/* if (hi < arg.hi) goto MATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
+	/* if (hi != arg.hi) goto NOMATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo <= arg.lo) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
+	jt, \
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
+
+#define JLT64(lo, hi, jt) \
+	/* if (hi < arg.hi) goto MATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
+	/* if (hi != arg.hi) goto NOMATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo < arg.lo) goto MATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
 #define LOAD_SYSCALL_NR \
 	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
