Message-ID: <4074348.448IFB0yOx@panther>
Date: Wed, 18 Oct 2017 19:04:53 +0300
From: Sergey Klyaus <sergey.m.klyaus@...il.com>
To: Sergey Klyaus <sergey.m.klyaus@...il.com>
Cc: Al Viro <viro@...iv.linux.org.uk>,
linux-fsdevel <linux-fsdevel@...r.kernel.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: [PATCH v2] vfs: Improve overflow checking for stat*() compat fields
The checks for overflowed fields in the compat_statfs[64] structures are
heuristic and error-prone. This patch introduces the ASSIGN_CHECK_OVERFLOW()
family of macros, which assign a field from the kernel representation and,
when the compat and in-kernel field types differ, check whether the value was
truncated or changed sign; if so, they set a flag to true.

These macros may use a compiler builtin for overflow detection. They are
also used for the stat*() syscalls.
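
For example, with these macros the stat helpers in the diff below reduce to
this pattern (taken from cp_old_stat()):

	bool offlag = false;

	ASSIGN_CHECK_OVERFLOW(tmp.st_ino, stat->ino, offlag);
	tmp.st_mode = stat->mode;
	ASSIGN_CHECK_OVERFLOW(tmp.st_nlink, stat->nlink, offlag);
	if (unlikely(offlag))
		return -EOVERFLOW;
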
Signed-off-by: Sergey Klyaus <sergey.m.klyaus@...il.com>
---
This replaces the earlier patch "vfs: fix statfs64() returning impossible
EOVERFLOW for 64-bit f_files".
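
For reviewers unfamiliar with the GCC >= 5 path: __builtin_add_overflow(0,
src, &dest) stores src into dest and returns true when the value does not fit
the destination type. A stand-alone user-space sketch of that behaviour (not
part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ino = 0x100000000ULL;	/* needs more than 32 bits */
	uint32_t st_ino;
	bool overflow = __builtin_add_overflow(0, ino, &st_ino);

	/* prints "st_ino=0 overflow=1" */
	printf("st_ino=%u overflow=%d\n", st_ino, (int)overflow);
	return 0;
}
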
fs/stat.c | 33 ++++----
fs/statfs.c | 195 +++++++++++++++++++++++++------------------
include/linux/build_bug.h | 10 +++
include/linux/compiler-gcc.h | 5 ++
include/linux/compiler.h | 43 ++++++++++
5 files changed, 190 insertions(+), 96 deletions(-)
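
One more illustration of why f_type uses ASSIGN_CHECK_TRUNCATED_BITS() instead
of a plain value comparison: magics with bit 31 set would otherwise trigger a
spurious -EOVERFLOW even though no bits are lost. This is a user-space sketch
only, not part of the patch; it assumes a 64-bit host and uses
BTRFS_SUPER_MAGIC merely as an example of such a magic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	long f_type = 0x9123683E;	/* BTRFS_SUPER_MAGIC, bit 31 set */
	int32_t compat_f_type = f_type;	/* compat field is a signed 32-bit int */

	/* A naive value comparison reports a bogus overflow... */
	printf("naive check: %d\n", (long)compat_f_type != f_type);
	/* ...although the assignment did not lose any set bits. */
	printf("bits lost:   %d\n",
	       (f_type & 0xffffffffUL) != (uint32_t)compat_f_type);
	return 0;
}
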
diff --git a/fs/stat.c b/fs/stat.c
index 8a6aa8c..e18539c 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -16,6 +16,7 @@
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>
+#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -207,6 +208,7 @@ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * sta
{
static int warncount = 5;
struct __old_kernel_stat tmp;
+ bool offlag = false;
if (warncount > 0) {
warncount--;
@@ -219,12 +221,10 @@ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * sta
memset(&tmp, 0, sizeof(struct __old_kernel_stat));
tmp.st_dev = old_encode_dev(stat->dev);
- tmp.st_ino = stat->ino;
- if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
- return -EOVERFLOW;
+ ASSIGN_CHECK_OVERFLOW(tmp.st_ino, stat->ino, offlag);
tmp.st_mode = stat->mode;
- tmp.st_nlink = stat->nlink;
- if (tmp.st_nlink != stat->nlink)
+ ASSIGN_CHECK_OVERFLOW(tmp.st_nlink, stat->nlink, offlag);
+ if (unlikely(offlag))
return -EOVERFLOW;
SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
@@ -237,6 +237,7 @@ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * sta
tmp.st_atime = stat->atime.tv_sec;
tmp.st_mtime = stat->mtime.tv_sec;
tmp.st_ctime = stat->ctime.tv_sec;
+
return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
@@ -295,6 +296,7 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
struct stat tmp;
+ bool offlag = false;
if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
return -EOVERFLOW;
@@ -305,12 +307,11 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
INIT_STRUCT_STAT_PADDING(tmp);
tmp.st_dev = encode_dev(stat->dev);
- tmp.st_ino = stat->ino;
- if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
- return -EOVERFLOW;
+ ASSIGN_CHECK_OVERFLOW(tmp.st_ino, stat->ino, offlag);
tmp.st_mode = stat->mode;
tmp.st_nlink = stat->nlink;
- if (tmp.st_nlink != stat->nlink)
+ ASSIGN_CHECK_OVERFLOW(tmp.st_nlink, stat->nlink, offlag);
+ if (unlikely(offlag))
return -EOVERFLOW;
SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
@@ -431,6 +432,7 @@ SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
struct stat64 tmp;
+ bool offlag = false;
INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
@@ -441,8 +443,8 @@ static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
tmp.st_dev = huge_encode_dev(stat->dev);
tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
- tmp.st_ino = stat->ino;
- if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
+ ASSIGN_CHECK_OVERFLOW(tmp.st_ino, stat->ino, offlag);
+ if (unlikely(offlag))
return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
tmp.__st_ino = stat->ino;
@@ -580,18 +582,17 @@ SYSCALL_DEFINE5(statx,
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
struct compat_stat tmp;
+ bool offlag = false;
if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
return -EOVERFLOW;
memset(&tmp, 0, sizeof(tmp));
tmp.st_dev = old_encode_dev(stat->dev);
- tmp.st_ino = stat->ino;
- if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
- return -EOVERFLOW;
+ ASSIGN_CHECK_OVERFLOW(tmp.st_ino, stat->ino, offlag);
tmp.st_mode = stat->mode;
- tmp.st_nlink = stat->nlink;
- if (tmp.st_nlink != stat->nlink)
+ ASSIGN_CHECK_OVERFLOW(tmp.st_nlink, stat->nlink, offlag);
+ if (unlikely(offlag))
return -EOVERFLOW;
SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
diff --git a/fs/statfs.c b/fs/statfs.c
index fab9b6a..47cf963 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -8,8 +8,34 @@
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
+#include <linux/compiler.h>
+#include <linux/bitops.h>
#include "internal.h"
+/**
+ * ASSIGN_CHECK_TRUNCATED_BITS() assigns the src value to dst and checks that
+ * no set bit in src was lost during the assignment. It uses fls()/fls64()
+ * to avoid the sign extension induced by the 32-bit -> 64-bit conversion.
+ */
+#define fls3264(value) \
+ ({ \
+ typeof(value) __val = (value); \
+ int __last_bit = 0; \
+ if (sizeof(__val) == 8) { \
+ __last_bit = fls64(__val); \
+ } else { \
+ __last_bit = fls(__val); \
+ } \
+ __last_bit; \
+ })
+#define ASSIGN_CHECK_TRUNCATED_BITS(dest, src, offlag) \
+ do { \
+ typeof(src) __src = (src); \
+ typeof(dest) __dst = (dest = __src); \
+ if (fls3264(__dst) != fls3264(__src)) \
+ offlag = true; \
+ } while (0)
+
static int flags_by_mnt(int mnt_flags)
{
int flags = 0;
@@ -110,39 +136,41 @@ static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
{
struct statfs buf;
- if (sizeof(buf) == sizeof(*st))
+ if (sizeof(buf) == sizeof(*st)) {
memcpy(&buf, st, sizeof(*st));
- else {
- if (sizeof buf.f_blocks == 4) {
- if ((st->f_blocks | st->f_bfree | st->f_bavail |
- st->f_bsize | st->f_frsize) &
- 0xffffffff00000000ULL)
- return -EOVERFLOW;
- /*
- * f_files and f_ffree may be -1; it's okay to stuff
- * that into 32 bits
- */
- if (st->f_files != -1 &&
- (st->f_files & 0xffffffff00000000ULL))
- return -EOVERFLOW;
- if (st->f_ffree != -1 &&
- (st->f_ffree & 0xffffffff00000000ULL))
- return -EOVERFLOW;
- }
-
- buf.f_type = st->f_type;
- buf.f_bsize = st->f_bsize;
- buf.f_blocks = st->f_blocks;
- buf.f_bfree = st->f_bfree;
- buf.f_bavail = st->f_bavail;
- buf.f_files = st->f_files;
- buf.f_ffree = st->f_ffree;
- buf.f_fsid = st->f_fsid;
- buf.f_namelen = st->f_namelen;
- buf.f_frsize = st->f_frsize;
- buf.f_flags = st->f_flags;
+ } else {
+ bool offlag = false;
+ /* f_type can be a signed 32-bit field on some architectures, but it
+ * holds a magic number, so we do not care if the value overflows the
+ * destination field as long as no set bits are lost.
+ */
+ ASSIGN_CHECK_TRUNCATED_BITS(buf.f_type, st->f_type, offlag);
+ if (WARN_ON(offlag))
+ return -EOVERFLOW;
+ ASSIGN_CHECK_OVERFLOW(buf.f_namelen, st->f_namelen, offlag);
+ if (WARN_ON(offlag))
+ return -EOVERFLOW;
+ ASSIGN_CHECK_OVERFLOW(buf.f_bsize, st->f_bsize, offlag);
+ ASSIGN_CHECK_OVERFLOW(buf.f_blocks, st->f_blocks, offlag);
+ ASSIGN_CHECK_OVERFLOW(buf.f_bfree, st->f_bfree, offlag);
+ ASSIGN_CHECK_OVERFLOW(buf.f_bavail, st->f_bavail, offlag);
+ /* f_files and f_ffree may be -1; it's okay to put that into
+ * 32 bits
+ */
+ ASSIGN_CHECK_OVERFLOW_EXCEPT(buf.f_files, st->f_files, offlag,
+ 0xffffffffffffffffULL);
+ ASSIGN_CHECK_OVERFLOW_EXCEPT(buf.f_ffree, st->f_ffree, offlag,
+ 0xffffffffffffffffULL);
+ ASSIGN_CHECK_SAME_TYPE(buf.f_fsid.val[0], st->f_fsid.val[0]);
+ ASSIGN_CHECK_SAME_TYPE(buf.f_fsid.val[1], st->f_fsid.val[1]);
+ ASSIGN_CHECK_OVERFLOW(buf.f_frsize, st->f_frsize, offlag);
+ ASSIGN_CHECK_TRUNCATED_BITS(buf.f_flags, st->f_flags, offlag);
memset(buf.f_spare, 0, sizeof(buf.f_spare));
+
+ if (unlikely(offlag))
+ return -EOVERFLOW;
}
+
if (copy_to_user(p, &buf, sizeof(buf)))
return -EFAULT;
return 0;
@@ -247,32 +275,37 @@ SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
static int put_compat_statfs(struct compat_statfs __user *ubuf, struct kstatfs *kbuf)
{
struct compat_statfs buf;
- if (sizeof ubuf->f_blocks == 4) {
- if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail |
- kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
- return -EOVERFLOW;
- /* f_files and f_ffree may be -1; it's okay
- * to stuff that into 32 bits */
- if (kbuf->f_files != 0xffffffffffffffffULL
- && (kbuf->f_files & 0xffffffff00000000ULL))
- return -EOVERFLOW;
- if (kbuf->f_ffree != 0xffffffffffffffffULL
- && (kbuf->f_ffree & 0xffffffff00000000ULL))
- return -EOVERFLOW;
- }
+ bool offlag = false;
+
memset(&buf, 0, sizeof(struct compat_statfs));
- buf.f_type = kbuf->f_type;
- buf.f_bsize = kbuf->f_bsize;
- buf.f_blocks = kbuf->f_blocks;
- buf.f_bfree = kbuf->f_bfree;
- buf.f_bavail = kbuf->f_bavail;
- buf.f_files = kbuf->f_files;
- buf.f_ffree = kbuf->f_ffree;
- buf.f_namelen = kbuf->f_namelen;
- buf.f_fsid.val[0] = kbuf->f_fsid.val[0];
- buf.f_fsid.val[1] = kbuf->f_fsid.val[1];
- buf.f_frsize = kbuf->f_frsize;
- buf.f_flags = kbuf->f_flags;
+
+ /* f_type can be a signed 32-bit field on some architectures, but it holds
+ * a magic number, so we do not care if the value overflows the destination
+ * field as long as no set bits are lost.
+ */
+ ASSIGN_CHECK_TRUNCATED_BITS(buf.f_type, kbuf->f_type, offlag);
+ if (WARN_ON(offlag))
+ return -EOVERFLOW;
+ ASSIGN_CHECK_OVERFLOW(buf.f_namelen, kbuf->f_namelen, offlag);
+ if (WARN_ON(offlag))
+ return -EOVERFLOW;
+
+ ASSIGN_CHECK_OVERFLOW(buf.f_bsize, kbuf->f_bsize, offlag);
+ ASSIGN_CHECK_OVERFLOW(buf.f_blocks, kbuf->f_blocks, offlag);
+ ASSIGN_CHECK_OVERFLOW(buf.f_bfree, kbuf->f_bfree, offlag);
+ ASSIGN_CHECK_OVERFLOW(buf.f_bavail, kbuf->f_bavail, offlag);
+ /* f_files and f_ffree may be -1; it's okay to put that into 32 bits */
+ ASSIGN_CHECK_OVERFLOW_EXCEPT(buf.f_files, kbuf->f_files, offlag,
+ 0xffffffffffffffffULL);
+ ASSIGN_CHECK_OVERFLOW_EXCEPT(buf.f_ffree, kbuf->f_ffree, offlag,
+ 0xffffffffffffffffULL);
+ ASSIGN_CHECK_SAME_TYPE(buf.f_fsid.val[0], kbuf->f_fsid.val[0]);
+ ASSIGN_CHECK_SAME_TYPE(buf.f_fsid.val[1], kbuf->f_fsid.val[1]);
+ ASSIGN_CHECK_OVERFLOW(buf.f_frsize, kbuf->f_frsize, offlag);
+ ASSIGN_CHECK_TRUNCATED_BITS(buf.f_flags, kbuf->f_flags, offlag);
+
+ if (unlikely(offlag))
+ return -EOVERFLOW;
if (copy_to_user(ubuf, &buf, sizeof(struct compat_statfs)))
return -EFAULT;
return 0;
@@ -303,32 +336,34 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
{
struct compat_statfs64 buf;
- if (sizeof(ubuf->f_bsize) == 4) {
- if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
- kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
- return -EOVERFLOW;
- /* f_files and f_ffree may be -1; it's okay
- * to stuff that into 32 bits */
- if (kbuf->f_files != 0xffffffffffffffffULL
- && (kbuf->f_files & 0xffffffff00000000ULL))
- return -EOVERFLOW;
- if (kbuf->f_ffree != 0xffffffffffffffffULL
- && (kbuf->f_ffree & 0xffffffff00000000ULL))
- return -EOVERFLOW;
- }
+ bool offlag = false;
+
memset(&buf, 0, sizeof(struct compat_statfs64));
- buf.f_type = kbuf->f_type;
- buf.f_bsize = kbuf->f_bsize;
- buf.f_blocks = kbuf->f_blocks;
- buf.f_bfree = kbuf->f_bfree;
- buf.f_bavail = kbuf->f_bavail;
- buf.f_files = kbuf->f_files;
- buf.f_ffree = kbuf->f_ffree;
- buf.f_namelen = kbuf->f_namelen;
- buf.f_fsid.val[0] = kbuf->f_fsid.val[0];
- buf.f_fsid.val[1] = kbuf->f_fsid.val[1];
- buf.f_frsize = kbuf->f_frsize;
- buf.f_flags = kbuf->f_flags;
+
+ /* f_type can be a signed 32-bit field on some architectures, but it holds
+ * a magic number, so we do not care if the value overflows the destination
+ * field as long as no set bits are lost.
+ */
+ ASSIGN_CHECK_TRUNCATED_BITS(buf.f_type, kbuf->f_type, offlag);
+ if (WARN_ON(offlag))
+ return -EOVERFLOW;
+ ASSIGN_CHECK_OVERFLOW(buf.f_namelen, kbuf->f_namelen, offlag);
+ if (WARN_ON(offlag))
+ return -EOVERFLOW;
+
+ ASSIGN_CHECK_OVERFLOW(buf.f_bsize, kbuf->f_bsize, offlag);
+ ASSIGN_CHECK_SAME_TYPE(buf.f_blocks, kbuf->f_blocks);
+ ASSIGN_CHECK_SAME_TYPE(buf.f_bfree, kbuf->f_bfree);
+ ASSIGN_CHECK_SAME_TYPE(buf.f_bavail, kbuf->f_bavail);
+ ASSIGN_CHECK_SAME_TYPE(buf.f_files, kbuf->f_files);
+ ASSIGN_CHECK_SAME_TYPE(buf.f_ffree, kbuf->f_ffree);
+ ASSIGN_CHECK_OVERFLOW(buf.f_fsid.val[0], kbuf->f_fsid.val[0], offlag);
+ ASSIGN_CHECK_OVERFLOW(buf.f_fsid.val[1], kbuf->f_fsid.val[1], offlag);
+ ASSIGN_CHECK_OVERFLOW(buf.f_frsize, kbuf->f_frsize, offlag);
+ ASSIGN_CHECK_TRUNCATED_BITS(buf.f_flags, kbuf->f_flags, offlag);
+
+ if (unlikely(offlag))
+ return -EOVERFLOW;
if (copy_to_user(ubuf, &buf, sizeof(struct compat_statfs64)))
return -EFAULT;
return 0;
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index b7d22d6..1d2f563 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -79,6 +79,16 @@
*/
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
+/**
+ * ASSIGN_CHECK_SAME_TYPE() -- assigns the src value to dest, but only if
+ * they have the same type (breaks the build otherwise).
+ */
+#define ASSIGN_CHECK_SAME_TYPE(dest, src) \
+ do { \
+ BUILD_BUG_ON(!__same_type((dest), (src))); \
+ (dest) = (src); \
+ } while (0)
+
#endif /* __CHECKER__ */
#endif /* _LINUX_BUILD_BUG_H */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 16d41de..71b4c29 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -305,6 +305,11 @@
#define __no_sanitize_address __attribute__((no_sanitize_address))
#endif
+#if GCC_VERSION >= 50000
+#define __ASSIGN_CHECK_OVERFLOW(dest, src) \
+ __builtin_add_overflow(0, (src), &dest)
+#endif
+
#if GCC_VERSION >= 50100
/*
* Mark structures as requiring designated initializers.
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index f260ff3..6822b17 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -605,4 +605,47 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
+/**
+ * These are helper macros for assigning 32-bit fields in some syscall
+ * structures where the kernel works with larger, 64-bit values that may
+ * overflow the 32-bit caller's field. If the flag gets set, the syscall
+ * is expected to return -EOVERFLOW.
+ *
+ * The field sizes are deliberately not compared here; skipping that check
+ * also catches a signedness difference between the field types. The
+ * compiler is likely to eliminate the overflow check when the types match.
+ *
+ * ASSIGN_CHECK_OVERFLOW_EXCEPT() is similar, but allows a special magic
+ * constant (such as -1 represented as unsigned) to be truncated.
+ *
+ * @dest: name of the destination field
+ * @src: name of the source field
+ * @offlag: name of the bool variable used to store the overflow flag
+ * @value: magic value that is allowed to be truncated
+ */
+#ifndef __ASSIGN_CHECK_OVERFLOW
+#define __ASSIGN_CHECK_OVERFLOW(dest, src) \
+ ({ \
+ typeof(src) ____src = (src); \
+ bool __offlag = false; \
+ (dest) = ____src; \
+ do { \
+ typeof(src) __val = dest; \
+ __offlag = (__val != ____src); \
+ } while (0); \
+ __offlag; \
+ })
+#endif
+#define ASSIGN_CHECK_OVERFLOW(dest, src, offlag) \
+ do { \
+ if (__ASSIGN_CHECK_OVERFLOW(dest, src)) \
+ offlag = true; \
+ } while (0)
+#define ASSIGN_CHECK_OVERFLOW_EXCEPT(dest, src, offlag, value) \
+ do { \
+ typeof(src) __src = (src); \
+ if (__ASSIGN_CHECK_OVERFLOW(dest, __src) && __src != (value)) \
+ offlag = true; \
+ } while (0)
+
#endif /* __LINUX_COMPILER_H */
--
2.10.2