Message-Id: <83c72471f9f79fa982508bd4db472686a67b8320.1601478774.git.yifeifz2@illinois.edu>
Date: Wed, 30 Sep 2020 10:19:14 -0500
From: YiFei Zhu <zhuyifei1999@...il.com>
To: containers@...ts.linux-foundation.org
Cc: YiFei Zhu <yifeifz2@...inois.edu>, bpf@...r.kernel.org,
linux-kernel@...r.kernel.org, Aleksa Sarai <cyphar@...har.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Andy Lutomirski <luto@...capital.net>,
David Laight <David.Laight@...lab.com>,
Dimitrios Skarlatos <dskarlat@...cmu.edu>,
Giuseppe Scrivano <gscrivan@...hat.com>,
Hubertus Franke <frankeh@...ibm.com>,
Jack Chen <jianyan2@...inois.edu>,
Jann Horn <jannh@...gle.com>,
Josep Torrellas <torrella@...inois.edu>,
Kees Cook <keescook@...omium.org>,
Tianyin Xu <tyxu@...inois.edu>,
Tobin Feldman-Fitzthum <tobin@....com>,
Tycho Andersen <tycho@...ho.pizza>,
Valentin Rothberg <vrothber@...hat.com>,
Will Drewry <wad@...omium.org>
Subject: [PATCH v3 seccomp 3/5] seccomp/cache: Lookup syscall allowlist for fast path
From: YiFei Zhu <yifeifz2@...inois.edu>
The fast (common) path for seccomp should be that the filter permits
the syscall to pass through, and failing seccomp is expected to be
an exceptional case; it is not expected for userspace to call a
denylisted syscall over and over.
This first finds the allow bitmask for the current architecture by
comparing the arch field of struct seccomp_data against each
architecture known at build time (SECCOMP_ARCH_DEFAULT and, where it
exists, SECCOMP_ARCH_COMPAT); the compiler is expected to flatten
these compares into straight-line code. It then does a test_bit()
against that bitmask, after bounds-checking the syscall number and
clamping it with array_index_nospec() to prevent a speculative
out-of-bounds read. If the bit is set, there is no need to run the
full filter; SECCOMP_RET_ALLOW is returned immediately.
Co-developed-by: Dimitrios Skarlatos <dskarlat@...cmu.edu>
Signed-off-by: Dimitrios Skarlatos <dskarlat@...cmu.edu>
Signed-off-by: YiFei Zhu <yifeifz2@...inois.edu>
---
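Note for reviewers (not part of the commit message): below is a
minimal standalone model of the cache lookup that can be compiled and
run in userspace. It is an illustration only, not kernel code; the
64-bit mask is a hypothetical stand-in for the kernel's per-arch
bitmaps, and model_cache_check() is an invented name.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bit N set => syscall N is always allowed by the filter, so the
     * BPF program can be skipped entirely for that syscall. */
    static bool model_cache_check(uint64_t allow_bitmap, int syscall_nr)
    {
            /* Out-of-range numbers miss the cache and fall back to
             * running the full filter. (The kernel additionally clamps
             * the index with array_index_nospec() to stop speculative
             * out-of-bounds reads; a plain bounds check suffices for
             * this userspace model.) */
            if (syscall_nr < 0 || syscall_nr >= 64)
                    return false;

            return (allow_bitmap >> syscall_nr) & 1;
    }

    int main(void)
    {
            uint64_t allow = (1ULL << 0) | (1ULL << 1); /* allow nr 0, 1 */

            printf("%d %d %d\n",
                   model_cache_check(allow, 0),   /* 1: cached allow  */
                   model_cache_check(allow, 2),   /* 0: run filter    */
                   model_cache_check(allow, -1)); /* 0: out of range  */
            return 0;
    }

A cache miss is never a denial: it only means the full filter must
run, which is the same behavior as before this patch.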
kernel/seccomp.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index f09c9e74ae05..bed3b2a7f6c8 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -172,6 +172,12 @@ struct seccomp_cache_filter_data { };
static inline void seccomp_cache_prepare(struct seccomp_filter *sfilter)
{
}
+
+static inline bool seccomp_cache_check(const struct seccomp_filter *sfilter,
+ const struct seccomp_data *sd)
+{
+ return false;
+}
#endif /* CONFIG_SECCOMP_CACHE_NR_ONLY */
/**
@@ -331,6 +337,49 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
return 0;
}
+#ifdef CONFIG_SECCOMP_CACHE_NR_ONLY
+static bool seccomp_cache_check_bitmap(const void *bitmap, size_t bitmap_size,
+ int syscall_nr)
+{
+ if (unlikely(syscall_nr < 0 || syscall_nr >= bitmap_size))
+ return false;
+ syscall_nr = array_index_nospec(syscall_nr, bitmap_size);
+
+ return test_bit(syscall_nr, bitmap);
+}
+
+/**
+ * seccomp_cache_check - lookup seccomp cache
+ * @sfilter: The seccomp filter
+ * @sd: The seccomp data to lookup the cache with
+ *
+ * Returns true if the seccomp_data is cached and allowed.
+ */
+static bool seccomp_cache_check(const struct seccomp_filter *sfilter,
+ const struct seccomp_data *sd)
+{
+ int syscall_nr = sd->nr;
+ const struct seccomp_cache_filter_data *cache = &sfilter->cache;
+
+#ifdef SECCOMP_ARCH_DEFAULT
+ if (likely(sd->arch == SECCOMP_ARCH_DEFAULT))
+ return seccomp_cache_check_bitmap(cache->syscall_allow_default,
+ SECCOMP_ARCH_DEFAULT_NR,
+ syscall_nr);
+#endif /* SECCOMP_ARCH_DEFAULT */
+
+#ifdef SECCOMP_ARCH_COMPAT
+ if (likely(sd->arch == SECCOMP_ARCH_COMPAT))
+ return seccomp_cache_check_bitmap(cache->syscall_allow_compat,
+ SECCOMP_ARCH_COMPAT_NR,
+ syscall_nr);
+#endif /* SECCOMP_ARCH_COMPAT */
+
+ WARN_ON_ONCE(true);
+ return false;
+}
+#endif /* CONFIG_SECCOMP_CACHE_NR_ONLY */
+
/**
* seccomp_run_filters - evaluates all seccomp filters against @sd
* @sd: optional seccomp data to be passed to filters
@@ -353,6 +402,9 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
if (WARN_ON(f == NULL))
return SECCOMP_RET_KILL_PROCESS;
+ if (seccomp_cache_check(f, sd))
+ return SECCOMP_RET_ALLOW;
+
/*
* All filters in the list are evaluated and the lowest BPF return
* value always takes priority (ignoring the DATA).
--
2.28.0