Message-ID: <20180409002738.163941-73-alexander.levin@microsoft.com>
Date: Mon, 9 Apr 2018 00:28:36 +0000
From: Sasha Levin <Alexander.Levin@...rosoft.com>
To: "stable@...r.kernel.org" <stable@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
CC: Kees Cook <keescook@...omium.org>,
Paul Lawrence <paullawrence@...gle.com>,
Sasha Levin <Alexander.Levin@...rosoft.com>
Subject: [PATCH AUTOSEL for 4.4 073/162] seccomp: Adjust selftests to avoid
 double-join

From: Kees Cook <keescook@...omium.org>

[ Upstream commit 93bd70e3330be45542c455dde11d8dc657ab3044 ]

While glibc's pthread implementation is rather forgiving about repeat
thread joining, Bionic has recently become much more strict. To deal with
this, actually track which threads have been successfully joined and kill
the rest at teardown.
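
For background: POSIX leaves the result of joining a thread that has
already been joined undefined; glibc usually tolerates the repeat join,
while Bionic aborts. A minimal standalone sketch of the join-then-clear
idiom the patch adopts (illustrative only, not part of the patch; it
treats a zero pthread_t as "no thread", as the selftest does, even
though pthread_t is formally opaque):

    #include <pthread.h>
    #include <signal.h>

    static void *worker(void *arg)
    {
            (void)arg;
            return NULL;
    }

    int main(void)
    {
            pthread_t tid;
            void *status;

            if (pthread_create(&tid, NULL, worker, NULL))
                    return 1;

            /* Join exactly once; clear the handle on success so a
             * later cleanup pass knows this thread is already gone.
             */
            if (pthread_join(tid, &status) == 0)
                    tid = 0;

            /* Cleanup: a thread still holding a tid was never joined
             * and may be stuck; SIGKILL here takes down the whole
             * process, acceptable for a per-test child as in the
             * selftest harness.
             */
            if (tid)
                    pthread_kill(tid, SIGKILL);

            return 0;
    }

Build with "cc -pthread"; the point is that the join path and the kill
path can never both see the same thread.
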
Based on a patch from Paul Lawrence.

Cc: Paul Lawrence <paullawrence@...gle.com>
Signed-off-by: Kees Cook <keescook@...omium.org>
Signed-off-by: Sasha Levin <alexander.levin@...rosoft.com>
---
 tools/testing/selftests/seccomp/seccomp_bpf.c | 51 ++++++++++++++++++---------
 1 file changed, 34 insertions(+), 17 deletions(-)

diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 882fe83a3554..821aa0a408dc 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1615,6 +1615,23 @@ struct tsync_sibling {
 	struct __test_metadata *metadata;
 };
 
+/*
+ * To avoid joining joined threads (which is not allowed by Bionic),
+ * make sure we both successfully join and clear the tid to skip a
+ * later join attempt during fixture teardown. Any remaining threads
+ * will be directly killed during teardown.
+ */
+#define PTHREAD_JOIN(tid, status) \
+	do { \
+		int _rc = pthread_join(tid, status); \
+		if (_rc) { \
+			TH_LOG("pthread_join of tid %u failed: %d\n", \
+				(unsigned int)tid, _rc); \
+		} else { \
+			tid = 0; \
+		} \
+	} while (0)
+
 FIXTURE_DATA(TSYNC) {
 	struct sock_fprog root_prog, apply_prog;
 	struct tsync_sibling sibling[TSYNC_SIBLINGS];
@@ -1683,14 +1700,14 @@ FIXTURE_TEARDOWN(TSYNC)
 
 	for ( ; sib < self->sibling_count; ++sib) {
 		struct tsync_sibling *s = &self->sibling[sib];
-		void *status;
 
 		if (!s->tid)
 			continue;
-		if (pthread_kill(s->tid, 0)) {
-			pthread_cancel(s->tid);
-			pthread_join(s->tid, &status);
-		}
+		/*
+		 * If a thread is still running, it may be stuck, so hit
+		 * it over the head really hard.
+		 */
+		pthread_kill(s->tid, 9);
 	}
 	pthread_mutex_destroy(&self->mutex);
 	pthread_cond_destroy(&self->cond);
@@ -1780,9 +1797,9 @@ TEST_F(TSYNC, siblings_fail_prctl)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure diverging sibling failed to call prctl. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
 }
 
@@ -1822,9 +1839,9 @@ TEST_F(TSYNC, two_siblings_with_ancestor)
 	}
 	pthread_mutex_unlock(&self->mutex);
 	/* Ensure they are both killed and don't exit cleanly. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
 }
 
@@ -1848,9 +1865,9 @@ TEST_F(TSYNC, two_sibling_want_nnp)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure they are both upset about lacking nnp. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
 }
 
@@ -1888,9 +1905,9 @@ TEST_F(TSYNC, two_siblings_with_no_filter)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure they are both killed and don't exit cleanly. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
 }
 
@@ -1933,9 +1950,9 @@ TEST_F(TSYNC, two_siblings_with_one_divergence)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure they are both unkilled. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
 }
 
@@ -1992,7 +2009,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 		TH_LOG("cond broadcast non-zero");
 	}
 	pthread_mutex_unlock(&self->mutex);
-	pthread_join(self->sibling[sib].tid, &status);
+	PTHREAD_JOIN(self->sibling[sib].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
 	/* Poll for actual task death. pthread_join doesn't guarantee it. */
 	while (!kill(self->sibling[sib].system_tid, 0))
@@ -2017,7 +2034,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 		TH_LOG("cond broadcast non-zero");
 	}
 	pthread_mutex_unlock(&self->mutex);
-	pthread_join(self->sibling[sib].tid, &status);
+	PTHREAD_JOIN(self->sibling[sib].tid, &status);
 	EXPECT_EQ(0, (long)status);
 	/* Poll for actual task death. pthread_join doesn't guarantee it. */
 	while (!kill(self->sibling[sib].system_tid, 0))
--
2.15.1