Message-Id: <20200811000959.2486636-2-posk@google.com>
Date: Mon, 10 Aug 2020 17:09:59 -0700
From: Peter Oskolkov <posk@...gle.com>
To: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
"Paul E . McKenney" <paulmck@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Boqun Feng <boqun.feng@...il.com>, linux-kernel@...r.kernel.org
Cc: Paul Turner <pjt@...gle.com>,
Chris Kennelly <ckennelly@...gle.com>,
Peter Oskolkov <posk@...k.io>, Peter Oskolkov <posk@...gle.com>
Subject: [PATCH 2/2 v3] rseq/selftests: test MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ

Based on Google-internal RSEQ work done by
Paul Turner and Andrew Hunter.

This patch adds a selftest for MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ.
The test quite often fails without the previous patch in this patchset,
but consistently passes with it.
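
For context, here is a minimal usage sketch of the command being tested
(a sketch only: it assumes the kernel side from patch 1/2 of this
series, and rseq_fence_register()/rseq_fence_cpu() are illustrative
names, not part of the patch):

  #include <syscall.h>
  #include <unistd.h>

  #define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ          (1<<7)
  #define MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ (1<<8)

  static int sys_membarrier(int cmd, int flags)
  {
          return syscall(__NR_membarrier, cmd, flags);
  }

  /* Register once per process before issuing rseq fences. */
  static int rseq_fence_register(void)
  {
          return sys_membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0);
  }

  /* Restart any rseq critical section running on @cpu; the test below
   * passes the target cpu number in the flags argument. */
  static int rseq_fence_cpu(int cpu)
  {
          return sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, cpu);
  }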

v3: added rseq_offset_deref_addv() to x86_64 to make the test
more explicit; on other architectures I kept using the existing
rseq_cmpeqv_cmpeqv_storev(), as I have no easy way to test
there. Added a comment explaining why the test works this way.
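
For clarity, here is a plain-C sketch of what rseq_offset_deref_addv()
commits, atomically with respect to preemption and migration on the
given cpu (illustrative only; the real helper is the inline asm added
to rseq-x86.h below, and it returns -1 on abort):

  #include <stdint.h>     /* intptr_t */
  #include <sys/types.h>  /* off_t */

  static int rseq_offset_deref_addv_sketch(intptr_t *ptr, off_t off,
                                           intptr_t inc, int cpu)
  {
          /* The cpu check is done by the rseq machinery, not here. */
          (void)cpu;

          /* Dereference the base pointer, apply the offset, and load
           * the pointer stored there: pval = *(*ptr + off). */
          intptr_t *pval = *(intptr_t **)(*ptr + off);

          *pval += inc;   /* in the test below: head->data += 1 */
          return 0;
  }
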
Signed-off-by: Peter Oskolkov <posk@...gle.com>
---
.../selftests/rseq/basic_percpu_ops_test.c | 196 ++++++++++++++++++
tools/testing/selftests/rseq/rseq-x86.h | 55 +++++
2 files changed, 251 insertions(+)
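
In outline, the test below exercises the command like this (a summary
of the code in the diff, not additional code):

  /*
   * Manager thread:
   *   percpu_list_ptr = &list_a;      // publish the "active" list
   *   loop {
   *       check the inactive list is unchanged;
   *       percpu_list_ptr = &list_b;  // swap the active list
   *       sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, cpu);
   *       // ...restarts rseq sections still using the old pointer;
   *       // then symmetrically back to list_a.
   *   }
   * Worker threads, in one rseq critical section per iteration:
   *   load percpu_list_ptr, dereference it, and increment the current
   *   cpu's counter. A worker that kept using a stale pointer would
   *   modify the inactive list, which the manager would detect.
   */
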
diff --git a/tools/testing/selftests/rseq/basic_percpu_ops_test.c b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
index eb3f6db36d36..c9784a3d19fb 100644
--- a/tools/testing/selftests/rseq/basic_percpu_ops_test.c
+++ b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
@@ -3,16 +3,22 @@
#include <assert.h>
#include <pthread.h>
#include <sched.h>
+#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
+#include <syscall.h>
+#include <unistd.h>
#include "rseq.h"
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ (1<<7)
+#define MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ (1<<8)
+
struct percpu_lock_entry {
intptr_t v;
} __attribute__((aligned(128)));
@@ -289,6 +295,194 @@ void test_percpu_list(void)
assert(sum == expected_sum);
}
+struct test_membarrier_thread_args {
+ int stop;
+ intptr_t percpu_list_ptr;
+};
+
+/* Worker threads modify data in their "active" percpu lists. */
+void *test_membarrier_worker_thread(void *arg)
+{
+ struct test_membarrier_thread_args *args =
+ (struct test_membarrier_thread_args *)arg;
+ const int iters = 10 * 1000 * 1000;
+ int i;
+
+ if (rseq_register_current_thread()) {
+ fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
+ errno, strerror(errno));
+ abort();
+ }
+
+ /* Wait for initialization. */
+ while (!atomic_load(&args->percpu_list_ptr)) {}
+
+ for (i = 0; i < iters; ++i) {
+ int ret;
+
+ do {
+ int cpu = rseq_cpu_start();
+#if defined(__x86_64__)
+ /* For x86_64, we have rseq_offset_deref_addv. */
+ ret = rseq_offset_deref_addv(&args->percpu_list_ptr,
+ 128 * cpu, 1, cpu);
+#else
+ /*
+ * For other architectures, we rely on the fact that
+ * the manager thread keeps list_ptr alive, so we can
+ * use rseq_cmpeqv_cmpeqv_storev to make sure the
+ * list_ptr we got outside of the rseq critical
+ * section is still the "active" one.
+ */
+ struct percpu_list *list_ptr = (struct percpu_list *)
+ atomic_load(&args->percpu_list_ptr);
+
+ struct percpu_list_node *node = list_ptr->c[cpu].head;
+ const intptr_t prev = node->data;
+
+ ret = rseq_cmpeqv_cmpeqv_storev(&node->data, prev,
+ &args->percpu_list_ptr,
+ (intptr_t)list_ptr, prev + 1, cpu);
+#endif
+ } while (rseq_unlikely(ret));
+ }
+
+ if (rseq_unregister_current_thread()) {
+ fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
+ errno, strerror(errno));
+ abort();
+ }
+ return NULL;
+}
+
+void test_membarrier_init_percpu_list(struct percpu_list *list)
+{
+ int i;
+
+ memset(list, 0, sizeof(*list));
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ struct percpu_list_node *node;
+
+ node = malloc(sizeof(*node));
+ assert(node);
+ node->data = 0;
+ node->next = NULL;
+ list->c[i].head = node;
+ }
+}
+
+void test_membarrier_free_percpu_list(struct percpu_list *list)
+{
+ int i;
+
+ for (i = 0; i < CPU_SETSIZE; i++)
+ free(list->c[i].head);
+}
+
+static int sys_membarrier(int cmd, int flags)
+{
+ return syscall(__NR_membarrier, cmd, flags);
+}
+
+/*
+ * The manager thread swaps per-cpu lists that worker threads see,
+ * and validates that there are no unexpected modifications.
+ */
+void *test_membarrier_manager_thread(void *arg)
+{
+ struct test_membarrier_thread_args *args =
+ (struct test_membarrier_thread_args *)arg;
+ struct percpu_list list_a, list_b;
+ intptr_t expect_a = 0, expect_b = 0;
+ int cpu_a = 0, cpu_b = 0;
+
+ if (rseq_register_current_thread()) {
+ fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
+ errno, strerror(errno));
+ abort();
+ }
+
+ /* Init lists. */
+ test_membarrier_init_percpu_list(&list_a);
+ test_membarrier_init_percpu_list(&list_b);
+
+ atomic_store(&args->percpu_list_ptr, (intptr_t)&list_a);
+
+ while (!atomic_load(&args->stop)) {
+ /* list_a is "active". */
+ cpu_a = rand() % CPU_SETSIZE;
+ /*
+ * As list_b is "inactive", we should never see changes
+ * to list_b.
+ */
+ if (expect_b != atomic_load(&list_b.c[cpu_b].head->data)) {
+ fprintf(stderr, "Membarrier test failed\n");
+ abort();
+ }
+
+ /* Make list_b "active". */
+ atomic_store(&args->percpu_list_ptr, (intptr_t)&list_b);
+ sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, cpu_a);
+ /*
+ * CPU A should now only modify list_b, so the values
+ * in list_a should be stable.
+ */
+ expect_a = atomic_load(&list_a.c[cpu_a].head->data);
+
+ cpu_b = rand() % CPU_SETSIZE;
+ /*
+ * As list_a is "inactive", we should never see changes
+ * to list_a.
+ */
+ if (expect_a != atomic_load(&list_a.c[cpu_a].head->data)) {
+ fprintf(stderr, "Membarrier test failed\n");
+ abort();
+ }
+
+ /* Make list_a "active". */
+ atomic_store(&args->percpu_list_ptr, (intptr_t)&list_a);
+ sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, cpu_b);
+ /* Remember a value from list_b. */
+ expect_b = atomic_load(&list_b.c[cpu_b].head->data);
+ }
+
+ test_membarrier_free_percpu_list(&list_a);
+ test_membarrier_free_percpu_list(&list_b);
+
+ if (rseq_unregister_current_thread()) {
+ fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
+ errno, strerror(errno));
+ abort();
+ }
+ return NULL;
+}
+
+/* Test the MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ membarrier command. */
+void test_membarrier(void)
+{
+ struct test_membarrier_thread_args thread_args;
+ pthread_t worker_threads[CPU_SETSIZE];
+ pthread_t manager_thread;
+ int i;
+
+ sys_membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0);
+
+ thread_args.stop = 0;
+ thread_args.percpu_list_ptr = 0;
+ pthread_create(&manager_thread, NULL,
+ test_membarrier_manager_thread, &thread_args);
+
+ for (i = 0; i < CPU_SETSIZE; i++)
+ pthread_create(&worker_threads[i], NULL,
+ test_membarrier_worker_thread, &thread_args);
+
+ for (i = 0; i < CPU_SETSIZE; i++)
+ pthread_join(worker_threads[i], NULL);
+
+ atomic_store(&thread_args.stop, 1);
+ pthread_join(manager_thread, NULL);
+}
+
int main(int argc, char **argv)
{
if (rseq_register_current_thread()) {
@@ -300,6 +494,8 @@ int main(int argc, char **argv)
test_percpu_spinlock();
printf("percpu_list\n");
test_percpu_list();
+ printf("membarrier\n");
+ test_membarrier();
if (rseq_unregister_current_thread()) {
fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
errno, strerror(errno));
diff --git a/tools/testing/selftests/rseq/rseq-x86.h b/tools/testing/selftests/rseq/rseq-x86.h
index b2da6004fe30..3ed13a6a47e3 100644
--- a/tools/testing/selftests/rseq/rseq-x86.h
+++ b/tools/testing/selftests/rseq/rseq-x86.h
@@ -279,6 +279,61 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
#endif
}
+/*
+ * pval = *(*ptr + off)
+ * *pval += inc;
+ */
+static inline __attribute__((always_inline))
+int rseq_offset_deref_addv(intptr_t *ptr, off_t off, intptr_t inc, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
+#endif
+ /* rbx = *ptr + off */
+ "movq %[ptr], %%rbx\n\t"
+ "addq %[off], %%rbx\n\t"
+ /* rcx = pval = *(*ptr + off) */
+ "movq (%%rbx), %%rcx\n\t"
+ /* *pval += inc */
+ "addq %[inc], (%%rcx)\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_abi] "r" (&__rseq_abi),
+ /* final store input */
+ [ptr] "m" (*ptr),
+ [off] "er" (off),
+ [inc] "er" (inc)
+ : "memory", "cc", "rax", "rbx", "rcx"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
static inline __attribute__((always_inline))
int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
intptr_t *v2, intptr_t newv2,
--
2.28.0.236.gb10cc79966-goog