Message-Id: <1490204338-1856-3-git-send-email-longman@redhat.com>
Date:   Wed, 22 Mar 2017 13:38:38 -0400
From:   Waiman Long <longman@...hat.com>
To:     Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Jonathan Corbet <corbet@....net>
Cc:     linux-kernel@...r.kernel.org, linux-doc@...r.kernel.org,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Davidlohr Bueso <dave@...olabs.net>,
        Mike Galbraith <umgwanakikbuti@...il.com>,
        Scott J Norton <scott.norton@....com>,
        Waiman Long <longman@...hat.com>
Subject: [PATCH-tip v6 02/22] perf bench: New microbenchmark for userspace rwlock performance

This microbenchmark simulates how the use of different futex types
can affect the performance of userspace rwlocks. The usage is:

        perf bench futex rwlock <options>

Two sets of simple rwlock lock and unlock functions are implemented
using wait-wake futexes and the glibc rwlock respectively. The
microbenchmark then runs the locking rate measurement tests with
either one of these rwlocks or with all of them consecutively.
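
For example, a run similar to the sample shown below can be started
with an invocation like:

        perf bench futex rwlock -f WW -t 36 -r 10

which exercises the wait-wake (WW) futex rwlock with 36 threads for a
10-second measurement run.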

Sample output from this microbenchmark was as follows:

  [PID 19333]: 36 threads doing WW futex lockings (load=1) for 10 secs.

  Locking statistics:
  Test run time                = 10.00s
  Total exclusive locking ops  = 16,892,644
  Total shared locking ops     = 16,892,644
  Exclusive lock slowpaths     = 13,737,148
  Exclusive unlock slowpaths   = 9,792,528
  Shared lock slowpaths        = 1,922,379
  Shared unlock slowpaths      = 4,218,813
  EAGAIN lock errors           = 15,433,105
  Process wakeups              = 226,422

  Percentages:
  Exclusive lock slowpaths     = 81.3%
  Shared lock slowpaths        = 11.4%
  Exclusive unlock slowpaths   = 58.0%
  Shared unlock slowpaths      = 25.0%
  EAGAIN lock errors           = 98.6%
  Process wakeups              = 1.6%

  Shared Lock Batch Stats:
  Total shared lock batches    = 10,569,959
  Avg batch size               = 1.6
  Max batch size               = 26

  Per-thread Locking Rates:
  Avg = 93,841 ops/sec (+- 0.40%)
  Min = 89,269 ops/sec
  Max = 101,021 ops/sec
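
For reference, the percentages above are computed against the
corresponding operation counts; e.g. the exclusive lock slowpath
ratio is 13,737,148 / 16,892,644 = 81.3%, while the EAGAIN ratio is
taken against the combined lock slowpath counts, i.e.
15,433,105 / (13,737,148 + 1,922,379) = 98.6%. The average batch size
is the total shared locking ops divided by the number of batches
(16,892,644 / 10,569,959 = 1.6).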

Signed-off-by: Waiman Long <longman@...hat.com>
---
 tools/perf/Documentation/perf-bench.txt |   3 +
 tools/perf/bench/bench.h                |   1 +
 tools/perf/bench/futex-locks.c          | 668 +++++++++++++++++++++++++++++++-
 tools/perf/builtin-bench.c              |   1 +
 4 files changed, 654 insertions(+), 19 deletions(-)

diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index 1fa5a74..e5a7079 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -206,6 +206,9 @@ Suite for evaluating futex lock_pi calls.
 *mutex*::
 Suite for evaluating futex calls for implementing userspace mutexes.
 
+*rwlock*::
+Suite for evaluating futex calls for implementing userspace rwlocks.
+
 SEE ALSO
 --------
 linkperf:perf[1]
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index b0632df..a7e2037 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -37,6 +37,7 @@
 /* pi futexes */
 int bench_futex_lock_pi(int argc, const char **argv, const char *prefix);
 int bench_futex_mutex(int argc, const char **argv, const char *prefix);
+int bench_futex_rwlock(int argc, const char **argv, const char *prefix);
 
 #define BENCH_FORMAT_DEFAULT_STR	"default"
 #define BENCH_FORMAT_DEFAULT		0
diff --git a/tools/perf/bench/futex-locks.c b/tools/perf/bench/futex-locks.c
index 02254c6..88f54c1 100644
--- a/tools/perf/bench/futex-locks.c
+++ b/tools/perf/bench/futex-locks.c
@@ -10,6 +10,10 @@
  * unlock functions are written to implenment a mutex lock using the
  * wait-wake (2 versions) and PI futexes respectively. These functions serve
  * as the basis for measuring the locking throughput.
+ *
+ * Two sets of simple reader/writer lock and unlock functions are also
+ * implemented using the wait-wake futexes and the Glibc rwlock
+ * respectively, for performance measurement purposes.
  */
 
 #include <pthread.h>
@@ -21,11 +25,13 @@
 #include <subcmd/parse-options.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <asm/byteorder.h>
 #include <errno.h>
 #include "bench.h"
 #include "futex.h"
 
 #include <err.h>
+#include <limits.h>
 #include <stdlib.h>
 #include <sys/time.h>
 
@@ -42,14 +48,21 @@
  */
 enum {
 	STAT_OPS,	/* # of exclusive locking operations	*/
+	STAT_SOPS,	/* # of shared locking operations	*/
 	STAT_LOCKS,	/* # of exclusive lock slowpath count	*/
 	STAT_UNLOCKS,	/* # of exclusive unlock slowpath count	*/
 	STAT_SLEEPS,	/* # of exclusive lock sleeps		*/
+	STAT_SLOCKS,	/* # of shared lock slowpath count	*/
+	STAT_SUNLOCKS,	/* # of shared unlock slowpath count	*/
+	STAT_SSLEEPS,	/* # of shared lock sleeps		*/
 	STAT_EAGAINS,	/* # of EAGAIN errors			*/
 	STAT_WAKEUPS,	/* # of wakeups (unlock return)		*/
 	STAT_TIMEOUTS,	/* # of exclusive lock timeouts		*/
 	STAT_LOCKERRS,	/* # of exclusive lock errors		*/
 	STAT_UNLKERRS,	/* # of exclusive unlock errors		*/
+	STAT_STIMEOUTS,	/* # of shared lock timeouts		*/
+	STAT_SLOCKERRS,	/* # of shared lock errors		*/
+	STAT_SUNLKERRS,	/* # of shared unlock errors		*/
 	STAT_NUM	/* Total # of statistical count		*/
 };
 
@@ -59,6 +72,8 @@ enum {
 enum {
 	TIME_LOCK,	/* Total exclusive lock syscall time	*/
 	TIME_UNLK,	/* Total exclusive unlock syscall time	*/
+	TIME_SLOCK,	/* Total shared lock syscall time	*/
+	TIME_SUNLK,	/* Total shared unlock syscall time	*/
 	TIME_NUM,
 };
 
@@ -104,14 +119,30 @@ struct worker {
 static unsigned int threads_stopping;
 static struct stats throughput_stats;
 static lock_fn_t mutex_lock_fn;
+static lock_fn_t read_lock_fn;
+static lock_fn_t write_lock_fn;
 static unlock_fn_t mutex_unlock_fn;
+static unlock_fn_t read_unlock_fn;
+static unlock_fn_t write_unlock_fn;
 
 /*
- * Glibc mutex
+ * Glibc mutex and rwlock
  */
 static pthread_mutex_t __cacheline_aligned mutex;
 static pthread_mutexattr_t mutex_attr;
+static pthread_rwlock_t __cacheline_aligned rwlock;
+static pthread_rwlockattr_t rwlock_attr;
 static bool mutex_inited, mutex_attr_inited;
+static bool rwlock_inited, rwlock_attr_inited;
+
+/*
+ * Global rwlock reader batch size statistics.
+ */
+static struct __cacheline_aligned {
+	u32 readers;		/* # of readers in a batch */
+	u32 readers_max;
+	u32 batches;
+} reader_stat;
 
 /*
  * Compute the syscall time in ns.
@@ -175,6 +206,19 @@ static inline void stat_inc(int tid __maybe_unused, int item __maybe_unused)
 #endif
 
 /*
+ * For rwlock, the default is to have each thread act as both reader and
+ * writer. The use of the -x option will force the use of separate threads
+ * for readers and writers. The numbers of reader and writer threads are
+ * determined by the reader percentage and the total number of threads used,
+ * so the actual ratio of reader to writer operations may not be close
+ * to the given reader percentage.
+ */
+static bool xthread;
+static bool pwriter;			/* Prefer writer flag */
+static int rthread_threshold = -1;	/* (tid < threshold) => reader */
+static unsigned int rpercent = 50;	/* Reader percentage */
+
+/*
  * The latency values within a lock critical section (load) and between locking
  * operations is in term of the number of cpu_relax() calls that are being
  * issued.
@@ -198,6 +242,28 @@ static inline void stat_inc(int tid __maybe_unused, int item __maybe_unused)
 	NULL
 };
 
+static const struct option rwlock_options[] = {
+	OPT_INTEGER ('d', "locklat",	&locklat,  "Specify inter-locking latency (default = 1)"),
+	OPT_STRING  ('f', "ftype",	&ftype,    "type", "Specify futex type: WW, GC, all (default)"),
+	OPT_INTEGER ('l', "loadlat",	&loadlat,  "Specify load latency (default = 1)"),
+	OPT_UINTEGER('R', "read-%",	&rpercent, "Specify reader percentage (default 50%)"),
+	OPT_UINTEGER('r', "runtime",	&nsecs,    "Specify runtime (in seconds, default = 10s)"),
+	OPT_BOOLEAN ('S', "shared",	&fshared,  "Use shared futexes instead of private ones"),
+	OPT_BOOLEAN ('s', "timestat",	&timestat, "Track lock/unlock syscall times"),
+	OPT_UINTEGER('T', "timeout",	&timeout,  "Specify timeout value (in us, default = no timeout)"),
+	OPT_UINTEGER('t', "threads",	&nthreads, "Specify number of threads, default = # of CPUs"),
+	OPT_BOOLEAN ('v', "verbose",	&verbose,  "Verbose mode: display thread-level details"),
+	OPT_BOOLEAN ('W', "prefer-wr",	&pwriter,  "Prefer writers instead of readers"),
+	OPT_INTEGER ('w', "wait-ratio", &wratio,   "Specify <n>/1024 of load is 1us sleep, default = 0"),
+	OPT_BOOLEAN ('x', "xthread",	&xthread,  "Use separate reader/writer threads"),
+	OPT_END()
+};
+
+static const char * const bench_futex_rwlock_usage[] = {
+	"perf bench futex rwlock <options>",
+	NULL
+};
+
 /*
  * GCC atomic builtins are only available on gcc 4.7 and higher.
  */
@@ -456,16 +522,347 @@ static void gc_mutex_unlock(futex_t *futex __maybe_unused,
 	pthread_mutex_unlock(&mutex);
 }
 
+/**********************[ RWLOCK lock/unlock functions ]********************/
+
+/*
+ * Wait-wake futex reader/writer lock/unlock functions
+ *
+ * This implementation is based on the reader-preferring futex eventcount
+ * rwlocks posted on http://locklessinc.com/articles/sleeping_rwlocks with
+ * some modification. However, it does not satisfy POSIX mutex destruction
+ * requirements and so cannot be destroyed (memory freed) after being used.
+ *
+ * It is assumed that the passed-in futex has sufficient trailing space to
+ * be used by the bigger reader/writer lock structure.
+ */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define LSB(field)	unsigned char field
+#else
+#define LSB(field)	struct {			\
+				unsigned char __pad[3];	\
+				unsigned char field;	\
+			}
+#endif
+
+#define RW_WLOCKED	(1U << 0)
+#define RW_READER	(1U << 8)
+#define RW_EC_CONTEND	(1U << 0)
+#define RW_EC_INC	(1U << 8)
+
+struct rwlock {
+	/*
+	 * Bits 0-7 : writer lock
+	 * Bits 8-31: reader count
+	 */
+	union {
+		futex_t val;
+		LSB(wlocked);
+	} lock;
+
+	/* Writer event count */
+	union {
+		futex_t val;
+		LSB(contend);
+	} write_ec;
+
+	/* Reader event count */
+	union {
+		futex_t val;
+		LSB(contend);
+	} read_ec;
+};
+
+static struct rwlock __cacheline_aligned rwfutex;
+
+/*
+ * Reader preferring rwlock functions
+ */
+static void ww_write_lock(futex_t *futex __maybe_unused, int tid)
+{
+	struct rwlock *rw = &rwfutex;
+	bool slowpath = false;
+
+	for (;;) {
+		futex_t ec = rw->write_ec.val | RW_EC_CONTEND;
+		futex_t val = 0;
+		int ret;
+
+		/* Set the write lock if there is no reader */
+		if (atomic_cmpxchg_acquire(&rw->lock.val, &val, RW_WLOCKED))
+			return;
+
+		/*
+		 * Make sure that lock.val is read before setting
+		 * write_ec.contend.
+		 */
+		smp_store_release(&rw->write_ec.contend, 1);
+
+		FUTEX_CALL(futex_wait, TIME_LOCK,
+			   &rw->write_ec.val, ec, ptospec, flags);
+		if (!slowpath) {
+			stat_inc(tid, STAT_LOCKS);
+			slowpath = true;
+		}
+		if (ret < 0) {
+			if (errno == EAGAIN)
+				stat_inc(tid, STAT_EAGAINS);
+			else if (errno == ETIMEDOUT)
+				stat_inc(tid, STAT_TIMEOUTS);
+			else
+				stat_inc(tid, STAT_LOCKERRS);
+		}
+
+		/* Other writers may exist */
+		rw->write_ec.contend = 1;
+	}
+}
+
+static void ww_write_unlock(futex_t *futex __maybe_unused, int tid)
+{
+	struct rwlock *rw = &rwfutex;
+	bool slowpath = false;
+	int ret;
+
+	smp_store_release(&rw->lock.wlocked, 0);	/* Unlock */
+
+	/* Wake all the readers */
+	atomic_add_return(&rw->read_ec.val, RW_EC_INC);
+	if (atomic_xchg_relaxed(&rw->read_ec.contend, 0)) {
+		FUTEX_CALL(futex_wake, TIME_UNLK,
+			   &rw->read_ec.val, INT_MAX, flags);
+		stat_inc(tid, STAT_UNLOCKS);
+		slowpath = true;
+		if (ret < 0)
+			stat_inc(tid, STAT_UNLKERRS);
+		else
+			stat_add(tid, STAT_WAKEUPS, ret);
+		if (ret > 0)
+			return;
+	}
+
+	/* Wake a writer */
+	atomic_add_acquire(&rw->write_ec.val, RW_EC_INC);
+	if (atomic_xchg_relaxed(&rw->write_ec.contend, 0)) {
+		FUTEX_CALL(futex_wake, TIME_UNLK,
+			   &rw->write_ec.val, 1, flags);
+		if (!slowpath)
+			stat_inc(tid, STAT_UNLOCKS);
+		if (ret < 0)
+			stat_inc(tid, STAT_UNLKERRS);
+		else
+			stat_add(tid, STAT_WAKEUPS, ret);
+	}
+}
+
+static void ww_read_lock(futex_t *futex __maybe_unused, int tid)
+{
+	struct rwlock *rw = &rwfutex;
+	futex_t ec = rw->read_ec.val, state;
+	bool slowpath = false;
+	int ret;
+
+	state = atomic_add_acquire(&rw->lock.val, RW_READER);
+
+	while (state & RW_WLOCKED) {
+		ec |= RW_EC_CONTEND;
+		smp_store_release(&rw->read_ec.contend, 1);
+
+		/* Sleep until no longer held by a writer */
+		FUTEX_CALL(futex_wait, TIME_SLOCK,
+			   &rw->read_ec.val, ec, ptospec, flags);
+		if (!slowpath) {
+			stat_inc(tid, STAT_SLOCKS);
+			slowpath = true;
+		}
+		if (ret < 0) {
+			if (errno == EAGAIN)
+				stat_inc(tid, STAT_EAGAINS);
+			else if (errno == ETIMEDOUT)
+				stat_inc(tid, STAT_STIMEOUTS);
+			else
+				stat_inc(tid, STAT_SLOCKERRS);
+		}
+
+		/*
+		 * read_ec.val should be read before lock.val.
+		 */
+		ec = smp_load_acquire(&rw->read_ec.val);
+		state = rw->lock.val;
+	}
+}
+
+static void ww_read_unlock(futex_t *futex __maybe_unused, int tid)
+{
+	struct rwlock *rw = &rwfutex;
+	futex_t state;
+	int ret;
+
+	/* Read unlock */
+	state = atomic_add_release(&rw->lock.val, -RW_READER);
+
+	/* Other readers there, don't do anything */
+	if (state >> 8)
+		return;
+
+	/* We may need to wake up a writer */
+	atomic_add_acquire(&rw->write_ec.val, RW_EC_INC);
+	if (atomic_xchg_relaxed(&rw->write_ec.contend, 0)) {
+		FUTEX_CALL(futex_wake, TIME_SUNLK, &rw->write_ec.val, 1, flags);
+		stat_inc(tid, STAT_SUNLOCKS);
+		if (ret < 0)
+			stat_inc(tid, STAT_SUNLKERRS);
+		else
+			stat_add(tid, STAT_WAKEUPS, ret);
+	}
+}
+
+/*
+ * Writer preferring rwlock functions
+ */
+#define ww2_write_lock	ww_write_lock
+#define ww2_read_unlock	ww_read_unlock
+
+static void ww2_write_unlock(futex_t *futex __maybe_unused, int tid)
+{
+	struct rwlock *rw = &rwfutex;
+	bool slowpath = false;
+	int ret;
+
+	smp_store_release(&rw->lock.wlocked, 0);	/* Unlock */
+
+	/* Wake a writer */
+	atomic_add_return(&rw->write_ec.val, RW_EC_INC);
+	if (atomic_xchg_relaxed(&rw->write_ec.contend, 0)) {
+		FUTEX_CALL(futex_wake, TIME_UNLK, &rw->write_ec.val, 1, flags);
+		stat_inc(tid, STAT_UNLOCKS);
+		slowpath = true;
+		if (ret < 0)
+			stat_inc(tid, STAT_UNLKERRS);
+		else
+			stat_add(tid, STAT_WAKEUPS, ret);
+		if (ret > 0)
+			return;
+	}
+
+	/* Wake all the readers */
+	atomic_add_acquire(&rw->read_ec.val, RW_EC_INC);
+	if (atomic_xchg_relaxed(&rw->read_ec.contend, 0)) {
+		FUTEX_CALL(futex_wake, TIME_UNLK,
+			   &rw->read_ec.val, INT_MAX, flags);
+		if (!slowpath)
+			stat_inc(tid, STAT_UNLOCKS);
+		if (ret < 0)
+			stat_inc(tid, STAT_UNLKERRS);
+		else
+			stat_add(tid, STAT_WAKEUPS, ret);
+	}
+}
+
+static void ww2_read_lock(futex_t *futex __maybe_unused, int tid)
+{
+	struct rwlock *rw = &rwfutex;
+	bool slowpath = false;
+
+	for (;;) {
+		futex_t ec = rw->read_ec.val | RW_EC_CONTEND;
+		futex_t state;
+		int ret;
+
+		if (!rw->write_ec.contend) {
+			state = atomic_add_acquire(&rw->lock.val, RW_READER);
+
+			if (!(state & RW_WLOCKED))
+				return;
+
+			/* Unlock */
+			state = atomic_add_release(&rw->lock.val, -RW_READER);
+		} else {
+			atomic_add_acquire(&rw->write_ec.val, RW_EC_INC);
+			if (atomic_xchg_relaxed(&rw->write_ec.contend, 0)) {
+				/*  Wake a writer, and then try again */
+				FUTEX_CALL(futex_wake, TIME_SUNLK,
+					   &rw->write_ec.val, 1, flags);
+				stat_inc(tid, STAT_SUNLOCKS);
+				if (ret < 0)
+					stat_inc(tid, STAT_SUNLKERRS);
+				else
+					stat_add(tid, STAT_WAKEUPS, ret);
+				continue;
+			}
+		}
+
+		smp_store_release(&rw->read_ec.contend, 1);
+		if (rw->read_ec.val != ec)
+			continue;
+
+		/* Sleep until no longer held by a writer */
+		FUTEX_CALL(futex_wait, TIME_SLOCK,
+			   &rw->read_ec.val, ec, ptospec, flags);
+		if (!slowpath) {
+			stat_inc(tid, STAT_SLOCKS);
+			slowpath = true;
+		}
+		if (ret < 0) {
+			if (errno == EAGAIN)
+				stat_inc(tid, STAT_EAGAINS);
+			else if (errno == ETIMEDOUT)
+				stat_inc(tid, STAT_STIMEOUTS);
+			else
+				stat_inc(tid, STAT_SLOCKERRS);
+		}
+	}
+}
+
+/*
+ * Glibc read/write lock
+ */
+static void gc_write_lock(futex_t *futex __maybe_unused,
+			  int tid __maybe_unused)
+{
+	pthread_rwlock_wrlock(&rwlock);
+}
+
+static void gc_write_unlock(futex_t *futex __maybe_unused,
+			    int tid __maybe_unused)
+{
+	pthread_rwlock_unlock(&rwlock);
+}
+
+static void gc_read_lock(futex_t *futex __maybe_unused,
+			 int tid __maybe_unused)
+{
+	pthread_rwlock_rdlock(&rwlock);
+}
+
+static void gc_read_unlock(futex_t *futex __maybe_unused,
+			   int tid __maybe_unused)
+{
+	pthread_rwlock_unlock(&rwlock);
+}
+
 /**************************************************************************/
 
 /*
  * Load function
  */
-static inline void load(int tid)
+static inline void load(int tid, bool reader)
 {
 	int n = loadlat;
 
 	/*
+	 * Update the reader batch statistics.
+	 * Because of races, the readers_max number may not be accurate.
+	 */
+	if (reader) {
+		reader_stat.readers++;
+	} else if (reader_stat.readers) {
+		if (reader_stat.readers > reader_stat.readers_max)
+			reader_stat.readers_max = reader_stat.readers;
+		reader_stat.readers = 0;
+		reader_stat.batches++;
+	}
+
+	/*
 	 * Optionally does a 1us sleep instead if wratio is defined and
 	 * is within bound.
 	 */
@@ -518,7 +915,7 @@ static void *mutex_workerfn(void *arg)
 
 	do {
 		lock_fn(w->futex, tid);
-		load(tid);
+		load(tid, false);
 		unlock_fn(w->futex, tid);
 		w->stats[STAT_OPS]++;	/* One more locking operation */
 		csdelay();
@@ -530,6 +927,77 @@ static void *mutex_workerfn(void *arg)
 	return NULL;
 }
 
+static void *rwlock_workerfn(void *arg)
+{
+	long tid = (long)arg;
+	struct worker *w = &worker[tid];
+	lock_fn_t rlock_fn = read_lock_fn;
+	lock_fn_t wlock_fn = write_lock_fn;
+	unlock_fn_t runlock_fn = read_unlock_fn;
+	unlock_fn_t wunlock_fn = write_unlock_fn;
+
+	thread_id = gettid();
+	counter = 0;
+
+	atomic_dec_return(&threads_starting);
+
+	/*
+	 * Busy wait until asked to start
+	 */
+	while (!worker_start)
+		cpu_relax();
+
+	if (rthread_threshold >= 0) {
+		if (tid < rthread_threshold) {
+			do {
+				rlock_fn(w->futex, tid);
+				load(tid, true);
+				runlock_fn(w->futex, tid);
+				w->stats[STAT_SOPS]++;
+				csdelay();
+			} while (!done);
+		} else {
+			do {
+				wlock_fn(w->futex, tid);
+				load(tid, false);
+				wunlock_fn(w->futex, tid);
+				w->stats[STAT_OPS]++;
+				csdelay();
+			} while (!done);
+		}
+		goto out;
+	}
+
+	while (!done) {
+		int rcnt = rpercent;
+		int wcnt = 100 - rcnt;
+
+		do {
+			if (wcnt) {
+				wlock_fn(w->futex, tid);
+				load(tid, false);
+				wunlock_fn(w->futex, tid);
+				w->stats[STAT_OPS]++;
+				wcnt--;
+				csdelay();
+			}
+			if (rcnt) {
+				rlock_fn(w->futex, tid);
+				load(tid, true);
+				runlock_fn(w->futex, tid);
+				w->stats[STAT_SOPS]++;
+				rcnt--;
+				csdelay();
+			}
+		}  while (!done && (rcnt + wcnt));
+	}
+out:
+	if (verbose)
+		printf("[thread %3ld (%d)] exited.\n", tid, thread_id);
+	atomic_inc_return(&threads_stopping);
+	return NULL;
+}
+
 static void create_threads(struct worker *w, pthread_attr_t *thread_attr,
 			   void *(*workerfn)(void *arg), long tid)
 {
@@ -588,6 +1056,50 @@ static int futex_mutex_type(const char **ptype)
 	return 0;
 }
 
+static int futex_rwlock_type(const char **ptype)
+{
+	const char *type = *ptype;
+
+	if (!strcasecmp(type, "WW")) {
+		*ptype = "WW";
+		pfutex = &rwfutex.lock.val;
+		if (pwriter) {
+			read_lock_fn = ww2_read_lock;
+			read_unlock_fn = ww2_read_unlock;
+			write_lock_fn = ww2_write_lock;
+			write_unlock_fn = ww2_write_unlock;
+		} else {
+			read_lock_fn = ww_read_lock;
+			read_unlock_fn = ww_read_unlock;
+			write_lock_fn = ww_write_lock;
+			write_unlock_fn = ww_write_unlock;
+		}
+	} else if (!strcasecmp(type, "GC")) {
+		pthread_rwlockattr_t *attr = NULL;
+
+		*ptype = "GC";
+		read_lock_fn = gc_read_lock;
+		read_unlock_fn = gc_read_unlock;
+		write_lock_fn = gc_write_lock;
+		write_unlock_fn = gc_write_unlock;
+		if (pwriter || fshared) {
+			pthread_rwlockattr_init(&rwlock_attr);
+			attr = &rwlock_attr;
+			rwlock_attr_inited = true;
+			if (fshared)
+				pthread_rwlockattr_setpshared(attr, true);
+			if (pwriter)
+				pthread_rwlockattr_setkind_np(attr,
+				  PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
+		}
+		pthread_rwlock_init(&rwlock, attr);
+		rwlock_inited = true;
+	} else {
+		return -1;
+	}
+	return 0;
+}
+
 static void futex_test_driver(const char *futex_type,
 			      int (*proc_type)(const char **ptype),
 			      void *(*workerfn)(void *arg))
@@ -604,14 +1116,21 @@ static void futex_test_driver(const char *futex_type,
 	 */
 	const char *desc[STAT_NUM] = {
 		[STAT_OPS]	 = "Total exclusive locking ops",
+		[STAT_SOPS]	 = "Total shared locking ops",
 		[STAT_LOCKS]	 = "Exclusive lock slowpaths",
 		[STAT_UNLOCKS]	 = "Exclusive unlock slowpaths",
 		[STAT_SLEEPS]	 = "Exclusive lock sleeps",
+		[STAT_SLOCKS]	 = "Shared lock slowpaths",
+		[STAT_SUNLOCKS]	 = "Shared unlock slowpaths",
+		[STAT_SSLEEPS]	 = "Shared lock sleeps",
 		[STAT_WAKEUPS]	 = "Process wakeups",
 		[STAT_EAGAINS]	 = "EAGAIN lock errors",
 		[STAT_TIMEOUTS]	 = "Exclusive lock timeouts",
+		[STAT_STIMEOUTS] = "Shared lock timeouts",
 		[STAT_LOCKERRS]  = "\nExclusive lock errors",
 		[STAT_UNLKERRS]  = "\nExclusive unlock errors",
+		[STAT_SLOCKERRS] = "\nShared lock errors",
+		[STAT_SUNLKERRS] = "\nShared unlock errors",
 	};
 
 	if (exit_now)
@@ -623,9 +1142,18 @@ static void futex_test_driver(const char *futex_type,
 	}
 
 	printf("\n=====================================\n");
-	printf("[PID %d]: %d threads doing %s futex lockings (load=%d) for %d secs.\n\n",
+	printf("[PID %d]: %d threads doing %s futex lockings (load=%d) for %d secs.\n",
 	       getpid(), nthreads, futex_type, loadlat, nsecs);
 
+	if (xthread) {
+		/*
+		 * Compute numbers of reader and writer threads.
+		 */
+		rthread_threshold = (rpercent * nthreads + 50)/100;
+		printf("\t\t{%d reader threads, %d writer threads}\n",
+			rthread_threshold, nthreads - rthread_threshold);
+	}
+	printf("\n");
 	init_stats(&throughput_stats);
 
 	*pfutex = 0;
@@ -695,11 +1223,15 @@ static void futex_test_driver(const char *futex_type,
 		/*
 		 * Get a rounded estimate of the # of locking ops/sec.
 		 */
-		u64 tp = (u64)worker[i].stats[STAT_OPS] * 1000000 / us;
+		u64 tp = (u64)(worker[i].stats[STAT_OPS] +
+			       worker[i].stats[STAT_SOPS]) * 1000000 / us;
 
 		for (j = 0; j < STAT_NUM; j++)
 			total.stats[j] += worker[i].stats[j];
 
+		for (j = 0; j < TIME_NUM; j++)
+			total.times[j] += worker[i].times[j];
+
 		update_stats(&throughput_stats, tp);
 		if (verbose)
 			printf("[thread %3d] futex: %p [ %'ld ops/sec ]\n",
@@ -723,21 +1255,50 @@ static void futex_test_driver(const char *futex_type,
 		if (total.stats[STAT_UNLOCKS])
 			printf("Avg exclusive unlock syscall = %'ldns\n",
 			    total.times[TIME_UNLK]/total.stats[STAT_UNLOCKS]);
+		if (total.stats[STAT_SLOCKS])
+			printf("Avg shared lock syscall      = %'ldns\n",
+			    total.times[TIME_SLOCK]/total.stats[STAT_SLOCKS]);
+		if (total.stats[STAT_SUNLOCKS])
+			printf("Avg shared unlock syscall    = %'ldns\n",
+			    total.times[TIME_SUNLK]/total.stats[STAT_SUNLOCKS]);
 	}
 
 	printf("\nPercentages:\n");
 	if (total.stats[STAT_LOCKS])
 		printf("Exclusive lock slowpaths     = %.1f%%\n",
 			stat_percent(&total, STAT_LOCKS, STAT_OPS));
+	if (total.stats[STAT_SLOCKS])
+		printf("Shared lock slowpaths        = %.1f%%\n",
+			stat_percent(&total, STAT_SLOCKS, STAT_SOPS));
 	if (total.stats[STAT_UNLOCKS])
 		printf("Exclusive unlock slowpaths   = %.1f%%\n",
 			stat_percent(&total, STAT_UNLOCKS, STAT_OPS));
+	if (total.stats[STAT_SUNLOCKS])
+		printf("Shared unlock slowpaths      = %.1f%%\n",
+			stat_percent(&total, STAT_SUNLOCKS, STAT_SOPS));
 	if (total.stats[STAT_EAGAINS])
 		printf("EAGAIN lock errors           = %.1f%%\n",
-			stat_percent(&total, STAT_EAGAINS, STAT_LOCKS));
+			(double)total.stats[STAT_EAGAINS] * 100 /
+			(total.stats[STAT_LOCKS] + total.stats[STAT_SLOCKS]));
 	if (total.stats[STAT_WAKEUPS])
 		printf("Process wakeups              = %.1f%%\n",
-			stat_percent(&total, STAT_WAKEUPS, STAT_UNLOCKS));
+			(double)total.stats[STAT_WAKEUPS] * 100 /
+			(total.stats[STAT_UNLOCKS] +
+			 total.stats[STAT_SUNLOCKS]));
+	if (xthread)
+		printf("Reader operations            = %.1f%%\n",
+			(double)total.stats[STAT_SOPS] * 100 /
+			(total.stats[STAT_OPS] + total.stats[STAT_SOPS]));
+
+	if (reader_stat.batches) {
+		printf("\nShared Lock Batch Stats:\n");
+		printf("Total shared lock batches    = %'d\n",
+			reader_stat.batches);
+		printf("Avg batch size               = %.1f\n",
+			(double)total.stats[STAT_SOPS]/reader_stat.batches);
+		printf("Max batch size               = %'d\n",
+			reader_stat.readers_max);
+	}
 
 	printf("\nPer-thread Locking Rates:\n");
 	printf("Avg = %'d ops/sec (+- %.2f%%)\n", (int)(avg + 0.5),
@@ -745,34 +1306,63 @@ static void futex_test_driver(const char *futex_type,
 	printf("Min = %'d ops/sec\n", (int)throughput_stats.min);
 	printf("Max = %'d ops/sec\n", (int)throughput_stats.max);
 
+	/*
+	 * Compute the average reader and writer locking operation rates
+	 * with separate reader and writer threads.
+	 */
+	if (xthread) {
+		u64 tp;
+
+		/* Reader stats */
+		memset(&throughput_stats, 0, sizeof(throughput_stats));
+		for (i = 0, tp = 0; i < rthread_threshold; i++) {
+			tp = (u64)worker[i].stats[STAT_SOPS] * 1000000 / us;
+			update_stats(&throughput_stats, tp);
+		}
+		avg    = avg_stats(&throughput_stats);
+		stddev = stddev_stats(&throughput_stats);
+		printf("\nReader avg = %'d ops/sec (+- %.2f%%)\n",
+			(int)(avg + 0.5), rel_stddev_stats(stddev, avg));
+
+		/* Writer stats */
+		memset(&throughput_stats, 0, sizeof(throughput_stats));
+		for (tp = 0; i < (int)nthreads; i++) {
+			tp = (u64)worker[i].stats[STAT_OPS] * 1000000 / us;
+			update_stats(&throughput_stats, tp);
+		}
+		avg    = avg_stats(&throughput_stats);
+		stddev = stddev_stats(&throughput_stats);
+		printf("Writer avg = %'d ops/sec (+- %.2f%%)\n",
+			(int)(avg + 0.5), rel_stddev_stats(stddev, avg));
+	}
+
 	if (*pfutex != 0)
 		printf("\nResidual futex value = 0x%x\n", *pfutex);
 
-	/* Clear the workers area */
+	/* Clear the workers area & reader statistics */
 	memset(worker, 0, sizeof(*worker) * nthreads);
+	memset(&reader_stat, 0, sizeof(reader_stat));
 
 	if (mutex_inited)
 		pthread_mutex_destroy(&mutex);
 	if (mutex_attr_inited)
 		pthread_mutexattr_destroy(&mutex_attr);
+	if (rwlock_inited)
+		pthread_rwlock_destroy(&rwlock);
+	if (rwlock_attr_inited)
+		pthread_rwlockattr_destroy(&rwlock_attr);
 	mutex_inited  = mutex_attr_inited  = false;
+	rwlock_inited = rwlock_attr_inited = false;
 }
 
-int bench_futex_mutex(int argc, const char **argv,
-		      const char *prefix __maybe_unused)
+static void bench_futex_common(struct sigaction *act)
 {
-	struct sigaction act;
-
-	argc = parse_options(argc, argv, mutex_options,
-			     bench_futex_mutex_usage, 0);
-	if (argc)
-		goto err;
 
 	ncpus = sysconf(_SC_NPROCESSORS_ONLN);
 
-	sigfillset(&act.sa_mask);
-	act.sa_sigaction = toggle_done;
-	sigaction(SIGINT, &act, NULL);
+	sigfillset(&act->sa_mask);
+	act->sa_sigaction = toggle_done;
+	sigaction(SIGINT, act, NULL);
 
 	if (!nthreads)
 		nthreads = ncpus;
@@ -799,6 +1389,19 @@ int bench_futex_mutex(int argc, const char **argv,
 		tospec.tv_nsec = (timeout % 1000000) * 1000;
 		ptospec        = &tospec;
 	}
+}
+
+int bench_futex_mutex(int argc, const char **argv,
+		      const char *prefix __maybe_unused)
+{
+	struct sigaction act;
+
+	argc = parse_options(argc, argv, mutex_options,
+			     bench_futex_mutex_usage, 0);
+	if (argc)
+		goto err;
+
+	bench_futex_common(&act);
 
 	if (!ftype || !strcmp(ftype, "all")) {
 		futex_test_driver("WW", futex_mutex_type, mutex_workerfn);
@@ -813,3 +1416,30 @@ int bench_futex_mutex(int argc, const char **argv,
 	usage_with_options(bench_futex_mutex_usage, mutex_options);
 	exit(EXIT_FAILURE);
 }
+
+int bench_futex_rwlock(int argc, const char **argv,
+		      const char *prefix __maybe_unused)
+{
+	struct sigaction act;
+
+	argc = parse_options(argc, argv, rwlock_options,
+			     bench_futex_rwlock_usage, 0);
+	if (argc)
+		goto err;
+
+	ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+	bench_futex_common(&act);
+
+	if (!ftype || !strcmp(ftype, "all")) {
+		futex_test_driver("WW", futex_rwlock_type, rwlock_workerfn);
+		futex_test_driver("GC", futex_rwlock_type, rwlock_workerfn);
+	} else {
+		futex_test_driver(ftype, futex_rwlock_type, rwlock_workerfn);
+	}
+	free(worker_alloc);
+	return 0;
+err:
+	usage_with_options(bench_futex_rwlock_usage, rwlock_options);
+	exit(EXIT_FAILURE);
+}
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index bf4418d..424a014 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -64,6 +64,7 @@ struct bench {
 	/* pi-futexes */
 	{ "lock-pi",	"Benchmark for futex lock_pi calls",            bench_futex_lock_pi	},
 	{ "mutex",	"Benchmark for mutex locks using futexes",	bench_futex_mutex	},
+	{ "rwlock",	"Benchmark for rwlocks using futexes",		bench_futex_rwlock	},
 	{ "all",	"Run all futex benchmarks",			NULL			},
 	{ NULL,		NULL,						NULL			}
 };
-- 
1.8.3.1
