Date:   Thu, 18 Jun 2020 09:09:28 -0700
From:   Jonathan Lemon <jonathan.lemon@...il.com>
To:     <netdev@...r.kernel.org>
CC:     <kernel-team@...com>, <axboe@...nel.dk>
Subject: [RFC PATCH 08/21] misc: add shqueue.h for prototyping

Shared queues between user and kernel space use their own private
structures, one per domain, for accessing the shared data area, but
both domains need to use the same queue functions.
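
For illustration, a minimal setup sketch (sq_attach() is a hypothetical
helper, not part of this patch): each domain fills in its private
struct shared_queue so that the pointers reference the one shared
shared_queue_map. 'entries' is assumed to be a power of two so that
'mask' works as an index mask:

	static void sq_attach(struct shared_queue *q,
			      struct shared_queue_map *map,
			      unsigned entries, unsigned elt_sz)
	{
		q->prod = &map->prod;
		q->cons = &map->cons;
		q->data = map->data;
		q->elt_sz = elt_sz;
		q->entries = entries;
		q->mask = entries - 1;		/* assumes power-of-two entries */
		q->cached_prod = *q->prod;
		q->cached_cons = *q->cons;
		q->map_ptr = map;		/* map_sz is set by the mapping path */
	}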

Rather than doing the 'right' thing and duplicating the file for
each domain, temporarily cheat for prototyping and use a single shared
file.

Signed-off-by: Jonathan Lemon <jonathan.lemon@...il.com>
---
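
For reviewers, a usage sketch with hypothetical sq_enqueue()/sq_drain()
helpers (not part of this patch, and assuming a queue attached as in the
sketch above): the producer reserves a slot, fills it, and publishes;
the consumer peeks, advances, and publishes its index when drained.

	static bool sq_enqueue(struct shared_queue *q, const void *src)
	{
		void *slot = sq_prod_reserve(q);

		if (!slot)
			return false;		/* ring full */
		memcpy(slot, src, q->elt_sz);
		sq_prod_submit(q);		/* store-release producer index */
		return true;
	}

	static unsigned sq_drain(struct shared_queue *q)
	{
		unsigned n = 0;
		void *elt;

		while ((elt = sq_cons_peek(q)) != NULL) {
			/* process q->elt_sz bytes at 'elt' here */
			sq_cons_advance(q);
			n++;
		}
		sq_cons_complete(q);		/* store-release consumer index */
		return n;
	}
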
 include/uapi/misc/shqueue.h | 205 ++++++++++++++++++++++++++++++++++++
 1 file changed, 205 insertions(+)
 create mode 100644 include/uapi/misc/shqueue.h

diff --git a/include/uapi/misc/shqueue.h b/include/uapi/misc/shqueue.h
new file mode 100644
index 000000000000..258b9db35dbd
--- /dev/null
+++ b/include/uapi/misc/shqueue.h
@@ -0,0 +1,205 @@
+#pragma once
+
+/* XXX
+ * This is not a user API, but it is placed here for prototyping, in
+ * order to avoid two nearly identical copies for user and kernel space.
+ */
+
+/* kernel only */
+struct shared_queue_map {
+	unsigned prod ____cacheline_aligned_in_smp;
+	unsigned cons ____cacheline_aligned_in_smp;
+	char data[] ____cacheline_aligned_in_smp;
+};
+
+/* user and kernel private copies - identical, so the sq_* functions are shared */
+struct shared_queue {
+	unsigned *prod;
+	unsigned *cons;
+	char *data;
+	unsigned elt_sz;
+	unsigned mask;
+	unsigned cached_prod;
+	unsigned cached_cons;
+	unsigned entries;
+
+	unsigned map_sz;
+	void *map_ptr;
+};
+
+/*
+ * See the documentation in tools/include/linux/ring_buffer.h.
+ * Using explicit smp_* barriers with *_ONCE accessors is an
+ * optimization over smp_load_acquire()/smp_store_release().
+ */
+
+static inline void __sq_load_acquire_cons(struct shared_queue *q)
+{
+	/* Refresh the local copy of the consumer (tail) index */
+	q->cached_cons = READ_ONCE(*q->cons);
+	/* A, matches D */
+}
+
+static inline void __sq_store_release_cons(struct shared_queue *q)
+{
+	smp_mb(); /* D, matches A */
+	WRITE_ONCE(*q->cons, q->cached_cons);
+}
+
+static inline void __sq_load_acquire_prod(struct shared_queue *q)
+{
+	/* Refresh the local copy of the producer (head) index */
+	q->cached_prod = READ_ONCE(*q->prod);
+	smp_rmb(); /* C, matches B */
+}
+
+static inline void __sq_store_release_prod(struct shared_queue *q)
+{
+	smp_wmb(); /* B, matches C */
+	WRITE_ONCE(*q->prod, q->cached_prod);
+}
+
+static inline void sq_cons_refresh(struct shared_queue *q)
+{
+	__sq_store_release_cons(q);
+	__sq_load_acquire_prod(q);
+}
+
+static inline bool sq_empty(struct shared_queue *q)
+{
+	return READ_ONCE(*q->prod) == READ_ONCE(*q->cons);
+}
+
+static inline bool sq_cons_empty(struct shared_queue *q)
+{
+	return q->cached_prod == q->cached_cons;
+}
+
+static inline unsigned __sq_cons_ready(struct shared_queue *q)
+{
+	return q->cached_prod - q->cached_cons;
+}
+
+static inline unsigned sq_cons_ready(struct shared_queue *q)
+{
+	if (sq_cons_empty(q))
+		__sq_load_acquire_prod(q);
+
+	return __sq_cons_ready(q);
+}
+
+static inline bool sq_cons_avail(struct shared_queue *q, unsigned count)
+{
+	if (count <= __sq_cons_ready(q))
+		return true;
+	__sq_load_acquire_prod(q);
+	return count <= __sq_cons_ready(q);
+}
+
+static inline void *sq_get_ptr(struct shared_queue *q, unsigned idx)
+{
+	return q->data + (idx & q->mask) * q->elt_sz;
+}
+
+static inline void sq_cons_complete(struct shared_queue *q)
+{
+	__sq_store_release_cons(q);
+}
+
+static inline void *sq_cons_peek(struct shared_queue *q)
+{
+	if (sq_cons_empty(q)) {
+		sq_cons_refresh(q);
+		if (sq_cons_empty(q))
+			return NULL;
+	}
+	return sq_get_ptr(q, q->cached_cons);
+}
+
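+/* Fill ptr[] with up to 'count' element pointers; consumption is not yet published. */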
+static inline unsigned
+sq_peek_batch(struct shared_queue *q, void **ptr, unsigned count)
+{
+	unsigned i, idx, ready;
+
+	ready = sq_cons_ready(q);
+	if (!ready)
+		return 0;
+
+	count = count > ready ? ready : count;
+
+	idx = q->cached_cons;
+	for (i = 0; i < count; i++)
+		ptr[i] = sq_get_ptr(q, idx++);
+
+	q->cached_cons += count;
+
+	return count;
+}
+
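+/* Like sq_peek_batch(), but also publishes the new consumer index. */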
+static inline unsigned
+sq_cons_batch(struct shared_queue *q, void **ptr, unsigned count)
+{
+	count = sq_peek_batch(q, ptr, count);
+	if (count)
+		sq_cons_complete(q);
+
+	return count;
+}
+
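+/* Consume one element locally; sq_cons_complete() publishes it. */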
+static inline void sq_cons_advance(struct shared_queue *q)
+{
+	q->cached_cons++;
+}
+
+static inline unsigned __sq_prod_space(struct shared_queue *q)
+{
+	return q->entries - (q->cached_prod - q->cached_cons);
+}
+
+static inline unsigned sq_prod_space(struct shared_queue *q)
+{
+	unsigned space;
+
+	space = __sq_prod_space(q);
+	if (!space) {
+		__sq_load_acquire_cons(q);
+		space = __sq_prod_space(q);
+	}
+	return space;
+}
+
+static inline bool sq_prod_avail(struct shared_queue *q, unsigned count)
+{
+	if (count <= __sq_prod_space(q))
+		return true;
+	__sq_load_acquire_cons(q);
+	return count <= __sq_prod_space(q);
+}
+
+static inline void *sq_prod_get_ptr(struct shared_queue *q)
+{
+	return sq_get_ptr(q, q->cached_prod++);
+}
+
+static inline void *sq_prod_reserve(struct shared_queue *q)
+{
+	if (!sq_prod_space(q))
+		return NULL;
+
+	return sq_prod_get_ptr(q);
+}
+
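+/* Publish all locally reserved elements to the consumer. */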
+static inline void sq_prod_submit(struct shared_queue *q)
+{
+	__sq_store_release_prod(q);
+}
-- 
2.24.1
