Message-Id: <20190522112550.31814-1-boris.brezillon@collabora.com>
Date:   Wed, 22 May 2019 13:25:50 +0200
From:   Boris Brezillon <boris.brezillon@...labora.com>
To:     linux-kernel@...r.kernel.org
Cc:     George Spelvin <lkml@....org>,
        Rasmus Villemoes <linux@...musvillemoes.dk>,
        Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Andrey Abramov <st5pub@...dex.ru>, kernel@...labora.com,
        Boris Brezillon <boris.brezillon@...labora.com>
Subject: [PATCH] lib/sort: Add the sort_r() variant

Some users might need extra context to compare two elements. This patch
adds sort_r(), which is similar to the qsort_r() variant of qsort().

Signed-off-by: Boris Brezillon <boris.brezillon@...labora.com>
---
Hello,

A few more details about this patch.

Even though I'm posting it as a standalone patch, I do intend to use it
in a real driver (a v4l2 driver); I just didn't want to have it buried
in a huge patch series.
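
To make the intended use a bit more concrete, here is a hypothetical
sketch (made-up names, not the actual driver code) of how a driver
could pass per-call context to the compare function through the new
priv argument:

/* Hypothetical sketch, not the actual v4l2 driver code. */
struct fmt_sort_ctx {
	u32 preferred_fourcc;
};

static int fmt_cmp(const void *a, const void *b, const void *priv)
{
	const struct fmt_sort_ctx *ctx = priv;
	const u32 *fa = a, *fb = b;

	/* Preferred format sorts first, the rest stay in numeric order. */
	if (*fa == *fb)
		return 0;
	if (*fa == ctx->preferred_fourcc)
		return -1;
	if (*fb == ctx->preferred_fourcc)
		return 1;
	return *fa < *fb ? -1 : 1;
}

static void sort_formats(u32 *fmts, size_t num, u32 preferred_fourcc)
{
	struct fmt_sort_ctx ctx = { .preferred_fourcc = preferred_fourcc };

	sort_r(fmts, num, sizeof(*fmts), fmt_cmp, NULL, &ctx);
}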

Note that sort() and sort_r() are now implemented as wrappers around
do_sort() so that most of the code can be shared. I initially went for
a solution that implemented sort() as a wrapper around sort_r() (which
then basically contained the do_sort() logic, minus the cmp_func arg),
but realized this added one extra indirect call (the compare-func
wrapper), and I know indirect calls are something people are actively
chasing down right now.
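
For reference, the discarded sort()-on-top-of-sort_r() approach looked
roughly like this (sketch, not the exact code I tried); the compare-func
wrapper is what introduces the extra indirect call on every comparison:

/* Sketch of the discarded approach, not the code from this patch. */
struct sort_cmp_wrapper {
	int (*cmp)(const void *, const void *);
};

static int cmp_wrapper(const void *a, const void *b, const void *priv)
{
	const struct sort_cmp_wrapper *w = priv;

	/* One more indirect call on every comparison. */
	return w->cmp(a, b);
}

void sort(void *base, size_t num, size_t size,
	  int (*cmp_func)(const void *, const void *),
	  void (*swap_func)(void *, void *, int size))
{
	struct sort_cmp_wrapper w = { .cmp = cmp_func };

	sort_r(base, num, size, cmp_wrapper, swap_func, &w);
}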

There's another option, but I'm pretty sure other people already
considered it and decided it was not a good idea because it would make
the code size grow: move the code to sort.h as inline funcs/macros so
that the compiler can optimize things out and replace the indirect
cmp_func() calls with direct ones. I just tried it, and it makes my .o
file grow by 576 bytes. Given that we currently have 122 users of this
function, that would make the kernel code grow by ~70k (that's kind of
a max estimate, since not all users will be compiled in).
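
To spell out the shape of that option (sketch only, heapsort body
elided): the sort logic would move to include/linux/sort.h as an
__always_inline helper, so every call site gets its own copy in which
the compiler can turn the indirect cmp_func() calls into direct ones:

/* Sketch of the "inline in sort.h" option; heapsort body elided. */
static __always_inline
void sort_inline(void *base, size_t num, size_t size,
		 int (*cmp_func)(const void *, const void *),
		 void (*swap_func)(void *, void *, int size))
{
	/*
	 * The full heapsort body would be duplicated here for every
	 * caller. With cmp_func known at each call site, the compiler
	 * can emit direct calls (or inline the comparison entirely),
	 * but each instantiation costs extra text (~576 bytes in my
	 * test).
	 */
}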

Please let me know if you think we shouldn't expose the sort_r() func
and I'll just implement a private version in my driver.

Regards,

Boris
---
 include/linux/sort.h |  5 +++
 lib/sort.c           | 85 ++++++++++++++++++++++++++++++++------------
 2 files changed, 67 insertions(+), 23 deletions(-)

diff --git a/include/linux/sort.h b/include/linux/sort.h
index 2b99a5dd073d..61b96d0ebc44 100644
--- a/include/linux/sort.h
+++ b/include/linux/sort.h
@@ -4,6 +4,11 @@
 
 #include <linux/types.h>
 
+void sort_r(void *base, size_t num, size_t size,
+	    int (*cmp)(const void *, const void *, const void *),
+	    void (*swap)(void *, void *, int),
+	    const void *priv);
+
 void sort(void *base, size_t num, size_t size,
 	  int (*cmp)(const void *, const void *),
 	  void (*swap)(void *, void *, int));
diff --git a/lib/sort.c b/lib/sort.c
index 50855ea8c262..5db5602e52ee 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -167,27 +167,21 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
 	return i / 2;
 }
 
-/**
- * sort - sort an array of elements
- * @base: pointer to data to sort
- * @num: number of elements
- * @size: size of each element
- * @cmp_func: pointer to comparison function
- * @swap_func: pointer to swap function or NULL
- *
- * This function does a heapsort on the given array.  You may provide
- * a swap_func function if you need to do something more than a memory
- * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
- * avoids a slow retpoline and so is significantly faster.
- *
- * Sorting time is O(n log n) both on average and worst-case. While
- * quicksort is slightly faster on average, it suffers from exploitable
- * O(n*n) worst-case behavior and extra memory requirements that make
- * it less suitable for kernel use.
- */
-void sort(void *base, size_t num, size_t size,
-	  int (*cmp_func)(const void *, const void *),
-	  void (*swap_func)(void *, void *, int size))
+static int do_cmp(int (*cmp_func_r)(const void *, const void *, const void *),
+		  int (*cmp_func)(const void *, const void *),
+		  const void *a, const void *b, const void *priv)
+{
+	if (cmp_func)
+		return cmp_func(a, b);
+
+	return cmp_func_r(a, b, priv);
+}
+
+static void do_sort(void *base, size_t num, size_t size,
+		    int (*cmp_func_r)(const void *, const void *, const void *),
+		    int (*cmp_func)(const void *, const void *),
+		    void (*swap_func)(void *, void *, int size),
+		    const void *priv)
 {
 	/* pre-scale counters for performance */
 	size_t n = num * size, a = (num/2) * size;
@@ -235,12 +229,12 @@ void sort(void *base, size_t num, size_t size,
 		 * average, 3/4 worst-case.)
 		 */
 		for (b = a; c = 2*b + size, (d = c + size) < n;)
-			b = cmp_func(base + c, base + d) >= 0 ? c : d;
+			b = do_cmp(cmp_func_r, cmp_func, base + c, base + d, priv) >= 0 ? c : d;
 		if (d == n)	/* Special case last leaf with no sibling */
 			b = c;
 
 		/* Now backtrack from "b" to the correct location for "a" */
-		while (b != a && cmp_func(base + a, base + b) >= 0)
+		while (b != a && do_cmp(cmp_func_r, cmp_func, base + a, base + b, priv) >= 0)
 			b = parent(b, lsbit, size);
 		c = b;			/* Where "a" belongs */
 		while (b != a) {	/* Shift it into place */
@@ -249,4 +243,49 @@ void sort(void *base, size_t num, size_t size,
 		}
 	}
 }
+
+/**
+ * sort - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
+ *
+ * This function does a heapsort on the given array. You may provide a
+ * swap_func function optimized to your element type.
+ *
+ * Sorting time is O(n log n) both on average and worst-case. While
+ * qsort is about 20% faster on average, it suffers from exploitable
+ * O(n*n) worst-case behavior and extra memory requirements that make
+ * it less suitable for kernel use.
+ */
+void sort(void *base, size_t num, size_t size,
+	  int (*cmp_func)(const void *, const void *),
+	  void (*swap_func)(void *, void *, int size))
+{
+	return do_sort(base, num, size, NULL, cmp_func, swap_func, NULL);
+}
 EXPORT_SYMBOL(sort);
+
+/**
+ * sort_r - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
+ * @priv: private data passed to the compare function
+ *
+ * Same as sort() except that it takes an extra private argument and passes it
+ * back to the compare function. Particularly useful when some extra context
+ * is needed to do the comparison.
+ */
+void sort_r(void *base, size_t num, size_t size,
+	    int (*cmp_func)(const void *, const void *, const void *),
+	    void (*swap_func)(void *, void *, int size),
+	    const void *priv)
+{
+	return do_sort(base, num, size, cmp_func, NULL, swap_func, priv);
+}
+EXPORT_SYMBOL(sort_r);
-- 
2.20.1
