Message-ID: <20230913165648.2570623-2-dhowells@redhat.com>
Date: Wed, 13 Sep 2023 17:56:36 +0100
From: David Howells <dhowells@...hat.com>
To: Al Viro <viro@...iv.linux.org.uk>,
Linus Torvalds <torvalds@...ux-foundation.org>
Cc: David Howells <dhowells@...hat.com>,
Jens Axboe <axboe@...nel.dk>,
Christoph Hellwig <hch@....de>,
Christian Brauner <christian@...uner.io>,
David Laight <David.Laight@...LAB.COM>,
Matthew Wilcox <willy@...radead.org>,
Jeff Layton <jlayton@...nel.org>,
linux-fsdevel@...r.kernel.org,
linux-block@...r.kernel.org,
linux-mm@...ck.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v4 01/13] iov_iter: Add a benchmarking kunit test
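
Add kunit tests to benchmark the iov_iter copy routines.  Each test
makes 16 runs of copy_to_iter(), copying 256MiB from a vmapped scratch
buffer into an iterator, and prints the time taken by each run and the
average:

 (1) Copying through a single large ITER_BVEC.

 (2) Copying through a sequence of 64-page ITER_BVECs.

 (3) Copying through an ITER_XARRAY.

The source buffer and the destination iterators are all backed by a
single page, tiled repeatedly across the address range.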
---
 lib/kunit_iov_iter.c | 181 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 181 insertions(+)

diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 859b67c4d697..478fea956f58 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -756,6 +756,184 @@ static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
 	KUNIT_SUCCEED();
 }
 
+static void iov_kunit_free_page(void *data)
+{
+	__free_page(data);
+}
+
+static void __init iov_kunit_benchmark_print_stats(struct kunit *test,
+						    unsigned int *samples)
+{
+	unsigned long total = 0;
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		total += samples[i];
+		kunit_info(test, "run %x: %u uS\n", i, samples[i]);
+	}
+
+	kunit_info(test, "avg %lu uS\n", total / 16);
+}
+
+/*
+ * Time copying 256MiB through an ITER_BVEC.
+ */
+static void __init iov_kunit_benchmark_bvec(struct kunit *test)
+{
+	struct iov_iter iter;
+	struct bio_vec *bvec;
+	struct page *page, **pages;
+	unsigned int samples[16];
+	ktime_t a, b;
+	ssize_t copied;
+	size_t size = 256 * 1024 * 1024, npages = size / PAGE_SIZE;
+	void *scratch;
+	int i;
+
+	/* Allocate a page and tile it repeatedly in the buffer. */
+	page = alloc_page(GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, page);
+	kunit_add_action_or_reset(test, iov_kunit_free_page, page);
+
+	bvec = kunit_kmalloc_array(test, npages, sizeof(bvec[0]), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, bvec);
+	for (i = 0; i < npages; i++)
+		bvec_set_page(&bvec[i], page, PAGE_SIZE, 0);
+
+	/* Create a single large buffer to copy to/from. */
+	pages = kunit_kmalloc_array(test, npages, sizeof(pages[0]), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, pages);
+	for (i = 0; i < npages; i++)
+		pages[i] = page;
+
+	scratch = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, scratch);
+	kunit_add_action_or_reset(test, iov_kunit_unmap, scratch);
+
+	/* Perform and time a bunch of copies. */
+	kunit_info(test, "Benchmarking copy_to_iter() over BVEC:\n");
+	for (i = 0; i < 16; i++) {
+		iov_iter_bvec(&iter, ITER_DEST, bvec, npages, size);
+		a = ktime_get_real();
+		copied = copy_to_iter(scratch, size, &iter);
+		b = ktime_get_real();
+		KUNIT_EXPECT_EQ(test, copied, size);
+		samples[i] = ktime_to_us(ktime_sub(b, a));
+	}
+
+	iov_kunit_benchmark_print_stats(test, samples);
+	KUNIT_SUCCEED();
+}
+
+/*
+ * Time copying 256MiB through an ITER_BVEC in 64-page chunks.
+ */
+static void __init iov_kunit_benchmark_bvec_split(struct kunit *test)
+{
+	struct iov_iter iter;
+	struct bio_vec *bvec;
+	struct page *page, **pages;
+	unsigned int samples[16];
+	ktime_t a, b;
+	ssize_t copied;
+	size_t size, npages = 64;
+	void *scratch;
+	int i, j;
+
+	/* Allocate a page and tile it repeatedly in the buffer. */
+	page = alloc_page(GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, page);
+	kunit_add_action_or_reset(test, iov_kunit_free_page, page);
+
+	/* Create a single large buffer to copy to/from. */
+	pages = kunit_kmalloc_array(test, npages, sizeof(pages[0]), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, pages);
+	for (i = 0; i < npages; i++)
+		pages[i] = page;
+
+	scratch = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, scratch);
+	kunit_add_action_or_reset(test, iov_kunit_unmap, scratch);
+
+	/* Perform and time a bunch of copies. */
+	kunit_info(test, "Benchmarking copy_to_iter() over BVEC:\n");
+	for (i = 0; i < 16; i++) {
+		size = 256 * 1024 * 1024;
+		a = ktime_get_real();
+		do {
+			size_t part = min(size, npages * PAGE_SIZE);
+
+			bvec = kunit_kmalloc_array(test, npages, sizeof(bvec[0]), GFP_KERNEL);
+			KUNIT_ASSERT_NOT_NULL(test, bvec);
+			for (j = 0; j < npages; j++)
+				bvec_set_page(&bvec[j], page, PAGE_SIZE, 0);
+
+			iov_iter_bvec(&iter, ITER_DEST, bvec, npages, part);
+			copied = copy_to_iter(scratch, part, &iter);
+			KUNIT_EXPECT_EQ(test, copied, part);
+			size -= part;
+		} while (size > 0);
+		b = ktime_get_real();
+		samples[i] = ktime_to_us(ktime_sub(b, a));
+	}
+
+	iov_kunit_benchmark_print_stats(test, samples);
+	KUNIT_SUCCEED();
+}
+
+/*
+ * Time copying 256MiB through an ITER_XARRAY.
+ */
+static void __init iov_kunit_benchmark_xarray(struct kunit *test)
+{
+	struct iov_iter iter;
+	struct xarray *xarray;
+	struct page *page, **pages;
+	unsigned int samples[16];
+	ktime_t a, b;
+	ssize_t copied;
+	size_t size = 256 * 1024 * 1024, npages = size / PAGE_SIZE;
+	void *scratch;
+	int i;
+
+	/* Allocate a page and tile it repeatedly in the buffer. */
+	page = alloc_page(GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, page);
+	kunit_add_action_or_reset(test, iov_kunit_free_page, page);
+
+	xarray = iov_kunit_create_xarray(test);
+
+	for (i = 0; i < npages; i++) {
+		void *x = xa_store(xarray, i, page, GFP_KERNEL);
+
+		KUNIT_ASSERT_FALSE(test, xa_is_err(x));
+	}
+
+	/* Create a single large buffer to copy to/from. */
+	pages = kunit_kmalloc_array(test, npages, sizeof(pages[0]), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, pages);
+	for (i = 0; i < npages; i++)
+		pages[i] = page;
+
+	scratch = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, scratch);
+	kunit_add_action_or_reset(test, iov_kunit_unmap, scratch);
+
+	/* Perform and time a bunch of copies. */
+	kunit_info(test, "Benchmarking copy_to_iter() over XARRAY:\n");
+	for (i = 0; i < 16; i++) {
+		iov_iter_xarray(&iter, ITER_DEST, xarray, 0, size);
+		a = ktime_get_real();
+		copied = copy_to_iter(scratch, size, &iter);
+		b = ktime_get_real();
+		KUNIT_EXPECT_EQ(test, copied, size);
+		samples[i] = ktime_to_us(ktime_sub(b, a));
+	}
+
+	iov_kunit_benchmark_print_stats(test, samples);
+	KUNIT_SUCCEED();
+}
+
 static struct kunit_case __refdata iov_kunit_cases[] = {
 	KUNIT_CASE(iov_kunit_copy_to_kvec),
 	KUNIT_CASE(iov_kunit_copy_from_kvec),
@@ -766,6 +944,9 @@ static struct kunit_case __refdata iov_kunit_cases[] = {
 	KUNIT_CASE(iov_kunit_extract_pages_kvec),
 	KUNIT_CASE(iov_kunit_extract_pages_bvec),
 	KUNIT_CASE(iov_kunit_extract_pages_xarray),
+	KUNIT_CASE(iov_kunit_benchmark_bvec),
+	KUNIT_CASE(iov_kunit_benchmark_bvec_split),
+	KUNIT_CASE(iov_kunit_benchmark_xarray),
 	{}
 };
 