[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230920130400.203330-7-dhowells@redhat.com>
Date: Wed, 20 Sep 2023 14:03:57 +0100
From: David Howells <dhowells@...hat.com>
To: Jens Axboe <axboe@...nel.dk>
Cc: David Howells <dhowells@...hat.com>,
Al Viro <viro@...iv.linux.org.uk>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Christoph Hellwig <hch@....de>,
Christian Brauner <christian@...uner.io>,
David Laight <David.Laight@...LAB.COM>,
Matthew Wilcox <willy@...radead.org>,
Brendan Higgins <brendanhiggins@...gle.com>,
David Gow <davidgow@...gle.com>,
linux-fsdevel@...r.kernel.org,
linux-block@...r.kernel.org,
linux-mm@...ck.org,
netdev@...r.kernel.org,
linux-kselftest@...r.kernel.org,
kunit-dev@...glegroups.com,
linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
Christian Brauner <brauner@...nel.org>,
David Hildenbrand <david@...hat.com>,
John Hubbard <jhubbard@...dia.com>
Subject: [RFC PATCH v2 6/9] iov_iter: Add copy kunit tests for ITER_UBUF and ITER_IOVEC
Add copy kunit tests for ITER_UBUF- and ITER_IOVEC-type iterators.  These
tests temporarily attach a userspace mm, with a file mapped into it, to the
test thread so that user buffers are available to copy to and from.
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Andrew Morton <akpm@...ux-foundation.org>
cc: Christoph Hellwig <hch@....de>
cc: Christian Brauner <brauner@...nel.org>
cc: Jens Axboe <axboe@...nel.dk>
cc: Al Viro <viro@...iv.linux.org.uk>
cc: Matthew Wilcox <willy@...radead.org>
cc: David Hildenbrand <david@...hat.com>
cc: John Hubbard <jhubbard@...dia.com>
cc: Brendan Higgins <brendanhiggins@...gle.com>
cc: David Gow <davidgow@...gle.com>
cc: linux-mm@...ck.org
cc: linux-fsdevel@...r.kernel.org
cc: linux-kselftest@...r.kernel.org
cc: kunit-dev@...glegroups.com
---
lib/kunit_iov_iter.c | 236 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 236 insertions(+)
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 85387a25484e..d1817ab4ffee 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -116,6 +116,23 @@ static void *__init iov_kunit_create_buffer(struct kunit *test,
return buffer;
}
+/*
+ * Fill a user buffer with a recognisable pattern.
+ *
+ * Each byte of the userspace buffer is written with pattern(i) via
+ * put_user().  The test is marked failed and filling stops at the first
+ * fault.  Used to initialise the source buffer for the copy-from tests.
+ */
+static void iov_kunit_fill_user_buf(struct kunit *test,
+				    u8 __user *buffer, size_t bufsize)
+{
+	size_t i;
+	int err;
+
+	for (i = 0; i < bufsize; i++) {
+		err = put_user(pattern(i), &buffer[i]);
+		KUNIT_EXPECT_EQ(test, err, 0);
+		/* Bail out on the first fault rather than flooding the log. */
+		if (test->status == KUNIT_FAILURE)
+			return;
+	}
+}
+
/*
* Build the reference pattern in the scratch buffer that we expect to see in
* the iterator buffer (ie. the result of copy *to*).
@@ -171,6 +188,25 @@ static void iov_kunit_check_pattern(struct kunit *test, const u8 *buffer,
}
}
+/*
+ * Compare a user buffer and a scratch buffer to see that they're the same.
+ *
+ * Each byte is fetched from userspace with get_user() and compared against
+ * the reference in @scratch.  Checking stops at the first fault or the first
+ * mismatching byte so a corrupt buffer doesn't emit a million reports.
+ */
+static void iov_kunit_check_user_pattern(struct kunit *test, const u8 __user *buffer,
+					 const u8 *scratch, size_t bufsize)
+{
+	size_t i;
+	int err;
+	u8 c;
+
+	for (i = 0; i < bufsize; i++) {
+		err = get_user(c, &buffer[i]);
+		KUNIT_EXPECT_EQ(test, err, 0);
+		/* If the fetch faulted, c is uninitialised - don't compare it. */
+		if (err)
+			return;
+		KUNIT_EXPECT_EQ_MSG(test, c, scratch[i], "at i=%zx", i);
+		if (c != scratch[i])
+			return;
+	}
+}
+
static const struct file_operations iov_kunit_user_file_fops = {
.mmap = generic_file_mmap,
};
@@ -293,6 +329,202 @@ static u8 __user *__init iov_kunit_create_user_buf(struct kunit *test,
return buffer;
}
+/*
+ * Test copying to an ITER_UBUF-type iterator.
+ *
+ * A kernel scratch buffer is filled with a recognisable pattern and the user
+ * buffer is cleared; the pattern is then copied, range by range, into the
+ * user buffer through single-segment ITER_UBUF iterators.  Finally the user
+ * buffer is checked against the reference pattern (copied ranges hold the
+ * pattern, the gaps between them stay zero).
+ */
+static void __init iov_kunit_copy_to_ubuf(struct kunit *test)
+{
+	const struct iov_kunit_range *pr;
+	struct iov_iter iter;
+	struct page **spages;
+	u8 __user *buffer;
+	u8 *scratch;
+	ssize_t uncleared;
+	size_t bufsize, npages, size, copied;
+	int i;
+
+	bufsize = 0x100000;
+	npages = bufsize / PAGE_SIZE;
+
+	scratch = iov_kunit_create_buffer(test, &spages, npages);
+	for (i = 0; i < bufsize; i++)
+		scratch[i] = pattern(i);
+
+	buffer = iov_kunit_create_user_buf(test, npages, NULL);
+	uncleared = clear_user(buffer, bufsize);
+	KUNIT_EXPECT_EQ(test, uncleared, 0);
+	if (uncleared)
+		return;
+
+	i = 0;
+	for (pr = kvec_test_ranges; pr->page >= 0; pr++) {
+		size = pr->to - pr->from;
+		KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+		/* A UBUF iterator covers a single segment, so a fresh
+		 * iterator is needed for each range.
+		 */
+		iov_iter_ubuf(&iter, ITER_DEST, buffer + pr->from, size);
+		copied = copy_to_iter(scratch + i, size, &iter);
+
+		KUNIT_EXPECT_EQ(test, copied, size);
+		KUNIT_EXPECT_EQ(test, iter.count, 0);
+		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+		if (test->status == KUNIT_FAILURE)
+			break;
+		i += size;
+	}
+
+	iov_kunit_build_to_reference_pattern(test, scratch, bufsize, kvec_test_ranges);
+	iov_kunit_check_user_pattern(test, buffer, scratch, bufsize);
+	KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_UBUF-type iterator.
+ *
+ * The user buffer is filled with a recognisable pattern, then copied, range
+ * by range, into a zeroed kernel scratch buffer through single-segment
+ * ITER_UBUF iterators.  The scratch buffer is then compared against a
+ * separately built reference pattern.
+ */
+static void __init iov_kunit_copy_from_ubuf(struct kunit *test)
+{
+	const struct iov_kunit_range *pr;
+	struct iov_iter iter;
+	struct page **spages;
+	u8 __user *buffer;
+	u8 *scratch, *reference;
+	size_t bufsize, npages, size, copied;
+	int i;
+
+	bufsize = 0x100000;
+	npages = bufsize / PAGE_SIZE;
+
+	buffer = iov_kunit_create_user_buf(test, npages, NULL);
+	iov_kunit_fill_user_buf(test, buffer, bufsize);
+
+	scratch = iov_kunit_create_buffer(test, &spages, npages);
+	memset(scratch, 0, bufsize);
+
+	reference = iov_kunit_create_buffer(test, &spages, npages);
+
+	i = 0;
+	for (pr = kvec_test_ranges; pr->page >= 0; pr++) {
+		size = pr->to - pr->from;
+		KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+		/* One single-segment iterator per source range. */
+		iov_iter_ubuf(&iter, ITER_SOURCE, buffer + pr->from, size);
+		copied = copy_from_iter(scratch + i, size, &iter);
+
+		KUNIT_EXPECT_EQ(test, copied, size);
+		KUNIT_EXPECT_EQ(test, iter.count, 0);
+		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+		if (test->status == KUNIT_FAILURE)
+			break;
+		i += size;
+	}
+
+	iov_kunit_build_from_reference_pattern(test, reference, bufsize, kvec_test_ranges);
+	iov_kunit_check_pattern(test, scratch, reference, bufsize);
+	KUNIT_SUCCEED();
+}
+
+/*
+ * Build an iovec array from the given range table, each element pointing into
+ * the user buffer, and initialise an ITER_IOVEC-type iterator over it.
+ *
+ * Filling stops at the end of the range table (page < 0) or after iovmax
+ * elements.  Each range is asserted to be well-formed and to lie within the
+ * buffer.
+ */
+static void __init iov_kunit_load_iovec(struct kunit *test,
+					struct iov_iter *iter, int dir,
+					struct iovec *iov, unsigned int iovmax,
+					u8 __user *buffer, size_t bufsize,
+					const struct iov_kunit_range *pr)
+{
+	size_t size = 0;
+	int i;
+
+	for (i = 0; i < iovmax; i++, pr++) {
+		if (pr->page < 0)
+			break;
+		KUNIT_ASSERT_GE(test, pr->to, pr->from);
+		KUNIT_ASSERT_LE(test, pr->to, bufsize);
+		iov[i].iov_base = buffer + pr->from;
+		iov[i].iov_len = pr->to - pr->from;
+		size += pr->to - pr->from;
+	}
+	KUNIT_ASSERT_LE(test, size, bufsize);
+
+	/* i is the number of segments actually filled in. */
+	iov_iter_init(iter, dir, iov, i, size);
+}
+
+/*
+ * Test copying to an ITER_IOVEC-type iterator.
+ *
+ * A kernel scratch buffer is filled with a recognisable pattern and the user
+ * buffer is cleared; the pattern is then copied in one call through a
+ * multi-segment ITER_IOVEC iterator built over the test ranges.  The user
+ * buffer is then checked against the reference pattern.
+ */
+static void __init iov_kunit_copy_to_iovec(struct kunit *test)
+{
+	struct iov_iter iter;
+	struct page **spages;
+	struct iovec iov[8];
+	u8 __user *buffer;
+	u8 *scratch;
+	ssize_t uncleared;
+	size_t bufsize, npages, size, copied;
+	int i;
+
+	bufsize = 0x100000;
+	npages = bufsize / PAGE_SIZE;
+
+	scratch = iov_kunit_create_buffer(test, &spages, npages);
+	for (i = 0; i < bufsize; i++)
+		scratch[i] = pattern(i);
+
+	buffer = iov_kunit_create_user_buf(test, npages, NULL);
+	uncleared = clear_user(buffer, bufsize);
+	KUNIT_EXPECT_EQ(test, uncleared, 0);
+	if (uncleared)
+		return;
+
+	iov_kunit_load_iovec(test, &iter, ITER_DEST, iov, ARRAY_SIZE(iov),
+			     buffer, bufsize, kvec_test_ranges);
+	size = iter.count;
+
+	copied = copy_to_iter(scratch, size, &iter);
+
+	KUNIT_EXPECT_EQ(test, copied, size);
+	KUNIT_EXPECT_EQ(test, iter.count, 0);
+	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+	iov_kunit_build_to_reference_pattern(test, scratch, bufsize, kvec_test_ranges);
+	iov_kunit_check_user_pattern(test, buffer, scratch, bufsize);
+	KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_IOVEC-type iterator.
+ *
+ * The user buffer is filled with a recognisable pattern and then copied in
+ * one call, through a multi-segment ITER_IOVEC iterator built over the test
+ * ranges, into a zeroed kernel scratch buffer.  The scratch buffer is then
+ * compared against a separately built reference pattern.
+ */
+static void __init iov_kunit_copy_from_iovec(struct kunit *test)
+{
+	struct iov_iter iter;
+	struct page **spages;
+	struct iovec iov[8];
+	u8 __user *buffer;
+	u8 *scratch, *reference;
+	size_t bufsize, npages, size, copied;
+
+	bufsize = 0x100000;
+	npages = bufsize / PAGE_SIZE;
+
+	buffer = iov_kunit_create_user_buf(test, npages, NULL);
+	iov_kunit_fill_user_buf(test, buffer, bufsize);
+
+	scratch = iov_kunit_create_buffer(test, &spages, npages);
+	memset(scratch, 0, bufsize);
+
+	reference = iov_kunit_create_buffer(test, &spages, npages);
+
+	iov_kunit_load_iovec(test, &iter, ITER_SOURCE, iov, ARRAY_SIZE(iov),
+			     buffer, bufsize, kvec_test_ranges);
+	size = iter.count;
+
+	copied = copy_from_iter(scratch, size, &iter);
+
+	KUNIT_EXPECT_EQ(test, copied, size);
+	KUNIT_EXPECT_EQ(test, iter.count, 0);
+	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+	iov_kunit_build_from_reference_pattern(test, reference, bufsize, kvec_test_ranges);
+	/* Pass (buffer-under-test, reference) in the same order as the other
+	 * copy tests so expected/actual aren't swapped in failure reports.
+	 */
+	iov_kunit_check_pattern(test, scratch, reference, bufsize);
+	KUNIT_SUCCEED();
+}
+
static void __init iov_kunit_load_kvec(struct kunit *test,
struct iov_iter *iter, int dir,
struct kvec *kvec, unsigned int kvmax,
@@ -868,6 +1100,10 @@ static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
}
static struct kunit_case __refdata iov_kunit_cases[] = {
+ KUNIT_CASE(iov_kunit_copy_to_ubuf),
+ KUNIT_CASE(iov_kunit_copy_from_ubuf),
+ KUNIT_CASE(iov_kunit_copy_to_iovec),
+ KUNIT_CASE(iov_kunit_copy_from_iovec),
KUNIT_CASE(iov_kunit_copy_to_kvec),
KUNIT_CASE(iov_kunit_copy_from_kvec),
KUNIT_CASE(iov_kunit_copy_to_bvec),
Powered by blists - more mailing lists