Message-Id: <1c0ba11b8da5dc8f71ad45175c536fa4be720984.1646055639.git.karolinadrobnik@gmail.com>
Date:   Mon, 28 Feb 2022 15:46:50 +0100
From:   Karolina Drobnik <karolinadrobnik@...il.com>
To:     linux-mm@...ck.org
Cc:     rppt@...nel.org, akpm@...ux-foundation.org,
        linux-kernel@...r.kernel.org,
        Karolina Drobnik <karolinadrobnik@...il.com>
Subject: [PATCH 8/9] memblock tests: Add memblock_alloc_try_nid tests for bottom up

Add checks for memblock_alloc_try_nid for the bottom-up allocation direction.
As the definition of this function is pretty close to the core
memblock_alloc_range_nid, the test cases implemented here cover most of
the code paths related to memory allocation.
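
All of the test cases below exercise the same call shape; as a minimal sketch
lifted from the test bodies in the diff (sizes and ranges vary per scenario):

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	/* Request a cache-line aligned region within [min_addr, max_addr) */
	allocated_ptr = memblock_alloc_try_nid(SZ_128, SMP_CACHE_BYTES,
					       min_addr, max_addr, NUMA_NO_NODE);

	/* The returned memory is expected to be zeroed */
	assert(allocated_ptr);
	assert(*(char *)allocated_ptr == 0);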

The tested scenarios are:
  - Region can be allocated within the requested range (both with aligned
    and misaligned boundaries)
  - Region can be allocated between two already existing entries
  - Not enough space between already reserved regions
  - Memory at the range boundaries is reserved but there is enough space
    to allocate a new region
  - The memory range is too narrow but memory can be allocated before
    the maximum address
  - Edge cases:
      + Minimum address is below memblock_start_of_DRAM()
      + Maximum address is above memblock_end_of_DRAM()

Add test case wrappers to test both directions in the same context.
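
Each wrapper runs the top-down variant first and then repeats the scenario
with bottom-up allocations enabled, e.g. (a sketch mirroring the definitions
in the diff below):

	static int alloc_try_nid_simple_check(void)
	{
		/* Top-down pass */
		memblock_set_bottom_up(false);
		alloc_try_nid_top_down_simple_check();
		/* Bottom-up pass, same scenario */
		memblock_set_bottom_up(true);
		alloc_try_nid_bottom_up_simple_check();

		return 0;
	}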

Signed-off-by: Karolina Drobnik <karolinadrobnik@...il.com>
---
 tools/testing/memblock/tests/alloc_nid_api.c | 496 ++++++++++++++++++-
 1 file changed, 492 insertions(+), 4 deletions(-)

diff --git a/tools/testing/memblock/tests/alloc_nid_api.c b/tools/testing/memblock/tests/alloc_nid_api.c
index 75cfca47c703..03216efe3488 100644
--- a/tools/testing/memblock/tests/alloc_nid_api.c
+++ b/tools/testing/memblock/tests/alloc_nid_api.c
@@ -653,26 +653,514 @@ static int alloc_try_nid_top_down_cap_min_check(void)
 	return 0;
 }
 
-int memblock_alloc_nid_checks(void)
+/*
+ * A simple test that tries to allocate a memory region within min_addr and
+ * max_addr range:
+ *
+ *        +                       +
+ *   |    +-----------+           |      |
+ *   |    |    rgn    |           |      |
+ *   +----+-----------+-----------+------+
+ *        ^                       ^
+ *        |                       |
+ *        min_addr                max_addr
+ *
+ * Expect to allocate a cleared region that ends before max_addr.
+ */
+static int alloc_try_nid_bottom_up_simple_check(void)
 {
-	reset_memblock_attributes();
-	dummy_physical_memory_init();
+	struct memblock_region *rgn = &memblock.reserved.regions[0];
+	void *allocated_ptr = NULL;
+	char *b;
+
+	phys_addr_t size = SZ_128;
+	phys_addr_t min_addr;
+	phys_addr_t max_addr;
+	phys_addr_t rgn_end;
+
+	setup_memblock();
+
+	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
+	max_addr = min_addr + SZ_512;
+
+	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
+					       min_addr, max_addr, NUMA_NO_NODE);
+	b = (char *)allocated_ptr;
+	rgn_end = rgn->base + rgn->size;
+
+	assert(allocated_ptr);
+	assert(*b == 0);
+
+	assert(rgn->size == size);
+	assert(rgn->base == min_addr);
+	assert(rgn_end < max_addr);
+
+	assert(memblock.reserved.cnt == 1);
+	assert(memblock.reserved.total_size == size);
+
+	return 0;
+}
+
+/*
+ * A simple test that tries to allocate a memory region within min_addr and
+ * max_addr range, where the start address is misaligned:
+ *
+ *        +                     +
+ *  |     +   +-----------+     +     |
+ *  |     |   |    rgn    |     |     |
+ *  +-----+---+-----------+-----+-----+
+ *        ^   ^----.            ^
+ *        |        |            |
+ *     min_addr    |            max_addr
+ *                 |
+ *                 Aligned address
+ *                 boundary
+ *
+ * Expect to allocate a cleared, aligned region that ends before max_addr.
+ */
+static int alloc_try_nid_bottom_up_start_misaligned_check(void)
+{
+	struct memblock_region *rgn = &memblock.reserved.regions[0];
+	void *allocated_ptr = NULL;
+	char *b;
+
+	phys_addr_t size = SZ_128;
+	phys_addr_t misalign = SZ_2;
+	phys_addr_t min_addr;
+	phys_addr_t max_addr;
+	phys_addr_t rgn_end;
+
+	setup_memblock();
+
+	min_addr = memblock_start_of_DRAM() + misalign;
+	max_addr = min_addr + SZ_512;
+
+	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
+					       min_addr, max_addr, NUMA_NO_NODE);
+	b = (char *)allocated_ptr;
+	rgn_end = rgn->base + rgn->size;
+
+	assert(allocated_ptr);
+	assert(*b == 0);
+
+	assert(rgn->size == size);
+	assert(rgn->base == min_addr + (SMP_CACHE_BYTES - misalign));
+	assert(rgn_end < max_addr);
+
+	assert(memblock.reserved.cnt == 1);
+	assert(memblock.reserved.total_size == size);
+
+	return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that can't fit into the
+ * min_addr and max_addr range:
+ *
+ *                      +    +
+ *  |---------+         +    +      |
+ *  |   rgn   |         |    |      |
+ *  +---------+---------+----+------+
+ *                      ^    ^
+ *                      |    |
+ *                      |    max_addr
+ *                      |
+ *                      min_addr
+ *
+ * Expect to drop the lower limit and allocate a cleared memory region which
+ * starts at the beginning of the available memory.
+ */
+static int alloc_try_nid_bottom_up_narrow_range_check(void)
+{
+	struct memblock_region *rgn = &memblock.reserved.regions[0];
+	void *allocated_ptr = NULL;
+	char *b;
+
+	phys_addr_t size = SZ_256;
+	phys_addr_t min_addr;
+	phys_addr_t max_addr;
+
+	setup_memblock();
+
+	min_addr = memblock_start_of_DRAM() + SZ_512;
+	max_addr = min_addr + SMP_CACHE_BYTES;
+
+	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
+					       min_addr, max_addr, NUMA_NO_NODE);
+	b = (char *)allocated_ptr;
+
+	assert(allocated_ptr);
+	assert(*b == 0);
+
+	assert(rgn->size == size);
+	assert(rgn->base == memblock_start_of_DRAM());
+
+	assert(memblock.reserved.cnt == 1);
+	assert(memblock.reserved.total_size == size);
+
+	return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range, when
+ * there are two reserved regions at the borders, with a gap big enough to fit
+ * a new region:
+ *
+ *                +           +
+ *  |    +--------+-------+   +------+  |
+ *  |    |   r2   |  rgn  |   |  r1  |  |
+ *  +----+--------+-------+---+------+--+
+ *                ^           ^
+ *                |           |
+ *                min_addr    max_addr
+ *
+ * Expect the new region to be merged with r2. The r1 region does not get
+ * updated. The total size field gets updated.
+ */
+
+static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
+{
+	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
+	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
+	void *allocated_ptr = NULL;
+	char *b;
+	struct region r1, r2;
+
+	phys_addr_t r3_size = SZ_64;
+	phys_addr_t gap_size = SMP_CACHE_BYTES;
+	phys_addr_t total_size;
+	phys_addr_t max_addr;
+	phys_addr_t min_addr;
+
+	setup_memblock();
+
+	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
+	r1.size = SMP_CACHE_BYTES;
+
+	r2.size = SZ_128;
+	r2.base = r1.base - (r3_size + gap_size + r2.size);
+
+	total_size = r1.size + r2.size + r3_size;
+	min_addr = r2.base + r2.size;
+	max_addr = r1.base;
+
+	memblock_reserve(r1.base, r1.size);
+	memblock_reserve(r2.base, r2.size);
+
+	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
+					       min_addr, max_addr, NUMA_NO_NODE);
+	b = (char *)allocated_ptr;
+
+	assert(allocated_ptr);
+	assert(*b == 0);
+
+	assert(rgn1->size == r1.size);
+	assert(rgn1->base == max_addr);
+
+	assert(rgn2->size == r2.size + r3_size);
+	assert(rgn2->base == r2.base);
+
+	assert(memblock.reserved.cnt == 2);
+	assert(memblock.reserved.total_size == total_size);
+
+	return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range, when
+ * there are two reserved regions at the borders, with a gap of a size equal to
+ * the size of the new region:
+ *
+ *                         +   +
+ *  |----------+    +------+   +----+  |
+ *  |    r3    |    |  r2  |   | r1 |  |
+ *  +----------+----+------+---+----+--+
+ *                         ^   ^
+ *                         |   |
+ *                         |  max_addr
+ *                         |
+ *                         min_addr
+ *
+ * Expect to drop the lower limit and allocate memory at the beginning of the
+ * available memory. The region counter and total size fields get updated.
+ * Other regions are not modified.
+ */
+
+static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
+{
+	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
+	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
+	struct memblock_region *rgn3 = &memblock.reserved.regions[0];
+	void *allocated_ptr = NULL;
+	char *b;
+	struct region r1, r2;
+
+	phys_addr_t r3_size = SZ_256;
+	phys_addr_t gap_size = SMP_CACHE_BYTES;
+	phys_addr_t total_size;
+	phys_addr_t max_addr;
+	phys_addr_t min_addr;
+
+	setup_memblock();
+
+	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
+	r1.size = SMP_CACHE_BYTES;
+
+	r2.size = SZ_128;
+	r2.base = r1.base - (r2.size + gap_size);
+
+	total_size = r1.size + r2.size + r3_size;
+	min_addr = r2.base + r2.size;
+	max_addr = r1.base;
+
+	memblock_reserve(r1.base, r1.size);
+	memblock_reserve(r2.base, r2.size);
+
+	allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
+					       min_addr, max_addr, NUMA_NO_NODE);
+	b = (char *)allocated_ptr;
+
+	assert(allocated_ptr);
+	assert(*b == 0);
+
+	assert(rgn3->size == r3_size);
+	assert(rgn3->base == memblock_start_of_DRAM());
+
+	assert(rgn2->size == r2.size);
+	assert(rgn2->base == r2.base);
+
+	assert(rgn1->size == r1.size);
+	assert(rgn1->base == r1.base);
+
+	assert(memblock.reserved.cnt == 3);
+	assert(memblock.reserved.total_size == total_size);
+
+	return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region, where max_addr is
+ * bigger than the end address of the available memory. Expect to allocate
+ * a cleared region that starts at min_addr.
+ */
+static int alloc_try_nid_bottom_up_cap_max_check(void)
+{
+	struct memblock_region *rgn = &memblock.reserved.regions[0];
+	void *allocated_ptr = NULL;
+	char *b;
+
+	phys_addr_t size = SZ_256;
+	phys_addr_t min_addr;
+	phys_addr_t max_addr;
+
+	setup_memblock();
+
+	min_addr = memblock_start_of_DRAM() + SZ_1K;
+	max_addr = memblock_end_of_DRAM() + SZ_256;
+
+	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
+					       min_addr, max_addr, NUMA_NO_NODE);
+	b = (char *)allocated_ptr;
+
+	assert(allocated_ptr);
+	assert(*b == 0);
+
+	assert(rgn->size == size);
+	assert(rgn->base == min_addr);
+
+	assert(memblock.reserved.cnt == 1);
+	assert(memblock.reserved.total_size == size);
+
+	return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region, where min_addr is
+ * smaller than the start address of the available memory. Expect to allocate
+ * a cleared region at the beginning of the available memory.
+ */
+static int alloc_try_nid_bottom_up_cap_min_check(void)
+{
+	struct memblock_region *rgn = &memblock.reserved.regions[0];
+	void *allocated_ptr = NULL;
+	char *b;
+
+	phys_addr_t size = SZ_1K;
+	phys_addr_t min_addr;
+	phys_addr_t max_addr;
+
+	setup_memblock();
+
+	min_addr = memblock_start_of_DRAM();
+	max_addr = memblock_end_of_DRAM() - SZ_256;
 
+	allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
+					       min_addr, max_addr, NUMA_NO_NODE);
+	b = (char *)allocated_ptr;
+
+	assert(allocated_ptr);
+	assert(*b == 0);
+
+	assert(rgn->size == size);
+	assert(rgn->base == memblock_start_of_DRAM());
+
+	assert(memblock.reserved.cnt == 1);
+	assert(memblock.reserved.total_size == size);
+
+	return 0;
+}
+
+/* Test case wrappers */
+static int alloc_try_nid_simple_check(void)
+{
+	memblock_set_bottom_up(false);
 	alloc_try_nid_top_down_simple_check();
+	memblock_set_bottom_up(true);
+	alloc_try_nid_bottom_up_simple_check();
+
+	return 0;
+}
+
+static int alloc_try_nid_misaligned_check(void)
+{
+	memblock_set_bottom_up(false);
 	alloc_try_nid_top_down_end_misaligned_check();
+	memblock_set_bottom_up(true);
+	alloc_try_nid_bottom_up_start_misaligned_check();
+
+	return 0;
+}
+
+static int alloc_try_nid_narrow_range_check(void)
+{
+	memblock_set_bottom_up(false);
 	alloc_try_nid_top_down_narrow_range_check();
+	memblock_set_bottom_up(true);
+	alloc_try_nid_bottom_up_narrow_range_check();
+
+	return 0;
+}
+
+static int alloc_try_nid_reserved_with_space_check(void)
+{
+	memblock_set_bottom_up(false);
 	alloc_try_nid_top_down_reserved_with_space_check();
+	memblock_set_bottom_up(true);
+	alloc_try_nid_bottom_up_reserved_with_space_check();
+
+	return 0;
+}
+
+static int alloc_try_nid_reserved_no_space_check(void)
+{
+	memblock_set_bottom_up(false);
 	alloc_try_nid_top_down_reserved_no_space_check();
-	alloc_try_nid_top_down_cap_min_check();
+	memblock_set_bottom_up(true);
+	alloc_try_nid_bottom_up_reserved_no_space_check();
+
+	return 0;
+}
+
+static int alloc_try_nid_cap_max_check(void)
+{
+	memblock_set_bottom_up(false);
 	alloc_try_nid_top_down_cap_max_check();
+	memblock_set_bottom_up(true);
+	alloc_try_nid_bottom_up_cap_max_check();
+
+	return 0;
+}
+
+static int alloc_try_nid_cap_min_check(void)
+{
+	memblock_set_bottom_up(false);
+	alloc_try_nid_top_down_cap_min_check();
+	memblock_set_bottom_up(true);
+	alloc_try_nid_bottom_up_cap_min_check();
+
+	return 0;
+}
 
+static int alloc_try_nid_min_reserved_check(void)
+{
+	memblock_set_bottom_up(false);
 	alloc_try_nid_min_reserved_generic_check();
+	memblock_set_bottom_up(true);
+	alloc_try_nid_min_reserved_generic_check();
+
+	return 0;
+}
+
+static int alloc_try_nid_max_reserved_check(void)
+{
+	memblock_set_bottom_up(false);
 	alloc_try_nid_max_reserved_generic_check();
+	memblock_set_bottom_up(true);
+	alloc_try_nid_max_reserved_generic_check();
+
+	return 0;
+}
+
+static int alloc_try_nid_exact_address_check(void)
+{
+	memblock_set_bottom_up(false);
 	alloc_try_nid_exact_address_generic_check();
+	memblock_set_bottom_up(true);
+	alloc_try_nid_exact_address_generic_check();
+
+	return 0;
+}
+
+static int alloc_try_nid_reserved_full_merge_check(void)
+{
+	memblock_set_bottom_up(false);
+	alloc_try_nid_reserved_full_merge_generic_check();
+	memblock_set_bottom_up(true);
 	alloc_try_nid_reserved_full_merge_generic_check();
+
+	return 0;
+}
+
+static int alloc_try_nid_reserved_all_check(void)
+{
+	memblock_set_bottom_up(false);
+	alloc_try_nid_reserved_all_generic_check();
+	memblock_set_bottom_up(true);
 	alloc_try_nid_reserved_all_generic_check();
+
+	return 0;
+}
+
+static int alloc_try_nid_low_max_check(void)
+{
+	memblock_set_bottom_up(false);
+	alloc_try_nid_low_max_generic_check();
+	memblock_set_bottom_up(true);
 	alloc_try_nid_low_max_generic_check();
 
+	return 0;
+}
+
+int memblock_alloc_nid_checks(void)
+{
+	reset_memblock_attributes();
+	dummy_physical_memory_init();
+
+	alloc_try_nid_simple_check();
+	alloc_try_nid_misaligned_check();
+	alloc_try_nid_narrow_range_check();
+	alloc_try_nid_reserved_with_space_check();
+	alloc_try_nid_reserved_no_space_check();
+	alloc_try_nid_cap_max_check();
+	alloc_try_nid_cap_min_check();
+
+	alloc_try_nid_min_reserved_check();
+	alloc_try_nid_max_reserved_check();
+	alloc_try_nid_exact_address_check();
+	alloc_try_nid_reserved_full_merge_check();
+	alloc_try_nid_reserved_all_check();
+	alloc_try_nid_low_max_check();
+
 	dummy_physical_memory_cleanup();
 
 	return 0;
-- 
2.30.2
