Message-Id: <20190402162531.10888-3-urezki@gmail.com>
Date: Tue, 2 Apr 2019 18:25:30 +0200
From: "Uladzislau Rezki (Sony)" <urezki@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Roman Gushchin <guro@...com>
Cc: Michal Hocko <mhocko@...e.com>,
Matthew Wilcox <willy@...radead.org>, linux-mm@...ck.org,
LKML <linux-kernel@...r.kernel.org>,
Thomas Garnier <thgarnie@...gle.com>,
Oleksiy Avramchenko <oleksiy.avramchenko@...ymobile.com>,
Steven Rostedt <rostedt@...dmis.org>,
Joel Fernandes <joelaf@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...e.hu>, Tejun Heo <tj@...nel.org>,
"Uladzislau Rezki (Sony)" <urezki@...il.com>
Subject: [RESEND PATCH 2/3] mm/vmap: add DEBUG_AUGMENT_PROPAGATE_CHECK macro
This macro adds debug code which checks that the augmented tree
is maintained correctly, i.e. that every node contains a valid
subtree_max_size value.

By default this option is set to 0, so the check is not active.
Enabling it requires recompiling the kernel: set the macro to 1
and rebuild.
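
For example (this patch adds the define at the top of the global kva
allocator section in mm/vmalloc.c):

	#define DEBUG_AUGMENT_PROPAGATE_CHECK 1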
Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
---
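Note for reviewers: the invariant the new check enforces can also be
written as a direct recursive recomputation. Below is a minimal
user-space sketch, not kernel code; struct node, subtree_max() and
check_augment() are illustrative names with simplified types:

	#include <stdbool.h>

	struct node {
		unsigned long va_start, va_end;	/* area size is va_end - va_start */
		unsigned long subtree_max_size;	/* augmented value under check */
		struct node *left, *right;
	};

	static unsigned long subtree_max(struct node *n)
	{
		return n ? n->subtree_max_size : 0;
	}

	/* True if every node's subtree_max_size matches the recomputed maximum. */
	static bool check_augment(struct node *n)
	{
		unsigned long expect;

		if (!n)
			return true;

		expect = n->va_end - n->va_start;
		if (subtree_max(n->left) > expect)
			expect = subtree_max(n->left);
		if (subtree_max(n->right) > expect)
			expect = subtree_max(n->right);

		return n->subtree_max_size == expect &&
		       check_augment(n->left) && check_augment(n->right);
	}

The patch verifies the same property in a different way: instead of
recomputing the maximum, it walks the path of maximal child values from
each node and checks that the recorded subtree_max_size is actually
achieved by some node in the subtree.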
mm/vmalloc.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 53 insertions(+)
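
The bottom-up propagation guarded by the last hunk below can be
modelled as follows. Again a simplified user-space sketch, not the
kernel code; struct pnode and propagate_up() are illustrative names,
and the parent field stands in for rb_parent():

	struct pnode {
		unsigned long va_start, va_end;
		unsigned long subtree_max_size;
		struct pnode *left, *right, *parent;
	};

	static unsigned long pnode_max(struct pnode *n)
	{
		return n ? n->subtree_max_size : 0;
	}

	/* Recompute subtree_max_size for n and each of its ancestors. */
	static void propagate_up(struct pnode *n)
	{
		while (n) {
			unsigned long max = n->va_end - n->va_start;

			if (pnode_max(n->left) > max)
				max = pnode_max(n->left);
			if (pnode_max(n->right) > max)
				max = pnode_max(n->right);

			n->subtree_max_size = max;
			n = n->parent;
		}
	}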
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3adbad3fb6c1..1449a8c43aa2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -322,6 +322,8 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
EXPORT_SYMBOL(vmalloc_to_pfn);
/*** Global kva allocator ***/
+#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
+
#define VM_LAZY_FREE 0x02
#define VM_VM_AREA 0x04
@@ -544,6 +546,53 @@ __unlink_va(struct vmap_area *va, struct rb_root *root)
}
}
+#if DEBUG_AUGMENT_PROPAGATE_CHECK
+static void
+augment_tree_propagate_do_check(struct rb_node *n)
+{
+ struct vmap_area *va;
+ struct rb_node *node;
+ unsigned long size;
+ bool found = false;
+
+ if (n == NULL)
+ return;
+
+ va = rb_entry(n, struct vmap_area, rb_node);
+ size = va->subtree_max_size;
+ node = n;
+
+ while (node) {
+ va = rb_entry(node, struct vmap_area, rb_node);
+
+ if (get_subtree_max_size(node->rb_left) == size) {
+ node = node->rb_left;
+ } else {
+ if (__va_size(va) == size) {
+ found = true;
+ break;
+ }
+
+ node = node->rb_right;
+ }
+ }
+
+ if (!found) {
+ va = rb_entry(n, struct vmap_area, rb_node);
+ pr_emerg("tree is corrupted: %lu, %lu\n",
+ __va_size(va), va->subtree_max_size);
+ }
+
+ augment_tree_propagate_do_check(n->rb_left);
+ augment_tree_propagate_do_check(n->rb_right);
+}
+
+static void augment_tree_propagate_from_check(void)
+{
+ augment_tree_propagate_do_check(free_vmap_area_root.rb_node);
+}
+#endif
+
/*
* This function populates subtree_max_size from bottom to upper
* levels starting from VA point. The propagation must be done
@@ -593,6 +642,10 @@ __augment_tree_propagate_from(struct vmap_area *va)
va->subtree_max_size = new_va_sub_max_size;
node = rb_parent(&va->rb_node);
}
+
+#if DEBUG_AUGMENT_PROPAGATE_CHECK
+ augment_tree_propagate_from_check();
+#endif
}
static void
--
2.11.0