Message-Id: <20210528201006.459292-1-jarmo.tiitto@gmail.com>
Date: Fri, 28 May 2021 23:10:06 +0300
From: Jarmo Tiitto <jarmo.tiitto@...il.com>
To: samitolvanen@...gle.com
Cc: wcw@...gle.com, nathan@...nel.org, ndesaulniers@...gle.com,
linux-kernel@...r.kernel.org, clang-built-linux@...glegroups.com,
Jarmo Tiitto <jarmo.tiitto@...il.com>
Subject: [PATCH 4/6] pgo: modules: Enable __llvm_profile_instrument_target() for modules
Enable allocate_node() for modules.
Before this patch __llvm_profile_instrument_target() was invoked for
all instrumented code, including modules. However, module profiling
was effectively disabled because allocate_node() returned NULL when
the llvm_prf_data instance didn't point into the core (vmlinux)
section.

Handle profiling data that originates from modules by iterating
prf_mod_list and checking which module's prf_data section the
llvm_prf_data instance points into. If a matching module is found,
the node is allocated from that module's vnds section; each module
then has its own current_node index. The list iteration is protected
by RCU to avoid taking an extra mutex.
Signed-off-by: Jarmo Tiitto <jarmo.tiitto@...il.com>
---
kernel/pgo/instrument.c | 65 +++++++++++++++++++++++++++++++++--------
1 file changed, 53 insertions(+), 12 deletions(-)
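
Note for reading the diff: allocate_node() now uses per-module state
that is not introduced in this patch. Judging only from the fields
referenced below, the earlier patches in this series add something
along the lines of the following sketch (assumed layout; the *_size
fields are assumed to count entries, matching the pointer arithmetic
in allocate_node() below):

/* Sketch only, not the actual definitions from this series. */
struct prf_mod_private_data {
	struct list_head link;	/* entry in prf_mod_list (RCU) */
	struct module *mod;	/* cleared when the module goes away */
	int current_node;	/* per-module value node cursor */
};

static LIST_HEAD(prf_mod_list);

/* Fields assumed to be added to struct module:
 *	struct llvm_prf_data *prf_data;		module __llvm_prf_data
 *	unsigned int prf_data_size;		entries in prf_data
 *	struct llvm_prf_value_node *prf_vnds;	module value node pool
 *	unsigned int prf_vnds_size;		entries in prf_vnds
 */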
diff --git a/kernel/pgo/instrument.c b/kernel/pgo/instrument.c
index 98cfa11a7b76..a95c86d668b5 100644
--- a/kernel/pgo/instrument.c
+++ b/kernel/pgo/instrument.c
@@ -31,7 +31,7 @@
* ensures that we don't try to serialize data that's only partially updated.
*/
static DEFINE_SPINLOCK(pgo_lock);
-static int current_node;
+static int current_node = 0;
unsigned long prf_lock(void)
{
@@ -55,17 +55,58 @@ void prf_unlock(unsigned long flags)
static struct llvm_prf_value_node *allocate_node(struct llvm_prf_data *p,
u32 index, u64 value)
{
- if (&__llvm_prf_vnds_start[current_node + 1] >= __llvm_prf_vnds_end)
- return NULL; /* Out of nodes */
-
- current_node++;
-
- /* Make sure the node is entirely within the section */
- if (&__llvm_prf_vnds_start[current_node] >= __llvm_prf_vnds_end ||
- &__llvm_prf_vnds_start[current_node + 1] > __llvm_prf_vnds_end)
- return NULL;
-
- return &__llvm_prf_vnds_start[current_node];
+ struct prf_mod_private_data *pmod;
+ struct llvm_prf_data *start = __llvm_prf_data_start;
+ struct llvm_prf_data *end = __llvm_prf_data_end;
+ struct module *mod;
+ struct llvm_prf_value_node *vnds = __llvm_prf_vnds_start;
+ struct llvm_prf_value_node *vnds_end = __llvm_prf_vnds_end;
+
+ if (start <= p && p < end) {
+ /* vmlinux core node */
+ if (&vnds[current_node + 1] >= vnds_end)
+ return NULL; /* Out of nodes */
+
+ current_node++;
+
+ /* Make sure the node is entirely within the section */
+ if (&vnds[current_node] >= vnds_end ||
+ &vnds[current_node + 1] > vnds_end)
+ return NULL;
+
+ return &vnds[current_node];
+
+ } else {
+ /* Maybe a module node:
+ * find which module section p points into and
+ * then allocate from that module.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(pmod, &prf_mod_list, link) {
+ mod = READ_ONCE(pmod->mod);
+ if (mod) {
+ /* get section bounds */
+ start = mod->prf_data;
+ end = mod->prf_data + mod->prf_data_size;
+ if (start <= p && p < end) {
+ vnds = mod->prf_vnds;
+ vnds_end = mod->prf_vnds + mod->prf_vnds_size;
+ if (&vnds[pmod->current_node + 1] < vnds_end) {
+ pmod->current_node++;
+
+ vnds = &vnds[pmod->current_node];
+ rcu_read_unlock();
+ return vnds;
+ }
+ }
+ }
+ }
+ rcu_read_unlock();
+ return NULL; /* Out of nodes */
+ }
}
/*
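
For context, allocate_node() is called from the value-profiling hook
that the compiler emits calls to at instrumented value sites. Below is
a simplified sketch of that caller, modeled on the existing
kernel/pgo/instrument.c (abbreviated: the real function also caps the
number of nodes per site, so treat this as illustrative rather than
the exact code):

void __llvm_profile_instrument_target(u64 target_value, void *data, u32 index)
{
	struct llvm_prf_data *p = (struct llvm_prf_data *)data;
	struct llvm_prf_value_node **counters;
	struct llvm_prf_value_node *curr;
	unsigned long flags;

	if (!p || !p->values)
		return;

	counters = (struct llvm_prf_value_node **)p->values;

	/* Fast path: bump the counter if target_value was seen before. */
	for (curr = counters[index]; curr; curr = curr->next) {
		if (curr->value == target_value) {
			curr->count++;
			return;
		}
	}

	/* Slow path: take a node from the matching vnds section. With
	 * this patch, p may now point into a module's prf_data section.
	 */
	flags = prf_lock();
	curr = allocate_node(p, index, target_value);
	if (curr) {
		curr->value = target_value;
		curr->count = 1;
		curr->next = counters[index];
		counters[index] = curr;
	}
	prf_unlock(flags);
}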
--
2.31.1