Message-Id: <157918593350.29301.7175144493909010321.stgit@devnote2>
Date: Thu, 16 Jan 2020 23:45:33 +0900
From: Masami Hiramatsu <mhiramat@...nel.org>
To: Brendan Gregg <brendan.d.gregg@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Alexei Starovoitov <ast@...nel.org>
Cc: mhiramat@...nel.org, Ingo Molnar <mingo@...nel.org>,
bpf@...r.kernel.org, linux-kernel@...r.kernel.org,
Daniel Borkmann <daniel@...earbox.net>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
"David S . Miller" <davem@...emloft.net>, paulmck@...nel.org,
joel@...lfernandes.org,
"Naveen N . Rao" <naveen.n.rao@...ux.ibm.com>,
Anil S Keshavamurthy <anil.s.keshavamurthy@...el.com>
Subject: [RFT PATCH 08/13] kprobes: Use workqueue for reclaiming kprobe insn cache pages
Use a workqueue for reclaiming kprobe insn cache pages. This moves
the heaviest part of the job (synchronize_rcu_tasks() and freeing the
garbage slots) out of the unregistration path and into a deferred
worker.
Signed-off-by: Masami Hiramatsu <mhiramat@...nel.org>
---
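Note for reviewers: below is a minimal standalone sketch of the
deferred-reclaim pattern this patch applies, in case the moving parts
are easier to see outside the diff. The "demo_cache" names are
illustrative only and do not appear in the patch.

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_cache {
        struct mutex lock;
        int nr_garbage;
        struct work_struct work;
};

static void demo_cache_gc(struct work_struct *work);

static struct demo_cache demo = {
        .lock = __MUTEX_INITIALIZER(demo.lock),
        .work = __WORK_INITIALIZER(demo.work, demo_cache_gc),
};

/* Worker: recover the owning cache with container_of() and do the
 * slow reclaim under the same mutex the fast paths use. */
static void demo_cache_gc(struct work_struct *work)
{
        struct demo_cache *c = container_of(work, typeof(*c), work);

        mutex_lock(&c->lock);
        /* ... wait for users and free garbage slots here ... */
        c->nr_garbage = 0;
        mutex_unlock(&c->lock);
}

/* Fast path: just queue the work. The work_pending() check skips
 * redundant scheduling while a GC pass is already queued. */
static void demo_cache_kick_gc(struct demo_cache *c)
{
        if (!work_pending(&c->work))
                schedule_work(&c->work);
}

One design consequence visible in the diff: __get_insn_slot() no
longer retries after a synchronous garbage collection, so it may
allocate a fresh page even while dirty slots exist; the worker
reclaims them later, off the unregistration path.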
include/linux/kprobes.h | 2 ++
kernel/kprobes.c | 29 ++++++++++++++++++-----------
2 files changed, 20 insertions(+), 11 deletions(-)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 04bdaf01112c..0f832817fca3 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -245,6 +245,7 @@ struct kprobe_insn_cache {
struct list_head pages; /* list of kprobe_insn_page */
size_t insn_size; /* size of instruction slot */
int nr_garbage;
+ struct work_struct work;
};
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
@@ -254,6 +255,7 @@ extern void __free_insn_slot(struct kprobe_insn_cache *c,
/* sleep-less address checking routine */
extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c,
unsigned long addr);
+void kprobe_insn_cache_gc(struct work_struct *work);
#define DEFINE_INSN_CACHE_OPS(__name) \
extern struct kprobe_insn_cache kprobe_##__name##_slots; \
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 09b0e33bc845..a9114923da4c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -126,8 +126,15 @@ struct kprobe_insn_cache kprobe_insn_slots = {
.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
.insn_size = MAX_INSN_SIZE,
.nr_garbage = 0,
+ .work = __WORK_INITIALIZER(kprobe_insn_slots.work,
+ kprobe_insn_cache_gc),
};
-static int collect_garbage_slots(struct kprobe_insn_cache *c);
+
+static void kick_kprobe_insn_cache_gc(struct kprobe_insn_cache *c)
+{
+ if (!work_pending(&c->work))
+ schedule_work(&c->work);
+}
/**
* __get_insn_slot() - Find a slot on an executable page for an instruction.
@@ -140,7 +147,6 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
/* Since the slot array is not protected by rcu, we need a mutex */
mutex_lock(&c->mutex);
- retry:
list_for_each_entry(kip, &c->pages, list) {
if (kip->nused < slots_per_page(c)) {
int i;
@@ -158,11 +164,7 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
}
}
- /* If there are any garbage slots, collect it and try again. */
- if (c->nr_garbage && collect_garbage_slots(c) == 0)
- goto retry;
-
- /* All out of space. Need to allocate a new page. */
+ /* All out of space. Need to allocate a new page. */
kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
if (!kip)
goto out;
@@ -213,10 +215,12 @@ static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
return 0;
}
-static int collect_garbage_slots(struct kprobe_insn_cache *c)
+void kprobe_insn_cache_gc(struct work_struct *work)
{
+ struct kprobe_insn_cache *c = container_of(work, typeof(*c), work);
struct kprobe_insn_page *kip, *next;
+ mutex_lock(&c->mutex);
/* Ensure no-one is running on the garbages. */
synchronize_rcu_tasks();
@@ -226,12 +230,13 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
continue;
kip->ngarbage = 0; /* we will collect all garbages */
for (i = 0; i < slots_per_page(c); i++) {
- if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
+ if (kip->slot_used[i] == SLOT_DIRTY &&
+ collect_one_slot(kip, i))
break;
}
}
c->nr_garbage = 0;
- return 0;
+ mutex_unlock(&c->mutex);
}
void __free_insn_slot(struct kprobe_insn_cache *c,
@@ -259,7 +264,7 @@ void __free_insn_slot(struct kprobe_insn_cache *c,
kip->slot_used[idx] = SLOT_DIRTY;
kip->ngarbage++;
if (++c->nr_garbage > slots_per_page(c))
- collect_garbage_slots(c);
+ kick_kprobe_insn_cache_gc(c);
} else {
collect_one_slot(kip, idx);
}
@@ -299,6 +304,8 @@ struct kprobe_insn_cache kprobe_optinsn_slots = {
.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
/* .insn_size is initialized later */
.nr_garbage = 0,
+ .work = __WORK_INITIALIZER(kprobe_optinsn_slots.work,
+ kprobe_insn_cache_gc),
};
#endif
#endif