Message-Id: <20200115103112.fe5dbff75d9e1e422cd60fd3@kernel.org>
Date: Wed, 15 Jan 2020 10:31:12 +0900
From: Masami Hiramatsu <mhiramat@...nel.org>
To: Joel Fernandes <joel@...lfernandes.org>
Cc: Ingo Molnar <mingo@...nel.org>, paulmck@...nel.org,
Anders Roxell <anders.roxell@...aro.org>,
"Naveen N . Rao" <naveen.n.rao@...ux.ibm.com>,
Anil S Keshavamurthy <anil.s.keshavamurthy@...el.com>,
David Miller <davem@...emloft.net>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH -tip V2 2/2] kprobes: Use non-RCU traversal APIs on
kprobe_tables if possible
On Tue, 14 Jan 2020 08:56:21 -0500
Joel Fernandes <joel@...lfernandes.org> wrote:
> On Tue, Dec 03, 2019 at 03:06:28PM +0900, Masami Hiramatsu wrote:
> > Currently, kprobes uses the RCU traversal APIs on kprobe_tables
> > even when it is safe to use the non-RCU variants because
> > kprobe_mutex is held.
> >
> > Convert those traversals to the non-RCU APIs where kprobe_mutex
> > is held.
>
> Reviewed-by: Joel Fernandes (Google) <joel@...lfernandes.org>
Thanks Joel!
>
> Maybe resend both patches with the appropriate tags, since it has been
> some time since they were originally posted?
OK, I'll update it against the latest -tip and add your Reviewed-by.
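
BTW, for readers of the archive, the rule that the new comment on
kprobe_table describes is: the plain hlist/list walkers are enough
while kprobe_mutex is held, since the mutex excludes all writers; the
_rcu walkers are still required in the breakpoint handlers, which run
with preemption disabled and take no mutex. A minimal sketch of the
two patterns (the function names here are made up for illustration,
they are not in the patch):

        /* Slow path: writers are excluded by kprobe_mutex, so the
         * plain (non-RCU) walker is sufficient.
         */
        static void walk_all_kprobes_locked(void)
        {
                struct kprobe *p;
                unsigned int i;

                mutex_lock(&kprobe_mutex);
                for (i = 0; i < KPROBE_TABLE_SIZE; i++)
                        hlist_for_each_entry(p, &kprobe_table[i], hlist)
                                pr_info("kprobe at %px\n", p->addr);
                mutex_unlock(&kprobe_mutex);
        }

        /* Fast path (breakpoint-handler style): no mutex is taken and
         * preemption is disabled, so the RCU walker must be kept.
         */
        static struct kprobe *find_kprobe_in_handler(void *addr)
        {
                struct hlist_head *head;
                struct kprobe *p;

                head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
                hlist_for_each_entry_rcu(p, head, hlist) {
                        if (p->addr == addr)
                                return p;
                }
                return NULL;
        }

get_kprobe() in kernel/kprobes.c is the real example of the second
pattern.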
Thank you,
>
> thanks,
>
> - Joel
>
>
> >
> > Signed-off-by: Masami Hiramatsu <mhiramat@...nel.org>
> > ---
> > kernel/kprobes.c | 29 ++++++++++++++++++++---------
> > 1 file changed, 20 insertions(+), 9 deletions(-)
> >
> > diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> > index f9ecb6d532fb..4caab01ace30 100644
> > --- a/kernel/kprobes.c
> > +++ b/kernel/kprobes.c
> > @@ -46,6 +46,11 @@
> >
> >
> > static int kprobes_initialized;
> > +/* kprobe_table can be accessed by:
> > + * - normal hlist traversal and RCU add/del while kprobe_mutex is held,
> > + * or
> > + * - RCU hlist traversal with preemption disabled (breakpoint handlers).
> > + */
> > static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
> > static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
> >
> > @@ -829,7 +834,7 @@ static void optimize_all_kprobes(void)
> > kprobes_allow_optimization = true;
> > for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
> > head = &kprobe_table[i];
> > - hlist_for_each_entry_rcu(p, head, hlist)
> > + hlist_for_each_entry(p, head, hlist)
> > if (!kprobe_disabled(p))
> > optimize_kprobe(p);
> > }
> > @@ -856,7 +861,7 @@ static void unoptimize_all_kprobes(void)
> > kprobes_allow_optimization = false;
> > for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
> > head = &kprobe_table[i];
> > - hlist_for_each_entry_rcu(p, head, hlist) {
> > + hlist_for_each_entry(p, head, hlist) {
> > if (!kprobe_disabled(p))
> > unoptimize_kprobe(p, false);
> > }
> > @@ -1479,12 +1484,14 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
> > {
> > struct kprobe *ap, *list_p;
> >
> > + lockdep_assert_held(&kprobe_mutex);
> > +
> > ap = get_kprobe(p->addr);
> > if (unlikely(!ap))
> > return NULL;
> >
> > if (p != ap) {
> > - list_for_each_entry_rcu(list_p, &ap->list, list)
> > + list_for_each_entry(list_p, &ap->list, list)
> > if (list_p == p)
> > /* kprobe p is a valid probe */
> > goto valid;
> > @@ -1649,7 +1656,9 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
> > {
> > struct kprobe *kp;
> >
> > - list_for_each_entry_rcu(kp, &ap->list, list)
> > + lockdep_assert_held(&kprobe_mutex);
> > +
> > + list_for_each_entry(kp, &ap->list, list)
> > if (!kprobe_disabled(kp))
> > /*
> > * There is an active probe on the list.
> > @@ -1728,7 +1737,7 @@ static int __unregister_kprobe_top(struct kprobe *p)
> > else {
> > /* If disabling probe has special handlers, update aggrprobe */
> > if (p->post_handler && !kprobe_gone(p)) {
> > - list_for_each_entry_rcu(list_p, &ap->list, list) {
> > + list_for_each_entry(list_p, &ap->list, list) {
> > if ((list_p != p) && (list_p->post_handler))
> > goto noclean;
> > }
> > @@ -2042,13 +2051,15 @@ static void kill_kprobe(struct kprobe *p)
> > {
> > struct kprobe *kp;
> >
> > + lockdep_assert_held(&kprobe_mutex);
> > +
> > p->flags |= KPROBE_FLAG_GONE;
> > if (kprobe_aggrprobe(p)) {
> > /*
> > * If this is an aggr_kprobe, we have to list all the
> > * chained probes and mark them GONE.
> > */
> > - list_for_each_entry_rcu(kp, &p->list, list)
> > + list_for_each_entry(kp, &p->list, list)
> > kp->flags |= KPROBE_FLAG_GONE;
> > p->post_handler = NULL;
> > kill_optimized_kprobe(p);
> > @@ -2217,7 +2228,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
> > mutex_lock(&kprobe_mutex);
> > for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
> > head = &kprobe_table[i];
> > - hlist_for_each_entry_rcu(p, head, hlist)
> > + hlist_for_each_entry(p, head, hlist)
> > if (within_module_init((unsigned long)p->addr, mod) ||
> > (checkcore &&
> > within_module_core((unsigned long)p->addr, mod))) {
> > @@ -2468,7 +2479,7 @@ static int arm_all_kprobes(void)
> > for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
> > head = &kprobe_table[i];
> > /* Arm all kprobes on a best-effort basis */
> > - hlist_for_each_entry_rcu(p, head, hlist) {
> > + hlist_for_each_entry(p, head, hlist) {
> > if (!kprobe_disabled(p)) {
> > err = arm_kprobe(p);
> > if (err) {
> > @@ -2511,7 +2522,7 @@ static int disarm_all_kprobes(void)
> > for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
> > head = &kprobe_table[i];
> > /* Disarm all kprobes on a best-effort basis */
> > - hlist_for_each_entry_rcu(p, head, hlist) {
> > + hlist_for_each_entry(p, head, hlist) {
> > if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
> > err = disarm_kprobe(p, false);
> > if (err) {
> >
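
One more note on the lockdep_assert_held() calls added above: besides
documenting the locking contract, they make lockdep warn at runtime
(with CONFIG_PROVE_LOCKING) if one of the now non-RCU list walks is
ever reached without kprobe_mutex. Roughly like this (an illustrative
sketch, the function name is made up):

        static void assert_and_walk(struct kprobe *ap)
        {
                struct kprobe *kp;

                /* Warns at runtime (under lockdep) if the caller does
                 * not hold kprobe_mutex.
                 */
                lockdep_assert_held(&kprobe_mutex);

                /* kprobe_mutex excludes all modifications of ap->list,
                 * so the plain list walker is safe here.
                 */
                list_for_each_entry(kp, &ap->list, list)
                        kp->flags |= KPROBE_FLAG_GONE;
        }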
--
Masami Hiramatsu <mhiramat@...nel.org>