Message-Id: <20180108165933.379b5e946cb2ca0d5f0778d6@kernel.org>
Date: Mon, 8 Jan 2018 16:59:33 +0900
From: Masami Hiramatsu <mhiramat@...nel.org>
To: Jessica Yu <jeyu@...nel.org>
Cc: mhiramat@...nel.org,
Ananth N Mavinakayanahalli <ananth@...ux.vnet.ibm.com>,
Anil S Keshavamurthy <anil.s.keshavamurthy@...el.com>,
"David S . Miller" <davem@...emloft.net>,
Ingo Molnar <mingo@...nel.org>, Petr Mladek <pmladek@...e.com>,
Josh Poimboeuf <jpoimboe@...hat.com>,
Joe Lawrence <joe.lawrence@...hat.com>,
Jiri Kosina <jikos@...nel.org>,
Miroslav Benes <mbenes@...e.cz>,
Steven Rostedt <rostedt@...dmis.org>,
live-patching@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 1/2] kprobes: propagate error from
arm_kprobe_ftrace()
On Mon, 8 Jan 2018 03:47:49 +0100
Jessica Yu <jeyu@...nel.org> wrote:
> Improve error handling when arming ftrace-based kprobes. Specifically, if
> we fail to arm an ftrace-based kprobe, register_kprobe()/enable_kprobe()
> should report an error instead of success. Previously, this has led to
> confusing situations where register_kprobe() would return 0 indicating
> success, but the kprobe would not be functional if ftrace registration
> during the kprobe arming process had failed. We should therefore take any
> error returned by ftrace into account and propagate it so that we do
> not register/enable kprobes that cannot be armed. This can happen if,
> for example, register_ftrace_function() finds an IPMODIFY conflict (since
> kprobe_ftrace_ops has this flag set) and returns an error. Such a conflict
> is possible since livepatches also set the IPMODIFY flag for their ftrace_ops.
Oops, I missed this. Since an IPMODIFY flag conflict can reproduce this
error, and that can happen in normal usage, arm_kprobe_ftrace() should use
pr_debug() instead of WARN(), as Steve pointed out.
Sorry Jessica, could you replace WARN() with pr_debug() in arm_kprobe_ftrace()?
Since the error is reproducible, a user could easily make dmesg messy.
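
Something like this (just an untested sketch of the first WARN() site):

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	if (ret < 0) {
		/* reproducible in normal usage, so don't WARN() */
		pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n",
			 p->addr, ret);
		return ret;
	}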
The other parts look good to me, so you can add my Acked-by in the next version.
Thank you,
>
> arm_all_kprobes() keeps its current behavior and attempts to arm all
> kprobes. It returns the last encountered error and gives a warning if
> not all probes could be armed.
>
> This patch is based on Petr Mladek's original patchset (patches 2 and 3)
> back in 2015, which improved kprobes error handling, found here:
>
> https://lkml.org/lkml/2015/2/26/452
>
> However, further work on this has been paused since then and the patches
> were not upstreamed.
>
> Based-on-patches-by: Petr Mladek <pmladek@...e.com>
> Signed-off-by: Jessica Yu <jeyu@...nel.org>
> ---
> kernel/kprobes.c | 96 +++++++++++++++++++++++++++++++++++++++++---------------
> 1 file changed, 71 insertions(+), 25 deletions(-)
>
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index b4aab48ad258..21d88cebb29b 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -988,18 +988,32 @@ static int prepare_kprobe(struct kprobe *p)
> }
>
> /* Caller must lock kprobe_mutex */
> -static void arm_kprobe_ftrace(struct kprobe *p)
> +static int arm_kprobe_ftrace(struct kprobe *p)
> {
> - int ret;
> + int ret = 0;
>
> ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
> (unsigned long)p->addr, 0, 0);
> - WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
> - kprobe_ftrace_enabled++;
> - if (kprobe_ftrace_enabled == 1) {
> + if (WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret))
> + return ret;
> +
> + if (kprobe_ftrace_enabled == 0) {
> ret = register_ftrace_function(&kprobe_ftrace_ops);
> - WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
> + if (WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret))
> + goto err_ftrace;
> }
> +
> + kprobe_ftrace_enabled++;
> + return ret;
> +
> +err_ftrace:
> + /*
> + * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
> + * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
> + * empty filter_hash which would undesirably trace all functions.
> + */
> + ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
> + return ret;
> }
>
> /* Caller must lock kprobe_mutex */
> @@ -1018,22 +1032,23 @@ static void disarm_kprobe_ftrace(struct kprobe *p)
> }
> #else /* !CONFIG_KPROBES_ON_FTRACE */
> #define prepare_kprobe(p) arch_prepare_kprobe(p)
> -#define arm_kprobe_ftrace(p) do {} while (0)
> +#define arm_kprobe_ftrace(p) (-ENODEV)
> #define disarm_kprobe_ftrace(p) do {} while (0)
> #endif
>
> /* Arm a kprobe with text_mutex */
> -static void arm_kprobe(struct kprobe *kp)
> +static int arm_kprobe(struct kprobe *kp)
> {
> - if (unlikely(kprobe_ftrace(kp))) {
> - arm_kprobe_ftrace(kp);
> - return;
> - }
> + if (unlikely(kprobe_ftrace(kp)))
> + return arm_kprobe_ftrace(kp);
> +
> cpus_read_lock();
> mutex_lock(&text_mutex);
> __arm_kprobe(kp);
> mutex_unlock(&text_mutex);
> cpus_read_unlock();
> +
> + return 0;
> }
>
> /* Disarm a kprobe with text_mutex */
> @@ -1372,9 +1387,15 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
>
> if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
> ap->flags &= ~KPROBE_FLAG_DISABLED;
> - if (!kprobes_all_disarmed)
> + if (!kprobes_all_disarmed) {
> /* Arm the breakpoint again. */
> - arm_kprobe(ap);
> + ret = arm_kprobe(ap);
> + if (ret) {
> + ap->flags |= KPROBE_FLAG_DISABLED;
> + list_del_rcu(&p->list);
> + synchronize_sched();
> + }
> + }
> }
> return ret;
> }
> @@ -1594,8 +1615,14 @@ int register_kprobe(struct kprobe *p)
> hlist_add_head_rcu(&p->hlist,
> &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
>
> - if (!kprobes_all_disarmed && !kprobe_disabled(p))
> - arm_kprobe(p);
> + if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
> + ret = arm_kprobe(p);
> + if (ret) {
> + hlist_del_rcu(&p->hlist);
> + synchronize_sched();
> + goto out;
> + }
> + }
>
> /* Try to optimize kprobe */
> try_to_optimize_kprobe(p);
> @@ -2137,7 +2164,9 @@ int enable_kprobe(struct kprobe *kp)
>
> if (!kprobes_all_disarmed && kprobe_disabled(p)) {
> p->flags &= ~KPROBE_FLAG_DISABLED;
> - arm_kprobe(p);
> + ret = arm_kprobe(p);
> + if (ret)
> + p->flags |= KPROBE_FLAG_DISABLED;
> }
> out:
> mutex_unlock(&kprobe_mutex);
> @@ -2565,11 +2594,12 @@ static const struct file_operations debugfs_kprobe_ei_ops = {
> .release = seq_release,
> };
>
> -static void arm_all_kprobes(void)
> +static int arm_all_kprobes(void)
> {
> struct hlist_head *head;
> struct kprobe *p;
> - unsigned int i;
> + unsigned int i, total = 0, errors = 0;
> + int err, ret = 0;
>
> mutex_lock(&kprobe_mutex);
>
> @@ -2586,16 +2616,28 @@ static void arm_all_kprobes(void)
> /* Arming kprobes doesn't optimize kprobe itself */
> for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
> head = &kprobe_table[i];
> - hlist_for_each_entry_rcu(p, head, hlist)
> - if (!kprobe_disabled(p))
> - arm_kprobe(p);
> + /* Arm all kprobes on a best-effort basis */
> + hlist_for_each_entry_rcu(p, head, hlist) {
> + if (!kprobe_disabled(p)) {
> + err = arm_kprobe(p);
> + if (err) {
> + errors++;
> + ret = err;
> + }
> + total++;
> + }
> + }
> }
>
> - printk(KERN_INFO "Kprobes globally enabled\n");
> + if (errors)
> + pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
> + errors, total);
> + else
> + pr_info("Kprobes globally enabled\n");
>
> already_enabled:
> mutex_unlock(&kprobe_mutex);
> - return;
> + return ret;
> }
>
> static void disarm_all_kprobes(void)
> @@ -2652,6 +2694,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
> {
> char buf[32];
> size_t buf_size;
> + int ret = 0;
>
> buf_size = min(count, (sizeof(buf)-1));
> if (copy_from_user(buf, user_buf, buf_size))
> @@ -2662,7 +2705,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
> case 'y':
> case 'Y':
> case '1':
> - arm_all_kprobes();
> + ret = arm_all_kprobes();
> break;
> case 'n':
> case 'N':
> @@ -2673,6 +2716,9 @@ static ssize_t write_enabled_file_bool(struct file *file,
> return -EINVAL;
> }
>
> + if (ret)
> + return ret;
> +
> return count;
> }
>
> --
> 2.13.6
>
--
Masami Hiramatsu <mhiramat@...nel.org>