Date:   Wed, 3 Jan 2018 09:33:03 -0500
From:   Steven Rostedt <rostedt@...dmis.org>
To:     Jessica Yu <jeyu@...nel.org>
Cc:     Masami Hiramatsu <mhiramat@...nel.org>,
        Ananth N Mavinakayanahalli <ananth@...ux.vnet.ibm.com>,
        Anil S Keshavamurthy <anil.s.keshavamurthy@...el.com>,
        "David S . Miller" <davem@...emloft.net>,
        Ingo Molnar <mingo@...nel.org>, Petr Mladek <pmladek@...e.com>,
        Josh Poimboeuf <jpoimboe@...hat.com>,
        Joe Lawrence <joe.lawrence@...hat.com>,
        Jiri Kosina <jikos@...nel.org>,
        Miroslav Benes <mbenes@...e.cz>, live-patching@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 1/2] kprobes: propagate error from
 arm_kprobe_ftrace()

On Wed,  3 Jan 2018 02:40:47 +0100
Jessica Yu <jeyu@...nel.org> wrote:

> Improve error handling when arming ftrace-based kprobes. Specifically, if
> we fail to arm a ftrace-based kprobe, register_kprobe()/enable_kprobe()
> should report an error instead of success. Previously, this led to
> confusing situations where register_kprobe() would return 0 indicating
> success, but the kprobe would not be functional if ftrace registration
> during the kprobe arming process had failed. We should therefore take any
> errors returned by ftrace into account and propagate this error so that we
> do not register/enable kprobes that cannot be armed. This can happen if,
> for example, register_ftrace_function() finds an IPMODIFY conflict (since
> kprobe_ftrace_ops has this flag set) and returns an error. Such a conflict
> is possible since livepatches also set the IPMODIFY flag for their ftrace_ops.
> 
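As a side note for anyone following along: the conflict comes from ftrace
allowing only one IPMODIFY ops to attach to any given function. A rough
sketch of such an ops (demo_ops/demo_handler are made-up names, not from
this patch):

	/* An ops that rewrites the IP must set IPMODIFY (and SAVE_REGS). */
	static void notrace demo_handler(unsigned long ip, unsigned long parent_ip,
					 struct ftrace_ops *ops, struct pt_regs *regs)
	{
		/* e.g. a livepatch would redirect ip to the new function here */
	}

	static struct ftrace_ops demo_ops = {
		.func	= demo_handler,
		.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
	};

If an ops like this already covers the probed address,
register_ftrace_function(&kprobe_ftrace_ops) is expected to fail
(typically with -EBUSY), which is the error this patch now propagates.
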
> arm_all_kprobes() keeps its current behavior and attempts to arm all
> kprobes. It returns the last encountered error and gives a warning if
> not all probes could be armed.
> 
> This patch is based on Petr Mladek's original patchset (patches 2 and 3)
> back in 2015, which improved kprobes error handling, found here:
> 
>    https://lkml.org/lkml/2015/2/26/452
> 
> However, further work on this has been paused since then and the patches
> were not upstreamed.
> 
> Based-on-patches-by: Petr Mladek <pmladek@...e.com>
> Signed-off-by: Jessica Yu <jeyu@...nel.org>
> ---
>  kernel/kprobes.c | 94 +++++++++++++++++++++++++++++++++++++++++---------------
>  1 file changed, 69 insertions(+), 25 deletions(-)
> 
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index b4aab48ad258..ae6b6fe79de3 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -988,18 +988,32 @@ static int prepare_kprobe(struct kprobe *p)
>  }
>  
>  /* Caller must lock kprobe_mutex */
> -static void arm_kprobe_ftrace(struct kprobe *p)
> +static int arm_kprobe_ftrace(struct kprobe *p)
>  {
> -	int ret;
> +	int ret = 0;
>  
>  	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
>  				   (unsigned long)p->addr, 0, 0);
> -	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
> -	kprobe_ftrace_enabled++;
> -	if (kprobe_ftrace_enabled == 1) {
> +	if (WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret))
> +		return ret;

I wonder if we should change this from a WARN to a printk(). No reason
to do stack dumps here.
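
Something like this perhaps (untested sketch of that suggestion):

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	if (ret < 0) {
		pr_err("Failed to arm kprobe-ftrace at %p (%d)\n",
		       p->addr, ret);
		return ret;
	}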

> +
> +	if (kprobe_ftrace_enabled == 0) {
>  		ret = register_ftrace_function(&kprobe_ftrace_ops);
> -		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
> +		if (WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret))
> +			goto err_ftrace;
>  	}
> +
> +	kprobe_ftrace_enabled++;
> +	return ret;
> +
> +err_ftrace:
> +	/*
> +	 * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
> +	 * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
> +	 * empty filter_hash which would undesirably trace all functions.
> +	 */
> +	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
> +	return ret;
>  }
>  
>  /* Caller must lock kprobe_mutex */
> @@ -1018,22 +1032,23 @@ static void disarm_kprobe_ftrace(struct kprobe *p)
>  }
>  #else	/* !CONFIG_KPROBES_ON_FTRACE */
>  #define prepare_kprobe(p)	arch_prepare_kprobe(p)
> -#define arm_kprobe_ftrace(p)	do {} while (0)
> +#define arm_kprobe_ftrace(p)	(0)

Hmm. Perhaps we should have arm_kprobe_ftrace() return a failure.

>  #define disarm_kprobe_ftrace(p)	do {} while (0)
>  #endif
>  
>  /* Arm a kprobe with text_mutex */
> -static void arm_kprobe(struct kprobe *kp)
> +static int arm_kprobe(struct kprobe *kp)
>  {
> -	if (unlikely(kprobe_ftrace(kp))) {
> -		arm_kprobe_ftrace(kp);
> -		return;
> -	}
> +	if (unlikely(kprobe_ftrace(kp)))
> +		return arm_kprobe_ftrace(kp);

If CONFIG_KPROBES_ON_FTRACE is not defined, this if should always be
false. But if for some reason it ever isn't in the future, we would have
arm_kprobe_ftrace() return success when it really is a failure.

 -ENODEV?
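
I.e. have the !CONFIG_KPROBES_ON_FTRACE stub return an error rather than
0, something along the lines of (sketch):

	#else	/* !CONFIG_KPROBES_ON_FTRACE */
	#define prepare_kprobe(p)	arch_prepare_kprobe(p)
	#define arm_kprobe_ftrace(p)	(-ENODEV)
	#define disarm_kprobe_ftrace(p)	do {} while (0)
	#endif

so a caller that somehow reaches this path gets a failure instead of a
silent success.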


> +
>  	cpus_read_lock();
>  	mutex_lock(&text_mutex);
>  	__arm_kprobe(kp);
>  	mutex_unlock(&text_mutex);
>  	cpus_read_unlock();
> +
> +	return 0;
>  }
>  
>  /* Disarm a kprobe with text_mutex */
> @@ -1372,9 +1387,15 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
>  
>  	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
>  		ap->flags &= ~KPROBE_FLAG_DISABLED;
> -		if (!kprobes_all_disarmed)
> +		if (!kprobes_all_disarmed) {
>  			/* Arm the breakpoint again. */
> -			arm_kprobe(ap);
> +			ret = arm_kprobe(ap);
> +			if (ret) {
> +				ap->flags |= KPROBE_FLAG_DISABLED;
> +				list_del_rcu(&p->list);

Don't we need to hold the mutex to modify the list?

> +				synchronize_sched();
> +			}
> +		}
>  	}
>  	return ret;
>  }
> @@ -1594,8 +1615,14 @@ int register_kprobe(struct kprobe *p)
>  	hlist_add_head_rcu(&p->hlist,
>  		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
>  
> -	if (!kprobes_all_disarmed && !kprobe_disabled(p))
> -		arm_kprobe(p);
> +	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
> +		ret = arm_kprobe(p);
> +		if (ret) {
> +			hlist_del_rcu(&p->hlist);

Same here.

> +			synchronize_sched();
> +			goto out;
> +		}
> +	}
>  
>  	/* Try to optimize kprobe */
>  	try_to_optimize_kprobe(p);
> @@ -2137,7 +2164,9 @@ int enable_kprobe(struct kprobe *kp)
>  
>  	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
>  		p->flags &= ~KPROBE_FLAG_DISABLED;
> -		arm_kprobe(p);
> +		ret = arm_kprobe(p);
> +		if (ret)
> +			p->flags |= KPROBE_FLAG_DISABLED;
>  	}
>  out:
>  	mutex_unlock(&kprobe_mutex);
> @@ -2565,11 +2594,12 @@ static const struct file_operations debugfs_kprobe_ei_ops = {
>  	.release        = seq_release,
>  };
>  
> -static void arm_all_kprobes(void)
> +static int arm_all_kprobes(void)
>  {
>  	struct hlist_head *head;
>  	struct kprobe *p;
> -	unsigned int i;
> +	unsigned int i, errors = 0;
> +	int err, ret = 0;
>  
>  	mutex_lock(&kprobe_mutex);
>  
> @@ -2586,16 +2616,26 @@ static void arm_all_kprobes(void)
>  	/* Arming kprobes doesn't optimize kprobe itself */
>  	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
>  		head = &kprobe_table[i];
> -		hlist_for_each_entry_rcu(p, head, hlist)
> -			if (!kprobe_disabled(p))
> -				arm_kprobe(p);
> +		/* Arm all kprobes on a best-effort basis */
> +		hlist_for_each_entry_rcu(p, head, hlist) {
> +			if (!kprobe_disabled(p)) {
> +				err = arm_kprobe(p);
> +				if (err)  {
> +					errors++;
> +					ret = err;
> +				}
> +			}
> +		}
>  	}
>  
> -	printk(KERN_INFO "Kprobes globally enabled\n");
> +	if (errors)
> +		pr_warn("Kprobes globally enabled, but failed to arm %d probes\n", errors);

Perhaps we should have a count of all kprobes that were tried, and
write something like:

 "Kprobes globally enabled, but failed to arm %d out of %d probes\n",
   errors, total
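
I.e. something along these lines (sketch only; "total" is a new counter,
initialized to 0 next to "errors"):

	hlist_for_each_entry_rcu(p, head, hlist) {
		if (!kprobe_disabled(p)) {
			err = arm_kprobe(p);
			if (err) {
				errors++;
				ret = err;
			}
			total++;
		}
	}
	...
	if (errors)
		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
			errors, total);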

-- Steve

> +	else
> +		pr_info("Kprobes globally enabled\n");
>  
>  already_enabled:
>  	mutex_unlock(&kprobe_mutex);
> -	return;
> +	return ret;
>  }
>  
>  static void disarm_all_kprobes(void)
> @@ -2652,6 +2692,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
>  {
>  	char buf[32];
>  	size_t buf_size;
> +	int ret = 0;
>  
>  	buf_size = min(count, (sizeof(buf)-1));
>  	if (copy_from_user(buf, user_buf, buf_size))
> @@ -2662,7 +2703,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
>  	case 'y':
>  	case 'Y':
>  	case '1':
> -		arm_all_kprobes();
> +		ret = arm_all_kprobes();
>  		break;
>  	case 'n':
>  	case 'N':
> @@ -2673,6 +2714,9 @@ static ssize_t write_enabled_file_bool(struct file *file,
>  		return -EINVAL;
>  	}
>  
> +	if (ret)
> +		return ret;
> +
>  	return count;
>  }
>  
