Message-ID: <20160910161659.GA7987@infradead.org>
Date:   Sat, 10 Sep 2016 09:16:59 -0700
From:   Christoph Hellwig <hch@...radead.org>
To:     Todd Kjos <tkjos@...gle.com>
Cc:     Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        Arve Hjønnevåg <arve@...roid.com>,
        Riley Andrews <riandrews@...roid.com>,
        devel@...verdev.osuosl.org, linux-kernel@...r.kernel.org,
        Thomas Gleixner <tglx@...utronix.de>,
        Peter Zijlstra <peterz@...radead.org>
Subject: Re: [PATCH] android: binder: Disable preemption while holding the
 global binder lock

On Thu, Sep 08, 2016 at 09:12:50AM -0700, Todd Kjos wrote:
> In Android systems, the display pipeline relies on low
> latency binder transactions and is therefore sensitive to
> delays caused by contention for the global binder lock.
> Jank is significantly reduced by disabling preemption
> while the global binder lock is held.

That's not how preempt_disable is supposed to be used.  It is for critical
sections that use per-cpu or similar resources.
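
(As an aside, a minimal sketch of the kind of section preempt_disable
is meant for -- short, non-sleeping, touching per-cpu state.  The
per-cpu counter below is hypothetical, not from binder:

	/* needs <linux/percpu-defs.h> and <linux/preempt.h> */
	static DEFINE_PER_CPU(unsigned long, frob_count);

	static void frob_account(void)
	{
		/* pin the task to this CPU: no migration, no preemption */
		preempt_disable();
		/* __this_cpu_inc() is only safe with preemption off */
		__this_cpu_inc(frob_count);
		/* section is a few instructions long and never sleeps */
		preempt_enable();
	}

Holding preemption off across a mutex-protected transaction path, and
re-enabling it around every sleeping call as the wrappers below do, is
a different pattern entirely.)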

> 
> Originally-from: Riley Andrews <riandrews@...gle.com>
> Signed-off-by: Todd Kjos <tkjos@...gle.com>
> ---
>  drivers/android/binder.c | 194 +++++++++++++++++++++++++++++++++++------------
>  1 file changed, 146 insertions(+), 48 deletions(-)
> 
> diff --git a/drivers/android/binder.c b/drivers/android/binder.c
> index 16288e7..c36e420 100644
> --- a/drivers/android/binder.c
> +++ b/drivers/android/binder.c
> @@ -379,6 +379,7 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
>   struct files_struct *files = proc->files;
>   unsigned long rlim_cur;
>   unsigned long irqs;
> + int ret;
> 
>   if (files == NULL)
>   return -ESRCH;
> @@ -389,7 +390,11 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
>   rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
>   unlock_task_sighand(proc->tsk, &irqs);
> 
> - return __alloc_fd(files, 0, rlim_cur, flags);
> + preempt_enable_no_resched();
> + ret = __alloc_fd(files, 0, rlim_cur, flags);
> + preempt_disable();
> +
> + return ret;
>  }
> 
>  /*
> @@ -398,8 +403,11 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
>  static void task_fd_install(
>   struct binder_proc *proc, unsigned int fd, struct file *file)
>  {
> - if (proc->files)
> + if (proc->files) {
> + preempt_enable_no_resched();
>   __fd_install(proc->files, fd, file);
> + preempt_disable();
> + }
>  }
> 
>  /*
> @@ -427,6 +435,7 @@ static inline void binder_lock(const char *tag)
>  {
>   trace_binder_lock(tag);
>   mutex_lock(&binder_main_lock);
> + preempt_disable();
>   trace_binder_locked(tag);
>  }
> 
> @@ -434,8 +443,65 @@ static inline void binder_unlock(const char *tag)
>  {
>   trace_binder_unlock(tag);
>   mutex_unlock(&binder_main_lock);
> + preempt_enable();
> +}
> +
> +static inline void *kzalloc_nopreempt(size_t size)
> +{
> + void *ptr;
> +
> + ptr = kzalloc(size, GFP_NOWAIT);
> + if (ptr)
> + return ptr;
> +
> + preempt_enable_no_resched();
> + ptr = kzalloc(size, GFP_KERNEL);
> + preempt_disable();
> +
> + return ptr;
> +}
> +
> +static inline long copy_to_user_nopreempt(void __user *to,
> +  const void *from, long n)
> +{
> + long ret;
> +
> + preempt_enable_no_resched();
> + ret = copy_to_user(to, from, n);
> + preempt_disable();
> + return ret;
> +}
> +
> +static inline long copy_from_user_nopreempt(void *to,
> +    const void __user *from,
> +    long n)
> +{
> + long ret;
> +
> + preempt_enable_no_resched();
> + ret = copy_from_user(to, from, n);
> + preempt_disable();
> + return ret;
>  }
> 
> +#define get_user_nopreempt(x, ptr) \
> +({ \
> + int __ret; \
> + preempt_enable_no_resched(); \
> + __ret = get_user(x, ptr); \
> + preempt_disable(); \
> + __ret; \
> +})
> +
> +#define put_user_nopreempt(x, ptr) \
> +({ \
> + int __ret; \
> + preempt_enable_no_resched(); \
> + __ret = put_user(x, ptr); \
> + preempt_disable(); \
> + __ret; \
> +})
> +
>  static void binder_set_nice(long nice)
>  {
>   long min_nice;
> @@ -568,6 +634,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
>   else
>   mm = get_task_mm(proc->tsk);
> 
> + preempt_enable_no_resched();
> +
>   if (mm) {
>   down_write(&mm->mmap_sem);
>   vma = proc->vma;
> @@ -622,6 +690,9 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
>   up_write(&mm->mmap_sem);
>   mmput(mm);
>   }
> +
> + preempt_disable();
> +
>   return 0;
> 
>  free_range:
> @@ -644,6 +715,9 @@ err_no_vma:
>   up_write(&mm->mmap_sem);
>   mmput(mm);
>   }
> +
> + preempt_disable();
> +
>   return -ENOMEM;
>  }
> 
> @@ -903,7 +977,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
>   return NULL;
>   }
> 
> - node = kzalloc(sizeof(*node), GFP_KERNEL);
> + node = kzalloc_nopreempt(sizeof(*node));
>   if (node == NULL)
>   return NULL;
>   binder_stats_created(BINDER_STAT_NODE);
> @@ -1040,7 +1114,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
>   else
>   return ref;
>   }
> - new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
> + new_ref = kzalloc_nopreempt(sizeof(*ref));
>   if (new_ref == NULL)
>   return NULL;
>   binder_stats_created(BINDER_STAT_REF);
> @@ -1438,14 +1512,14 @@ static void binder_transaction(struct binder_proc *proc,
>   e->to_proc = target_proc->pid;
> 
>   /* TODO: reuse incoming transaction for reply */
> - t = kzalloc(sizeof(*t), GFP_KERNEL);
> + t = kzalloc_nopreempt(sizeof(*t));
>   if (t == NULL) {
>   return_error = BR_FAILED_REPLY;
>   goto err_alloc_t_failed;
>   }
>   binder_stats_created(BINDER_STAT_TRANSACTION);
> 
> - tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
> + tcomplete = kzalloc_nopreempt(sizeof(*tcomplete));
>   if (tcomplete == NULL) {
>   return_error = BR_FAILED_REPLY;
>   goto err_alloc_tcomplete_failed;
> @@ -1502,15 +1576,16 @@ static void binder_transaction(struct binder_proc *proc,
>   offp = (binder_size_t *)(t->buffer->data +
>   ALIGN(tr->data_size, sizeof(void *)));
> 
> - if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
> -   tr->data.ptr.buffer, tr->data_size)) {
> + if (copy_from_user_nopreempt(t->buffer->data,
> + (const void __user *)(uintptr_t) tr->data.ptr.buffer,
> + tr->data_size)) {
>   binder_user_error("%d:%d got transaction with invalid data ptr\n",
>   proc->pid, thread->pid);
>   return_error = BR_FAILED_REPLY;
>   goto err_copy_data_failed;
>   }
> - if (copy_from_user(offp, (const void __user *)(uintptr_t)
> -   tr->data.ptr.offsets, tr->offsets_size)) {
> + if (copy_from_user_nopreempt(offp, (const void __user *)(uintptr_t)
> +     tr->data.ptr.offsets, tr->offsets_size)) {
>   binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
>   proc->pid, thread->pid);
>   return_error = BR_FAILED_REPLY;
> @@ -1770,7 +1845,7 @@ static int binder_thread_write(struct binder_proc *proc,
>   void __user *end = buffer + size;
> 
>   while (ptr < end && thread->return_error == BR_OK) {
> - if (get_user(cmd, (uint32_t __user *)ptr))
> + if (get_user_nopreempt(cmd, (uint32_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(uint32_t);
>   trace_binder_command(cmd);
> @@ -1788,7 +1863,7 @@ static int binder_thread_write(struct binder_proc *proc,
>   struct binder_ref *ref;
>   const char *debug_string;
> 
> - if (get_user(target, (uint32_t __user *)ptr))
> + if (get_user_nopreempt(target, (uint32_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(uint32_t);
>   if (target == 0 && binder_context_mgr_node &&
> @@ -1838,10 +1913,12 @@ static int binder_thread_write(struct binder_proc *proc,
>   binder_uintptr_t cookie;
>   struct binder_node *node;
> 
> - if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
> + if (get_user_nopreempt(node_ptr,
> + (binder_uintptr_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(binder_uintptr_t);
> - if (get_user(cookie, (binder_uintptr_t __user *)ptr))
> + if (get_user_nopreempt(cookie,
> + (binder_uintptr_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(binder_uintptr_t);
>   node = binder_get_node(proc, node_ptr);
> @@ -1899,7 +1976,8 @@ static int binder_thread_write(struct binder_proc *proc,
>   binder_uintptr_t data_ptr;
>   struct binder_buffer *buffer;
> 
> - if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
> + if (get_user_nopreempt(data_ptr,
> + (binder_uintptr_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(binder_uintptr_t);
> 
> @@ -1941,7 +2019,7 @@ static int binder_thread_write(struct binder_proc *proc,
>   case BC_REPLY: {
>   struct binder_transaction_data tr;
> 
> - if (copy_from_user(&tr, ptr, sizeof(tr)))
> + if (copy_from_user_nopreempt(&tr, ptr, sizeof(tr)))
>   return -EFAULT;
>   ptr += sizeof(tr);
>   binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
> @@ -1991,10 +2069,12 @@ static int binder_thread_write(struct binder_proc *proc,
>   struct binder_ref *ref;
>   struct binder_ref_death *death;
> 
> - if (get_user(target, (uint32_t __user *)ptr))
> + if (get_user_nopreempt(target,
> + (uint32_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(uint32_t);
> - if (get_user(cookie, (binder_uintptr_t __user *)ptr))
> + if (get_user_nopreempt(cookie,
> + (binder_uintptr_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(binder_uintptr_t);
>   ref = binder_get_ref(proc, target);
> @@ -2023,7 +2103,7 @@ static int binder_thread_write(struct binder_proc *proc,
>   proc->pid, thread->pid);
>   break;
>   }
> - death = kzalloc(sizeof(*death), GFP_KERNEL);
> + death = kzalloc_nopreempt(sizeof(*death));
>   if (death == NULL) {
>   thread->return_error = BR_ERROR;
>   binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
> @@ -2077,8 +2157,8 @@ static int binder_thread_write(struct binder_proc *proc,
>   struct binder_work *w;
>   binder_uintptr_t cookie;
>   struct binder_ref_death *death = NULL;
> -
> - if (get_user(cookie, (binder_uintptr_t __user *)ptr))
> + if (get_user_nopreempt(cookie,
> + (binder_uintptr_t __user *)ptr))
>   return -EFAULT;
> 
>   ptr += sizeof(cookie);
> @@ -2159,7 +2239,7 @@ static int binder_thread_read(struct binder_proc *proc,
>   int wait_for_proc_work;
> 
>   if (*consumed == 0) {
> - if (put_user(BR_NOOP, (uint32_t __user *)ptr))
> + if (put_user_nopreempt(BR_NOOP, (uint32_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(uint32_t);
>   }
> @@ -2170,7 +2250,8 @@ retry:
> 
>   if (thread->return_error != BR_OK && ptr < end) {
>   if (thread->return_error2 != BR_OK) {
> - if (put_user(thread->return_error2, (uint32_t __user *)ptr))
> + if (put_user_nopreempt(thread->return_error2,
> +       (uint32_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(uint32_t);
>   binder_stat_br(proc, thread, thread->return_error2);
> @@ -2178,7 +2259,8 @@ retry:
>   goto done;
>   thread->return_error2 = BR_OK;
>   }
> - if (put_user(thread->return_error, (uint32_t __user *)ptr))
> + if (put_user_nopreempt(thread->return_error,
> +       (uint32_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(uint32_t);
>   binder_stat_br(proc, thread, thread->return_error);
> @@ -2256,7 +2338,7 @@ retry:
>   } break;
>   case BINDER_WORK_TRANSACTION_COMPLETE: {
>   cmd = BR_TRANSACTION_COMPLETE;
> - if (put_user(cmd, (uint32_t __user *)ptr))
> + if (put_user_nopreempt(cmd, (uint32_t __user *) ptr))
>   return -EFAULT;
>   ptr += sizeof(uint32_t);
> 
> @@ -2298,15 +2380,16 @@ retry:
>   node->has_weak_ref = 0;
>   }
>   if (cmd != BR_NOOP) {
> - if (put_user(cmd, (uint32_t __user *)ptr))
> + if (put_user_nopreempt(cmd,
> + (uint32_t __user *) ptr))
>   return -EFAULT;
>   ptr += sizeof(uint32_t);
> - if (put_user(node->ptr,
> -     (binder_uintptr_t __user *)ptr))
> + if (put_user_nopreempt(node->ptr,
> +      (binder_uintptr_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(binder_uintptr_t);
> - if (put_user(node->cookie,
> -     (binder_uintptr_t __user *)ptr))
> + if (put_user_nopreempt(node->cookie,
> +      (binder_uintptr_t __user *)ptr))
>   return -EFAULT;
>   ptr += sizeof(binder_uintptr_t);
> 
> @@ -2349,11 +2432,12 @@ retry:
>   cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
>   else
>   cmd = BR_DEAD_BINDER;
> - if (put_user(cmd, (uint32_t __user *)ptr))
> + if (put_user_nopreempt(cmd,
> +       (uint32_t __user *) ptr))
>   return -EFAULT;
>   ptr += sizeof(uint32_t);
> - if (put_user(death->cookie,
> -     (binder_uintptr_t __user *)ptr))
> + if (put_user_nopreempt(death->cookie,
> +       (binder_uintptr_t __user *) ptr))
>   return -EFAULT;
>   ptr += sizeof(binder_uintptr_t);
>   binder_stat_br(proc, thread, cmd);
> @@ -2420,10 +2504,10 @@ retry:
>   ALIGN(t->buffer->data_size,
>      sizeof(void *));
> 
> - if (put_user(cmd, (uint32_t __user *)ptr))
> + if (put_user_nopreempt(cmd, (uint32_t __user *) ptr))
>   return -EFAULT;
>   ptr += sizeof(uint32_t);
> - if (copy_to_user(ptr, &tr, sizeof(tr)))
> + if (copy_to_user_nopreempt(ptr, &tr, sizeof(tr)))
>   return -EFAULT;
>   ptr += sizeof(tr);
> 
> @@ -2465,7 +2549,8 @@ done:
>   binder_debug(BINDER_DEBUG_THREADS,
>       "%d:%d BR_SPAWN_LOOPER\n",
>       proc->pid, thread->pid);
> - if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
> + if (put_user_nopreempt(BR_SPAWN_LOOPER,
> +       (uint32_t __user *) buffer))
>   return -EFAULT;
>   binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
>   }
> @@ -2540,7 +2625,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
>   break;
>   }
>   if (*p == NULL) {
> - thread = kzalloc(sizeof(*thread), GFP_KERNEL);
> + thread = kzalloc_nopreempt(sizeof(*thread));
>   if (thread == NULL)
>   return NULL;
>   binder_stats_created(BINDER_STAT_THREAD);
> @@ -2644,7 +2729,7 @@ static int binder_ioctl_write_read(struct file *filp,
>   ret = -EINVAL;
>   goto out;
>   }
> - if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
> + if (copy_from_user_nopreempt(&bwr, ubuf, sizeof(bwr))) {
>   ret = -EFAULT;
>   goto out;
>   }
> @@ -2662,7 +2747,7 @@ static int binder_ioctl_write_read(struct file *filp,
>   trace_binder_write_done(ret);
>   if (ret < 0) {
>   bwr.read_consumed = 0;
> - if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
> + if (copy_to_user_nopreempt(ubuf, &bwr, sizeof(bwr)))
>   ret = -EFAULT;
>   goto out;
>   }
> @@ -2676,7 +2761,7 @@ static int binder_ioctl_write_read(struct file *filp,
>   if (!list_empty(&proc->todo))
>   wake_up_interruptible(&proc->wait);
>   if (ret < 0) {
> - if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
> + if (copy_to_user_nopreempt(ubuf, &bwr, sizeof(bwr)))
>   ret = -EFAULT;
>   goto out;
>   }
> @@ -2686,7 +2771,7 @@ static int binder_ioctl_write_read(struct file *filp,
>       proc->pid, thread->pid,
>       (u64)bwr.write_consumed, (u64)bwr.write_size,
>       (u64)bwr.read_consumed, (u64)bwr.read_size);
> - if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
> + if (copy_to_user_nopreempt(ubuf, &bwr, sizeof(bwr))) {
>   ret = -EFAULT;
>   goto out;
>   }
> @@ -2768,7 +2853,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
>   goto err;
>   break;
>   case BINDER_SET_MAX_THREADS:
> - if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
> + if (copy_from_user_nopreempt(&proc->max_threads, ubuf,
> +     sizeof(proc->max_threads))) {
>   ret = -EINVAL;
>   goto err;
>   }
> @@ -2791,9 +2877,9 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
>   ret = -EINVAL;
>   goto err;
>   }
> - if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
> -     &ver->protocol_version)) {
> - ret = -EINVAL;
> + if (put_user_nopreempt(BINDER_CURRENT_PROTOCOL_VERSION,
> +       &ver->protocol_version)) {
> + ret = -EINVAL;
>   goto err;
>   }
>   break;
> @@ -2854,6 +2940,7 @@ static const struct vm_operations_struct binder_vm_ops = {
>  static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
>  {
>   int ret;
> +
>   struct vm_struct *area;
>   struct binder_proc *proc = filp->private_data;
>   const char *failure_string;
> @@ -2914,7 +3001,12 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
>   vma->vm_ops = &binder_vm_ops;
>   vma->vm_private_data = proc;
> 
> - if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
> + /* binder_update_page_range assumes preemption is disabled */
> + preempt_disable();
> + ret = binder_update_page_range(proc, 1, proc->buffer,
> +       proc->buffer + PAGE_SIZE, vma);
> + preempt_enable_no_resched();
> + if (ret) {
>   ret = -ENOMEM;
>   failure_string = "alloc small buf";
>   goto err_alloc_small_buf_failed;
> @@ -3185,8 +3277,12 @@ static void binder_deferred_func(struct work_struct *work)
>   int defer;
> 
>   do {
> - binder_lock(__func__);
> + trace_binder_lock(__func__);
> + mutex_lock(&binder_main_lock);
> + trace_binder_locked(__func__);
> +
>   mutex_lock(&binder_deferred_lock);
> + preempt_disable();
>   if (!hlist_empty(&binder_deferred_list)) {
>   proc = hlist_entry(binder_deferred_list.first,
>   struct binder_proc, deferred_work_node);
> @@ -3212,7 +3308,9 @@ static void binder_deferred_func(struct work_struct *work)
>   if (defer & BINDER_DEFERRED_RELEASE)
>   binder_deferred_release(proc); /* frees proc */
> 
> - binder_unlock(__func__);
> + trace_binder_unlock(__func__);
> + mutex_unlock(&binder_main_lock);
> + preempt_enable_no_resched();
>   if (files)
>   put_files_struct(files);
>   } while (proc);
> -- 
> 2.8.0.rc3.226.g39d4020
---end quoted text---
