Message-ID: <CAB0TPYFmUgPTONABLTJAdonK7fY7oqURKCpLp1-WqHLtyen7Zw@mail.gmail.com>
Date: Tue, 31 Aug 2021 09:24:40 +0200
From: Martijn Coenen <maco@...roid.com>
To: Todd Kjos <tkjos@...gle.com>
Cc: Greg KH <gregkh@...uxfoundation.org>,
Christian Brauner <christian@...uner.io>,
Arve Hjønnevåg <arve@...roid.com>,
"open list:ANDROID DRIVERS" <devel@...verdev.osuosl.org>,
LKML <linux-kernel@...r.kernel.org>,
Martijn Coenen <maco@...gle.com>,
Joel Fernandes <joel@...lfernandes.org>,
kernel-team@...roid.com
Subject: Re: [PATCH] binder: make sure fd closes complete
On Mon, Aug 30, 2021 at 9:51 PM 'Todd Kjos' via kernel-team
<kernel-team@...roid.com> wrote:
>
> During BC_FREE_BUFFER processing, the BINDER_TYPE_FDA object
> cleanup may close one or more fds. The close operations are
> completed using the task work mechanism, which means the thread
> must return to userspace or the file reference may never be
> released, which can leave the process hung.
>
> Force the binder thread back to userspace if an fd is closed during
> BC_FREE_BUFFER handling.
>
> Signed-off-by: Todd Kjos <tkjos@...gle.com>
Reviewed-by: Martijn Coenen <maco@...roid.com>
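
For anyone new to this corner of the driver: the deferred close queues the
final fput() as task work with TWA_RESUME, and TWA_RESUME callbacks only run
once the thread crosses back into userspace. A minimal sketch of that pattern
(names here are illustrative, not binder's; the real code is
binder_deferred_fd_close()/binder_do_fd_close() in drivers/android/binder.c,
and the fd-detach helper it uses has changed signature across kernel
versions, so this sketch sidesteps it by taking a struct file * directly):

#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

/* Illustrative container; binder's is struct binder_task_fd_fixup. */
struct fd_close_work {
	struct callback_head twork;
	struct file *file;
};

/* Runs from task work as the thread returns to userspace. */
static void fd_close_work_fn(struct callback_head *twork)
{
	struct fd_close_work *w =
		container_of(twork, struct fd_close_work, twork);

	fput(w->file);		/* the deferred reference drop */
	kfree(w);
}

/* Detach now, drop the file reference later. */
static void sketch_deferred_fd_close(struct file *f)
{
	struct fd_close_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w) {
		fput(f);	/* no memory: close inline instead */
		return;
	}
	w->file = f;
	init_task_work(&w->twork, fd_close_work_fn);
	/*
	 * TWA_RESUME work only fires on the return-to-user path, which
	 * is exactly why this patch forces the looper back out.
	 */
	if (task_work_add(current, &w->twork, TWA_RESUME)) {
		fput(f);	/* task is exiting; close inline */
		kfree(w);
	}
}
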
> ---
> drivers/android/binder.c | 23 +++++++++++++++++------
> 1 file changed, 17 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/android/binder.c b/drivers/android/binder.c
> index bcec598b89f2..c2823f0d588f 100644
> --- a/drivers/android/binder.c
> +++ b/drivers/android/binder.c
> @@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd)
> }
>
> static void binder_transaction_buffer_release(struct binder_proc *proc,
> + struct binder_thread *thread,
> struct binder_buffer *buffer,
> binder_size_t failed_at,
> bool is_failure)
> @@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
> &proc->alloc, &fd, buffer,
> offset, sizeof(fd));
> WARN_ON(err);
> - if (!err)
> + if (!err) {
> binder_deferred_fd_close(fd);
> + /*
> + * Need to make sure the thread goes
> + * back to userspace to complete the
> + * deferred close
> + */
> + if (thread)
> + thread->looper_need_return = true;
> + }
> }
> } break;
> default:
> @@ -3105,7 +3114,7 @@ static void binder_transaction(struct binder_proc *proc,
> err_copy_data_failed:
> binder_free_txn_fixups(t);
> trace_binder_transaction_failed_buffer_release(t->buffer);
> - binder_transaction_buffer_release(target_proc, t->buffer,
> + binder_transaction_buffer_release(target_proc, NULL, t->buffer,
> buffer_offset, true);
> if (target_node)
> binder_dec_node_tmpref(target_node);
> @@ -3184,7 +3193,9 @@ static void binder_transaction(struct binder_proc *proc,
> * Cleanup buffer and free it.
> */
> static void
> -binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
> +binder_free_buf(struct binder_proc *proc,
> + struct binder_thread *thread,
> + struct binder_buffer *buffer)
> {
> binder_inner_proc_lock(proc);
> if (buffer->transaction) {
> @@ -3212,7 +3223,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
> binder_node_inner_unlock(buf_node);
> }
> trace_binder_transaction_buffer_release(buffer);
> - binder_transaction_buffer_release(proc, buffer, 0, false);
> + binder_transaction_buffer_release(proc, thread, buffer, 0, false);
> binder_alloc_free_buf(&proc->alloc, buffer);
> }
>
> @@ -3414,7 +3425,7 @@ static int binder_thread_write(struct binder_proc *proc,
> proc->pid, thread->pid, (u64)data_ptr,
> buffer->debug_id,
> buffer->transaction ? "active" : "finished");
> - binder_free_buf(proc, buffer);
> + binder_free_buf(proc, thread, buffer);
> break;
> }
>
> @@ -4107,7 +4118,7 @@ static int binder_thread_read(struct binder_proc *proc,
> buffer->transaction = NULL;
> binder_cleanup_transaction(t, "fd fixups failed",
> BR_FAILED_REPLY);
> - binder_free_buf(proc, buffer);
> + binder_free_buf(proc, thread, buffer);
> binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
> "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
> proc->pid, thread->pid,
> --
> 2.33.0.259.gc128427fd7-goog
>
>
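One more pointer for reviewers tracing the mechanism: looper_need_return
feeds binder's "has work" test, so once it is set the looper thread stops
waiting and the BINDER_WRITE_READ ioctl returns, letting the TWA_RESUME
task work run on the way out. Roughly (simplified from memory, check the
tree you are on):

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
	       thread->looper_need_return ||
	       (do_proc_work &&
		!binder_worklist_empty_ilocked(&thread->proc->todo));
}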