Message-Id: <20170831080430.118765-6-maco@android.com>
Date: Thu, 31 Aug 2017 10:04:22 +0200
From: Martijn Coenen <maco@...roid.com>
To: gregkh@...uxfoundation.org, john.stultz@...aro.org,
tkjos@...gle.com, arve@...roid.com, amit.pundir@...aro.org,
tglx@...utronix.de
Cc: peterz@...radead.org, hch@....de, linux-kernel@...r.kernel.org,
devel@...verdev.osuosl.org, maco@...gle.com, malchev@...gle.com,
ccross@...roid.com, Martijn Coenen <maco@...roid.com>
Subject: [PATCH v2 05/13] ANDROID: binder: improve priority inheritance.
Raise the priority of a thread selected for a transaction *before* we
wake it up, and delay restoring the priority when doing a reply until
after we wake up the process receiving the reply.
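
For illustration, a simplified userspace model of the new ordering
follows. All names and types below are made up for the example and are
not binder driver symbols; it only mirrors the idea, not the code.

	#include <stdio.h>

	struct prio {
		int sched_policy;
		int prio;	/* lower value == more important, as in the kernel */
	};

	/* Stand-ins for the real scheduler/wakeup calls. */
	static void set_task_priority(const char *task, struct prio p)
	{
		printf("set %s to policy=%d prio=%d\n",
		       task, p.sched_policy, p.prio);
	}

	static void wake_task(const char *task)
	{
		printf("wake up %s\n", task);
	}

	/* Synchronous transaction: boost the target *before* waking it,
	 * so it never runs at its old (possibly lower) priority after
	 * the wake-up. */
	static void send_transaction(struct prio inherited)
	{
		set_task_priority("target", inherited);	/* 1. boost first  */
		wake_task("target");			/* 2. then wake it */
	}

	/* Reply: wake the waiting sender first, and only afterwards
	 * restore our own priority to what it was before the
	 * transaction. */
	static void send_reply(struct prio saved)
	{
		wake_task("sender");			/* 1. wake sender        */
		set_task_priority("self", saved);	/* 2. then drop priority */
	}

	int main(void)
	{
		struct prio inherited = { 1 /* SCHED_FIFO */, 98 };
		struct prio saved = { 0 /* SCHED_NORMAL */, 120 };

		send_transaction(inherited);
		send_reply(saved);
		return 0;
	}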
Signed-off-by: Martijn Coenen <maco@...roid.com>
---
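Note for reviewers: a standalone sketch of the selection rule applied by
binder_transaction_priority() in this patch. The struct and function
names here are illustrative only; the kernel convention applies, where a
lower prio value means a more important task.

	#include <sched.h>
	#include <stdio.h>

	struct prio_model {
		int sched_policy;
		int prio;	/* lower value == higher priority */
	};

	static struct prio_model select_priority(struct prio_model node_prio,
						 struct prio_model txn_prio)
	{
		/* Use the node's minimum priority when it is more important
		 * (a lower value), or when it is equal but the node asks for
		 * SCHED_FIFO, which can run unbounded, unlike SCHED_RR. */
		if (node_prio.prio < txn_prio.prio ||
		    (node_prio.prio == txn_prio.prio &&
		     node_prio.sched_policy == SCHED_FIFO))
			return node_prio;
		return txn_prio;
	}

	int main(void)
	{
		struct prio_model node = { SCHED_FIFO, 98 };
		struct prio_model txn = { SCHED_OTHER, 120 };
		struct prio_model out = select_priority(node, txn);

		/* The node wins here: 98 < 120, i.e. the node demands a
		 * more important (real-time) priority than the caller
		 * carries. */
		printf("policy=%d prio=%d\n", out.sched_policy, out.prio);
		return 0;
	}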
drivers/android/binder.c | 74 ++++++++++++++++++++++++++++++++++--------------
1 file changed, 53 insertions(+), 21 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index afb3297ae520..196676729521 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -610,6 +610,7 @@ enum {
* @is_dead: thread is dead and awaiting free
* when outstanding transactions are cleaned up
* (protected by @proc->inner_lock)
+ * @task: struct task_struct for this thread
*
* Bookkeeping structure for binder threads.
*/
@@ -628,6 +629,7 @@ struct binder_thread {
struct binder_stats stats;
atomic_t tmp_ref;
bool is_dead;
+ struct task_struct *task;
};
struct binder_transaction {
@@ -646,6 +648,7 @@ struct binder_transaction {
unsigned int flags;
struct binder_priority priority;
struct binder_priority saved_priority;
+ bool set_priority_called;
kuid_t sender_euid;
/**
* @lock: protects @from, @to_proc, and @to_thread
@@ -1209,6 +1212,38 @@ static void binder_set_priority(struct task_struct *task,
set_user_nice(task, priority);
}
+static void binder_transaction_priority(struct task_struct *task,
+ struct binder_transaction *t,
+ struct binder_priority node_prio)
+{
+ struct binder_priority desired_prio;
+
+ if (t->set_priority_called)
+ return;
+
+ t->set_priority_called = true;
+ t->saved_priority.sched_policy = task->policy;
+ t->saved_priority.prio = task->normal_prio;
+
+ desired_prio.prio = t->priority.prio;
+ desired_prio.sched_policy = t->priority.sched_policy;
+
+ if (node_prio.prio < t->priority.prio ||
+ (node_prio.prio == t->priority.prio &&
+ node_prio.sched_policy == SCHED_FIFO)) {
+ /*
+ * In case the minimum priority on the node is
+ * higher (lower value), use that priority. If
+ * the priority is the same, but the node uses
+ * SCHED_FIFO, prefer SCHED_FIFO, since it can
+ * run unbounded, unlike SCHED_RR.
+ */
+ desired_prio = node_prio;
+ }
+
+ binder_set_priority(task, desired_prio);
+}
+
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
binder_uintptr_t ptr)
{
@@ -2682,11 +2717,15 @@ static bool binder_proc_transaction(struct binder_transaction *t,
{
struct list_head *target_list = NULL;
struct binder_node *node = t->buffer->target_node;
+ struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
bool wakeup = true;
BUG_ON(!node);
binder_node_lock(node);
+ node_prio.prio = node->min_priority;
+ node_prio.sched_policy = node->sched_policy;
+
if (oneway) {
BUG_ON(thread);
if (node->has_async_transaction) {
@@ -2708,12 +2747,14 @@ static bool binder_proc_transaction(struct binder_transaction *t,
if (!thread && !target_list)
thread = binder_select_thread_ilocked(proc);
- if (thread)
+ if (thread) {
target_list = &thread->todo;
- else if (!target_list)
+ binder_transaction_priority(thread->task, t, node_prio);
+ } else if (!target_list) {
target_list = &proc->todo;
- else
+ } else {
BUG_ON(target_list != &node->async_todo);
+ }
binder_enqueue_work_ilocked(&t->work, target_list);
@@ -2790,7 +2831,6 @@ static void binder_transaction(struct binder_proc *proc,
}
thread->transaction_stack = in_reply_to->to_parent;
binder_inner_proc_unlock(proc);
- binder_set_priority(current, in_reply_to->saved_priority);
target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
@@ -3200,6 +3240,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
+ binder_set_priority(current, in_reply_to->saved_priority);
binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
@@ -3288,6 +3329,7 @@ static void binder_transaction(struct binder_proc *proc,
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
+ binder_set_priority(current, in_reply_to->saved_priority);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
binder_enqueue_work(thread->proc,
&thread->return_error.work,
@@ -4099,26 +4141,13 @@ static int binder_thread_read(struct binder_proc *proc,
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
- struct binder_priority prio = t->priority;
+ struct binder_priority node_prio;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
- t->saved_priority.sched_policy = current->policy;
- t->saved_priority.prio = current->normal_prio;
- if (target_node->min_priority < t->priority.prio ||
- (target_node->min_priority == t->priority.prio &&
- target_node->sched_policy == SCHED_FIFO)) {
- /*
- * In case the minimum priority on the node is
- * higher (lower value), use that priority. If
- * the priority is the same, but the node uses
- * SCHED_FIFO, prefer SCHED_FIFO, since it can
- * run unbounded, unlike SCHED_RR.
- */
- prio.sched_policy = target_node->sched_policy;
- prio.prio = target_node->min_priority;
- }
- binder_set_priority(current, prio);
+ node_prio.sched_policy = target_node->sched_policy;
+ node_prio.prio = target_node->min_priority;
+ binder_transaction_priority(current, t, node_prio);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
@@ -4294,6 +4323,8 @@ static struct binder_thread *binder_get_thread_ilocked(
binder_stats_created(BINDER_STAT_THREAD);
thread->proc = proc;
thread->pid = current->pid;
+ get_task_struct(current);
+ thread->task = current;
atomic_set(&thread->tmp_ref, 0);
init_waitqueue_head(&thread->wait);
INIT_LIST_HEAD(&thread->todo);
@@ -4344,6 +4375,7 @@ static void binder_free_thread(struct binder_thread *thread)
BUG_ON(!list_empty(&thread->todo));
binder_stats_deleted(BINDER_STAT_THREAD);
binder_proc_dec_tmpref(thread->proc);
+ put_task_struct(thread->task);
kfree(thread);
}
--
2.14.1.581.gf28d330327-goog