[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20130313154905.59d00adb5b62d5169649aea0@canb.auug.org.au>
Date: Wed, 13 Mar 2013 15:49:05 +1100
From: Stephen Rothwell <sfr@...b.auug.org.au>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-next@...r.kernel.org, linux-kernel@...r.kernel.org,
Tejun Heo <tj@...nel.org>
Subject: linux-next: manual merge of the akpm tree with the workqueues tree
Hi Andrew,
Today's linux-next merge of the akpm tree got a conflict in
kernel/workqueue.c between commit fa1b54e69bc6 ("workqueue: update
synchronization rules on worker_pool_idr") from the workqueues tree and
commit "workqueue: convert to idr_alloc()" from the akpm tree.
I fixed it up (I think — see the resolution below) and can carry the fix as
necessary (no action is required on your part).
--
Cheers,
Stephen Rothwell sfr@...b.auug.org.au
diff --cc kernel/workqueue.c
index 2f43753,09bee1d..0000000
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@@ -456,31 -456,40 +456,31 @@@ static int worker_pool_assign_id(struc
{
int ret;
- mutex_lock(&worker_pool_idr_mutex);
- ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
- if (ret >= 0)
- pool->id = ret;
- mutex_unlock(&worker_pool_idr_mutex);
+ do {
- if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
- return -ENOMEM;
-
++ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&workqueue_lock);
- ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
++ ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_NOWAIT);
++ if (ret >= 0)
++ pool->id = ret;
+ spin_unlock_irq(&workqueue_lock);
+ } while (ret == -EAGAIN);
- return ret;
+ return ret < 0 ? ret : 0;
}
-/*
- * Lookup worker_pool by id. The idr currently is built during boot and
- * never modified. Don't worry about locking for now.
+/**
+ * first_pwq - return the first pool_workqueue of the specified workqueue
+ * @wq: the target workqueue
+ *
+ * This must be called either with workqueue_lock held or sched RCU read
+ * locked. If the pwq needs to be used beyond the locking in effect, the
+ * caller is responsible for guaranteeing that the pwq stays online.
*/
-static struct worker_pool *worker_pool_by_id(int pool_id)
-{
- return idr_find(&worker_pool_idr, pool_id);
-}
-
-static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
-{
- struct worker_pool *pools = std_worker_pools(cpu);
-
- return &pools[highpri];
-}
-
-static struct pool_workqueue *get_pwq(unsigned int cpu,
- struct workqueue_struct *wq)
+static struct pool_workqueue *first_pwq(struct workqueue_struct *wq)
{
- if (!(wq->flags & WQ_UNBOUND)) {
- if (likely(cpu < nr_cpu_ids))
- return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
- } else if (likely(cpu == WORK_CPU_UNBOUND))
- return wq->pool_wq.single;
- return NULL;
+ assert_rcu_or_wq_lock();
+ return list_first_or_null_rcu(&wq->pwqs, struct pool_workqueue,
+ pwqs_node);
}
static unsigned int work_color_to_flags(int color)
Content of type "application/pgp-signature" skipped
Powered by blists - more mailing lists