Message-Id: <c50a4628b86c445391014fb16709c11f72082454.1629454773.git.rickyman7@gmail.com>
Date: Fri, 20 Aug 2021 12:53:55 +0200
From: Riccardo Mancini <rickyman7@...il.com>
To: Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: Ian Rogers <irogers@...gle.com>,
Namhyung Kim <namhyung@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Mark Rutland <mark.rutland@....com>,
Jiri Olsa <jolsa@...hat.com>, linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org,
Alexey Bayduraev <alexey.v.bayduraev@...ux.intel.com>,
Riccardo Mancini <rickyman7@...il.com>
Subject: [RFC PATCH v3 09/15] perf workqueue: spinup threads when needed
This patch adds lazy thread creation to the workqueue.
When a new work item is submitted, an idle worker is looked for first; if
one is found, it is selected for execution. Otherwise, a thread that has
not been spawned yet is looked for; if found, it is spun up and selected.
If no such thread exists either, one of the busy threads is chosen using
a round-robin policy.
Signed-off-by: Riccardo Mancini <rickyman7@...il.com>
---
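Not part of the patch, for illustration only: the selection policy above
can be summarized with the following minimal, self-contained user-space
model (compiles standalone). first_stopped_worker and next_worker mirror
the fields in the patch; everything else is a hypothetical stand-in for
the workqueue internals.

#include <stdio.h>

#define NR_THREADS 4

enum worker_state { STOPPED, IDLE, BUSY };

static enum worker_state workers[NR_THREADS];
static int first_stopped_worker;	/* next worker to start if needed */
static int next_worker;			/* round-robin fallback */

static int pick_worker(void)
{
	int t;

	/* 1) prefer a worker that is already running and idle */
	for (t = 0; t < NR_THREADS; t++)
		if (workers[t] == IDLE)
			return t;

	/* 2) otherwise spin up a thread that was never started */
	while (first_stopped_worker < NR_THREADS &&
	       workers[first_stopped_worker] != STOPPED)
		first_stopped_worker++;
	if (first_stopped_worker < NR_THREADS)
		return first_stopped_worker;

	/* 3) everyone is busy: fall back to round-robin */
	t = next_worker;
	next_worker = (next_worker + 1) % NR_THREADS;
	return t;
}

int main(void)
{
	/* six submissions against four never-started workers: the first
	 * four spin up workers 0-3, the last two are round-robined */
	for (int i = 0; i < 6; i++) {
		int t = pick_worker();

		printf("work %d -> worker %d (%s)\n", i, t,
		       workers[t] == STOPPED ? "spun up" : "busy, queued");
		workers[t] = BUSY;
	}
	return 0;
}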
tools/perf/util/workqueue/workqueue.c | 54 +++++++++++++++++++--------
1 file changed, 38 insertions(+), 16 deletions(-)
diff --git a/tools/perf/util/workqueue/workqueue.c b/tools/perf/util/workqueue/workqueue.c
index 1092ece9ad39d6d2..305a9cda39810b84 100644
--- a/tools/perf/util/workqueue/workqueue.c
+++ b/tools/perf/util/workqueue/workqueue.c
@@ -39,6 +39,7 @@ struct workqueue_struct {
int msg_pipe[2]; /* main thread comm pipes */
struct worker **workers; /* array of all workers */
struct worker *next_worker; /* next worker to choose (round robin) */
+ int first_stopped_worker; /* next worker to start if needed */
};
static const char * const workqueue_errno_str[] = {
@@ -423,8 +424,7 @@ static void worker_thread(int tidx, struct task_struct *task)
*/
struct workqueue_struct *create_workqueue(int nr_threads)
{
- int ret, err = 0, t;
- struct worker *worker;
+ int ret, err = 0;
struct workqueue_struct *wq = zalloc(sizeof(struct workqueue_struct));
if (!wq) {
@@ -474,24 +474,11 @@ struct workqueue_struct *create_workqueue(int nr_threads)
goto out_close_pipe;
}
- for (t = 0; t < nr_threads; t++) {
- err = spinup_worker(wq, t);
- if (err)
- goto out_stop_pool;
- }
-
wq->next_worker = NULL;
+ wq->first_stopped_worker = 0;
return wq;
-out_stop_pool:
- lock_workqueue(wq);
- for_each_idle_worker(wq, worker) {
- ret = stop_worker(worker);
- if (ret)
- err = ret;
- }
- unlock_workqueue(wq);
out_close_pipe:
close(wq->msg_pipe[0]);
wq->msg_pipe[0] = -1;
@@ -686,10 +673,28 @@ __releases(&worker->lock)
*/
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
+ int ret;
struct worker *worker;
+repeat:
lock_workqueue(wq);
if (list_empty(&wq->idle_list)) {
+ // find a worker to spin up
+ while (wq->first_stopped_worker < threadpool__size(wq->pool)
+ && wq->workers[wq->first_stopped_worker])
+ wq->first_stopped_worker++;
+
+ // found one
+ if (wq->first_stopped_worker < threadpool__size(wq->pool)) {
+ // spinup is done without holding the lock so that the new thread can register itself
+ unlock_workqueue(wq);
+ ret = spinup_worker(wq, wq->first_stopped_worker);
+ if (ret)
+ return ret;
+ // worker is now in idle_list
+ goto repeat;
+ }
+
worker = wq->next_worker;
advance_next_worker(wq);
} else {
@@ -705,7 +710,24 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
*/
int queue_work_on_worker(int tidx, struct workqueue_struct *wq, struct work_struct *work)
{
+ int ret;
+
lock_workqueue(wq);
+ if (!wq->workers[tidx]) {
+ // spinup is done without holding the lock so that the new thread can register itself
+ unlock_workqueue(wq);
+ ret = spinup_worker(wq, tidx);
+ if (ret)
+ return ret;
+
+ // now recheck if worker is available
+ lock_workqueue(wq);
+ if (!wq->workers[tidx]) {
+ unlock_workqueue(wq);
+ return -WORKQUEUE_ERROR__NOTREADY;
+ }
+ }
+
lock_worker(wq->workers[tidx]);
return __queue_work_on_worker(wq, wq->workers[tidx], work);
}
--
2.31.1
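For illustration, again outside the patch: the unlock -> spin up ->
relock -> recheck pattern that queue_work_on_worker() uses above,
modelled with plain pthreads. The patch's spinup_worker() appears to
wait for the new thread to register itself (hence the recheck and the
NOTREADY error); every name below is a hypothetical stand-in.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t registered = PTHREAD_COND_INITIALIZER;
static bool worker_ready;

static void *worker_fn(void *arg)
{
	/* the worker registers itself under the lock: this is why the
	 * submitter must drop the lock before spinning it up */
	pthread_mutex_lock(&lock);
	worker_ready = true;
	pthread_cond_signal(&registered);
	pthread_mutex_unlock(&lock);
	/* ... a real worker would loop waiting for work here ... */
	return arg;
}

/* models spinup_worker(): must be called without `lock` held */
static int spinup_worker(void)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, worker_fn, NULL))
		return -1;
	pthread_mutex_lock(&lock);
	while (!worker_ready)
		pthread_cond_wait(&registered, &lock);
	pthread_mutex_unlock(&lock);
	return pthread_detach(tid);
}

static int submit_work(void)
{
	pthread_mutex_lock(&lock);
	if (!worker_ready) {
		/* drop the lock so the new thread can register itself */
		pthread_mutex_unlock(&lock);
		if (spinup_worker())
			return -1;
		/* relock and recheck, as queue_work_on_worker() does */
		pthread_mutex_lock(&lock);
		if (!worker_ready) {
			pthread_mutex_unlock(&lock);
			return -1;	/* like -WORKQUEUE_ERROR__NOTREADY */
		}
	}
	/* ... hand the work to the worker while holding the lock ... */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("submit_work: %s\n", submit_work() ? "failed" : "ok");
	return 0;
}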