Message-Id: <996325f824d095d2429f7677a87da602939f389c.1626177381.git.rickyman7@gmail.com>
Date: Tue, 13 Jul 2021 14:11:19 +0200
From: Riccardo Mancini <rickyman7@...il.com>
To: Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: Ian Rogers <irogers@...gle.com>,
Namhyung Kim <namhyung@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Mark Rutland <mark.rutland@....com>,
Jiri Olsa <jolsa@...hat.com>, linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org,
Riccardo Mancini <rickyman7@...il.com>
Subject: [RFC PATCH 08/10] perf workqueue: add queue_work and flush_workqueue functions
This patch adds functions to queue work_structs on a workqueue and to
wait for their completion, together with related tests.

When a new work item is added, the workqueue first checks whether there
are idle threads to wake up. If so, it wakes one of them up, handing it
the given work item; otherwise, it adds the item to the list of pending
work items. A thread that completes a work item checks this list before
going back to sleep.
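For clarity, here is a minimal usage sketch of the new API (my_work_fn
and the wq setup are made up for illustration; the workqueue is assumed
to have been created with create_workqueue() on a running threadpool,
as __workqueue__prepare() does in the test below):

	static void my_work_fn(struct work_struct *work)
	{
		/* the work item is typically embedded in a larger
		 * struct and recovered here with container_of(),
		 * as done in the test */
	}

	struct work_struct work;

	init_work(&work);
	work.func = my_work_fn;

	/* wakes an idle worker if any, otherwise appends to pending */
	queue_work(wq, &work);

	/* blocks until all workers are idle, i.e. all work completed */
	flush_workqueue(wq);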
Signed-off-by: Riccardo Mancini <rickyman7@...il.com>
---
tools/perf/tests/workqueue.c | 69 +++++++++++++++++++-
tools/perf/util/workqueue/workqueue.c | 93 +++++++++++++++++++++++++++
tools/perf/util/workqueue/workqueue.h | 7 ++
3 files changed, 168 insertions(+), 1 deletion(-)
diff --git a/tools/perf/tests/workqueue.c b/tools/perf/tests/workqueue.c
index 423dc8a92ca2563c..f71a839d5752d224 100644
--- a/tools/perf/tests/workqueue.c
+++ b/tools/perf/tests/workqueue.c
@@ -146,6 +146,27 @@ static int __test__threadpool(void *_args)
return 0;
}
+struct test_work {
+ struct work_struct work;
+ int i;
+ int *array;
+};
+
+static void test_work_fn1(struct work_struct *work)
+{
+ struct test_work *mwork = container_of(work, struct test_work, work);
+
+ dummy_work(mwork->i);
+ mwork->array[mwork->i] = mwork->i+1;
+}
+
+static void test_work_fn2(struct work_struct *work)
+{
+ struct test_work *mwork = container_of(work, struct test_work, work);
+
+ dummy_work(mwork->i);
+ mwork->array[mwork->i] = mwork->i*2;
+}
static int __workqueue__prepare(struct threadpool_struct **pool,
struct workqueue_struct **wq,
@@ -180,21 +201,67 @@ static int __workqueue__teardown(struct threadpool_struct *pool,
return 0;
}
+static int __workqueue__exec_wait(struct workqueue_struct *wq,
+ int *array, struct test_work *works,
+ work_func_t func, int n_work_items)
+{
+ int ret, i;
+
+ for (i = 0; i < n_work_items; i++) {
+ works[i].array = array;
+ works[i].i = i;
+
+ init_work(&works[i].work);
+ works[i].work.func = func;
+ queue_work(wq, &works[i].work);
+ }
+
+ ret = flush_workqueue(wq);
+ TEST_ASSERT_VAL("workqueue flush failure", ret == 0);
+
+ return 0;
+}
+
+
static int __test__workqueue(void *_args)
{
struct workqueue_test_args_t *args = _args;
struct threadpool_struct *pool;
struct workqueue_struct *wq;
- int ret;
+ int ret, i;
+ int *array;
+ struct test_work *works;
+
+ array = calloc(args->n_work_items, sizeof(*array));
+ works = calloc(args->n_work_items, sizeof(*works));
ret = __workqueue__prepare(&pool, &wq, args->pool_size);
if (ret)
return ret;
+ ret = __workqueue__exec_wait(wq, array, works, test_work_fn1,
+ args->n_work_items);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < args->n_work_items; i++)
+ TEST_ASSERT_VAL("failed array check (1)", array[i] == i+1);
+
+ ret = __workqueue__exec_wait(wq, array, works, test_work_fn2,
+ args->n_work_items);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < args->n_work_items; i++)
+ TEST_ASSERT_VAL("failed array check (2)", array[i] == 2*i);
+
ret = __workqueue__teardown(pool, wq);
if (ret)
return ret;
+ free(array);
+ free(works);
+
return 0;
}
diff --git a/tools/perf/util/workqueue/workqueue.c b/tools/perf/util/workqueue/workqueue.c
index 5934b14b9ed3c0e1..20d196de9500d369 100644
--- a/tools/perf/util/workqueue/workqueue.c
+++ b/tools/perf/util/workqueue/workqueue.c
@@ -21,6 +21,7 @@ enum worker_msg {
enum workqueue_status {
WORKQUEUE_STATUS__READY, /* wq is ready to receive work */
+ WORKQUEUE_STATUS__STOPPING, /* wq is being destructed */
WORKQUEUE_STATUS__ERROR,
WORKQUEUE_STATUS__MAX
};
@@ -102,6 +103,39 @@ __must_hold(&wq->lock)
pthread_cond_signal(&wq->idle_cond);
}
+/**
+ * wake_worker - wake worker @w of workqueue @wq, assigning it @work to execute
+ *
+ * Called from main thread.
+ * Moves worker from idle to busy list, assigns @work to it and sends it a
+ * wake up message.
+ *
+ * NB: this function releases the lock to be able to send the notification
+ * outside the critical section.
+ */
+static int wake_worker(struct workqueue_struct *wq, struct worker *w,
+ struct work_struct *work)
+__must_hold(&wq->lock)
+__releases(&wq->lock)
+{
+ enum worker_msg msg = WORKER_MSG__WAKE;
+ int err;
+
+ list_move(&w->entry, &wq->busy_list);
+ w->current_work = work;
+ unlock_workqueue(wq);
+
+ // send wake msg outside critical section to reduce time spent inside it
+ err = write(w->msg_pipe[1], &msg, sizeof(msg));
+ if (err < 0) {
+ pr_err("wake_worker[%d]: error sending msg: %s\n",
+ w->tidx, strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
/**
* stop_worker - stop worker @w
*
@@ -302,6 +336,11 @@ static int detach_threadpool_from_workqueue(struct workqueue_struct *wq)
return -1;
}
+ wq->status = WORKQUEUE_STATUS__STOPPING;
+ ret = flush_workqueue(wq);
+ if (ret)
+ return -1;
+
lock_workqueue(wq);
for_each_idle_worker(wq, w) {
ret = stop_worker(w);
@@ -422,3 +461,57 @@ int workqueue_nr_threads(struct workqueue_struct *wq)
{
return threadpool_size(wq->pool);
}
+
+/**
+ * queue_work - add @work to @wq internal queue
+ *
+ * If there are idle threads, one of these will be woken up.
+ * Otherwise, the work is added to the pending list.
+ */
+int queue_work(struct workqueue_struct *wq, struct work_struct *work)
+{
+ int ret = 0;
+ struct worker *chosen_worker;
+
+ // in particular, this can fail if workqueue is marked to be stopping
+ if (wq->status != WORKQUEUE_STATUS__READY) {
+ pr_err("workqueue: trying to queue but workqueue is not ready\n");
+ return -1;
+ }
+
+ lock_workqueue(wq);
+ if (list_empty(&wq->idle_list)) {
+ list_add_tail(&work->entry, &wq->pending);
+ unlock_workqueue(wq);
+ pr_debug("workqueue: queued new work item\n");
+ } else {
+ chosen_worker = list_first_entry(&wq->idle_list, struct worker, entry);
+ ret = wake_worker(wq, chosen_worker, work);
+ pr_debug("workqueue: woke worker %d\n", chosen_worker->tidx);
+ }
+
+ return ret;
+}
+
+/**
+ * flush_workqueue - wait for all currently running and pending work items to finish
+ *
+ * This function blocks until all threads become idle.
+ */
+int flush_workqueue(struct workqueue_struct *wq)
+{
+ lock_workqueue(wq);
+ while (!list_empty(&wq->busy_list))
+ pthread_cond_wait(&wq->idle_cond, &wq->lock);
+ unlock_workqueue(wq);
+
+ return 0;
+}
+
+/**
+ * init_work - initialize the @work struct
+ */
+void init_work(struct work_struct *work)
+{
+ INIT_LIST_HEAD(&work->entry);
+}
diff --git a/tools/perf/util/workqueue/workqueue.h b/tools/perf/util/workqueue/workqueue.h
index 86ec1d69274f41db..719bd0e5fb0ce7b7 100644
--- a/tools/perf/util/workqueue/workqueue.h
+++ b/tools/perf/util/workqueue/workqueue.h
@@ -21,4 +21,11 @@ extern struct workqueue_struct *create_workqueue(struct threadpool_struct *pool)
extern int destroy_workqueue(struct workqueue_struct *wq);
extern int workqueue_nr_threads(struct workqueue_struct *wq);
+
+extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
+
+extern int flush_workqueue(struct workqueue_struct *wq);
+
+extern void init_work(struct work_struct *work);
+
#endif /* __WORKQUEUE_WORKQUEUE_H */
--
2.31.1