Message-ID: <20091119172044.1679.24462.stgit@warthog.procyon.org.uk>
Date: Thu, 19 Nov 2009 17:20:44 +0000
From: David Howells <dhowells@...hat.com>
To: linux-cachefs@...hat.com, nfsv4@...ux-nfs.org,
linux-kernel@...r.kernel.org
Cc: dhowells@...hat.com, steved@...hat.com
Subject: [PATCH 02/28] SLOW_WORK: Make slow_work_ops ->get_ref/->put_ref
optional

From: Jens Axboe <jens.axboe@...cle.com>

Make the slow-work facility's ability to take references on a work item
optional, as not every user requires this.

Even slow-work's own internal thread-starter item only stubs these
operations out, so those stubs can be removed too.

Signed-off-by: Jens Axboe <jens.axboe@...cle.com>
Signed-off-by: David Howells <dhowells@...hat.com>
---
 Documentation/slow-work.txt |    2 +-
 kernel/slow-work.c          |   36 ++++++++++++++++--------------------
2 files changed, 17 insertions(+), 21 deletions(-)
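
For illustration, here is roughly what a slow-work user can do after this
change: leave ->get_ref() and ->put_ref() out of its operations table
entirely.  This is only a sketch, not part of the patch; the example_*
names are made up, and it assumes the slow-work API as it stands at this
point in the series (slow_work_register_user() taking the owning module):

	#include <linux/module.h>
	#include <linux/slow-work.h>

	static void example_work_execute(struct slow_work *work)
	{
		/* long-running, sleep-permitted processing goes here */
	}

	/* ->get_ref and ->put_ref can now simply be left NULL */
	static const struct slow_work_ops example_work_ops = {
		.owner   = THIS_MODULE,
		.execute = example_work_execute,
	};

	static struct slow_work example_work;

	static int __init example_init(void)
	{
		int ret;

		/* become a user of the facility, then queue the item */
		ret = slow_work_register_user(THIS_MODULE);
		if (ret < 0)
			return ret;

		slow_work_init(&example_work, &example_work_ops);
		return slow_work_enqueue(&example_work);
	}

	static void __exit example_exit(void)
	{
		slow_work_unregister_user(THIS_MODULE);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

Previously slow_work_enqueue() would have hit BUG_ON(!work->ops->get_ref)
for such an item; with the wrappers added below, a NULL ->get_ref() simply
behaves as a get that always succeeds.
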
diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt
index f12fda3..c655c51 100644
--- a/Documentation/slow-work.txt
+++ b/Documentation/slow-work.txt
@@ -125,7 +125,7 @@ ITEM OPERATIONS
 ===============
 
 Each work item requires a table of operations of type struct slow_work_ops.
-All members are required:
+Only ->execute() is required; getting and putting of a reference are optional.
 
 (*) Get a reference on an item:
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index dd08f37..fccf421 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -145,6 +145,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
 static int slow_work_user_count;
 static DEFINE_MUTEX(slow_work_user_lock);
 
+static inline int slow_work_get_ref(struct slow_work *work)
+{
+	if (work->ops->get_ref)
+		return work->ops->get_ref(work);
+
+	return 0;
+}
+
+static inline void slow_work_put_ref(struct slow_work *work)
+{
+	if (work->ops->put_ref)
+		work->ops->put_ref(work);
+}
+
 /*
  * Calculate the maximum number of active threads in the pool that are
  * permitted to process very slow work items.
@@ -248,7 +262,7 @@ static bool slow_work_execute(int id)
 	}
 
 	/* sort out the race between module unloading and put_ref() */
-	work->ops->put_ref(work);
+	slow_work_put_ref(work);
 
 #ifdef CONFIG_MODULES
 	module = slow_work_thread_processing[id];
@@ -309,7 +323,6 @@ int slow_work_enqueue(struct slow_work *work)
 	BUG_ON(slow_work_user_count <= 0);
 	BUG_ON(!work);
 	BUG_ON(!work->ops);
-	BUG_ON(!work->ops->get_ref);
 
 	/* when honouring an enqueue request, we only promise that we will run
 	 * the work function in the future; we do not promise to run it once
@@ -339,7 +352,7 @@ int slow_work_enqueue(struct slow_work *work)
 		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
 			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
 		} else {
-			if (work->ops->get_ref(work) < 0)
+			if (slow_work_get_ref(work) < 0)
 				goto cant_get_ref;
 			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
 				list_add_tail(&work->link, &vslow_work_queue);
@@ -480,21 +493,6 @@ static void slow_work_cull_timeout(unsigned long data)
 }
 
 /*
- * Get a reference on slow work thread starter
- */
-static int slow_work_new_thread_get_ref(struct slow_work *work)
-{
-	return 0;
-}
-
-/*
- * Drop a reference on slow work thread starter
- */
-static void slow_work_new_thread_put_ref(struct slow_work *work)
-{
-}
-
-/*
  * Start a new slow work thread
  */
 static void slow_work_new_thread_execute(struct slow_work *work)
@@ -529,8 +527,6 @@ static void slow_work_new_thread_execute(struct slow_work *work)
 static const struct slow_work_ops slow_work_new_thread_ops = {
 	.owner = THIS_MODULE,
-	.get_ref = slow_work_new_thread_get_ref,
-	.put_ref = slow_work_new_thread_put_ref,
 	.execute = slow_work_new_thread_execute,
 };
--