[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1438094371-8326-14-git-send-email-pmladek@suse.com>
Date: Tue, 28 Jul 2015 16:39:30 +0200
From: Petr Mladek <pmladek@...e.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Oleg Nesterov <oleg@...hat.com>, Tejun Heo <tj@...nel.org>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>
Cc: Steven Rostedt <rostedt@...dmis.org>,
"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
Josh Triplett <josh@...htriplett.org>,
Thomas Gleixner <tglx@...utronix.de>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Jiri Kosina <jkosina@...e.cz>, Borislav Petkov <bp@...e.de>,
Michal Hocko <mhocko@...e.cz>, linux-mm@...ck.org,
Vlastimil Babka <vbabka@...e.cz>,
live-patching@...r.kernel.org, linux-api@...r.kernel.org,
linux-kernel@...r.kernel.org, Petr Mladek <pmladek@...e.com>
Subject: [RFC PATCH 13/14] kthread_worker: Add set_kthread_worker_user_nice()
The kthread worker API will be used for kthreads that need to modify
their scheduling priority.
This patch adds a function that allows doing this easily and safely,
and hides the implementation details. It might even help to get rid
of an init work.
Signed-off-by: Petr Mladek <pmladek@...e.com>
---
include/linux/kthread.h | 2 ++
kernel/kthread.c | 14 ++++++++++++++
kernel/trace/ring_buffer_benchmark.c | 3 ++-
mm/huge_memory.c | 10 +---------
4 files changed, 19 insertions(+), 10 deletions(-)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index d916b024e986..b75847e1a4c9 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -142,6 +142,8 @@ int create_kthread_worker_on_node(struct kthread_worker *worker,
#define create_kthread_worker(worker, flags, namefmt, arg...) \
create_kthread_worker_on_node(worker, flags, -1, namefmt, ##arg)
+void set_kthread_worker_user_nice(struct kthread_worker *worker, long nice);
+
bool queue_kthread_work(struct kthread_worker *worker,
struct kthread_work *work);
void flush_kthread_work(struct kthread_work *work);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index d02509e17f7e..ab2e235b6144 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -648,6 +648,20 @@ int create_kthread_worker_on_node(struct kthread_worker *worker,
}
EXPORT_SYMBOL(create_kthread_worker_on_node);
+/**
+ * set_kthread_worker_user_nice - set the nice value of the kthread worker task
+ * @worker: target kthread_worker
+ * @nice: niceness value
+ */
+void set_kthread_worker_user_nice(struct kthread_worker *worker, long nice)
+{
+ struct task_struct *task = worker->task;
+
+ WARN_ON(!task);
+ set_user_nice(task, nice);
+}
+EXPORT_SYMBOL(set_kthread_worker_user_nice);
+
/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
struct kthread_work *work,
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 5036d284885c..73e4c7f11a2c 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -476,7 +476,8 @@ static int __init ring_buffer_benchmark_init(void)
sched_setscheduler(rb_producer_worker.task,
SCHED_FIFO, &param);
} else
- set_user_nice(rb_producer_worker.task, producer_nice);
+ set_kthread_worker_user_nice(&rb_producer_worker,
+ producer_nice);
return 0;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 51a514161f2b..1d5f990c55ab 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -55,12 +55,10 @@ static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
-static void khugepaged_init_func(struct kthread_work *dummy);
static void khugepaged_do_scan_func(struct kthread_work *dummy);
static void khugepaged_wait_func(struct kthread_work *dummy);
static void khugepaged_cleanup_func(struct kthread_work *dummy);
static DEFINE_KTHREAD_WORKER(khugepaged_worker);
-static DEFINE_KTHREAD_WORK(khugepaged_init_work, khugepaged_init_func);
static DEFINE_KTHREAD_WORK(khugepaged_do_scan_work, khugepaged_do_scan_func);
static DEFINE_KTHREAD_WORK(khugepaged_wait_work, khugepaged_wait_func);
static DEFINE_KTHREAD_WORK(khugepaged_cleanup_work, khugepaged_cleanup_func);
@@ -167,8 +165,7 @@ static int start_stop_khugepaged(void)
goto out;
}
- queue_kthread_work(&khugepaged_worker,
- &khugepaged_init_work);
+ set_kthread_worker_user_nice(&khugepaged_worker, MAX_NICE);
if (list_empty(&khugepaged_scan.mm_head))
queue_kthread_work(&khugepaged_worker,
@@ -2803,11 +2800,6 @@ static int khugepaged_wait_event(void)
!khugepaged_enabled());
}
-static void khugepaged_init_func(struct kthread_work *dummy)
-{
- set_user_nice(current, MAX_NICE);
-}
-
static void khugepaged_do_scan_func(struct kthread_work *dummy)
{
struct page *hpage = NULL;
--
1.8.5.6
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists