Message-Id: <1516863704-21364-2-git-send-email-wen.yang99@zte.com.cn>
Date:   Thu, 25 Jan 2018 15:01:41 +0800
From:   Wen Yang <wen.yang99@....com.cn>
To:     tj@...nel.org
Cc:     zhong.weidong@....com.cn, Jiang Biao <jiang.biao2@....com.cn>,
        Tan Hu <tan.hu@....com.cn>,
        Lai Jiangshan <jiangshanlai@...il.com>,
        kernel test robot <xiaolong.ye@...el.com>,
        linux-kernel@...r.kernel.org
Subject: [RFC PATCH V4 2/5] workqueue: expose attrs for system workqueues

Expose the sched_attr of system workqueues via sysfs, for example:
	# cat /sys/devices/virtual/workqueue/system_percpu/sched_attr
	policy=0 prio=0 nice=0
	# cat /sys/devices/virtual/workqueue/system_percpu_highpri/sched_attr
	policy=0 prio=0 nice=-20

Signed-off-by: Wen Yang <wen.yang99@....com.cn>
Signed-off-by: Jiang Biao <jiang.biao2@....com.cn>
Signed-off-by: Tan Hu <tan.hu@....com.cn>
Suggested-by: Tejun Heo <tj@...nel.org>
Cc: Tejun Heo <tj@...nel.org>
Cc: Lai Jiangshan <jiangshanlai@...il.com>
Cc: kernel test robot <xiaolong.ye@...el.com>
Cc: linux-kernel@...r.kernel.org
---
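A minimal user-space sketch (an illustration, not part of this patch) for
reading the file exposed above; the path follows the changelog example and
the format string matches sched_attr_show() below:

	#include <stdio.h>

	int main(void)
	{
		/* Workqueue name taken from the changelog example; actual
		 * names depend on the running kernel. */
		const char *path =
			"/sys/devices/virtual/workqueue/system_percpu/sched_attr";
		unsigned int policy = 0, prio = 0;
		int nice = 0;
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);
			return 1;
		}
		/* Parse "policy=%u prio=%u nice=%d" as emitted by sched_attr_show() */
		if (fscanf(f, "policy=%u prio=%u nice=%d", &policy, &prio, &nice) == 3)
			printf("policy=%u prio=%u nice=%d\n", policy, prio, nice);
		fclose(f);
		return 0;
	}
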
 include/linux/workqueue.h |  6 ++++++
 kernel/workqueue.c        | 50 +++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 54 insertions(+), 2 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4a54ef9..9faaade 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -13,6 +13,7 @@
 #include <linux/threads.h>
 #include <linux/atomic.h>
 #include <linux/cpumask.h>
+#include <uapi/linux/sched/types.h>
 
 struct workqueue_struct;
 
@@ -132,6 +133,11 @@ struct workqueue_attrs {
 	int nice;
 
 	/**
+	 * @sched_attr: kworker's scheduling parameters
+	 */
+	struct sched_attr sched_attr;
+
+	/**
 	 * @cpumask: allowed CPUs
 	 */
 	cpumask_var_t cpumask;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 67b68bb..993f225 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -256,6 +256,7 @@ struct workqueue_struct {
 	int			saved_max_active; /* WQ: saved pwq max_active */
 
 	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
+	struct workqueue_attrs	*attrs;		/* sched attrs for this wq's workers */
 	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */
 
 #ifdef CONFIG_SYSFS
@@ -1699,6 +1700,7 @@ static void worker_attach_to_pool(struct worker *worker,
 	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
 	 */
 	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
+	sched_setattr(worker->task, &pool->attrs->sched_attr);
 
 	/*
 	 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
@@ -3166,10 +3168,20 @@ struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
 	return NULL;
 }
 
+static void copy_sched_attr(struct sched_attr *to,
+		const struct sched_attr *from)
+{
+	to->sched_policy = from->sched_policy;
+	to->sched_priority = from->sched_priority;
+	to->sched_nice = from->sched_nice;
+	to->sched_flags = from->sched_flags;
+}
+
 static void copy_workqueue_attrs(struct workqueue_attrs *to,
 				 const struct workqueue_attrs *from)
 {
 	to->nice = from->nice;
+	copy_sched_attr(&to->sched_attr, &from->sched_attr);
 	cpumask_copy(to->cpumask, from->cpumask);
 	/*
 	 * Unlike hash and equality test, this function doesn't ignore
@@ -3250,7 +3262,7 @@ static void rcu_free_wq(struct rcu_head *rcu)
 		free_percpu(wq->cpu_pwqs);
 	else
 		free_workqueue_attrs(wq->unbound_attrs);
-
+	free_workqueue_attrs(wq->attrs);
 	kfree(wq->rescuer);
 	kfree(wq);
 }
@@ -3979,6 +3991,10 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 			goto err_free_wq;
 	}
 
+	wq->attrs = alloc_workqueue_attrs(GFP_KERNEL);
+	if (!wq->attrs)
+		goto err_free_wq;
+
 	va_start(args, lock_name);
 	vsnprintf(wq->name, sizeof(wq->name), fmt, args);
 	va_end(args);
@@ -3999,6 +4015,11 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
 	INIT_LIST_HEAD(&wq->list);
 
+	wq->attrs->sched_attr.sched_policy = SCHED_NORMAL;
+	wq->attrs->sched_attr.sched_priority = 0;
+	wq->attrs->sched_attr.sched_nice = wq->flags & WQ_HIGHPRI ?
+		HIGHPRI_NICE_LEVEL : 0;
+
 	if (alloc_and_link_pwqs(wq) < 0)
 		goto err_free_wq;
 
@@ -4049,6 +4070,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 
 err_free_wq:
 	free_workqueue_attrs(wq->unbound_attrs);
+	free_workqueue_attrs(wq->attrs);
 	kfree(wq);
 	return NULL;
 err_destroy:
@@ -5043,9 +5065,29 @@ static ssize_t max_active_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(max_active);
 
+static ssize_t sched_attr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t written;
+	struct workqueue_struct *wq = dev_to_wq(dev);
+
+	mutex_lock(&wq->mutex);
+	written = scnprintf(buf, PAGE_SIZE,
+			"policy=%u prio=%u nice=%d\n",
+			wq->attrs->sched_attr.sched_policy,
+			wq->attrs->sched_attr.sched_priority,
+			wq->attrs->sched_attr.sched_nice);
+	mutex_unlock(&wq->mutex);
+
+	return written;
+}
+
+static DEVICE_ATTR_RO(sched_attr);
+
 static struct attribute *wq_sysfs_attrs[] = {
 	&dev_attr_per_cpu.attr,
 	&dev_attr_max_active.attr,
+	&dev_attr_sched_attr.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(wq_sysfs);
@@ -5254,7 +5296,11 @@ static int __init wq_sysfs_init(void)
 	if (err)
 		return err;
 
-	return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
+	err = device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
+	if (err)
+		return err;
+	err = workqueue_sysfs_register(system_wq);
+	return err ? err : workqueue_sysfs_register(system_highpri_wq);
 }
 core_initcall(wq_sysfs_init);
 
-- 
1.8.3.1
