Message-Id: <1436760455-5686-5-git-send-email-bsd@redhat.com>
Date:	Mon, 13 Jul 2015 00:07:35 -0400
From:	Bandan Das <bsd@...hat.com>
To:	kvm@...r.kernel.org
Cc:	netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
	mst@...hat.com, Eyal Moscovici <EYALMO@...ibm.com>,
	Razya Ladelsky <RAZYA@...ibm.com>, cgroups@...r.kernel.org,
	jasowang@...hat.com
Subject: [RFC PATCH 4/4] vhost: Add cgroup-aware creation of worker threads

Using the cgroup comparison helper introduced in the previous patch,
this changes the worker creation policy: if the new device belongs to
different cgroups than every device we are currently serving, a new
worker thread is created for it even if the devs_per_worker threshold
has not been reached.

Signed-off-by: Bandan Das <bsd@...hat.com>
---
 drivers/vhost/vhost.c | 47 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 39 insertions(+), 8 deletions(-)
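
For review only (not part of the patch): a rough userspace sketch of the
reuse condition this series adds to vhost_dev_assign_worker(). Everything
here is simplified for illustration: the threshold value is a placeholder,
owner cgroups are modeled as plain strings, and groups_match() merely
stands in for the cgroup_match_groups() helper from the previous patch,
assumed to return 0 on a match as the hunk below implies.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define DEVS_PER_WORKER 4	/* placeholder threshold for illustration */

struct worker {
	const char *owner_cgroup;	/* simplified stand-in for the owner task's cgroups */
	int num_devices;		/* devices this worker currently serves */
};

/* Stand-in for cgroup_match_groups(): returns 0 when both owners share
 * the same cgroups, nonzero otherwise (strcmp-like semantics assumed). */
static int groups_match(const char *a, const char *b)
{
	return strcmp(a, b);
}

/* Mirror of the reuse condition in vhost_dev_assign_worker(): return a
 * worker with spare capacity and matching cgroups, or NULL to signal
 * that a new worker should be created for this device. */
static struct worker *pick_worker(struct worker *pool, size_t n,
				  const char *dev_owner_cgroup)
{
	for (size_t i = 0; i < n; i++) {
		if (pool[i].num_devices < DEVS_PER_WORKER &&
		    !groups_match(dev_owner_cgroup, pool[i].owner_cgroup))
			return &pool[i];
	}
	return NULL;
}

int main(void)
{
	struct worker pool[] = {
		{ "/vm1", 2 },	/* has capacity, owner in cgroup "/vm1" */
		{ "/vm2", 1 },	/* has capacity, owner in cgroup "/vm2" */
	};

	/* A device owned by a task in "/vm3" matches no worker's cgroups,
	 * so it gets a new worker even though both workers have room. */
	struct worker *w = pick_worker(pool, 2, "/vm3");
	printf("new device -> %s\n", w ? "reuse existing worker"
				       : "create new worker");
	return 0;
}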

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 6a5d4c0..dc0fa37 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -261,12 +261,6 @@ static int vhost_worker(void *data)
 				use_mm(dev->mm);
 			}
 
-			/* TODO: Consider a more elegant solution */
-			if (worker->owner != dev->owner) {
-				/* Should check for return value */
-				cgroup_attach_task_all(dev->owner, current);
-				worker->owner = dev->owner;
-			}
 			work->fn(work);
 			if (need_resched())
 				schedule();
@@ -278,6 +272,36 @@ static int vhost_worker(void *data)
 	return 0;
 }
 
+struct vhost_attach_cgroups_struct {
+	struct vhost_work work;
+	struct task_struct *owner;
+	int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+	struct vhost_attach_cgroups_struct *s;
+
+	s = container_of(work, struct vhost_attach_cgroups_struct, work);
+	s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static void vhost_attach_cgroups(struct vhost_dev *dev,
+				struct vhost_worker *worker)
+{
+	struct vhost_attach_cgroups_struct attach;
+
+	attach.owner = dev->owner;
+	vhost_work_init(dev, &attach.work, vhost_attach_cgroups_work);
+	vhost_work_queue(worker, &attach.work);
+	vhost_work_flush(worker, &attach.work);
+
+	if (!attach.ret)
+		worker->owner = dev->owner;
+
+	dev->err = attach.ret;
+}
+
 static void vhost_create_worker(struct vhost_dev *dev)
 {
 	struct vhost_worker *worker;
@@ -300,8 +324,14 @@ static void vhost_create_worker(struct vhost_dev *dev)
 
 	spin_lock_init(&worker->work_lock);
 	INIT_LIST_HEAD(&worker->work_list);
+
+	/* attach to the cgroups of the process that created us */
+	vhost_attach_cgroups(dev, worker);
+	if (dev->err)
+		goto therror;
+	worker->owner = dev->owner;
+
 	list_add(&worker->node, &pool->workers);
-	worker->owner = NULL;
 	worker->num_devices++;
 	total_vhost_workers++;
 	dev->worker = worker;
@@ -320,7 +350,8 @@ static int vhost_dev_assign_worker(struct vhost_dev *dev)
 
 	mutex_lock(&vhost_pool->pool_lock);
 	list_for_each_entry(worker, &vhost_pool->workers, node) {
-		if (worker->num_devices < devs_per_worker) {
+		if (worker->num_devices < devs_per_worker &&
+		    (!cgroup_match_groups(dev->owner, worker->owner))) {
 			dev->worker = worker;
 			dev->worker_assigned = true;
 			worker->num_devices++;
-- 
2.4.3
