Date:   Sat,  9 Oct 2021 20:42:41 +0530
From:   "Pratik R. Sampat" <psampat@...ux.ibm.com>
To:     bristot@...hat.com, christian@...uner.io, ebiederm@...ssion.com,
        lizefan.x@...edance.com, tj@...nel.org, hannes@...xchg.org,
        mingo@...nel.org, juri.lelli@...hat.com,
        linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
        cgroups@...r.kernel.org, containers@...ts.linux.dev,
        containers@...ts.linux-foundation.org, psampat@...ux.ibm.com,
        pratik.r.sampat@...il.com
Subject: [RFC 3/5] cpuset/cpuns: Make cgroup CPUset CPU namespace aware

When a new cgroup is created or a cpuset is updated, the mask supplied
to it is first translated to its corresponding physical CPUs, and the
restrictions are then applied on that translated mask.
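
For illustration, the per-task translation mirrors the
update_tasks_cpumask() hunk below (a simplified sketch only; the
get_pcpus_cpuns()/get_vcpus_cpuns() helpers and the v_cpuset_cpus field
are introduced earlier in this series):

	cpumask_t pcpus, vcpus;

	/* cgroup's effective mask -> physical CPUs, resolved against
	   the writer's CPU namespace */
	pcpus = get_pcpus_cpuns(current->nsproxy->cpu_ns, cs->effective_cpus);

	/* physical CPUs -> the task's virtual view, cached so the
	   cpuset interface files can display it */
	vcpus = get_vcpus_cpuns(task->nsproxy->cpu_ns, &pcpus);
	cpumask_copy(&task->nsproxy->cpu_ns->v_cpuset_cpus, &vcpus);

	/* scheduler affinity always operates on physical CPUs */
	set_cpus_allowed_ptr(task, &pcpus);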

The patch also updates the display interface so that tasks within a
CPU namespace see the corresponding virtual cpuset for their own
namespace context.
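For example (CPU numbers are hypothetical), if a CPU namespace maps
virtual CPUs 0-1 onto physical CPUs 4-5, reading cpuset.cpus for the
container's cgroup shows "4-5" from the init CPU namespace but "0-1"
from a task inside that namespace.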

Signed-off-by: Pratik R. Sampat <psampat@...ux.ibm.com>
---
 kernel/cgroup/cpuset.c | 57 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 54 insertions(+), 3 deletions(-)

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index adb5190c4429..eb1e950543cf 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -65,6 +65,7 @@
 #include <linux/mutex.h>
 #include <linux/cgroup.h>
 #include <linux/wait.h>
+#include <linux/cpu_namespace.h>
 
 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
@@ -1061,8 +1062,19 @@ static void update_tasks_cpumask(struct cpuset *cs)
 	struct task_struct *task;
 
 	css_task_iter_start(&cs->css, 0, &it);
-	while ((task = css_task_iter_next(&it)))
+	while ((task = css_task_iter_next(&it))) {
+#ifdef CONFIG_CPU_NS
+		cpumask_t pcpus;
+		cpumask_t vcpus;
+
+		pcpus = get_pcpus_cpuns(current->nsproxy->cpu_ns, cs->effective_cpus);
+		vcpus = get_vcpus_cpuns(task->nsproxy->cpu_ns, &pcpus);
+		cpumask_copy(&task->nsproxy->cpu_ns->v_cpuset_cpus, &vcpus);
+		set_cpus_allowed_ptr(task, &pcpus);
+#else
 		set_cpus_allowed_ptr(task, cs->effective_cpus);
+#endif
+	}
 	css_task_iter_end(&it);
 }
 
@@ -2212,8 +2224,18 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 		 * can_attach beforehand should guarantee that this doesn't
 		 * fail.  TODO: have a better way to handle failure here
 		 */
-		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+#ifdef CONFIG_CPU_NS
+		cpumask_t pcpus;
+		cpumask_t vcpus;
 
+		pcpus = get_pcpus_cpuns(current->nsproxy->cpu_ns, cpus_attach);
+		vcpus = get_vcpus_cpuns(task->nsproxy->cpu_ns, &pcpus);
+		cpumask_copy(&task->nsproxy->cpu_ns->v_cpuset_cpus, &vcpus);
+
+		WARN_ON_ONCE(set_cpus_allowed_ptr(task, &pcpus));
+#else
+		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+#endif
 		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
 		cpuset_update_task_spread_flag(cs, task);
 	}
@@ -2436,13 +2458,33 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
 
 	switch (type) {
 	case FILE_CPULIST:
+#ifdef CONFIG_CPU_NS
+		if (current->nsproxy->cpu_ns == &init_cpu_ns) {
+			seq_printf(sf, "%*pbl\n",
+				   cpumask_pr_args(cs->cpus_allowed));
+		} else {
+			seq_printf(sf, "%*pbl\n",
+				   cpumask_pr_args(&current->nsproxy->cpu_ns->v_cpuset_cpus));
+		}
+#else
 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+#endif
 		break;
 	case FILE_MEMLIST:
 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
 		break;
 	case FILE_EFFECTIVE_CPULIST:
+#ifdef CONFIG_CPU_NS
+		if (current->nsproxy->cpu_ns == &init_cpu_ns) {
+			seq_printf(sf, "%*pbl\n",
+				   cpumask_pr_args(cs->effective_cpus));
+		} else {
+			seq_printf(sf, "%*pbl\n",
+				   cpumask_pr_args(&current->nsproxy->cpu_ns->v_cpuset_cpus));
+		}
+#else
 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
+#endif
 		break;
 	case FILE_EFFECTIVE_MEMLIST:
 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
@@ -2884,9 +2926,18 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
  */
 static void cpuset_fork(struct task_struct *task)
 {
+#ifdef CONFIG_CPU_NS
+	cpumask_t vcpus;
+#endif
+
 	if (task_css_is_root(task, cpuset_cgrp_id))
 		return;
-
+#ifdef CONFIG_CPU_NS
+	if (task->nsproxy->cpu_ns != &init_cpu_ns) {
+		vcpus = get_vcpus_cpuns(task->nsproxy->cpu_ns, current->cpus_ptr);
+		cpumask_copy(&task->nsproxy->cpu_ns->v_cpuset_cpus, &vcpus);
+	}
+#endif
 	set_cpus_allowed_ptr(task, current->cpus_ptr);
 	task->mems_allowed = current->mems_allowed;
 }
-- 
2.31.1
