[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1468371785-53231-28-git-send-email-fenghua.yu@intel.com>
Date: Tue, 12 Jul 2016 18:03:00 -0700
From: "Fenghua Yu" <fenghua.yu@...el.com>
To: "Thomas Gleixner" <tglx@...utronix.de>,
"Ingo Molnar" <mingo@...e.hu>,
"H. Peter Anvin" <h.peter.anvin@...el.com>,
"Tony Luck" <tony.luck@...el.com>, "Tejun Heo" <tj@...nel.org>,
"Borislav Petkov" <bp@...e.de>,
"Stephane Eranian" <eranian@...gle.com>,
"Peter Zijlstra" <peterz@...radead.org>,
"Marcelo Tosatti" <mtosatti@...hat.com>,
"David Carrillo-Cisneros" <davidcc@...gle.com>,
"Ravi V Shankar" <ravi.v.shankar@...el.com>,
"Vikas Shivappa" <vikas.shivappa@...ux.intel.com>,
"Sai Prakhya" <sai.praneeth.prakhya@...el.com>
Cc: "linux-kernel" <linux-kernel@...r.kernel.org>,
"x86" <x86@...nel.org>, "Fenghua Yu" <fenghua.yu@...el.com>
Subject: [PATCH 27/32] x86/intel_rdt_rdtgroup.c: Implement rscctrl file system commands
From: Fenghua Yu <fenghua.yu@...el.com>
Four basic file system commands are implemented for rscctrl:
mount, umount, mkdir, and rmdir.
Signed-off-by: Fenghua Yu <fenghua.yu@...el.com>
Reviewed-by: Tony Luck <tony.luck@...el.com>
---
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 237 +++++++++++++++++++++++++++++++
1 file changed, 237 insertions(+)
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index b2140a8..91ea3509 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -856,6 +856,139 @@ static void rdtgroup_idr_remove(struct idr *idr, int id)
spin_unlock_bh(&rdtgroup_idr_lock);
}
+
+/*
+ * rdtgroup_mkdir() - kernfs .mkdir callback for the rscctrl filesystem.
+ *
+ * Creates a new resource group directory @name directly under the root
+ * group; nested groups are rejected with -EPERM.  Allocates the rdtgroup
+ * and an IDR id, creates the kernfs directory, populates its control
+ * files and generates a default schema for the group.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
+ umode_t mode)
+{
+ struct rdtgroup *parent, *rdtgrp;
+ struct rdtgroup_root *root;
+ struct kernfs_node *kn;
+ int level, ret;
+
+ /* Only one level of groups is supported: parent must be the root. */
+ if (parent_kn != root_rdtgrp->kn)
+ return -EPERM;
+
+ /* Do not accept '\n' in a group name to avoid unparsable output. */
+ if (strchr(name, '\n'))
+ return -EINVAL;
+
+ /* Takes rdtgroup_mutex and pins @parent; NULL if it is being removed. */
+ parent = rdtgroup_kn_lock_live(parent_kn);
+ if (!parent)
+ return -ENODEV;
+ root = parent->root;
+ level = parent->level + 1;
+
+ /* allocate the rdtgroup and its ID, 0 is reserved for the root */
+ rdtgrp = kzalloc(sizeof(*rdtgrp) +
+ sizeof(rdtgrp->ancestor_ids[0]) * (level + 1),
+ GFP_KERNEL);
+ if (!rdtgrp) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /*
+ * Temporarily set the pointer to NULL, so idr_find() won't return
+ * a half-baked rdtgroup.
+ */
+ rdtgrp->id = rdtgroup_idr_alloc(&root->rdtgroup_idr, NULL, 2, 0,
+ GFP_KERNEL);
+ if (rdtgrp->id < 0) {
+ ret = -ENOMEM;
+ goto out_cancel_ref;
+ }
+
+ /* New group starts with no tasks assigned. */
+ INIT_LIST_HEAD(&rdtgrp->pset.tasks);
+
+ init_rdtgroup_housekeeping(rdtgrp);
+ /* ... and with no CPUs assigned. */
+ cpumask_clear(&rdtgrp->cpu_mask);
+
+ rdtgrp->root = root;
+ rdtgrp->level = level;
+
+ /* Inherit the clone-children flag from the parent group. */
+ if (test_bit(RDTGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
+ set_bit(RDTGRP_CPUSET_CLONE_CHILDREN, &rdtgrp->flags);
+
+ /* create the directory */
+ kn = kernfs_create_dir(parent->kn, name, mode, rdtgrp);
+ if (IS_ERR(kn)) {
+ ret = PTR_ERR(kn);
+ goto out_free_id;
+ }
+ rdtgrp->kn = kn;
+
+ /*
+ * This extra ref will be put in kernfs_remove() and guarantees
+ * that @rdtgrp->kn is always accessible.
+ */
+ kernfs_get(kn);
+
+ atomic_inc(&root->nr_rdtgrps);
+
+ /*
+ * @rdtgrp is now fully operational. If something fails after this
+ * point, it'll be released via the normal destruction path.
+ */
+ rdtgroup_idr_replace(&root->rdtgroup_idr, rdtgrp, rdtgrp->id);
+
+ ret = rdtgroup_kn_set_ugid(kn);
+ if (ret)
+ goto out_destroy;
+
+ /* Create the per-group control files under the new directory. */
+ ret = rdtgroup_partition_populate_dir(kn);
+ if (ret)
+ goto out_destroy;
+
+ /* Make the directory visible to userspace. */
+ kernfs_activate(kn);
+
+ list_add_tail(&rdtgrp->rdtgroup_list, &rdtgroup_lists);
+ /* Generate default schema for rdtgrp. */
+ ret = get_default_resources(rdtgrp);
+ if (ret)
+ goto out_destroy;
+
+ ret = 0;
+ goto out_unlock;
+
+/* Error unwinding, in reverse order of acquisition. */
+out_free_id:
+ rdtgroup_idr_remove(&root->rdtgroup_idr, rdtgrp->id);
+out_cancel_ref:
+ kfree(rdtgrp);
+out_unlock:
+ rdtgroup_kn_unlock(parent_kn);
+ return ret;
+
+/*
+ * NOTE(review): out_destroy sits after the return on purpose (it rejoins
+ * out_unlock), but if get_default_resources() fails the group is already
+ * on rdtgroup_lists -- presumably rdtgroup_destroy_locked() unlinks it;
+ * TODO confirm, otherwise this leaves a dangling list entry.
+ */
+out_destroy:
+ rdtgroup_destroy_locked(rdtgrp);
+ goto out_unlock;
+}
+
+/*
+ * rdtgroup_rmdir() - kernfs .rmdir callback: remove a resource group.
+ *
+ * Refuses removal with -EBUSY while tasks are still assigned to the
+ * group.  Otherwise detaches every CPU pointing at the group and
+ * destroys it.  Returns 0 on success or a negative errno.
+ */
+static int rdtgroup_rmdir(struct kernfs_node *kn)
+{
+	struct rdtgroup *rdtgrp;
+	int cpu;
+	int ret = 0;
+
+	/* Takes rdtgroup_mutex and pins the group; NULL if already dying. */
+	rdtgrp = rdtgroup_kn_lock_live(kn);
+	if (!rdtgrp)
+		return -ENODEV;
+
+	/* A group that still owns tasks cannot be removed. */
+	if (!list_empty(&rdtgrp->pset.tasks)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * Drop the per-CPU references to this group.  Use NULL rather
+	 * than 0: cpu_rdtgroup holds a pointer (assumes the per-CPU
+	 * variable is declared as struct rdtgroup * -- confirm).
+	 */
+	for_each_cpu(cpu, &rdtgrp->cpu_mask)
+		per_cpu(cpu_rdtgroup, cpu) = NULL;
+
+	ret = rdtgroup_destroy_locked(rdtgrp);
+
+out:
+	rdtgroup_kn_unlock(kn);
+	return ret;
+}
+
static int
rdtgroup_move_task_all(struct rdtgroup *src_rdtgrp, struct rdtgroup *dst_rdtgrp)
{
@@ -957,6 +1090,11 @@ static void release_root_closid(void)
}
}
+/* Directory create/remove callbacks wired into kernfs for rscctrl. */
+static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
+ .mkdir = rdtgroup_mkdir,
+ .rmdir = rdtgroup_rmdir,
+};
+
static void setup_task_rg_lists(struct rdtgroup *rdtgrp, bool enable)
{
struct task_struct *p, *g;
@@ -1009,6 +1147,105 @@ static void setup_task_rg_lists(struct rdtgroup *rdtgrp, bool enable)
*/
static bool rdtgrp_dfl_root_visible;
+/* True while the single rscctrl instance is mounted (see rdt_mount()). */
+bool rdtgroup_mounted;
+
+/*
+ * rdt_mount() - .mount callback for the rscctrl filesystem type.
+ *
+ * Only a single concurrent mount is supported.  Parses the mount
+ * options, programs the QoS MSRs accordingly, generates the default
+ * schema for the root group and mounts the kernfs hierarchy rooted at
+ * rdtgrp_dfl_root.
+ *
+ * Returns the root dentry on success or ERR_PTR(-errno) on failure.
+ */
+static struct dentry *rdt_mount(struct file_system_type *fs_type,
+				int flags, const char *unused_dev_name,
+				void *data)
+{
+	struct rdtgroup_root *root = &rdtgrp_dfl_root;
+	struct dentry *dentry;
+	int ret;
+	bool new_sb;
+
+	/* Only a single mount of rscctrl is supported. */
+	if (rdtgroup_mounted)
+		return ERR_PTR(-EBUSY);
+
+	/* Reset option state before parsing this mount's options. */
+	rdt_opts.cdp_enabled = false;
+	rdt_opts.verbose = false;
+	cdp_enabled = false;
+
+	/*
+	 * Fail the mount on bad options.  The previous code jumped to the
+	 * kernfs_mount() call here with @root still uninitialized and
+	 * mounted anyway, dereferencing garbage.
+	 */
+	ret = parse_rdtgroupfs_options(data);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (rdt_opts.cdp_enabled) {
+		cdp_enabled = true;
+		/* CDP splits each CLOSid into code/data halves. */
+		cconfig.max_closid >>= cdp_enabled;
+		pr_info("CDP is enabled\n");
+	}
+
+	init_msrs(cdp_enabled);
+
+	rdtgrp_dfl_root_visible = true;
+
+	/* Generate the default schema for the root group. */
+	ret = get_default_resources(&root->rdtgrp);
+	if (ret)
+		return ERR_PTR(-ENOSPC);
+
+	dentry = kernfs_mount(fs_type, flags, root->kf_root,
+			      RDTGROUP_SUPER_MAGIC, &new_sb);
+	if (IS_ERR(dentry))
+		/*
+		 * Propagate the real error: the old path returned
+		 * ERR_PTR(ret) with ret possibly 0, i.e. NULL, which is
+		 * an invalid ->mount() result.
+		 */
+		return dentry;
+	if (!new_sb) {
+		/*
+		 * Reusing an existing superblock cannot happen while only
+		 * one mount is allowed; drop the reference and bail out.
+		 */
+		dput(dentry);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/* Hook all existing tasks onto the root group's task list. */
+	setup_task_rg_lists(&root->rdtgrp, true);
+
+	cpumask_clear(&root->rdtgrp.cpu_mask);
+	rdtgroup_mounted = true;
+
+	return dentry;
+}
+
+/*
+ * rdt_kill_sb() - .kill_sb callback; tears down the rscctrl mount.
+ *
+ * ->kill_sb() has a void return and cannot refuse the unmount, so the
+ * superblock must be destroyed even if removing the sub-directories
+ * fails.  The previous code returned early on rmdir_all_sub() failure,
+ * skipping kernfs_kill_sb() and leaking the superblock while leaving
+ * rdtgroup_mounted stuck at true.
+ */
+static void rdt_kill_sb(struct super_block *sb)
+{
+	mutex_lock(&rdtgroup_mutex);
+
+	if (rmdir_all_sub()) {
+		/* Cannot abort the unmount; log and tear down anyway. */
+		pr_err("rscctrl: failed to remove all sub-directories on unmount\n");
+	}
+
+	/* Unhook tasks from the root group and release its CLOSid. */
+	setup_task_rg_lists(root_rdtgrp, false);
+	release_root_closid();
+	root_rdtgrp->resource.valid = false;
+
+	/* Restore max_closid to original value. */
+	cconfig.max_closid <<= cdp_enabled;
+
+	kernfs_kill_sb(sb);
+	rdtgroup_mounted = false;
+
+	mutex_unlock(&rdtgroup_mutex);
+}
+
+/* The "rscctrl" filesystem type; registered with the VFS at init time. */
+static struct file_system_type rdt_fs_type = {
+ .name = "rscctrl",
+ .mount = rdt_mount,
+ .kill_sb = rdt_kill_sb,
+};
+
static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
--
2.5.0
Powered by blists - more mailing lists