[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20220908195111.661824729@redhat.com>
Date: Thu, 08 Sep 2022 16:29:00 -0300
From: Marcelo Tosatti <mtosatti@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: Frederic Weisbecker <frederic@...nel.org>,
Juri Lelli <juri.lelli@...hat.com>,
Daniel Bristot de Oliveira <bristot@...nel.org>,
Prasad Pandit <ppandit@...hat.com>,
Valentin Schneider <vschneid@...hat.com>,
Yair Podemsky <ypodemsk@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Marcelo Tosatti <mtosatti@...hat.com>
Subject: [RFC PATCH 1/7] cpu isolation: basic block interference infrastructure
There are a number of codepaths in the kernel that interrupt
code execution on remote CPUs. A subset of such codepaths is
triggered from userspace and can therefore return errors.
Introduce a cpumask named "block interference", writable from userspace.
This cpumask (and associated helpers) can be used by codepaths that
run work on remote CPUs to optionally fail with an error instead.
Signed-off-by: Marcelo Tosatti <mtosatti@...hat.com>
Index: linux-2.6/include/linux/sched/isolation.h
===================================================================
--- linux-2.6.orig/include/linux/sched/isolation.h
+++ linux-2.6/include/linux/sched/isolation.h
@@ -58,4 +58,33 @@ static inline bool housekeeping_cpu(int
return true;
}
+#ifdef CONFIG_CPU_ISOLATION
+/*
+ * "Block interference" cpumask: a userspace-writable (via debugfs) set of
+ * CPUs for which codepaths that interrupt remote CPU execution may choose
+ * to fail with an error instead of interfering.
+ *
+ * The mask is protected by a percpu rwsem: test it under
+ * block_interf_read_lock(), update it under block_interf_write_lock().
+ */
+extern cpumask_var_t block_interf_cpumask;
+/* True once the cpumask has been allocated (set from a late_initcall). */
+extern bool block_interf_cpumask_active;
+
+void block_interf_read_lock(void);
+void block_interf_read_unlock(void);
+
+void block_interf_write_lock(void);
+void block_interf_write_unlock(void);
+
+void block_interf_assert_held(void);
+
+#else
+/* CONFIG_CPU_ISOLATION=n: locking is a no-op, block_interf_cpu() is false. */
+static inline void block_interf_read_lock(void) { }
+static inline void block_interf_read_unlock(void) { }
+static inline void block_interf_write_lock(void) { }
+static inline void block_interf_write_unlock(void) { }
+static inline void block_interf_assert_held(void) { }
+#endif
+
+/*
+ * block_interf_cpu - should interference with @cpu be avoided?
+ *
+ * Returns true when @cpu is set in block_interf_cpumask; always false
+ * before the mask is allocated or when CPU_ISOLATION is off.  Callers
+ * should hold block_interf_read_lock() across this test and the remote
+ * operation it guards, so the answer stays stable against writers.
+ */
+static inline bool block_interf_cpu(int cpu)
+{
+#ifdef CONFIG_CPU_ISOLATION
+	if (block_interf_cpumask_active)
+		return cpumask_test_cpu(cpu, block_interf_cpumask);
+#endif
+	return false;
+}
+
#endif /* _LINUX_SCHED_ISOLATION_H */
Index: linux-2.6/kernel/sched/isolation.c
===================================================================
--- linux-2.6.orig/kernel/sched/isolation.c
+++ linux-2.6/kernel/sched/isolation.c
@@ -239,3 +239,116 @@ static int __init housekeeping_isolcpus_
return housekeeping_setup(str, flags);
}
__setup("isolcpus=", housekeeping_isolcpus_setup);
+
+/* Serializes readers of block_interf_cpumask against debugfs writers. */
+DEFINE_STATIC_PERCPU_RWSEM(block_interf_lock);
+
+cpumask_var_t block_interf_cpumask;
+EXPORT_SYMBOL_GPL(block_interf_cpumask);
+
+/* True once block_interf_cpumask has been allocated (set in the initcall). */
+bool block_interf_cpumask_active;
+EXPORT_SYMBOL_GPL(block_interf_cpumask_active);
+
+/*
+ * Lock wrappers around block_interf_lock, exported so modules that test
+ * block_interf_cpumask can take the percpu rwsem without the static
+ * definition being visible.  Both sides may sleep; the write side
+ * (debugfs mask updates only) is the expensive one.
+ */
+void block_interf_read_lock(void)
+{
+	percpu_down_read(&block_interf_lock);
+}
+EXPORT_SYMBOL_GPL(block_interf_read_lock);
+
+void block_interf_read_unlock(void)
+{
+	percpu_up_read(&block_interf_lock);
+}
+EXPORT_SYMBOL_GPL(block_interf_read_unlock);
+
+void block_interf_write_lock(void)
+{
+	percpu_down_write(&block_interf_lock);
+}
+EXPORT_SYMBOL_GPL(block_interf_write_lock);
+
+void block_interf_write_unlock(void)
+{
+	percpu_up_write(&block_interf_lock);
+}
+EXPORT_SYMBOL_GPL(block_interf_write_unlock);
+
+/* Lockdep helper: assert block_interf_lock is held (either side). */
+void block_interf_assert_held(void)
+{
+	percpu_rwsem_assert_held(&block_interf_lock);
+}
+EXPORT_SYMBOL_GPL(block_interf_assert_held);
+
+/*
+ * debugfs read handler: format block_interf_cpumask as "%*pb\n" and copy
+ * it to userspace.  Returns the number of bytes read, -ENOMEM on
+ * allocation failure, or -EINVAL if the user buffer is too small for the
+ * formatted mask.
+ */
+static ssize_t
+block_interf_cpumask_read(struct file *filp, char __user *ubuf,
+			  size_t count, loff_t *ppos)
+{
+	char *mask_str;
+	ssize_t ret;
+	int len;
+
+	/*
+	 * Snapshot the mask under the read lock so a concurrent write via
+	 * block_interf_cpumask_write() cannot change it between the sizing
+	 * pass and the formatting pass.
+	 */
+	block_interf_read_lock();
+	len = snprintf(NULL, 0, "%*pb\n",
+		       cpumask_pr_args(block_interf_cpumask)) + 1;
+	mask_str = kmalloc(len, GFP_KERNEL);
+	if (!mask_str) {
+		block_interf_read_unlock();
+		return -ENOMEM;
+	}
+
+	len = snprintf(mask_str, len, "%*pb\n",
+		       cpumask_pr_args(block_interf_cpumask));
+	block_interf_read_unlock();
+
+	/* Use a signed ret; the original stuffed -EINVAL into size_t count. */
+	if (len >= count) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+	ret = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
+
+out_free:
+	kfree(mask_str);
+
+	return ret;
+}
+
+/*
+ * debugfs write handler: parse a cpumask from userspace into a temporary
+ * mask, then install it as the new block_interf_cpumask under the write
+ * lock so in-flight readers see either the old or the new mask, never a
+ * partial copy.  Returns @count on success, -ENOMEM on allocation
+ * failure, or the cpumask_parse_user() error.
+ */
+static ssize_t
+block_interf_cpumask_write(struct file *filp, const char __user *ubuf,
+			  size_t count, loff_t *ppos)
+{
+	cpumask_var_t block_interf_cpumask_new;
+	int err;
+
+	if (!zalloc_cpumask_var(&block_interf_cpumask_new, GFP_KERNEL))
+		return -ENOMEM;
+
+	/* Parse before taking the lock; invalid input never blocks readers. */
+	err = cpumask_parse_user(ubuf, count, block_interf_cpumask_new);
+	if (err)
+		goto err_free;
+
+	block_interf_write_lock();
+	cpumask_copy(block_interf_cpumask, block_interf_cpumask_new);
+	block_interf_write_unlock();
+	free_cpumask_var(block_interf_cpumask_new);
+
+	return count;
+
+err_free:
+	free_cpumask_var(block_interf_cpumask_new);
+
+	return err;
+}
+
+/*
+ * file_operations for the "block_interf_cpumask" debugfs file.
+ * NOTE(review): created with debugfs_create_file_unsafe(), which skips
+ * removal protection, and the handlers do not call debugfs_file_get/put.
+ * That is only safe because the file is never removed -- confirm if a
+ * teardown path is ever added.
+ */
+static const struct file_operations block_interf_cpumask_fops = {
+	.read = block_interf_cpumask_read,
+	.write = block_interf_cpumask_write,
+};
+
+
+/*
+ * Allocate the block interference cpumask (initially empty) and expose
+ * it at <debugfs>/block_interf_cpumask.  The active flag is set only
+ * after successful allocation, so block_interf_cpu() never dereferences
+ * an unallocated mask.  NOTE(review): no barrier orders the flag store
+ * against the allocation -- presumably fine at late_initcall time before
+ * userspace can race; confirm.
+ */
+static int __init block_interf_cpumask_init(void)
+{
+	if (!zalloc_cpumask_var(&block_interf_cpumask, GFP_KERNEL))
+		return -ENOMEM;
+
+	/* debugfs creation errors are intentionally ignored (API convention). */
+	debugfs_create_file_unsafe("block_interf_cpumask", 0644, NULL, NULL,
+				   &block_interf_cpumask_fops);
+
+	block_interf_cpumask_active = true;
+	return 0;
+}
+
+late_initcall(block_interf_cpumask_init);
+
Powered by blists - more mailing lists