Message-ID: <20240206185709.849294306@redhat.com>
Date: Tue, 06 Feb 2024 15:49:12 -0300
From: Marcelo Tosatti <mtosatti@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: Daniel Bristot de Oliveira <bristot@...nel.org>,
 Juri Lelli <juri.lelli@...hat.com>,
 Valentin Schneider <vschneid@...hat.com>,
 Frederic Weisbecker <frederic@...nel.org>,
 Leonardo Bras <leobras@...hat.com>,
 Peter Zijlstra <peterz@...radead.org>,
 Thomas Gleixner <tglx@...utronix.de>,
 Marcelo Tosatti <mtosatti@...hat.com>
Subject: [patch 01/12] cpu isolation: basic block interference infrastructure

There are a number of codepaths in the kernel that interrupt code
execution on remote CPUs. A subset of these codepaths is triggered from
userspace and can therefore return errors.

Introduce a cpumask named "block interference", writable from userspace.

This cpumask (and its associated helpers) can be used by codepaths that
trigger execution on remote CPUs to optionally return an error instead
of interfering with the CPUs in the mask, as the sketch below illustrates.
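
For illustration, a minimal sketch (not part of this patch) of how a
userspace-triggered codepath might consult the mask before interrupting
a remote CPU; the wrapper name and the -EPERM error value are
assumptions, only block_interf_srcu_read_lock()/unlock() and
block_interf_cpu() come from this series:

	#include <linux/errno.h>
	#include <linux/workqueue.h>
	#include <linux/sched/isolation.h>

	/*
	 * Hypothetical caller (illustrative only): refuse to queue work
	 * on a CPU that userspace marked in the block interference
	 * cpumask, and report the failure back to the caller instead.
	 */
	static int schedule_work_on_checked(int cpu, struct work_struct *work)
	{
		int idx, ret = 0;

		idx = block_interf_srcu_read_lock();
		if (block_interf_cpu(cpu))
			ret = -EPERM;	/* error value is an assumption */
		else
			schedule_work_on(cpu, work);
		block_interf_srcu_read_unlock(idx);

		return ret;
	}

The SRCU read-side section is what lets the write path below call
synchronize_srcu() after installing a new mask, so it only returns once
in-flight callers that may have sampled the old mask have finished. The
mask itself is exposed through debugfs; with a NULL parent the file is
created at the debugfs root, typically
/sys/kernel/debug/block_interf_cpumask.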

Signed-off-by: Marcelo Tosatti <mtosatti@...hat.com>

Index: linux-isolation/include/linux/sched/isolation.h
===================================================================
--- linux-isolation.orig/include/linux/sched/isolation.h
+++ linux-isolation/include/linux/sched/isolation.h
@@ -72,4 +72,28 @@ static inline bool cpu_is_isolated(int c
 	       cpuset_cpu_is_isolated(cpu);
 }
 
+#ifdef CONFIG_CPU_ISOLATION
+extern cpumask_var_t block_interf_cpumask;
+extern bool block_interf_cpumask_active;
+
+int block_interf_srcu_read_lock(void);
+void block_interf_srcu_read_unlock(int idx);
+
+void block_interf_assert_held(void);
+
+#else
+static inline int block_interf_srcu_read_lock(void) { return 0; }
+static inline void block_interf_srcu_read_unlock(int idx) { }
+static inline void block_interf_assert_held(void) { }
+#endif
+
+static inline bool block_interf_cpu(int cpu)
+{
+#ifdef CONFIG_CPU_ISOLATION
+	if (block_interf_cpumask_active)
+		return cpumask_test_cpu(cpu, block_interf_cpumask);
+#endif
+	return false;
+}
+
 #endif /* _LINUX_SCHED_ISOLATION_H */
Index: linux-isolation/kernel/sched/isolation.c
===================================================================
--- linux-isolation.orig/kernel/sched/isolation.c
+++ linux-isolation/kernel/sched/isolation.c
@@ -239,3 +239,111 @@ static int __init housekeeping_isolcpus_
 	return housekeeping_setup(str, flags);
 }
 __setup("isolcpus=", housekeeping_isolcpus_setup);
+
+struct srcu_struct block_interf_srcu;
+EXPORT_SYMBOL_GPL(block_interf_srcu);
+
+cpumask_var_t block_interf_cpumask;
+EXPORT_SYMBOL_GPL(block_interf_cpumask);
+
+bool block_interf_cpumask_active;
+EXPORT_SYMBOL_GPL(block_interf_cpumask_active);
+
+int block_interf_srcu_read_lock(void)
+{
+	return srcu_read_lock(&block_interf_srcu);
+}
+EXPORT_SYMBOL(block_interf_srcu_read_lock);
+
+void block_interf_srcu_read_unlock(int idx)
+{
+	srcu_read_unlock(&block_interf_srcu, idx);
+}
+EXPORT_SYMBOL(block_interf_srcu_read_unlock);
+
+void block_interf_assert_held(void)
+{
+	WARN_ON_ONCE(!srcu_read_lock_held(&block_interf_srcu));
+}
+EXPORT_SYMBOL(block_interf_assert_held);
+
+static ssize_t
+block_interf_cpumask_read(struct file *filp, char __user *ubuf,
+		     size_t count, loff_t *ppos)
+{
+	char *mask_str;
+	int len;
+
+	len = snprintf(NULL, 0, "%*pb\n",
+		       cpumask_pr_args(block_interf_cpumask)) + 1;
+	mask_str = kmalloc(len, GFP_KERNEL);
+	if (!mask_str)
+		return -ENOMEM;
+
+	len = snprintf(mask_str, len, "%*pb\n",
+		       cpumask_pr_args(block_interf_cpumask));
+	if (len >= count) {
+		count = -EINVAL;
+		goto out_err;
+	}
+	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
+
+out_err:
+	kfree(mask_str);
+
+	return count;
+}
+
+static ssize_t
+block_interf_cpumask_write(struct file *filp, const char __user *ubuf,
+			   size_t count, loff_t *ppos)
+{
+	cpumask_var_t block_interf_cpumask_new;
+	int err;
+
+	if (!zalloc_cpumask_var(&block_interf_cpumask_new, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = cpumask_parse_user(ubuf, count, block_interf_cpumask_new);
+	if (err)
+		goto err_free;
+
+	cpumask_copy(block_interf_cpumask, block_interf_cpumask_new);
+	synchronize_srcu(&block_interf_srcu);
+	free_cpumask_var(block_interf_cpumask_new);
+
+	return count;
+
+err_free:
+	free_cpumask_var(block_interf_cpumask_new);
+
+	return err;
+}
+
+static const struct file_operations block_interf_cpumask_fops = {
+	.read		= block_interf_cpumask_read,
+	.write		= block_interf_cpumask_write,
+};
+
+static int __init block_interf_cpumask_init(void)
+{
+	int ret;
+
+	ret = init_srcu_struct(&block_interf_srcu);
+	if (ret)
+		return ret;
+
+	if (!zalloc_cpumask_var(&block_interf_cpumask, GFP_KERNEL)) {
+		cleanup_srcu_struct(&block_interf_srcu);
+		return -ENOMEM;
+	}
+
+	debugfs_create_file_unsafe("block_interf_cpumask", 0644, NULL, NULL,
+				   &block_interf_cpumask_fops);
+
+	block_interf_cpumask_active = true;
+	return 0;
+}
+
+late_initcall(block_interf_cpumask_init);
+