Message-Id: <20201130174833.41315-3-rongwei.wang@linux.alibaba.com>
Date: Tue, 1 Dec 2020 01:48:32 +0800
From: Rongwei Wang <rongwei.wang@...ux.alibaba.com>
To: catalin.marinas@....com, will@...nel.org,
bjorn.andersson@...aro.org, shawnguo@...nel.org
Cc: vkoul@...nel.org, geert+renesas@...der.be, Anson.Huang@....com,
michael@...le.cc, krzk@...nel.org, olof@...om.net,
vincenzo.frascino@....com, ardb@...nel.org, masahiroy@...nel.org,
gshan@...hat.com, linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 2/3] arm64: msr: Introduce MSR ARM driver
The MSR-ARM driver is similar in function to the MSR module on x86: it
lets privileged user space read and write system registers, which makes
debugging easier.

Because the current AArch64 architecture lacks equivalents of the x86
rdmsr/wrmsr instructions (MRS/MSR encode the target register as an
immediate), the driver has to modify its own code segment at runtime in
order to read and write arbitrary registers.

For safety, since the patched code may raise undefined instruction
exceptions, we use the kernel's existing undefined instruction exception
hook interface to suppress only those exceptions generated by MSR-ARM
itself.

We also designed a user-space tool, system-register-tools, which works
like msr-tools on x86 (see the usage sketch below). We have open sourced
it on GitHub: https://github.com/alibaba/system-register-tools.
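For illustration, here is a minimal user-space sketch of how the device
node is driven (this mirrors what system-register-tools does; the SYSREG()
macro is a local stand-in for the Op0/Op1/CRn/CRm/Op2 encoding layout
documented in the patch, not an interface exported by this driver):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* system register encoding: Op0 [20:19], Op1 [18:16], CRn [15:12],
 * CRm [11:8], Op2 [7:5] */
#define SYSREG(op0, op1, crn, crm, op2) \
	(((op0) << 19) | ((op1) << 16) | ((crn) << 12) | \
	 ((crm) << 8) | ((op2) << 5))

int main(void)
{
	uint64_t val;
	/* MIDR_EL1 is S3_0_C0_C0_0: Op0=3, Op1=0, CRn=0, CRm=0, Op2=0 */
	off_t reg = SYSREG(3, 0, 0, 0, 0);
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return 1;
	/* seek to the register encoding, then read one 8-byte chunk */
	if (pread(fd, &val, sizeof(val), reg) == sizeof(val))
		printf("MIDR_EL1: 0x%016llx\n", (unsigned long long)val);
	close(fd);
	return 0;
}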
Signed-off-by: Rongwei Wang <rongwei.wang@...ux.alibaba.com>
---
arch/arm64/Kconfig | 9 +
arch/arm64/include/asm/msr_arm.h | 75 ++++++++
arch/arm64/kernel/Makefile | 3 +-
arch/arm64/kernel/msr_arm.c | 399 +++++++++++++++++++++++++++++++++++++++
arch/arm64/kernel/msr_smp.c | 249 ++++++++++++++++++++++++
5 files changed, 734 insertions(+), 1 deletion(-)
create mode 100644 arch/arm64/include/asm/msr_arm.h
create mode 100644 arch/arm64/kernel/msr_arm.c
create mode 100644 arch/arm64/kernel/msr_smp.c
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1515f6f..8077ff6 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1688,6 +1688,15 @@ config ARM64_MTE
endmenu
+config ARM64_MSR
+ tristate "/dev/cpu/*/msr - AArch64 model-specific register support"
+ help
+ This device gives privileged processes access to the arm64
+ Model-Specific Registers (MSRs). It is a character device with
+ major 202 and minors 0 to <NR_CPUS-1> for /dev/cpu/0/msr to
+ /dev/cpu/<NR_CPUS-1>/msr. MSR accesses are directed to a specific CPU on
+ multi-processor systems.
+
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
diff --git a/arch/arm64/include/asm/msr_arm.h b/arch/arm64/include/asm/msr_arm.h
new file mode 100644
index 0000000..9b52ef2
--- /dev/null
+++ b/arch/arm64/include/asm/msr_arm.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MSR_ARM_H
+#define _ASM_MSR_ARM_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/smp.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/uaccess.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu.h>
+#include <asm/sysreg.h>
+#include <asm/insn.h>
+#include <asm/traps.h>
+#include <asm/atomic.h>
+#include <linux/printk.h>
+
+/* base opcodes of the MRS/MSR (register) instructions */
+#define AARCH64_MRS_INSN (0xd5200000)
+#define AARCH64_MSR_INSN (0xd5000000)
+#define INSN_REG_MASK (0x0000001f)
+
+/*
+ * maximum value of each field in a
+ * system register encoding
+ */
+#define MAX_OP0 3
+#define MAX_OP1 7
+#define MAX_OP2 7
+#define MAX_CN 15
+#define MAX_CM 15
+
+#define MSR_DEBUG_REG 0x02
+#define MSR_NON_DEBUG_REG 0x03
+
+extern atomic_t msr_flags;
+extern atomic_t mrs_flags;
+
+struct msr {
+ union {
+ struct {
+ u32 l;
+ u32 h;
+ };
+ u64 q;
+ };
+};
+
+struct msr_info {
+ u32 msr_no;
+ u32 opt;
+ struct msr reg;
+ struct msr *msrs;
+ int err;
+};
+
+int aarch64_modify_read_text(u32 opcode);
+int aarch64_modify_write_text(u32 opcode);
+
+u32 *get_read_insn_addr(void);
+u32 *get_write_insn_addr(void);
+
+int rdmsr_safe_on_cpu_aarch64(u32 cpu, u32 opt, u32 *l, u32 *h);
+int wrmsr_safe_on_cpu_aarch64(u32 cpu, u32 opt, u32 l, u32 h);
+
+#endif /* _ASM_MSR_ARM_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index bbaf0bc..0fd2eff 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -17,7 +17,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
- syscall.o proton-pack.o
+ syscall.o proton-pack.o msr_smp.o
targets += efi-entry.o
@@ -60,6 +60,7 @@ obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_ARM64_MTE) += mte.o
+obj-$(CONFIG_ARM64_MSR) += msr_arm.o
obj-y += vdso/ probes/
obj-$(CONFIG_COMPAT_VDSO) += vdso32/
diff --git a/arch/arm64/kernel/msr_arm.c b/arch/arm64/kernel/msr_arm.c
new file mode 100644
index 0000000..4dec683
--- /dev/null
+++ b/arch/arm64/kernel/msr_arm.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ARM system register access device
+ *
+ * This device is accessed by lseek() (or pread()) to the appropriate
+ * register encoding and then read/write in chunks of 8 bytes. A larger
+ * size means multiple reads or writes of the same register.
+ *
+ * This driver uses /dev/cpu/%d/msr where %d is the minor number, and on
+ * an SMP box will direct the access to CPU %d.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/smp.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/uaccess.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu.h>
+#include <asm/sysreg.h>
+#include <asm/insn.h>
+#include <asm/traps.h>
+#include <asm/mmu.h>
+#include <asm/barrier.h>
+#include <asm/fixmap.h>
+#include <linux/printk.h>
+#include <asm/msr_arm.h>
+
+static struct class *msr_class;
+static enum cpuhp_state cpuhp_msr_state;
+
+static int hookers_mrs(struct pt_regs *regs, u32 insn)
+{
+ /* check whether this exception was raised by our patched MRS instruction */
+ if (atomic_read(&mrs_flags) &&
+ (regs->pc == (u64)get_read_insn_addr())) {
+ /* skip undef instruction and jump */
+ regs->pc += 2*AARCH64_INSN_SIZE;
+ pr_warn("undef exception has been ignored!\n");
+
+ return 0;
+ } else {
+ /* NOTE: not an exception from the MSR driver; return 1 so other hooks run */
+ return 1;
+ }
+}
+
+static int hookers_msr(struct pt_regs *regs, u32 insn)
+{
+ /* check whether this exception was raised by our patched MSR instruction */
+ if (atomic_read(&msr_flags) &&
+ (regs->pc == (u64)get_write_insn_addr())) {
+ /* skip the faulting instruction and the following branch */
+ regs->pc += 2*AARCH64_INSN_SIZE;
+ pr_warn("undef exception has been ignored!\n");
+
+ return 0;
+ } else {
+ /* NOTE: not an exception from the MSR driver; return 1 so other hooks run */
+ return 1;
+ }
+}
+
+/*
+ * The following four hooks are used to decide whether an undefined
+ * instruction exception taken at EL1 or EL2 was generated by our
+ * patched MSR/MRS instruction.
+ */
+static struct undef_hook mrs_hook_el1 = {
+ .instr_mask = 0xfff00000,
+ .instr_val = 0xd5300000,
+ .pstate_mask = PSR_MODE_MASK,
+ .pstate_val = PSR_MODE_EL1h,
+ .fn = hookers_mrs,
+};
+
+static struct undef_hook msr_hook_el1 = {
+ .instr_mask = 0xfff00000,
+ .instr_val = 0xd5100000,
+ .pstate_mask = PSR_MODE_MASK,
+ .pstate_val = PSR_MODE_EL1h,
+ .fn = hookers_msr,
+};
+
+static struct undef_hook mrs_hook_el2 = {
+ .instr_mask = 0xfff00000,
+ .instr_val = 0xd5300000,
+ .pstate_mask = PSR_MODE_MASK,
+ .pstate_val = PSR_MODE_EL2h,
+ .fn = hookers_mrs,
+};
+
+static struct undef_hook msr_hook_el2 = {
+ .instr_mask = 0xfff00000,
+ .instr_val = 0xd5100000,
+ .pstate_mask = PSR_MODE_MASK,
+ .pstate_val = PSR_MODE_EL2h,
+ .fn = hookers_msr,
+};
+
+/*
+ * ARMv8 ARM reserves the following encoding for system registers:
+ * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
+ * C5.2, version:ARM DDI 0487A.f)
+ * [20-19] : Op0
+ * [18-16] : Op1
+ * [15-12] : CRn
+ * [11-8] : CRm
+ * [7-5] : Op2
+ *
+ * The helpers below build the MRS/MSR instruction encodings.
+ */
+static u32 aarch64_insn_mrs_gen(u32 op0, u32 op1, u32 crn, u32 crm, u32 op2, u32 rt)
+{
+ return (AARCH64_MRS_INSN | sys_reg(op0, op1, crn, crm, op2) | rt);
+}
+
+static u32 aarch64_insn_msr_gen(u32 op0, u32 op1, u32 crn, u32 crm, u32 op2, u32 rt)
+{
+ return (AARCH64_MSR_INSN | sys_reg(op0, op1, crn, crm, op2) | rt);
+}
+
+/*
+ * Check that the register encoding is legal.
+ */
+static int aarch64_register_check(u32 reg)
+{
+ unsigned int op0, op1, cn, cm, op2;
+ u32 max_reg;
+ int ret;
+
+ max_reg = sys_reg(MAX_OP0, MAX_OP1, MAX_CN, MAX_CM, MAX_OP2);
+ if (reg & ~max_reg) {
+ /* illegal regcode */
+ return -EFAULT;
+ }
+
+ op0 = sys_reg_Op0(reg);
+ op1 = sys_reg_Op1(reg);
+ cn = sys_reg_CRn(reg);
+ cm = sys_reg_CRm(reg);
+ op2 = sys_reg_Op2(reg);
+
+ /*
+ * System registers have Op0 == 0b10 (debug) or 0b11 (non-debug).
+ */
+ if (op0 != MSR_DEBUG_REG && op0 != MSR_NON_DEBUG_REG) {
+ /* NOT support */
+ return -EFAULT;
+ }
+
+ if (op0 <= MAX_OP0 && op1 <= MAX_OP1 && op2 <= MAX_OP2
+ && cn <= MAX_CN && cm <= MAX_CM) {
+ /* legal regcode */
+ ret = 0;
+ } else {
+ /* illegal regcode */
+ ret = -EFAULT;
+ }
+ return ret;
+}
+
+/*
+ * Before reading or writing a register, patch the instruction at the
+ * corresponding address.
+ */
+static long msr_insn_smc(unsigned int ioc, unsigned long arg)
+{
+ u32 insnp = 0, insn = 0;
+ u32 reg = arg;
+ int err = 0;
+ unsigned int op0, op1, cn, cm, op2;
+
+ op0 = sys_reg_Op0(reg);
+ op1 = sys_reg_Op1(reg);
+ cn = sys_reg_CRn(reg);
+ cm = sys_reg_CRm(reg);
+ op2 = sys_reg_Op2(reg);
+ err = aarch64_register_check(reg);
+ if (err) {
+ /* illegal register */
+ return err;
+ }
+
+ switch (ioc) {
+ case 0x00:
+ err = aarch64_insn_read((void *)get_read_insn_addr(), &insnp);
+ if (err)
+ return err;
+ insn = aarch64_insn_mrs_gen(op0, op1, cn, cm, op2,
+ insnp & INSN_REG_MASK);
+ err = aarch64_modify_read_text(insn);
+ break;
+ case 0x01:
+ err = aarch64_insn_read((void *)get_write_insn_addr(), &insnp);
+ if (err)
+ return err;
+ insn = aarch64_insn_msr_gen(op0, op1, cn, cm, op2,
+ insnp & INSN_REG_MASK);
+ err = aarch64_modify_write_text(insn);
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ return err;
+}
+
+static ssize_t msr_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u32 __user *tmp = (u32 __user *) buf;
+ u32 data[2];
+ u32 reg = *ppos;
+ int cpu = iminor(file_inode(file));
+ int err;
+ ssize_t bytes = 0;
+
+ err = msr_insn_smc(0x00, reg);
+ if (err != 0) {
+ /* illegal register */
+ return err;
+ }
+
+ if (count % 8)
+ return -EINVAL; /* Invalid chunk size */
+ for (; count; count -= 8) {
+ err = rdmsr_safe_on_cpu_aarch64(cpu, 1, &data[0], &data[1]);
+ if (err)
+ break;
+ if (copy_to_user(tmp, &data, 8)) {
+ pr_err("copy error\n");
+ err = -EFAULT;
+ break;
+ }
+ tmp += 2;
+ bytes += 8;
+ }
+
+ return bytes ? bytes : err;
+}
+
+static ssize_t msr_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ const u32 __user *tmp = (const u32 __user *)buf;
+ u32 data[2];
+ u32 reg = *ppos;
+ int cpu = iminor(file_inode(file));
+ int err;
+ ssize_t bytes = 0;
+
+ err = msr_insn_smc(0x01, reg);
+ if (err != 0) {
+ /* illegal register */
+ return err;
+ }
+
+ if (count % 8)
+ return -EINVAL; /* Invalid chunk size */
+
+ if (copy_from_user(&data, tmp, 8)) {
+ err = -EFAULT;
+ return err;
+ }
+ err = wrmsr_safe_on_cpu_aarch64(cpu, 1, data[0], data[1]);
+ if (err)
+ return err;
+ bytes += 8;
+
+ return bytes ? bytes : err;
+}
+
+static int msr_open(struct inode *inode, struct file *file)
+{
+ unsigned int cpu = iminor(file_inode(file));
+ /* TODO */
+ struct cpuinfo_arm64 *c;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ if (cpu >= nr_cpu_ids || !cpu_online(cpu))
+ return -ENXIO; /* No such CPU */
+
+ c = &per_cpu(cpu_data, cpu);
+ return 0;
+}
+
+/*
+ * File operations support
+ */
+static const struct file_operations msr_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_seek_end_llseek,
+ .read = msr_read,
+ .write = msr_write,
+ .open = msr_open,
+};
+
+static int msr_device_create(unsigned int cpu)
+{
+ struct device *dev;
+
+ dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL,
+ "msr%d", cpu);
+ return PTR_ERR_OR_ZERO(dev);
+}
+
+static int msr_device_destroy(unsigned int cpu)
+{
+ device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+ return 0;
+}
+
+static char *msr_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
+}
+
+static int __init msr_init(void)
+{
+ int err;
+
+ err = __register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops);
+ if (err < 0) {
+ pr_err("unable to get major %d for msr\n", MSR_MAJOR);
+ return -EBUSY;
+ }
+
+ msr_class = class_create(THIS_MODULE, "msr");
+ if (IS_ERR(msr_class)) {
+ err = PTR_ERR(msr_class);
+ goto out_chrdev;
+ }
+ msr_class->devnode = msr_devnode;
+
+ /* register hotplug callbacks to create/destroy per-CPU device nodes */
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm/msr:online",
+ msr_device_create, msr_device_destroy);
+ if (err < 0)
+ goto out_class;
+ cpuhp_msr_state = err;
+
+ /*
+ * register two hooks to block undef instruction exception
+ * in EL1 and EL2.
+ */
+ register_undef_hook(&mrs_hook_el1);
+ register_undef_hook(&msr_hook_el1);
+ register_undef_hook(&mrs_hook_el2);
+ register_undef_hook(&msr_hook_el2);
+
+ /*
+ * Note: these two dummy calls are required to record the addresses of
+ * the MRS and MSR instructions that will be patched at runtime.
+ */
+ err = rdmsr_safe_on_cpu_aarch64(0, 0, NULL, NULL);
+ err = wrmsr_safe_on_cpu_aarch64(0, 0, 0, 0);
+ return 0;
+
+out_class:
+ class_destroy(msr_class);
+out_chrdev:
+ __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
+ return err;
+}
+module_init(msr_init);
+
+static void __exit msr_exit(void)
+{
+ /* unregister the MSR/MRS undefined instruction hooks */
+ unregister_undef_hook(&mrs_hook_el1);
+ unregister_undef_hook(&msr_hook_el1);
+ unregister_undef_hook(&mrs_hook_el2);
+ unregister_undef_hook(&msr_hook_el2);
+ cpuhp_remove_state(cpuhp_msr_state);
+ class_destroy(msr_class);
+ __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
+}
+module_exit(msr_exit);
+
+MODULE_AUTHOR("Rongwei Wang <rongwei.wang@...ux.alibaba.com>");
+MODULE_DESCRIPTION("ARM system register driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm64/kernel/msr_smp.c b/arch/arm64/kernel/msr_smp.c
new file mode 100644
index 0000000..0ec0a03
--- /dev/null
+++ b/arch/arm64/kernel/msr_smp.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MSR ARM Module support.
+ *
+ * Copyright (c) 2020, Alibaba Group.
+ * Authors: Rongwei Wang <rongwei.wang@...ux.alibaba.com>
+ */
+#include <linux/export.h>
+#include <linux/preempt.h>
+#include <linux/smp.h>
+#include <linux/completion.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <linux/uaccess.h>
+#include <linux/pagemap.h>
+#include <asm/msr_arm.h>
+
+static DEFINE_RAW_SPINLOCK(msr_lock);
+/* record the address of reading or writing */
+static u32 *rd_tp;
+static u32 *wr_tp;
+
+atomic_t msr_flags = ATOMIC_INIT(0);
+EXPORT_SYMBOL(msr_flags);
+atomic_t mrs_flags = ATOMIC_INIT(0);
+EXPORT_SYMBOL(mrs_flags);
+
+struct msr_info_completion {
+ struct msr_info msr;
+ struct completion done;
+};
+
+/*
+ * Patch the instruction at the read label (self-modifying code).
+ */
+int aarch64_modify_read_text(u32 opcode)
+{
+ void *addrs[1];
+
+ addrs[0] = rd_tp;
+ /*
+ * call aarch64_insn_patch_text to modify
+ * the opcode
+ */
+ return aarch64_insn_patch_text(addrs, &opcode, 1);
+}
+EXPORT_SYMBOL(aarch64_modify_read_text);
+
+/*
+ * Patch the instruction at the write label (self-modifying code).
+ */
+int aarch64_modify_write_text(u32 opcode)
+{
+ void *addrs[1];
+
+ addrs[0] = wr_tp;
+ /*
+ * call aarch64_insn_patch_text to modify
+ * the opcode
+ */
+ return aarch64_insn_patch_text(addrs, &opcode, 1);
+}
+EXPORT_SYMBOL(aarch64_modify_write_text);
+
+/*
+ * Return the address of the read or write patch site.
+ */
+u32 *get_read_insn_addr(void)
+{
+ /*
+ * TODO: make rd_tp per-cpu.
+ * When built as a module, this is a vmalloc address.
+ */
+ return rd_tp;
+}
+EXPORT_SYMBOL(get_read_insn_addr);
+
+u32 *get_write_insn_addr(void)
+{
+ /* This is a vmalloc address. */
+ return wr_tp;
+}
+EXPORT_SYMBOL(get_write_insn_addr);
+
+/*
+ * Read data from a system register.
+ *
+ * At runtime, the "mrs" instruction below is patched via the address
+ * recorded in rd_tp.
+ */
+static noinline int rdmsr_safe_aarch64(u32 opt, u32 *data0, u32 *data1)
+{
+ /* the target register has already been patched into the "mrs" below */
+ u32 err = 0;
+ unsigned long __val = 0;
+ unsigned long __pc_addr = 0;
+
+ if ((data0 == NULL) && (data1 == NULL)) {
+ /* probe call: only record the address of the patch site */
+ return 0;
+ }
+
+ raw_spin_lock(&msr_lock);
+ atomic_add(1, &mrs_flags);
+ /*
+ * On the first execution (opt == 0), the "mrs" instruction is NOT
+ * executed and only rd_tp is initialized. Otherwise, the "mrs"
+ * instruction has already been patched to the requested register.
+ */
+ asm volatile("mov %3, 0\n\t"
+ "cmp %4, 0\n\t"
+ "b.eq 1f\n\t"
+ "mrs %0, MIDR_EL1\n\t"
+ "b 1f\n\t"
+ "mov %3, 1\n\t" /* Execute only when an exception occurs */
+ "1:adr %1, .\n\t"
+ "sub %1, %1, 12\n\t"
+ "mov %2, %1\n\t"
+ : "=r"(__val), "=r"(__pc_addr), "=r"(rd_tp), "=&r"(err)
+ : "r"(opt));
+ atomic_sub(1, &mrs_flags);
+ raw_spin_unlock(&msr_lock);
+ if (err == 1) {
+ /* the patched instruction raised an undef exception */
+ return -EIO;
+ }
+
+ *data0 = __val;
+ *data1 = __val >> 32;
+ /* success */
+ return 0;
+}
+
+/*
+ * Write data to a system register.
+ *
+ * At runtime, the "msr" instruction below is patched via the address
+ * recorded in wr_tp.
+ */
+static noinline int wrmsr_safe_aarch64(u32 opt, u32 data0, u32 data1)
+{
+ unsigned long __val = 0;
+ unsigned long __pc_addr = 0;
+ u64 data = 0;
+ int err = 0;
+
+ data = data1;
+ data = (data << 32) | (data0);
+ __val = data;
+ /*
+ * Serialize the patched code with a lock and raise a flag (msr_flags)
+ * while the "msr" instruction executes; the undef exception hook
+ * checks this flag.
+ */
+ raw_spin_lock(&msr_lock);
+ atomic_add(1, &msr_flags);
+ /*
+ * On the first execution (opt == 0), the "msr" instruction is NOT
+ * executed and only wr_tp is initialized. Otherwise, the "msr"
+ * instruction has already been patched to the requested register.
+ */
+ asm volatile("mov %2, 0\n\t"
+ "cmp %4, 0\n\t"
+ "b.eq 1f\n\t"
+ "msr TCR_EL1, %3\n\t"
+ "b 1f\n\t"
+ "mov %2, 1\n\t" /* exec when exception occurred */
+ "1:adr %0, .\n\t"
+ "sub %0, %0, 12\n\t"
+ "mov %1, %0\n\t"
+ : "=r"(__pc_addr), "=r"(wr_tp), "=&r"(err)
+ : "rZ"(__val), "r"(opt));
+ atomic_sub(1, &msr_flags);
+ raw_spin_unlock(&msr_lock);
+ if (err == 1) {
+ /* the patched instruction raised an undef exception */
+ return -EIO;
+ }
+ /* success */
+ return 0;
+}
+
+/*
+ * These "safe" variants are slower and should be used when the target MSR
+ * may not actually exist.
+ */
+static void __rdmsr_safe_on_cpu_aarch64(void *info)
+{
+ struct msr_info_completion *rv = info;
+
+ rv->msr.err = rdmsr_safe_aarch64(rv->msr.opt, &rv->msr.reg.l,
+ &rv->msr.reg.h);
+ complete(&rv->done);
+}
+
+static void __wrmsr_safe_on_cpu_aarch64(void *info)
+{
+ struct msr_info *rv = info;
+
+ rv->err = wrmsr_safe_aarch64(rv->opt, rv->reg.l, rv->reg.h);
+}
+
+int rdmsr_safe_on_cpu_aarch64(u32 cpu, u32 opt, u32 *l, u32 *h)
+{
+ struct msr_info_completion rv;
+ call_single_data_t csd = {
+ .func = __rdmsr_safe_on_cpu_aarch64,
+ .info = &rv,
+ };
+ int err;
+
+ memset(&rv, 0, sizeof(rv));
+ init_completion(&rv.done);
+ rv.msr.opt = opt;
+
+ err = smp_call_function_single_async(cpu, &csd);
+ if (!err) {
+ wait_for_completion_timeout(&rv.done, msecs_to_jiffies(5000));
+ err = rv.msr.err;
+ }
+
+ if ((l != NULL) && (h != NULL)) {
+ *l = rv.msr.reg.l;
+ *h = rv.msr.reg.h;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(rdmsr_safe_on_cpu_aarch64);
+
+int wrmsr_safe_on_cpu_aarch64(u32 cpu, u32 opt, u32 l, u32 h)
+{
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.opt = opt;
+ rv.reg.l = l;
+ rv.reg.h = h;
+ err = smp_call_function_single(cpu,
+ __wrmsr_safe_on_cpu_aarch64, &rv, 1);
+
+ return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsr_safe_on_cpu_aarch64);
--
1.8.3.1