Message-ID: <158964181672.17951.9851247776841845334.tip-bot2@tip-bot2>
Date: Sat, 16 May 2020 15:10:16 -0000
From: "tip-bot2 for Yu-cheng Yu" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: "Yu-cheng Yu" <yu-cheng.yu@...el.com>,
Borislav Petkov <bp@...e.de>, x86 <x86@...nel.org>,
LKML <linux-kernel@...r.kernel.org>
Subject: [tip: x86/fpu] x86/fpu: Introduce copy_supervisor_to_kernel()
The following commit has been merged into the x86/fpu branch of tip:
Commit-ID: eeedf1533687b8e81865fdbde79eddf7c4b76c9a
Gitweb: https://git.kernel.org/tip/eeedf1533687b8e81865fdbde79eddf7c4b76c9a
Author: Yu-cheng Yu <yu-cheng.yu@...el.com>
AuthorDate: Tue, 12 May 2020 07:54:42 -07:00
Committer: Borislav Petkov <bp@...e.de>
CommitterDate: Sat, 16 May 2020 11:24:14 +02:00
x86/fpu: Introduce copy_supervisor_to_kernel()
The XSAVES instruction takes a mask and saves only the features specified
in that mask. The kernel normally specifies that all features be saved.
XSAVES also unconditionally uses the "compacted format" which means that
all specified features are saved next to each other in memory. If a
feature is removed from the mask, all the features after it will "move
up" into earlier locations in the buffer.
Introduce copy_supervisor_to_kernel(), which saves only supervisor states
and then moves those states into the standard location where they are
normally found.
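For context, a minimal sketch of the call sequence the description implies for the signal-restore path. This is hedged: restore_sig_sketch() is a hypothetical wrapper, not the kernel's __fpu__restore_sig(); only copy_supervisor_to_kernel() and copy_user_to_xstate() are real helpers declared in xstate.h (see the hunk below):

	/*
	 * Sketch only, assuming kernel context.  'xsave' is the task's
	 * in-kernel XSAVE buffer, 'ubuf' the user-space signal frame.
	 */
	static int restore_sig_sketch(struct xregs_state *xsave,
				      const void __user *ubuf)
	{
		/*
		 * Snapshot supervisor states from the registers into the
		 * kernel buffer first; this clobbers the old user states.
		 */
		copy_supervisor_to_kernel(xsave);

		/*
		 * Then fill in the user states from the signal frame.  The
		 * buffer now holds the current supervisor states plus the
		 * user's saved states.
		 */
		return copy_user_to_xstate(xsave, ubuf);
	}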
Signed-off-by: Yu-cheng Yu <yu-cheng.yu@...el.com>
Signed-off-by: Borislav Petkov <bp@...e.de>
Link: https://lkml.kernel.org/r/20200512145444.15483-9-yu-cheng.yu@intel.com
---
arch/x86/include/asm/fpu/xstate.h | 1 +-
arch/x86/kernel/fpu/xstate.c | 84 ++++++++++++++++++++++++++++++-
2 files changed, 85 insertions(+)
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 92104b2..422d836 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -75,6 +75,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
+void copy_supervisor_to_kernel(struct xregs_state *xsave);
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
int validate_user_xstate_header(const struct xstate_header *hdr);
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index a68213e..587e03f 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -62,6 +62,7 @@ u64 xfeatures_mask_all __read_mostly;
static unsigned int xstate_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_sizes[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_comp_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
+static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
/*
* The XSAVE area of kernel can be in standard or compacted format;
@@ -393,6 +394,33 @@ static void __init setup_xstate_comp_offsets(void)
}
/*
+ * Setup offsets of a supervisor-state-only XSAVES buffer:
+ *
+ * The offsets stored in xstate_comp_offsets[] only work for one specific
+ * value of the Requested Feature BitMap (RFBM). In cases where a different
+ * RFBM value is used, a different set of offsets is required. This set of
+ * offsets is for when RFBM=xfeatures_mask_supervisor().
+ */
+static void __init setup_supervisor_only_offsets(void)
+{
+ unsigned int next_offset;
+ int i;
+
+ next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;
+
+ for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
+ if (!xfeature_enabled(i) || !xfeature_is_supervisor(i))
+ continue;
+
+ if (xfeature_is_aligned(i))
+ next_offset = ALIGN(next_offset, 64);
+
+ xstate_supervisor_only_offsets[i] = next_offset;
+ next_offset += xstate_sizes[i];
+ }
+}
+
+/*
* Print out xstate component offsets and sizes
*/
static void __init print_xstate_offset_size(void)
@@ -790,6 +818,7 @@ void __init fpu__init_system_xstate(void)
fpu__init_prepare_fx_sw_frame();
setup_init_fpu_buf();
setup_xstate_comp_offsets();
+ setup_supervisor_only_offsets();
print_xstate_offset_size();
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
@@ -1262,6 +1291,61 @@ int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
return 0;
}
+/*
+ * Save only supervisor states to the kernel buffer. This blows away all
+ * old states, and is intended to be used only in __fpu__restore_sig(), where
+ * user states are restored from the user buffer.
+ */
+void copy_supervisor_to_kernel(struct xregs_state *xstate)
+{
+ struct xstate_header *header;
+ u64 max_bit, min_bit;
+ u32 lmask, hmask;
+ int err, i;
+
+ if (WARN_ON(!boot_cpu_has(X86_FEATURE_XSAVES)))
+ return;
+
+ if (!xfeatures_mask_supervisor())
+ return;
+
+ max_bit = __fls(xfeatures_mask_supervisor());
+ min_bit = __ffs(xfeatures_mask_supervisor());
+
+ lmask = xfeatures_mask_supervisor();
+ hmask = xfeatures_mask_supervisor() >> 32;
+ XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
+
+ /* We should never fault when copying to a kernel buffer: */
+ if (WARN_ON_FPU(err))
+ return;
+
+ /*
+ * At this point, the buffer has only supervisor states and must be
+ * converted back to normal kernel format.
+ */
+ header = &xstate->header;
+ header->xcomp_bv |= xfeatures_mask_all;
+
+ /*
+ * This only moves states up in the buffer. Start with
+ * the last state and move backwards so that states are
+ * not overwritten until after they are moved. Note:
+ * memmove() allows overlapping src/dst buffers.
+ */
+ for (i = max_bit; i >= min_bit; i--) {
+ u8 *xbuf = (u8 *)xstate;
+
+ if (!((header->xfeatures >> i) & 1))
+ continue;
+
+ /* Move xfeature 'i' into its normal location */
+ memmove(xbuf + xstate_comp_offsets[i],
+ xbuf + xstate_supervisor_only_offsets[i],
+ xstate_sizes[i]);
+ }
+}
+
#ifdef CONFIG_PROC_PID_ARCH_STATUS
/*
* Report the amount of time elapsed in millisecond since last AVX512