Message-Id: <20170531030838.23182-1-nick.desaulniers@gmail.com>
Date: Tue, 30 May 2017 20:08:38 -0700
From: Nick Desaulniers <nick.desaulniers@...il.com>
To: unlisted-recipients:; (no To-header on input)
Cc: Nick Desaulniers <nick.desaulniers@...il.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Radim Krčmář <rkrcmar@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, x86@...nel.org,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH v5] KVM: x86: avoid large stack allocations in em_fxrstor

em_fxrstor previously called fxrstor_fixup. Both allocated an instance of
struct fxregs_state on the stack, which triggered the warning:

arch/x86/kvm/emulate.c:4018:12: warning: stack frame size of 1080 bytes
    in function 'em_fxrstor' [-Wframe-larger-than=]
static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
           ^

with CONFIG_FRAME_WARN set to 1024.

This patch does the fixup in em_fxrstor itself, avoiding the additional
struct fxregs_state allocation; fxrstor_fixup can then be removed as it has
no other call sites.

Further, the calculation of offsets into xmm_space can be shared between
em_fxrstor and em_fxsave.

Signed-off-by: Nick Desaulniers <nick.desaulniers@...il.com>
---
New in V5:
* Updated commit message.
* Moved the offset calculation from em_fxrstor and em_fxsave into a new
  helper, xmm_offset.
* Always call put_fpu after a get_fpu.
* Removed the temporary size variable from em_fxsave.
* Moved segmented_read_std to after the conditional in em_fxrstor as per
  feedback.

New in V4:
* Undo the changes from V3; prefer to only call segmented_read_std() when
  size has been initialized.

New in V3:
* Initialized size to 0 to avoid a maybe-uninitialized warning. Check that
  it gets set to something other than 0 before passing size to
  segmented_read_std().

New in V2:
* Reworked patch to do what was recommended by maintainers in:
  https://lkml.org/lkml/2017/5/25/391
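
For reference (not part of the patch): a rough standalone sketch of the
arithmetic behind the new xmm_offset() helper. struct fxregs_state mirrors
the 512-byte FXSAVE area and declares xmm_space as an array of u32, so the
eight XMM registers visible without CR4.OSFXSR occupy 8 * 16 / 4 = 32 array
elements. The struct below is only a trimmed stand-in for the kernel's
definition in arch/x86/include/asm/fpu/types.h so the example compiles on
its own; the expected offsets follow the architectural FXSAVE layout.

  #include <stdint.h>
  #include <stddef.h>
  #include <stdio.h>

  /* Trimmed stand-in for the kernel's struct fxregs_state (512-byte FXSAVE area). */
  struct fxregs_state {
          uint16_t cwd, swd, twd, fop;
          uint64_t rip, rdp;              /* 64-bit form of the FP instruction/operand pointers */
          uint32_t mxcsr, mxcsr_mask;     /* header ends at byte 32 */
          uint32_t st_space[32];          /* ST0-7/MM0-7: 8 regs * 16 bytes, bytes 32-159 */
          uint32_t xmm_space[64];         /* XMM0-15: 16 regs * 16 bytes, bytes 160-415 */
          uint32_t padding[24];           /* bytes 416-511 */
  };

  int main(void)
  {
          /* CR4.OSFXSR clear: stop right before xmm_space; the x87 state and
           * MXCSR (both before xmm_space) are still copied. */
          size_t without_osfxsr = offsetof(struct fxregs_state, xmm_space[0]);

          /* CR4.OSFXSR set: also copy XMM0-7, i.e. 8 * 16 / 4 = 32 more u32s. */
          size_t with_osfxsr = offsetof(struct fxregs_state, xmm_space[8 * 16 / 4]);

          printf("size without OSFXSR: %zu\n", without_osfxsr); /* expect 160 */
          printf("size with OSFXSR:    %zu\n", with_osfxsr);    /* expect 288 */
          return 0;
  }
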
arch/x86/kvm/emulate.c | 75 ++++++++++++++++++++------------------------------
1 file changed, 30 insertions(+), 45 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0816ab2e8adc..55470ad07c2a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3941,6 +3941,16 @@ static int check_fxsr(struct x86_emulate_ctxt *ctxt)
 }
 
 /*
+ * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
+ * and restore MXCSR.
+ */
+static size_t xmm_offset(struct x86_emulate_ctxt *ctxt)
+{
+	bool b = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
+	return offsetof(struct fxregs_state, xmm_space[b ? 8 * 16 / 4 : 0]);
+}
+
+/*
  * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
  *  1) 16 bit mode
  *  2) 32 bit mode
@@ -3961,7 +3971,6 @@ static int check_fxsr(struct x86_emulate_ctxt *ctxt)
 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 {
 	struct fxregs_state fx_state;
-	size_t size;
 	int rc;
 
 	rc = check_fxsr(ctxt);
@@ -3977,68 +3986,44 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
-		size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
-	else
-		size = offsetof(struct fxregs_state, xmm_space[0]);
-
-	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
-}
-
-static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
-			 struct fxregs_state *new)
-{
-	int rc = X86EMUL_CONTINUE;
-	struct fxregs_state old;
-
-	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
-	if (rc != X86EMUL_CONTINUE)
-		return rc;
-
-	/*
-	 * 64 bit host will restore XMM 8-15, which is not correct on non-64
-	 * bit guests.  Load the current values in order to preserve 64 bit
-	 * XMMs after fxrstor.
-	 */
-#ifdef CONFIG_X86_64
-	/* XXX: accessing XMM 8-15 very awkwardly */
-	memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
-#endif
-
-	/*
-	 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
-	 * does save and restore MXCSR.
-	 */
-	if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
-		memcpy(new->xmm_space, old.xmm_space, 8 * 16);
-
-	return rc;
+	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
+				   xmm_offset(ctxt));
 }
 
 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 {
 	struct fxregs_state fx_state;
 	int rc;
+	size_t size;
 
 	rc = check_fxsr(ctxt);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
-	if (rc != X86EMUL_CONTINUE)
-		return rc;
+	ctxt->ops->get_fpu(ctxt);
 
-	if (fx_state.mxcsr >> 16)
-		return emulate_gp(ctxt, 0);
+	if (ctxt->mode < X86EMUL_MODE_PROT64) {
+		rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+		if (rc != X86EMUL_CONTINUE)
+			goto out;
+		size = xmm_offset(ctxt);
+	} else {
+		size = offsetof(struct fxregs_state, xmm_space[16]);
+	}
 
-	ctxt->ops->get_fpu(ctxt);
+	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+	if (rc != X86EMUL_CONTINUE)
+		goto out;
 
-	if (ctxt->mode < X86EMUL_MODE_PROT64)
-		rc = fxrstor_fixup(ctxt, &fx_state);
+	if (fx_state.mxcsr >> 16) {
+		rc = emulate_gp(ctxt, 0);
+		goto out;
+	}
 
 	if (rc == X86EMUL_CONTINUE)
 		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
 
+out:
 	ctxt->ops->put_fpu(ctxt);
 
 	return rc;
--
2.11.0