[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20180626121449.30353-1-masonleeback@gmail.com>
Date: Tue, 26 Jun 2018 06:14:49 -0600
From: Mason Lee Back <masonleeback@...il.com>
To: pbonzini@...hat.com
Cc: Mason Lee Back <masonleeback@...il.com>,
Radim Krčmář <rkrcmar@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, x86@...nel.org,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH] kvm: implement VEX prefix decoder, bextr/andn
---
arch/x86/include/asm/kvm_emulate.h | 17 ++++++
arch/x86/kvm/emulate.c | 96 +++++++++++++++++++++++++++---
2 files changed, 106 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 0f82cd91cd3c..7aa42ae90bf7 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -327,6 +327,23 @@ struct x86_emulate_ctxt {
*/
bool rip_relative;
u8 rex_prefix;
+ /* vex */
+ struct {
+ u8 prefix;
+ union {
+ struct {
+ uint8_t m : 5;
+ uint8_t b : 1;
+ uint8_t x : 1;
+ uint8_t r : 1;
+ uint8_t p : 2;
+ uint8_t l : 1;
+ uint8_t v : 4;
+ uint8_t w : 1;
+ };
+ u16 value;
+ };
+ }vex;
u8 lock_prefix;
u8 rep_prefix;
/* bitmaps of registers in _regs[] that can be read */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4c4f4263420c..2cca6d61a12f 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -66,6 +66,7 @@
#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
+#define OpVex 31ull /* Vector extensions */
#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)
@@ -117,6 +118,7 @@
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
+#define SrcVex (OpVex << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
@@ -160,6 +162,7 @@
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
+#define Src2Vex (OpVex << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define AlignMask ((u64)7 << 41)
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
@@ -4118,6 +4121,46 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
return rc;
}
+static int em_andn(struct x86_emulate_ctxt *ctxt)
+{
+	/*
+	 * ANDN (VEX 0F38 F2): dst = ~src1 & src2.
+	 * ZF and SF are set from the result; the remaining arithmetic
+	 * flags covered by EFLAGS_MASK are cleared.
+	 */
+	u64 result = ~ctxt->src.val & ctxt->src2.val;
+	int msb = 63;
+
+	if (ctxt->op_bytes == 4) {
+		result &= 0xFFFFFFFF;
+		msb = 31;
+	}
+
+	ctxt->dst.val = result;
+	ctxt->eflags &= ~EFLAGS_MASK;
+	if (!result)
+		ctxt->eflags |= X86_EFLAGS_ZF;
+	if (result >> msb)
+		ctxt->eflags |= X86_EFLAGS_SF;
+
+	return X86EMUL_CONTINUE;
+}
+
+static int em_bextr(struct x86_emulate_ctxt *ctxt)
+{
+	/*
+	 * BEXTR (VEX 0F38 F7): extract LEN bits of src starting at bit
+	 * START.  src2 holds the control word: START in bits 7:0, LEN in
+	 * bits 15:8.  Only ZF is defined from the result; the other flags
+	 * covered by EFLAGS_MASK are cleared.
+	 */
+	unsigned int start, len;
+	u64 temp;
+
+	start = ctxt->src2.val & 0xFF;
+	len = (ctxt->src2.val >> 8) & 0xFF;
+	temp = ctxt->src.val;
+	if (ctxt->op_bytes == 4)
+		temp &= 0xFFFFFFFF;
+
+	/*
+	 * START and LEN are full 8-bit fields and may exceed the operand
+	 * width.  Shifting a u64 by >= 64 is undefined behavior in C, so
+	 * clamp explicitly: architecturally an out-of-range START selects
+	 * no bits (result 0) and an out-of-range LEN keeps all remaining
+	 * bits.
+	 */
+	if (start >= 64)
+		temp = 0;
+	else
+		temp >>= start;
+	if (len < 64)
+		temp &= ~(~0ULL << len);
+
+	ctxt->dst.val = temp;
+	ctxt->eflags &= ~EFLAGS_MASK;
+	ctxt->eflags |= (temp == 0) ? X86_EFLAGS_ZF : 0;
+
+	return X86EMUL_CONTINUE;
+}
+
static bool valid_cr(int nr)
{
switch (nr) {
@@ -4849,7 +4892,10 @@ static const struct opcode opcode_map_0f_38[256] = {
GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
/* 0xf2 - 0xff */
- N, N, X4(N), X8(N)
+ I(ModRM | DstReg | SrcVex | Src2Mem, em_andn),
+ X4(N),
+ I(ModRM | DstReg | SrcMem | Src2Vex, em_bextr),
+ X8(N)
};
#undef D
@@ -5208,15 +5254,51 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
}
ctxt->d = opcode.flags;
- if (ctxt->d & ModRM)
- ctxt->modrm = insn_fetch(u8, ctxt);
+ if (ctxt->b == 0xC4 || ctxt->b == 0xC5) {
+ ctxt->vex.prefix = ctxt->b;
+ if (ctxt->b == 0xC4) {
+ ctxt->vex.value = insn_fetch(u16, ctxt);
+ } else {
+ ctxt->vex.value = insn_fetch(u8, ctxt) << 8;
+ ctxt->vex.r = ctxt->vex.w;
+ ctxt->vex.w = 1;
+ ctxt->vex.x = 1;
+ ctxt->vex.b = 1;
+ ctxt->vex.m = 1;
+ }
- /* vex-prefix instructions are not implemented */
- if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
- (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
- ctxt->d = NotImpl;
+ if (mode != X86EMUL_MODE_PROT64 && (!ctxt->vex.r || !ctxt->vex.x)) {
+ /* todo: reinterpret as LES (0xC4) or LDS (0xC5) instruction */
+ return EMULATION_FAILED;
+ }
+
+ ctxt->rex_prefix |= ctxt->vex.r ? 0 : (1 << 2); /* rex.r */
+ ctxt->rex_prefix |= ctxt->vex.x ? 0 : (1 << 1); /* rex.x */
+ ctxt->rex_prefix |= ctxt->vex.b ? 0 : (1 << 0); /* rex.b */
+ if (mode == X86EMUL_MODE_PROT64 && ctxt->vex.w) {
+ ctxt->op_bytes = 8;
+ }
+
+ ctxt->b = insn_fetch(u8, ctxt);
+ switch (ctxt->vex.m) {
+ case 1:
+ opcode = twobyte_table[ctxt->b];
+ break;
+ case 2:
+ opcode = opcode_map_0f_38[ctxt->b];
+ break;
+ case 3:
+ /* KVM doesn't support this */
+ return EMULATION_FAILED;
+ default:
+ return EMULATION_FAILED;
+ }
}
+ ctxt->d = opcode.flags;
+ if (ctxt->d & ModRM)
+ ctxt->modrm = insn_fetch(u8, ctxt);
+
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
--
2.17.1
Powered by blists - more mailing lists