Message-Id: <1446219386-23937-4-git-send-email-rkrcmar@redhat.com>
Date:	Fri, 30 Oct 2015 16:36:26 +0100
From:	Radim Krčmář <rkrcmar@...hat.com>
To:	linux-kernel@...r.kernel.org
Cc:	kvm@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>,
	Laszlo Ersek <lersek@...hat.com>
Subject: [PATCH 3/3] KVM: x86: simplify RSM into 64-bit protected mode

This reverts 0123456789abc ("KVM: x86: fix RSM into 64-bit protected
mode, round 2").  We achieve the same result by treating SMBASE as a
physical address in the previous patch: reads from the state-save area
no longer depend on the guest's current address translation, so segment
registers can be loaded directly after entering protected mode, with no
need to stash them first.
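
For reference, rsm_load_seg_64() ends up as follows once the series is
applied.  The lines outside the quoted hunks below are reconstructed
from the surrounding context of arch/x86/kvm/emulate.c, so treat this
as a sketch rather than the authoritative tree:

static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	/* each of the six segment registers has a 16-byte save slot */
	offset = 0x7e00 + n * 16;

	/* GET_SMSTATE reads SMRAM physically as of the previous patch */
	selector =                GET_SMSTATE(u16, smbase, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);

	/* load the register directly; no separate stash pass is needed */
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}

The seemingly unconditional X86EMUL_CONTINUE is fine: GET_SMSTATE is a
statement-expression macro that returns X86EMUL_UNHANDLEABLE from the
enclosing function when the physical read fails, which is what the
r != X86EMUL_CONTINUE check in the caller's loop catches.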

Signed-off-by: Radim Krčmář <rkrcmar@...hat.com>
---
 arch/x86/kvm/emulate.c | 37 +++++++------------------------------
 1 file changed, 7 insertions(+), 30 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 59e80e0de865..b60fed56671b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2311,16 +2311,7 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 	return X86EMUL_CONTINUE;
 }
 
-struct rsm_stashed_seg_64 {
-	u16 selector;
-	struct desc_struct desc;
-	u32 base3;
-};
-
-static int rsm_stash_seg_64(struct x86_emulate_ctxt *ctxt,
-			    struct rsm_stashed_seg_64 *stash,
-			    u64 smbase,
-			    int n)
+static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 {
 	struct desc_struct desc;
 	int offset;
@@ -2335,20 +2326,10 @@ static int rsm_stash_seg_64(struct x86_emulate_ctxt *ctxt,
 	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
 	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
 
-	stash[n].selector = selector;
-	stash[n].desc = desc;
-	stash[n].base3 = base3;
+	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
 	return X86EMUL_CONTINUE;
 }
 
-static inline void rsm_load_seg_64(struct x86_emulate_ctxt *ctxt,
-				   struct rsm_stashed_seg_64 *stash,
-				   int n)
-{
-	ctxt->ops->set_segment(ctxt, stash[n].selector, &stash[n].desc,
-			       stash[n].base3, n);
-}
-
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 				     u64 cr0, u64 cr4)
 {
@@ -2438,7 +2419,6 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	u32 base3;
 	u16 selector;
 	int i, r;
-	struct rsm_stashed_seg_64 stash[6];
 
 	for (i = 0; i < 16; i++)
 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
@@ -2480,18 +2460,15 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
 	ctxt->ops->set_gdt(ctxt, &dt);
 
-	for (i = 0; i < ARRAY_SIZE(stash); i++) {
-		r = rsm_stash_seg_64(ctxt, stash, smbase, i);
-		if (r != X86EMUL_CONTINUE)
-			return r;
-	}
-
 	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
 	if (r != X86EMUL_CONTINUE)
 		return r;
 
-	for (i = 0; i < ARRAY_SIZE(stash); i++)
-		rsm_load_seg_64(ctxt, stash, i);
+	for (i = 0; i < 6; i++) {
+		r = rsm_load_seg_64(ctxt, smbase, i);
+		if (r != X86EMUL_CONTINUE)
+			return r;
+	}
 
 	return X86EMUL_CONTINUE;
 }
-- 
2.5.3
