Date:	Thu, 14 Apr 2011 23:27:52 -0400
From:	Nelson Elhage <nelhage@...lice.com>
To:	Avi Kivity <avi@...hat.com>
Cc:	kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
	Nelson Elhage <nelhage@...lice.com>
Subject: [PATCH] KVM: emulator: Use linearize() when fetching instructions.

Doing so means that the truncation behavior in linearize() needs to grow one
slight piece of additional complexity: when fetching, truncation depends on the
execution mode rather than on the current address size.

Signed-off-by: Nelson Elhage <nelhage@...lice.com>
---
 arch/x86/include/asm/kvm_emulate.h |    1 -
 arch/x86/kvm/emulate.c             |   23 ++++++++++++-----------
 2 files changed, 12 insertions(+), 12 deletions(-)
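
As a rough standalone illustration of the new rule (not part of the patch; the
helper name, the constant's value here, and the example addresses are made up
for this sketch), the mode-dependent truncation amounts to:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder value for this sketch only; the real constant lives in
 * arch/x86/include/asm/kvm_emulate.h. */
#define X86EMUL_MODE_PROT64 8

/* Hypothetical standalone version of the truncation rule in linearize():
 * data accesses truncate the linear address to 32 bits unless the address
 * size is 8 bytes, while instruction fetches truncate unless the emulator
 * is in 64-bit (long) mode. */
static uint64_t truncate_linear(uint64_t la, bool fetch, int mode, int ad_bytes)
{
	if (fetch ? mode != X86EMUL_MODE_PROT64 : ad_bytes != 8)
		la &= (uint32_t)-1;
	return la;
}

int main(void)
{
	/* A fetch outside 64-bit mode wraps into the low 4 GiB... */
	printf("%#llx\n", (unsigned long long)
	       truncate_linear(0x1ffffffff0ULL, true, /*mode=*/0, /*ad_bytes=*/8));
	/* ...while the same address on a fetch in 64-bit mode is untouched. */
	printf("%#llx\n", (unsigned long long)
	       truncate_linear(0x1ffffffff0ULL, true, X86EMUL_MODE_PROT64, 8));
	return 0;
}

Data accesses keep keying off c->ad_bytes, so for example a 32-bit address-size
override inside long mode still wraps, whereas instruction fetches only ever
wrap when the emulator is outside 64-bit mode.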

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 0818448..9b760c8 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -265,7 +265,6 @@ struct x86_emulate_ctxt {
 	unsigned long eip; /* eip before instruction emulation */
 	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
 	int mode;
-	u32 cs_base;
 
 	/* interruptibility state, as a result of execution of STI or MOV SS */
 	int interruptibility;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a5f63d4..d3d43a7 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -542,7 +542,7 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 
 static int linearize(struct x86_emulate_ctxt *ctxt,
 		     struct segmented_address addr,
-		     unsigned size, bool write,
+		     unsigned size, bool write, bool fetch,
 		     ulong *linear)
 {
 	struct decode_cache *c = &ctxt->decode;
@@ -602,7 +602,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		}
 		break;
 	}
-	if (c->ad_bytes != 8)
+	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : c->ad_bytes != 8)
 		la &= (u32)-1;
 	*linear = la;
 	return X86EMUL_CONTINUE;
@@ -621,7 +621,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	ulong linear;
 
-	rc = linearize(ctxt, addr, size, false, &linear);
+	rc = linearize(ctxt, addr, size, false, false, &linear);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	return ctxt->ops->read_std(linear, data, size, ctxt->vcpu,
@@ -637,11 +637,13 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
 	int size, cur_size;
 
 	if (eip == fc->end) {
-		unsigned long linear = eip + ctxt->cs_base;
-		if (ctxt->mode != X86EMUL_MODE_PROT64)
-			linear &= (u32)-1;
+		unsigned long linear;
+		struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = eip };
 		cur_size = fc->end - fc->start;
 		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
+		rc = linearize(ctxt, addr, size, false, true, &linear);
+		if (rc != X86EMUL_CONTINUE)
+			return rc;
 		rc = ops->fetch(linear, fc->data + cur_size,
 				size, ctxt->vcpu, &ctxt->exception);
 		if (rc != X86EMUL_CONTINUE)
@@ -1047,7 +1049,7 @@ static int segmented_read(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	ulong linear;
 
-	rc = linearize(ctxt, addr, size, false, &linear);
+	rc = linearize(ctxt, addr, size, false, false, &linear);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	return read_emulated(ctxt, ctxt->ops, linear, data, size);
@@ -1061,7 +1063,7 @@ static int segmented_write(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	ulong linear;
 
-	rc = linearize(ctxt, addr, size, true, &linear);
+	rc = linearize(ctxt, addr, size, true, false, &linear);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	return ctxt->ops->write_emulated(linear, data, size,
@@ -1076,7 +1078,7 @@ static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	ulong linear;
 
-	rc = linearize(ctxt, addr, size, true, &linear);
+	rc = linearize(ctxt, addr, size, true, false, &linear);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	return ctxt->ops->cmpxchg_emulated(linear, orig_data, data,
@@ -2576,7 +2578,7 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
 	int rc;
 	ulong linear;
 
-	rc = linearize(ctxt, c->src.addr.mem, 1, false, &linear);
+	rc = linearize(ctxt, c->src.addr.mem, 1, false, false, &linear);
 	if (rc == X86EMUL_CONTINUE)
 		emulate_invlpg(ctxt->vcpu, linear);
 	/* Disable writeback. */
@@ -3154,7 +3156,6 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 	c->fetch.end = c->fetch.start + insn_len;
 	if (insn_len > 0)
 		memcpy(c->fetch.data, insn, insn_len);
-	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
 
 	switch (mode) {
 	case X86EMUL_MODE_REAL:
-- 
1.7.2.43.g36c08.dirty

