Date:	Sat, 06 Mar 2010 14:53:42 +0100
From:	Stefan Bader <stefan.bader@...onical.com>
To:	Avi Kivity <avi@...hat.com>
CC:	kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 13/20] KVM: x86 emulator: fix memory access during x86 emulation

Hi Avi,

we are currently trying to integrate this patch (amongst other KVM updates)
into an update of a 2.6.32-based kernel. But as soon as this patch is added,
KVM dies on startup in kvm_leave_lazy_mmu. This has been documented here:

https://bugs.edge.launchpad.net/ubuntu/+source/linux/+bug/531823

I have placed backports of your patches, which are currently in linux-next
and marked for stable, here:

git://kernel.ubuntu.com/smb/linux-2.6.32.y kvm

I have reproduced the failure with a version that has only the following patches applied:
KVM: x86 emulator: Add Virtual-8086 mode of emulation
KVM: x86 emulator: fix memory access during x86 emulation
KVM: x86 emulator: Check IOPL level during io instruction emulation
KVM: x86 emulator: Fix popf emulation
KVM: x86 emulator: Check CPL level during privilege instruction emulation

and also with a version that includes all stable patches up to the bad one:
KVM: VMX: Trap and invalid MWAIT/MONITOR instruction
KVM: x86 emulator: Add group8 instruction decoding
KVM: x86 emulator: Add group9 instruction decoding
KVM: x86 emulator: Add Virtual-8086 mode of emulation
KVM: x86 emulator: fix memory access during x86 emulation

But as soon as the memory-access fix is added, the bug occurs. Do you have
any idea what might be causing this?
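
For reference, the interface change at the heart of the patch is that
mmu.gva_to_gpa() now takes a CPL-derived access mask plus an error
out-parameter, and all callers go through per-access-type wrappers. Below is
a stand-alone sketch of just that wrapper pattern (not the KVM code itself;
get_cpl() here is a stub for kvm_x86_ops->get_cpl()):

#include <stdint.h>
#include <stdio.h>

#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK  (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

/* stub: pretend the guest is running at CPL 3 (userspace) */
static unsigned int get_cpl(void) { return 3; }

/* reads carry PFERR_USER_MASK at CPL 3; writes/fetches OR in their bit */
static uint32_t access_read(void)  { return (get_cpl() == 3) ? PFERR_USER_MASK : 0; }
static uint32_t access_write(void) { return access_read() | PFERR_WRITE_MASK; }
static uint32_t access_fetch(void) { return access_read() | PFERR_FETCH_MASK; }

int main(void)
{
	printf("read=%#x write=%#x fetch=%#x\n",
	       (unsigned)access_read(), (unsigned)access_write(),
	       (unsigned)access_fetch());
	return 0;
}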

Thanks,
Stefan

Avi Kivity wrote:
> From: Gleb Natapov <gleb@...hat.com>
> 
> Currently, when the x86 emulator needs to access memory, the page walk is
> done with the broadest permissions possible, so if an emulated instruction
> was executed by a userspace process it can still access kernel memory. Fix
> that by providing the correct memory access permissions to the page walker
> during emulation.
> 
> Signed-off-by: Gleb Natapov <gleb@...hat.com>
> Cc: stable@...nel.org
> Signed-off-by: Avi Kivity <avi@...hat.com>
> ---
>  arch/x86/include/asm/kvm_emulate.h |   14 +++-
>  arch/x86/include/asm/kvm_host.h    |    7 ++-
>  arch/x86/kvm/emulate.c             |    6 +-
>  arch/x86/kvm/mmu.c                 |   17 ++---
>  arch/x86/kvm/mmu.h                 |    6 ++
>  arch/x86/kvm/paging_tmpl.h         |   11 ++-
>  arch/x86/kvm/x86.c                 |  131 +++++++++++++++++++++++++++---------
>  7 files changed, 142 insertions(+), 50 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
> index 784d7c5..7a6f54f 100644
> --- a/arch/x86/include/asm/kvm_emulate.h
> +++ b/arch/x86/include/asm/kvm_emulate.h
> @@ -54,13 +54,23 @@ struct x86_emulate_ctxt;
>  struct x86_emulate_ops {
>  	/*
>  	 * read_std: Read bytes of standard (non-emulated/special) memory.
> -	 *           Used for instruction fetch, stack operations, and others.
> +	 *           Used for descriptor reading.
>  	 *  @addr:  [IN ] Linear address from which to read.
>  	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
>  	 *  @bytes: [IN ] Number of bytes to read from memory.
>  	 */
>  	int (*read_std)(unsigned long addr, void *val,
> -			unsigned int bytes, struct kvm_vcpu *vcpu);
> +			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
> +
> +	/*
> +	 * fetch: Read bytes of standard (non-emulated/special) memory.
> +	 *        Used for instruction fetch.
> +	 *  @addr:  [IN ] Linear address from which to read.
> +	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
> +	 *  @bytes: [IN ] Number of bytes to read from memory.
> +	 */
> +	int (*fetch)(unsigned long addr, void *val,
> +			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
>  
>  	/*
>  	 * read_emulated: Read bytes from emulated/special memory area.
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 1522337..c07c16f 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -243,7 +243,8 @@ struct kvm_mmu {
>  	void (*new_cr3)(struct kvm_vcpu *vcpu);
>  	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
>  	void (*free)(struct kvm_vcpu *vcpu);
> -	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
> +	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
> +			    u32 *error);
>  	void (*prefetch_page)(struct kvm_vcpu *vcpu,
>  			      struct kvm_mmu_page *page);
>  	int (*sync_page)(struct kvm_vcpu *vcpu,
> @@ -660,6 +661,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
>  int kvm_mmu_load(struct kvm_vcpu *vcpu);
>  void kvm_mmu_unload(struct kvm_vcpu *vcpu);
>  void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
> +gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
> +gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
> +gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
> +gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
>  
>  int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
>  
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index e4e2df3..c44b460 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -616,7 +616,7 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
>  
>  	if (linear < fc->start || linear >= fc->end) {
>  		size = min(15UL, PAGE_SIZE - offset_in_page(linear));
> -		rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
> +		rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
>  		if (rc)
>  			return rc;
>  		fc->start = linear;
> @@ -671,11 +671,11 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
>  		op_bytes = 3;
>  	*address = 0;
>  	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
> -			   ctxt->vcpu);
> +			   ctxt->vcpu, NULL);
>  	if (rc)
>  		return rc;
>  	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
> -			   ctxt->vcpu);
> +			   ctxt->vcpu, NULL);
>  	return rc;
>  }
>  
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 7397932..741373e 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -138,12 +138,6 @@ module_param(oos_shadow, bool, 0644);
>  #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
>  			| PT64_NX_MASK)
>  
> -#define PFERR_PRESENT_MASK (1U << 0)
> -#define PFERR_WRITE_MASK (1U << 1)
> -#define PFERR_USER_MASK (1U << 2)
> -#define PFERR_RSVD_MASK (1U << 3)
> -#define PFERR_FETCH_MASK (1U << 4)
> -
>  #define RMAP_EXT 4
>  
>  #define ACC_EXEC_MASK    1
> @@ -1632,7 +1626,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
>  {
>  	struct page *page;
>  
> -	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
> +	gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
>  
>  	if (gpa == UNMAPPED_GVA)
>  		return NULL;
> @@ -2155,8 +2149,11 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
>  	spin_unlock(&vcpu->kvm->mmu_lock);
>  }
>  
> -static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
> +static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
> +				  u32 access, u32 *error)
>  {
> +	if (error)
> +		*error = 0;
>  	return vaddr;
>  }
>  
> @@ -2740,7 +2737,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
>  	if (tdp_enabled)
>  		return 0;
>  
> -	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
> +	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
>  
>  	spin_lock(&vcpu->kvm->mmu_lock);
>  	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
> @@ -3237,7 +3234,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
>  		if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
>  			audit_mappings_page(vcpu, ent, va, level - 1);
>  		else {
> -			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
> +			gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
>  			gfn_t gfn = gpa >> PAGE_SHIFT;
>  			pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
>  			hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 61ef5a6..be66759 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -42,6 +42,12 @@
>  #define PT_DIRECTORY_LEVEL 2
>  #define PT_PAGE_TABLE_LEVEL 1
>  
> +#define PFERR_PRESENT_MASK (1U << 0)
> +#define PFERR_WRITE_MASK (1U << 1)
> +#define PFERR_USER_MASK (1U << 2)
> +#define PFERR_RSVD_MASK (1U << 3)
> +#define PFERR_FETCH_MASK (1U << 4)
> +
>  int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
>  
>  static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index df15a53..81eab9a 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -490,18 +490,23 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
>  	spin_unlock(&vcpu->kvm->mmu_lock);
>  }
>  
> -static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
> +static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
> +			       u32 *error)
>  {
>  	struct guest_walker walker;
>  	gpa_t gpa = UNMAPPED_GVA;
>  	int r;
>  
> -	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
> +	r = FNAME(walk_addr)(&walker, vcpu, vaddr,
> +			     !!(access & PFERR_WRITE_MASK),
> +			     !!(access & PFERR_USER_MASK),
> +			     !!(access & PFERR_FETCH_MASK));
>  
>  	if (r) {
>  		gpa = gfn_to_gpa(walker.gfn);
>  		gpa |= vaddr & ~PAGE_MASK;
> -	}
> +	} else if (error)
> +		*error = walker.error_code;
>  
>  	return gpa;
>  }
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index a283795..ea3a8af 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3039,14 +3039,41 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
>  	return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
>  }
>  
> -static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
> -			       struct kvm_vcpu *vcpu)
> +gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
> +{
> +	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
> +	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
> +}
> +
> +gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
> +{
> +	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
> +	access |= PFERR_FETCH_MASK;
> +	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
> +}
> +
> +gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
> +{
> +	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
> +	access |= PFERR_WRITE_MASK;
> +	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
> +}
> +
> +/* used to access any guest's mapped memory without checking CPL */
> +gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
> +{
> +	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
> +}
> +
> +static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
> +				      struct kvm_vcpu *vcpu, u32 access,
> +				      u32 *error)
>  {
>  	void *data = val;
>  	int r = X86EMUL_CONTINUE;
>  
>  	while (bytes) {
> -		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
> +		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
>  		unsigned offset = addr & (PAGE_SIZE-1);
>  		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
>  		int ret;
> @@ -3069,14 +3096,37 @@ out:
>  	return r;
>  }
>  
> +/* used for instruction fetching */
> +static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
> +				struct kvm_vcpu *vcpu, u32 *error)
> +{
> +	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
> +	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
> +					  access | PFERR_FETCH_MASK, error);
> +}
> +
> +static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
> +			       struct kvm_vcpu *vcpu, u32 *error)
> +{
> +	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
> +	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
> +					  error);
> +}
> +
> +static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
> +			       struct kvm_vcpu *vcpu, u32 *error)
> +{
> +	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
> +}
> +
>  static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
> -				struct kvm_vcpu *vcpu)
> +				struct kvm_vcpu *vcpu, u32 *error)
>  {
>  	void *data = val;
>  	int r = X86EMUL_CONTINUE;
>  
>  	while (bytes) {
> -		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
> +		gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
>  		unsigned offset = addr & (PAGE_SIZE-1);
>  		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
>  		int ret;
> @@ -3106,6 +3156,7 @@ static int emulator_read_emulated(unsigned long addr,
>  				  struct kvm_vcpu *vcpu)
>  {
>  	gpa_t                 gpa;
> +	u32 error_code;
>  
>  	if (vcpu->mmio_read_completed) {
>  		memcpy(val, vcpu->mmio_data, bytes);
> @@ -3115,17 +3166,20 @@ static int emulator_read_emulated(unsigned long addr,
>  		return X86EMUL_CONTINUE;
>  	}
>  
> -	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
> +	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
> +
> +	if (gpa == UNMAPPED_GVA) {
> +		kvm_inject_page_fault(vcpu, addr, error_code);
> +		return X86EMUL_PROPAGATE_FAULT;
> +	}
>  
>  	/* For APIC access vmexit */
>  	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
>  		goto mmio;
>  
> -	if (kvm_read_guest_virt(addr, val, bytes, vcpu)
> +	if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
>  				== X86EMUL_CONTINUE)
>  		return X86EMUL_CONTINUE;
> -	if (gpa == UNMAPPED_GVA)
> -		return X86EMUL_PROPAGATE_FAULT;
>  
>  mmio:
>  	/*
> @@ -3164,11 +3218,12 @@ static int emulator_write_emulated_onepage(unsigned long addr,
>  					   struct kvm_vcpu *vcpu)
>  {
>  	gpa_t                 gpa;
> +	u32 error_code;
>  
> -	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
> +	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
>  
>  	if (gpa == UNMAPPED_GVA) {
> -		kvm_inject_page_fault(vcpu, addr, 2);
> +		kvm_inject_page_fault(vcpu, addr, error_code);
>  		return X86EMUL_PROPAGATE_FAULT;
>  	}
>  
> @@ -3232,7 +3287,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
>  		char *kaddr;
>  		u64 val;
>  
> -		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
> +		gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
>  
>  		if (gpa == UNMAPPED_GVA ||
>  		   (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
> @@ -3297,7 +3352,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
>  
>  	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
>  
> -	kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
> +	kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
>  
>  	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
>  	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
> @@ -3305,7 +3360,8 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
>  EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
>  
>  static struct x86_emulate_ops emulate_ops = {
> -	.read_std            = kvm_read_guest_virt,
> +	.read_std            = kvm_read_guest_virt_system,
> +	.fetch               = kvm_fetch_guest_virt,
>  	.read_emulated       = emulator_read_emulated,
>  	.write_emulated      = emulator_write_emulated,
>  	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
> @@ -3442,12 +3498,17 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
>  	gva_t q = vcpu->arch.pio.guest_gva;
>  	unsigned bytes;
>  	int ret;
> +	u32 error_code;
>  
>  	bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
>  	if (vcpu->arch.pio.in)
> -		ret = kvm_write_guest_virt(q, p, bytes, vcpu);
> +		ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
>  	else
> -		ret = kvm_read_guest_virt(q, p, bytes, vcpu);
> +		ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
> +
> +	if (ret == X86EMUL_PROPAGATE_FAULT)
> +		kvm_inject_page_fault(vcpu, q, error_code);
> +
>  	return ret;
>  }
>  
> @@ -3468,7 +3529,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
>  		if (io->in) {
>  			r = pio_copy_data(vcpu);
>  			if (r)
> -				return r;
> +				goto out;
>  		}
>  
>  		delta = 1;
> @@ -3495,7 +3556,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
>  			kvm_register_write(vcpu, VCPU_REGS_RSI, val);
>  		}
>  	}
> -
> +out:
>  	io->count -= io->cur_count;
>  	io->cur_count = 0;
>  
> @@ -3617,10 +3678,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
>  	if (!vcpu->arch.pio.in) {
>  		/* string PIO write */
>  		ret = pio_copy_data(vcpu);
> -		if (ret == X86EMUL_PROPAGATE_FAULT) {
> -			kvm_inject_gp(vcpu, 0);
> +		if (ret == X86EMUL_PROPAGATE_FAULT)
>  			return 1;
> -		}
>  		if (ret == 0 && !pio_string_write(vcpu)) {
>  			complete_pio(vcpu);
>  			if (vcpu->arch.pio.count == 0)
> @@ -4663,7 +4722,9 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
>  		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
>  		return X86EMUL_PROPAGATE_FAULT;
>  	}
> -	return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
> +	return kvm_read_guest_virt_system(dtable.base + index*8,
> +					  seg_desc, sizeof(*seg_desc),
> +					  vcpu, NULL);
>  }
>  
>  /* allowed just for 8 bytes segments */
> @@ -4677,15 +4738,23 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
>  
>  	if (dtable.limit < index * 8 + 7)
>  		return 1;
> -	return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
> +	return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
> +}
> +
> +static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
> +			       struct desc_struct *seg_desc)
> +{
> +	u32 base_addr = get_desc_base(seg_desc);
> +
> +	return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
>  }
>  
> -static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
> +static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
>  			     struct desc_struct *seg_desc)
>  {
>  	u32 base_addr = get_desc_base(seg_desc);
>  
> -	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
> +	return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
>  }
>  
>  static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
> @@ -4894,7 +4963,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
>  			    sizeof tss_segment_16))
>  		goto out;
>  
> -	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
> +	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
>  			   &tss_segment_16, sizeof tss_segment_16))
>  		goto out;
>  
> @@ -4902,7 +4971,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
>  		tss_segment_16.prev_task_link = old_tss_sel;
>  
>  		if (kvm_write_guest(vcpu->kvm,
> -				    get_tss_base_addr(vcpu, nseg_desc),
> +				    get_tss_base_addr_write(vcpu, nseg_desc),
>  				    &tss_segment_16.prev_task_link,
>  				    sizeof tss_segment_16.prev_task_link))
>  			goto out;
> @@ -4933,7 +5002,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
>  			    sizeof tss_segment_32))
>  		goto out;
>  
> -	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
> +	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
>  			   &tss_segment_32, sizeof tss_segment_32))
>  		goto out;
>  
> @@ -4941,7 +5010,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
>  		tss_segment_32.prev_task_link = old_tss_sel;
>  
>  		if (kvm_write_guest(vcpu->kvm,
> -				    get_tss_base_addr(vcpu, nseg_desc),
> +				    get_tss_base_addr_write(vcpu, nseg_desc),
>  				    &tss_segment_32.prev_task_link,
>  				    sizeof tss_segment_32.prev_task_link))
>  			goto out;
> @@ -4964,7 +5033,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
>  	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
>  	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
>  
> -	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
> +	old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
>  
>  	/* FIXME: Handle errors. Failure to read either TSS or their
>  	 * descriptors should generate a pagefault.
> @@ -5199,7 +5268,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
>  
>  	vcpu_load(vcpu);
>  	idx = srcu_read_lock(&vcpu->kvm->srcu);
> -	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
> +	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
>  	srcu_read_unlock(&vcpu->kvm->srcu, idx);
>  	tr->physical_address = gpa;
>  	tr->valid = gpa != UNMAPPED_GVA;
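
For completeness, the behavioural change distilled from the
emulator_read_emulated() hunk above: the read translation now carries the
CPL-derived access mask, and when the walk fails the walker's error code is
injected into the guest as a page fault, instead of the read succeeding via a
fully-privileged walk. A reduced, self-contained sketch of that flow (every
type and helper here is a hypothetical stand-in for the real KVM one):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UNMAPPED_GVA ((uint64_t)-1)
#define X86EMUL_CONTINUE        0
#define X86EMUL_PROPAGATE_FAULT 2

/* stub translation: pretend every address at or above 1 MiB is unmapped */
static uint64_t gva_to_gpa_read(uint64_t addr, uint32_t *error)
{
	if (addr >= (1UL << 20)) {
		if (error)
			*error = 1; /* stand-in page-fault error code */
		return UNMAPPED_GVA;
	}
	return addr;
}

static void inject_page_fault(uint64_t addr, uint32_t error)
{
	fprintf(stderr, "#PF at %#llx, error %#x\n",
		(unsigned long long)addr, (unsigned)error);
}

static int emulated_read(uint64_t addr, void *val, unsigned int bytes)
{
	uint32_t error_code;
	uint64_t gpa = gva_to_gpa_read(addr, &error_code);

	if (gpa == UNMAPPED_GVA) {
		/* walk failed: propagate the walker's error code */
		inject_page_fault(addr, error_code);
		return X86EMUL_PROPAGATE_FAULT;
	}
	memset(val, 0, bytes); /* stand-in for the actual guest read */
	return X86EMUL_CONTINUE;
}

int main(void)
{
	uint8_t buf[4];
	return emulated_read(2UL << 20, buf, sizeof(buf)) ==
	       X86EMUL_PROPAGATE_FAULT ? 0 : 1;
}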
