Message-ID: <jpgfvkmi5h6.fsf@nelium.bos.redhat.com>
Date:	Wed, 07 May 2014 00:21:25 -0400
From:	Bandan Das <bsd@...hat.com>
To:	Paolo Bonzini <pbonzini@...hat.com>
Cc:	linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Subject: Re: [RFC PATCH 2/4] KVM: emulate: avoid repeated calls to do_insn_fetch_bytes

Paolo Bonzini <pbonzini@...hat.com> writes:

> do_insn_fetch_bytes will only be called once in a given insn_fetch and
> insn_fetch_arr, because in fact it will only be called at most twice
> for any instruction and the first call is explicit in x86_decode_insn.
> This observation lets us hoist the call out of the memory copying loop.
> It does not buy performance, because most fetches are one byte long
> anyway, but it prepares for the next patch.
>
> The overflow check is tricky, but correct.  Because do_insn_fetch_bytes
> has already been called once, we know that fc->end is at least 15.  So
> it is okay to subtract the number of bytes we want to read.
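
As an aside, here is a minimal userspace sketch of the unsigned wraparound this
paragraph is guarding against (the values and variable names are purely
illustrative stand-ins, not the real fetch_cache fields):

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative stand-ins for ctxt->_eip, fc->end and the fetch size. */
		unsigned long eip = 10;
		unsigned long end = 3;	/* pretend fewer bytes are cached than requested */
		unsigned int size = 15;

		/*
		 * With unsigned arithmetic, end - size wraps around to a huge
		 * value when size > end, so this test would stay false and the
		 * refetch would be skipped even though it is needed.  Per the
		 * commit message, the earlier do_insn_fetch_bytes() call makes
		 * fc->end at least 15, so the subtraction never wraps there.
		 */
		if (eip > end - size)
			printf("need to refetch\n");
		else
			printf("wraparound: refetch wrongly skipped\n");

		return 0;
	}

If that guarantee holds, the single check before the copy loop is safe.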
>
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
>  arch/x86/kvm/emulate.c | 25 +++++++++++++++++--------
>  1 file changed, 17 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index c7b625bf0b5d..886f9a88010f 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -706,7 +706,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
>   * Prefetch the remaining bytes of the instruction without crossing page
>   * boundary if they are not in fetch_cache yet.
>   */
> -static int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt)
> +static int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
>  {
>  	struct fetch_cache *fc = &ctxt->fetch;
>  	int rc;
> @@ -718,7 +718,14 @@ static int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt)
>  	cur_size = fc->end - fc->start;
>  	size = min(15UL - cur_size,
>  		   PAGE_SIZE - offset_in_page(fc->end));
> -	if (unlikely(size == 0))
> +
> +	/*
> +	 * One instruction can only straddle two pages,
> +	 * and one has been loaded at the beginning of
> +	 * x86_decode_insn.  So, if not enough bytes
> +	 * still, we must have hit the 15-byte boundary.
> +	 */
> +	if (unlikely(size < op_size))
>  		return X86EMUL_UNHANDLEABLE;
>  	rc = __linearize(ctxt, addr, size, false, true, &linear);
>  	if (unlikely(rc != X86EMUL_CONTINUE))
> @@ -739,12 +746,14 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
>  	u8 *dest = __dest;
>  	u8 *src = &fc->data[ctxt->_eip - fc->start];
>  
> +	/* We have to be careful about overflow! */
> +	if (unlikely(ctxt->_eip > fc->end - size)) {
> +		rc != do_insn_fetch_bytes(ctxt, size);
This is a typo I guess; "!=" should be "=" here.

> +		if (rc != X86EMUL_CONTINNUE)
> +			goto done;
> +	}
> +
>  	while (size--) {
> -		if (unlikely(ctxt->_eip == fc->end)) {
> -			rc = do_insn_fetch_bytes(ctxt);
> -			if (rc != X86EMUL_CONTINUE)
> -				return rc;
> -		}
>  		*dest++ = *src++;
>  		ctxt->_eip++;
>  		continue;
> @@ -4273,7 +4282,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
>  	if (insn_len > 0)
>  		memcpy(ctxt->fetch.data, insn, insn_len);
>  	else {
> -		rc = do_insn_fetch_bytes(ctxt);
> +		rc = do_insn_fetch_bytes(ctxt, 1);
Is this saying that if the cache is full, then we fetch one more byte?

>  		if (rc != X86EMUL_CONTINUE)
>  			return rc;
>  	}