Message-ID: <9dc05fb2-cb84-d8e7-848b-37fb9c86b782@oracle.com>
Date:   Thu, 22 Dec 2016 10:49:21 -0500
From:   Boris Ostrovsky <boris.ostrovsky@...cle.com>
To:     Juergen Gross <jgross@...e.com>, linux-kernel@...r.kernel.org,
        xen-devel@...ts.xenproject.org
Subject: Re: [PATCH 2/3] xen: return xenstore command failures via response
 instead of rc

On 12/22/2016 02:19 AM, Juergen Gross wrote:
> When the xenbus driver does some special handling for a Xenstore
> command, any error condition related to the command should be returned
> via an error response instead of letting the related write operation
> fail. Otherwise the user-land handler might make wrong decisions,
> assuming the connection to Xenstore is broken.

Do we expect the user to always read the reply if no error code was
returned?

-boris

>
> While at it, try to return the same error values xenstored would
> return for those cases.
>
> Signed-off-by: Juergen Gross <jgross@...e.com>
> ---
>  drivers/xen/xenbus/xenbus_dev_frontend.c | 48 ++++++++++++++++++--------------
>  1 file changed, 28 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
> index a068281..79130b3 100644
> --- a/drivers/xen/xenbus/xenbus_dev_frontend.c
> +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
> @@ -302,6 +302,30 @@ static void watch_fired(struct xenbus_watch *watch,
>  	mutex_unlock(&adap->dev_data->reply_mutex);
>  }
>  
> +static int xenbus_command_reply(struct xenbus_file_priv *u,
> +				unsigned int msg_type, const char *reply)
> +{
> +	struct {
> +		struct xsd_sockmsg hdr;
> +		char body[16];
> +	} msg;
> +	int rc;
> +
> +	msg.hdr = u->u.msg;
> +	msg.hdr.type = msg_type;
> +	msg.hdr.len = strlen(reply) + 1;
> +	if (msg.hdr.len > sizeof(msg.body))
> +		return -E2BIG;
> +	memcpy(&msg.body, reply, msg.hdr.len);
> +
> +	mutex_lock(&u->reply_mutex);
> +	rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
> +	wake_up(&u->read_waitq);
> +	mutex_unlock(&u->reply_mutex);
> +
> +	return rc;
> +}
> +
>  static int xenbus_write_transaction(unsigned msg_type,
>  				    struct xenbus_file_priv *u)
>  {
> @@ -321,7 +344,7 @@ static int xenbus_write_transaction(unsigned msg_type,
>  			if (trans->handle.id == u->u.msg.tx_id)
>  				break;
>  		if (&trans->list == &u->transactions)
> -			return -ESRCH;
> +			return xenbus_command_reply(u, XS_ERROR, "ENOENT");
>  	}
>  
>  	reply = xenbus_dev_request_and_reply(&u->u.msg);
> @@ -372,12 +395,12 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
>  	path = u->u.buffer + sizeof(u->u.msg);
>  	token = memchr(path, 0, u->u.msg.len);
>  	if (token == NULL) {
> -		rc = -EILSEQ;
> +		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
>  		goto out;
>  	}
>  	token++;
>  	if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
> -		rc = -EILSEQ;
> +		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
>  		goto out;
>  	}
>  
> @@ -411,23 +434,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
>  	}
>  
>  	/* Success.  Synthesize a reply to say all is OK. */
> -	{
> -		struct {
> -			struct xsd_sockmsg hdr;
> -			char body[3];
> -		} __packed reply = {
> -			{
> -				.type = msg_type,
> -				.len = sizeof(reply.body)
> -			},
> -			"OK"
> -		};
> -
> -		mutex_lock(&u->reply_mutex);
> -		rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
> -		wake_up(&u->read_waitq);
> -		mutex_unlock(&u->reply_mutex);
> -	}
> +	rc = xenbus_command_reply(u, msg_type, "OK");
>  
>  out:
>  	return rc;

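To illustrate the resulting behaviour (and the pattern Boris asks about
above), here is a minimal user-space sketch, not part of the patch: it
writes a deliberately malformed XS_WATCH request to the xenbus device
and then reads the reply. Without the patch the write() itself fails
with EILSEQ; with the patch the write() succeeds and an XS_ERROR reply
carrying "EINVAL" is queued, so a caller that only checks the write()
return value would miss the failure. The device path /dev/xen/xenbus,
the hand-copied struct layout and the numeric message-type values
(meant to mirror Xen's xs_wire.h) are assumptions here, and error
handling is abbreviated.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Assumed to mirror struct xsd_sockmsg from Xen's xs_wire.h. */
struct xsd_sockmsg {
	uint32_t type;
	uint32_t req_id;
	uint32_t tx_id;
	uint32_t len;
};

#define XS_WATCH  4	/* message type values as defined in xs_wire.h */
#define XS_ERROR 16

int main(void)
{
	/* An XS_WATCH body should be "<path>\0<token>\0"; send garbage instead. */
	struct {
		struct xsd_sockmsg hdr;
		char body[3];
	} __attribute__((packed)) req = {
		.hdr  = { .type = XS_WATCH, .req_id = 1, .len = 3 },
		.body = { 'b', 'a', 'd' },	/* no NUL separator, no token */
	};
	struct {
		struct xsd_sockmsg hdr;
		char body[16];
	} __attribute__((packed)) rep;
	int fd = open("/dev/xen/xenbus", O_RDWR);

	if (fd < 0)
		return 1;

	/*
	 * Without the patch this write() fails with EILSEQ and nothing is
	 * queued for reading; with the patch it succeeds and an XS_ERROR
	 * reply ("EINVAL") can be read back instead.
	 */
	if (write(fd, &req, sizeof(req)) != (ssize_t)sizeof(req))
		perror("write");
	else if (read(fd, &rep, sizeof(rep)) >= (ssize_t)sizeof(rep.hdr) &&
		 rep.hdr.type == XS_ERROR)
		printf("XS_ERROR reply: %s\n", rep.body);

	close(fd);
	return 0;
}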