Message-ID: <alpine.DEB.2.10.1707251259590.22381@sstabellini-ThinkPad-X260>
Date:   Tue, 25 Jul 2017 13:10:04 -0700 (PDT)
From:   Stefano Stabellini <sstabellini@...nel.org>
To:     Juergen Gross <jgross@...e.com>
cc:     Stefano Stabellini <sstabellini@...nel.org>,
        xen-devel@...ts.xen.org, linux-kernel@...r.kernel.org,
        boris.ostrovsky@...cle.com,
        Stefano Stabellini <stefano@...reto.com>
Subject: Re: [PATCH v1 02/13] xen/pvcalls: connect to the backend

On Mon, 24 Jul 2017, Juergen Gross wrote:
> On 22/07/17 02:11, Stefano Stabellini wrote:
> > Implement the probe function for the pvcalls frontend. Read the
> > supported versions, max-page-order and function-calls nodes from
> > xenstore.
> > 
> > Introduce a data structure named pvcalls_bedata. It contains pointers to
> > the command ring, the event channel, a list of active sockets and a list
> > of passive sockets. List accesses are protected by a spinlock.
> > 
> > Introduce a waitqueue to allow waiting for a response on commands sent
> > to the backend.
> > 
> > Introduce an array of struct xen_pvcalls_response to store command
> > responses.
> > 
> > Only one frontend<->backend connection is supported at any given time
> > for a guest. Store the active frontend device in a static pointer.
> > 
> > Introduce a stub function for the event handler.
> > 
> > Signed-off-by: Stefano Stabellini <stefano@...reto.com>
> > CC: boris.ostrovsky@...cle.com
> > CC: jgross@...e.com
> > ---
> >  drivers/xen/pvcalls-front.c | 153 ++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 153 insertions(+)
> > 
> > diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
> > index 173e204..fb08ebf 100644
> > --- a/drivers/xen/pvcalls-front.c
> > +++ b/drivers/xen/pvcalls-front.c
> > @@ -20,6 +20,29 @@
> >  #include <xen/xenbus.h>
> >  #include <xen/interface/io/pvcalls.h>
> >  
> > +#define PVCALLS_INVALID_ID (UINT_MAX)
> > +#define RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
> > +#define PVCALLS_NR_REQ_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
> > +
> > +struct pvcalls_bedata {
> > +	struct xen_pvcalls_front_ring ring;
> > +	grant_ref_t ref;
> > +	int irq;
> > +
> > +	struct list_head socket_mappings;
> > +	struct list_head socketpass_mappings;
> > +	spinlock_t pvcallss_lock;
> > +
> > +	wait_queue_head_t inflight_req;
> > +	struct xen_pvcalls_response rsp[PVCALLS_NR_REQ_PER_RING];
> > +};
> > +struct xenbus_device *pvcalls_front_dev;
> > +
> > +static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
> > +{
> > +	return IRQ_HANDLED;
> > +}
> > +
> >  static const struct xenbus_device_id pvcalls_front_ids[] = {
> >  	{ "pvcalls" },
> >  	{ "" }
> > @@ -33,7 +56,114 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
> >  static int pvcalls_front_probe(struct xenbus_device *dev,
> >  			  const struct xenbus_device_id *id)
> >  {
> > +	int ret = -EFAULT, evtchn, ref = -1, i;
> > +	unsigned int max_page_order, function_calls, len;
> > +	char *versions;
> > +	grant_ref_t gref_head = 0;
> > +	struct xenbus_transaction xbt;
> > +	struct pvcalls_bedata *bedata = NULL;
> > +	struct xen_pvcalls_sring *sring;
> > +
> > +	if (pvcalls_front_dev != NULL) {
> > +		dev_err(&dev->dev, "only one PV Calls connection supported\n");
> > +		return -EINVAL;
> > +	}
> > +
> > +	versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
> > +	if (!len)
> > +		return -EINVAL;
> > +	if (strcmp(versions, "1")) {
> > +		kfree(versions);
> > +		return -EINVAL;
> > +	}
> > +	kfree(versions);
> > +	ret = xenbus_scanf(XBT_NIL, dev->otherend,
> > +			   "max-page-order", "%u", &max_page_order);
> 
> Use xenbus_read_unsigned() instead?

OK

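Something along these lines, I think (untested sketch; a default of 0
makes the subsequent RING_ORDER check fail when the node is missing, so
the separate ret <= 0 test goes away):

	max_page_order = xenbus_read_unsigned(dev->otherend,
					      "max-page-order", 0);
	if (max_page_order < RING_ORDER)
		return -ENODEV;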

> > +	if (ret <= 0)
> > +		return -ENODEV;
> > +	if (max_page_order < RING_ORDER)
> > +		return -ENODEV;
> > +	ret = xenbus_scanf(XBT_NIL, dev->otherend,
> > +			   "function-calls", "%u", &function_calls);
> 
> xenbus_read_unsigned() again?

OK

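Same pattern here (untested):

	function_calls = xenbus_read_unsigned(dev->otherend,
					      "function-calls", 0);
	if (function_calls != 1)
		return -ENODEV;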

> > +	if (ret <= 0 || function_calls != 1)
> > +		return -ENODEV;
> > +	pr_info("%s max-page-order is %u\n", __func__, max_page_order);
> > +
> > +	bedata = kzalloc(sizeof(struct pvcalls_bedata), GFP_KERNEL);
> > +	if (!bedata)
> > +		return -ENOMEM;
> > +
> 
> You should call dev_set_drvdata() here already, otherwise entering the
> error path will dereference a NULL pointer instead of bedata.

OK

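Right, something like this (sketch, assuming pvcalls_front_remove()
fetches bedata via dev_get_drvdata()):

	bedata = kzalloc(sizeof(struct pvcalls_bedata), GFP_KERNEL);
	if (!bedata)
		return -ENOMEM;
	dev_set_drvdata(&dev->dev, bedata);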

> > +	init_waitqueue_head(&bedata->inflight_req);
> > +	for (i = 0; i < PVCALLS_NR_REQ_PER_RING; i++)
> > +		bedata->rsp[i].req_id = PVCALLS_INVALID_ID;
> > +
> > +	sring = (struct xen_pvcalls_sring *) __get_free_page(GFP_KERNEL |
> > +							     __GFP_ZERO);
> > +	if (!sring)
> > +		goto error;
> 
> ret will be 1 here. Shouldn't you set it to -ENOMEM?

Yes, I'll do that

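I.e. (sketch):

	sring = (struct xen_pvcalls_sring *) __get_free_page(GFP_KERNEL |
							     __GFP_ZERO);
	if (!sring) {
		ret = -ENOMEM;
		goto error;
	}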

> 
> > +	SHARED_RING_INIT(sring);
> > +	FRONT_RING_INIT(&bedata->ring, sring, XEN_PAGE_SIZE);
> > +
> > +	ret = xenbus_alloc_evtchn(dev, &evtchn);
> > +	if (ret)
> > +		goto error;
> > +
> > +	bedata->irq = bind_evtchn_to_irqhandler(evtchn,
> > +						pvcalls_front_event_handler,
> > +						0, "pvcalls-frontend", dev);
> > +	if (bedata->irq < 0) {
> > +		ret = bedata->irq;
> > +		goto error;
> > +	}
> > +
> > +	ret = gnttab_alloc_grant_references(1, &gref_head);
> > +	if (ret < 0)
> > +		goto error;
> > +	bedata->ref = ref = gnttab_claim_grant_reference(&gref_head);
> > +	if (ref < 0)
> > +		goto error;
> 
> Setting ret?

OK

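Something like this (sketch; gnttab_claim_grant_reference() should
return a negative errno on failure, so it can be propagated directly):

	bedata->ref = ref = gnttab_claim_grant_reference(&gref_head);
	if (ref < 0) {
		ret = ref;
		goto error;
	}
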
> 
> > +	gnttab_grant_foreign_access_ref(ref, dev->otherend_id,
> > +					virt_to_gfn((void *)sring), 0);
> > +
> > + again:
> > +	ret = xenbus_transaction_start(&xbt);
> > +	if (ret) {
> > +		xenbus_dev_fatal(dev, ret, "starting transaction");
> > +		goto error;
> > +	}
> > +	ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
> > +	if (ret)
> > +		goto error_xenbus;
> > +	ret = xenbus_printf(xbt, dev->nodename, "ring-ref", "%d", ref);
> > +	if (ret)
> > +		goto error_xenbus;
> > +	ret = xenbus_printf(xbt, dev->nodename, "port", "%u",
> > +			    evtchn);
> > +	if (ret)
> > +		goto error_xenbus;
> > +	ret = xenbus_transaction_end(xbt, 0);
> > +	if (ret) {
> > +		if (ret == -EAGAIN)
> > +			goto again;
> > +		xenbus_dev_fatal(dev, ret, "completing transaction");
> > +		goto error;
> > +	}
> > +
> > +	INIT_LIST_HEAD(&bedata->socket_mappings);
> > +	INIT_LIST_HEAD(&bedata->socketpass_mappings);
> > +	spin_lock_init(&bedata->pvcallss_lock);
> > +	dev_set_drvdata(&dev->dev, bedata);
> > +	pvcalls_front_dev = dev;
> > +	xenbus_switch_state(dev, XenbusStateInitialised);
> > +
> >  	return 0;
> > +
> > + error_xenbus:
> > +	xenbus_transaction_end(xbt, 1);
> > +	xenbus_dev_fatal(dev, ret, "writing xenstore");
> > + error:
> > +	pvcalls_front_remove(dev);
> > +	return ret;
> >  }
> >  
> >  static int pvcalls_front_resume(struct xenbus_device *dev)
> > @@ -45,6 +175,29 @@ static int pvcalls_front_resume(struct xenbus_device *dev)
> >  static void pvcalls_front_changed(struct xenbus_device *dev,
> >  			    enum xenbus_state backend_state)
> >  {
> > +	switch (backend_state) {
> > +	case XenbusStateReconfiguring:
> > +	case XenbusStateReconfigured:
> > +	case XenbusStateInitialising:
> > +	case XenbusStateInitialised:
> > +	case XenbusStateUnknown:
> > +		break;
> > +
> > +	case XenbusStateInitWait:
> > +		break;
> > +
> > +	case XenbusStateConnected:
> > +		xenbus_switch_state(dev, XenbusStateConnected);
> > +		break;
> > +
> > +	case XenbusStateClosed:
> > +		if (dev->state == XenbusStateClosed)
> > +			break;
> > +		/* Missed the backend's CLOSING state -- fallthrough */
> > +	case XenbusStateClosing:
> > +		xenbus_frontend_closed(dev);
> > +		break;
> > +	}
> >  }
> >  
> >  static struct xenbus_driver pvcalls_front_driver = {
> > 
> 
