Message-ID: <alpine.DEB.2.10.1703141334400.4807@sstabellini-ThinkPad-X260>
Date: Tue, 14 Mar 2017 14:22:57 -0700 (PDT)
From: Stefano Stabellini <sstabellini@...nel.org>
To: Juergen Gross <jgross@...e.com>
cc: Stefano Stabellini <sstabellini@...nel.org>,
xen-devel@...ts.xenproject.org, linux-kernel@...r.kernel.org,
boris.ostrovsky@...cle.com,
Stefano Stabellini <stefano@...reto.com>,
Eric Van Hensbergen <ericvh@...il.com>,
Ron Minnich <rminnich@...dia.gov>,
Latchesar Ionkov <lucho@...kov.net>,
v9fs-developer@...ts.sourceforge.net
Subject: Re: [PATCH v3 4/7] xen/9pfs: connect to the backend

Hi Juergen,
thank you for the review!

On Tue, 14 Mar 2017, Juergen Gross wrote:
> On 14/03/17 00:50, Stefano Stabellini wrote:
> > Implement functions to handle the xenbus handshake. Upon connection,
> > allocate the rings according to the protocol specification.
> >
> > Initialize a work_struct and a wait_queue. The work_struct will be used
> > to schedule work upon receiving an event channel notification from the
> > backend. The wait_queue will be used to wait when the ring is full and
> > we need to send a new request.
> >
> > Signed-off-by: Stefano Stabellini <stefano@...reto.com>
> > CC: boris.ostrovsky@...cle.com
> > CC: jgross@...e.com
> > CC: Eric Van Hensbergen <ericvh@...il.com>
> > CC: Ron Minnich <rminnich@...dia.gov>
> > CC: Latchesar Ionkov <lucho@...kov.net>
> > CC: v9fs-developer@...ts.sourceforge.net
> > ---
> > net/9p/trans_xen.c | 240 +++++++++++++++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 240 insertions(+)
> >
> > diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
> > index f072876..974bac3 100644
> > --- a/net/9p/trans_xen.c
> > +++ b/net/9p/trans_xen.c
> > @@ -41,6 +41,35 @@
> > #include <net/9p/client.h>
> > #include <net/9p/transport.h>
> >
> > +#define XEN_9PFS_NUM_RINGS 2
> > +
> > +/* One per ring, more than one per 9pfs share */
> > +struct xen_9pfs_dataring {
> > + struct xen_9pfs_front_priv *priv;
> > +
> > + struct xen_9pfs_data_intf *intf;
> > + grant_ref_t ref;
> > + int evtchn;
> > + int irq;
> > + spinlock_t lock;
> > +
> > + struct xen_9pfs_data data;
> > + wait_queue_head_t wq;
> > + struct work_struct work;
> > +};
> > +
> > +/* One per 9pfs share */
> > +struct xen_9pfs_front_priv {
> > + struct list_head list;
> > + struct xenbus_device *dev;
> > + char *tag;
> > + struct p9_client *client;
> > +
> > + int num_rings;
> > + struct xen_9pfs_dataring *rings;
> > +};
> > +static LIST_HEAD(xen_9pfs_devs);
> > +
> > static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
> > {
> > return 0;
> > @@ -60,6 +89,21 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
> > return 0;
> > }
> >
> > +static void p9_xen_response(struct work_struct *work)
> > +{
> > +}
> > +
> > +static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
> > +{
> > + struct xen_9pfs_dataring *ring = r;
> > + BUG_ON(!ring || !ring->priv->client);
> > +
> > + wake_up_interruptible(&ring->wq);
> > + schedule_work(&ring->work);
> > +
> > + return IRQ_HANDLED;
> > +}
> > +
> > static struct p9_trans_module p9_xen_trans = {
> > .name = "xen",
> > .maxsize = (1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT)),
> > @@ -76,25 +120,221 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
> > { "" }
> > };
> >
> > +static int xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
>
> Return type void? You are always returning 0.
Good point
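For reference, a minimal sketch of how that would look (the cleanup logic
itself stays the same, and the remove callback then just returns 0):

static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
{
        /* ... ring/grant/irq cleanup unchanged ... */
}

static int xen_9pfs_front_remove(struct xenbus_device *dev)
{
        struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);

        dev_set_drvdata(&dev->dev, NULL);
        xen_9pfs_front_free(priv);
        return 0;
}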
> > +{
> > + int i, j;
> > +
> > + list_del(&priv->list);
> > +
> > + for (i = 0; i < priv->num_rings; i++) {
> > + if (priv->rings[i].intf == NULL)
> > + break;
> > + if (priv->rings[i].irq > 0)
> > + unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
> > + if (priv->rings[i].data.in != NULL) {
> > + for (j = 0; j < (1 << XEN_9PFS_RING_ORDER); j++)
> > + gnttab_end_foreign_access(priv->rings[i].intf->ref[j], 0, 0);
> > + free_pages((unsigned long)priv->rings[i].data.in,
> > + XEN_9PFS_RING_ORDER - (PAGE_SHIFT - XEN_PAGE_SHIFT));
> > + }
> > + gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
> > + free_page((unsigned long)priv->rings[i].intf);
> > + }
> > + kfree(priv->rings);
> > + kfree(priv);
> > +
> > + return 0;
> > +}
> > +
> > static int xen_9pfs_front_remove(struct xenbus_device *dev)
> > {
> > + int ret;
> > + struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
> > +
> > + dev_set_drvdata(&dev->dev, NULL);
> > + ret = xen_9pfs_front_free(priv);
> > + return ret;
> > +}
> > +
> > +static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
> > + struct xen_9pfs_dataring *ring)
> > +{
> > + int i = 0;
> > + int ret = -ENOMEM;
> > + void *bytes = NULL;
> > +
> > + init_waitqueue_head(&ring->wq);
> > + spin_lock_init(&ring->lock);
> > + INIT_WORK(&ring->work, p9_xen_response);
> > +
> > + ring->intf = (struct xen_9pfs_data_intf *) get_zeroed_page(GFP_KERNEL | __GFP_ZERO);
>
> I don't think you need __GFP_ZERO here.
You are right
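I.e., since get_zeroed_page() already returns zeroed memory, this becomes
simply:

        ring->intf = (struct xen_9pfs_data_intf *)get_zeroed_page(GFP_KERNEL);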
> > + if (!ring->intf)
> > + return ret;
> > + ret = ring->ref = gnttab_grant_foreign_access(dev->otherend_id,
> > + virt_to_gfn(ring->intf), 0);
> > + if (ret < 0)
> > + goto out;
> > + bytes = (void*)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
>
> Coding style: (void *)
OK
> > + XEN_9PFS_RING_ORDER - (PAGE_SHIFT - XEN_PAGE_SHIFT));
> > + if (bytes == NULL)
> > + goto out;
> > + for (; i < (1 << XEN_9PFS_RING_ORDER); i++) {
> > + ret = ring->intf->ref[i] = gnttab_grant_foreign_access(
> > + dev->otherend_id, virt_to_gfn(bytes) + i, 0);
> > + if (ret < 0)
> > + goto out;
> > + }
> > + ring->data.in = bytes;
> > + ring->data.out = bytes + XEN_9PFS_RING_SIZE;
> > +
> > + ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
> > + if (ret)
> > + goto out;
> > + ring->irq = bind_evtchn_to_irqhandler(ring->evtchn, xen_9pfs_front_event_handler,
> > + 0, "xen_9pfs-frontend", ring);
>
> Did you think about using request_threaded_irq() instead of a workqueue?
> For an example see e.g. drivers/scsi/xen-scsifront.c
I like workqueues :-) It might come down to personal preferences, but I
think workqueues are more flexible and a better fit for this use case.
Not only is it easy to schedule work on a workqueue from the interrupt
handler, but workqueues can also be used for sleeping in the request
function when there is not enough room on the ring. Besides, they can
easily be configured to share a single thread or to have multiple
independent threads.
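For comparison, the threaded-irq variant you are referring to would look
roughly like this (just a sketch, xen_9pfs_irq_thread_fn is a made-up
name for a handler that may sleep):

        /* handler == NULL: the irq stays masked until the thread fn has
         * run (IRQF_ONESHOT), so all ring processing could sleep there.
         */
        ring->irq = bind_evtchn_to_irq(ring->evtchn);
        if (ring->irq < 0) {
                xenbus_free_evtchn(dev, ring->evtchn);
                return ring->irq;
        }
        ret = request_threaded_irq(ring->irq, NULL, xen_9pfs_irq_thread_fn,
                                   IRQF_ONESHOT, "xen_9pfs-frontend", ring);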
> > + if (ring->irq < 0) {
> > + xenbus_free_evtchn(dev, ring->evtchn);
> > + ret = ring->irq;
> > + goto out;
> > + }
> > return 0;
> > +
> > +out:
> > + if (bytes != NULL) {
> > + for (i--; i >= 0; i--)
> > + gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
> > + free_pages((unsigned long)bytes,
> > + XEN_9PFS_RING_ORDER - (PAGE_SHIFT - XEN_PAGE_SHIFT));
> > + }
> > + gnttab_end_foreign_access(ring->ref, 0, 0);
> > + free_page((unsigned long)ring->intf);
> > + return ret;
> > }
> >
> > static int xen_9pfs_front_probe(struct xenbus_device *dev,
> > const struct xenbus_device_id *id)
> > {
> > + int ret, i;
> > + struct xenbus_transaction xbt;
> > + struct xen_9pfs_front_priv *priv = NULL;
> > + char *versions;
> > + unsigned int max_rings, max_ring_order, len;
> > +
> > + versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
> > + if (!len || strcmp(versions, "1"))
>
> You are leaking versions in case of not being "1".
I'll fix
> Can't you use xenbus_read_unsigned() instead of xenbus_read()?
I can use xenbus_read_unsigned() in the other cases below, but not here,
because "versions" is a comma-separated list, e.g. "1,3,4".
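So the check will end up looking something like this (freeing versions on
all paths):

        versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
        if (IS_ERR(versions))
                return PTR_ERR(versions);
        if (strcmp(versions, "1")) {
                kfree(versions);
                return -EINVAL;
        }
        kfree(versions);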
> > + return -EINVAL;
> > + kfree(versions);
> > + ret = xenbus_scanf(XBT_NIL, dev->otherend, "max-rings", "%u", &max_rings);
>
> xenbus_read_unsigned()?
Good idea, I'll use it
> > + if (ret < 0 || max_rings < XEN_9PFS_NUM_RINGS)
> > + return -EINVAL;
> > + ret = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &max_ring_order);
>
> xenbus_read_unsigned()
Yep
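Roughly, for both reads (sketch):

        max_rings = xenbus_read_unsigned(dev->otherend, "max-rings", 0);
        if (max_rings < XEN_9PFS_NUM_RINGS)
                return -EINVAL;
        max_ring_order = xenbus_read_unsigned(dev->otherend,
                                              "max-ring-page-order", 0);
        if (max_ring_order < XEN_9PFS_RING_ORDER)
                return -EINVAL;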
> > + if (ret < 0|| max_ring_order < XEN_9PFS_RING_ORDER)
>
> Coding style (missing space before ||)
OK
> > + return -EINVAL;
> > +
> > +
> > + priv = kzalloc(sizeof(struct xen_9pfs_front_priv), GFP_KERNEL);
> > + if (!priv)
> > + return -ENOMEM;
> > +
> > + priv->dev = dev;
> > + priv->num_rings = XEN_9PFS_NUM_RINGS;
> > + priv->rings = kzalloc(sizeof(struct xen_9pfs_dataring) * priv->num_rings,
> > + GFP_KERNEL);
>
> Coding style: please align the second line to the first parameter of
> kzalloc().
OK
> > + if (!priv->rings) {
> > + kfree(priv);
> > + return -ENOMEM;
> > + }
> > +
> > + for (i = 0; i < priv->num_rings; i++) {
> > + priv->rings[i].priv = priv;
> > + ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i]);
> > + if (ret < 0)
> > + goto error;
> > + }
> > +
> > + again:
> > + ret = xenbus_transaction_start(&xbt);
> > + if (ret) {
> > + xenbus_dev_fatal(dev, ret, "starting transaction");
> > + goto error;
> > + }
> > + ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
> > + if (ret)
> > + goto error_xenbus;
> > + ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u", priv->num_rings);
> > + if (ret)
> > + goto error_xenbus;
> > + for (i = 0; i < priv->num_rings; i++) {
> > + char str[16];
> > +
> > + sprintf(str, "ring-ref%u", i);
> > + ret = xenbus_printf(xbt, dev->nodename, str, "%d", priv->rings[i].ref);
> > + if (ret)
> > + goto error_xenbus;
> > +
> > + sprintf(str, "event-channel-%u", i);
>
> This is dangerous: you are hard coding num_rings always being less than
> 10 here. Otherwise str[] isn't large enough. Mind adding
> BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9)?
Yes, good idea.
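Something like this at the top of the loop body:

        char str[16];

        BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
        sprintf(str, "ring-ref%u", i);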
> > + ret = xenbus_printf(xbt, dev->nodename, str, "%u", priv->rings[i].evtchn);
> > + if (ret)
> > + goto error_xenbus;
> > + }
> > + priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
> > + if (ret)
> > + goto error_xenbus;
> > + ret = xenbus_transaction_end(xbt, 0);
> > + if (ret) {
> > + if (ret == -EAGAIN)
> > + goto again;
> > + xenbus_dev_fatal(dev, ret, "completing transaction");
> > + goto error;
> > + }
> > +
> > + list_add_tail(&priv->list, &xen_9pfs_devs);
>
> Don't you need some kind of lock protecting the list?
I think I do, I'll add one.
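Probably a global rwlock next to the list head, something like this
(sketch; any reader walking the list would then take the read lock):

        static DEFINE_RWLOCK(xen_9pfs_lock);

        /* in xen_9pfs_front_probe(): */
        write_lock(&xen_9pfs_lock);
        list_add_tail(&priv->list, &xen_9pfs_devs);
        write_unlock(&xen_9pfs_lock);

        /* in xen_9pfs_front_free(): */
        write_lock(&xen_9pfs_lock);
        list_del(&priv->list);
        write_unlock(&xen_9pfs_lock);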
> > + dev_set_drvdata(&dev->dev, priv);
> > + xenbus_switch_state(dev, XenbusStateInitialised);
> > +
> > return 0;
> > +
> > + error_xenbus:
> > + xenbus_transaction_end(xbt, 1);
> > + xenbus_dev_fatal(dev, ret, "writing xenstore");
> > + error:
> > + dev_set_drvdata(&dev->dev, NULL);
> > + xen_9pfs_front_free(priv);
> > + return ret;
> > }
> >
> > static int xen_9pfs_front_resume(struct xenbus_device *dev)
> > {
> > + dev_warn(&dev->dev, "suspend/resume unsupported\n");
> > return 0;
> > }
> >
> > static void xen_9pfs_front_changed(struct xenbus_device *dev,
> > enum xenbus_state backend_state)
> > {
> > + switch (backend_state) {
> > + case XenbusStateReconfiguring:
> > + case XenbusStateReconfigured:
> > + case XenbusStateInitialising:
> > + case XenbusStateInitialised:
> > + case XenbusStateUnknown:
> > + break;
> > +
> > + case XenbusStateInitWait:
> > + break;
> > +
> > + case XenbusStateConnected:
> > + xenbus_switch_state(dev, XenbusStateConnected);
> > + break;
> > +
> > + case XenbusStateClosed:
> > + if (dev->state == XenbusStateClosed)
> > + break;
> > + /* Missed the backend's CLOSING state -- fallthrough */
> > + case XenbusStateClosing:
> > + xenbus_frontend_closed(dev);
> > + break;
> > + }
> > }
> >
> > static struct xenbus_driver xen_9pfs_front_driver = {
> >