Message-Id: <1496431915-20774-5-git-send-email-sstabellini@kernel.org>
Date: Fri, 2 Jun 2017 12:31:42 -0700
From: Stefano Stabellini <sstabellini@...nel.org>
To: xen-devel@...ts.xen.org
Cc: linux-kernel@...r.kernel.org, sstabellini@...nel.org,
jgross@...e.com, boris.ostrovsky@...cle.com,
Stefano Stabellini <stefano@...reto.com>
Subject: [PATCH v3 05/18] xen/pvcalls: connect to a frontend
Introduce a per-frontend data structure named pvcalls_fedata. It
contains pointers to the command ring, its event channel, a list of
active sockets and a tree of passive sockets (passive sockets need to
be looked up by id on listen, accept and poll commands, while active
sockets only on release).

It also has an unbound workqueue to schedule the work of parsing and
executing commands on the command ring. socket_lock protects both the
list and the tree. In pvcalls_back_global, keep a list of connected
frontends.
Signed-off-by: Stefano Stabellini <stefano@...reto.com>
CC: boris.ostrovsky@...cle.com
CC: jgross@...e.com
---
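For clarity, a rough sketch of how a later patch in the series might
consume these structures when handling listen/accept/poll: look up the
passive socket by id under socket_lock. This is illustrative only; the
struct sockpass_mapping type and the lookup_passive helper are
placeholders, not part of this patch.

/*
 * Sketch only (not part of this patch): look up a passive socket by
 * id under socket_lock. struct sockpass_mapping is a placeholder.
 */
static struct sockpass_mapping *lookup_passive(struct pvcalls_fedata *priv,
                                               u64 id)
{
        struct sockpass_mapping *map;

        down(&priv->socket_lock);
        map = radix_tree_lookup(&priv->socketpass_mappings, id);
        up(&priv->socket_lock);

        return map;
}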
drivers/xen/pvcalls-back.c | 96 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 96 insertions(+)
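Similarly, the general shape of the ring-draining loop that
pvcalls_back_work is expected to grow in a later patch; again only a
sketch, with handle_request() as a hypothetical helper:

/*
 * Sketch only: drain the command ring using the standard Xen ring
 * macros. handle_request() is a placeholder.
 */
static void pvcalls_back_work_sketch(struct pvcalls_fedata *priv)
{
        struct xen_pvcalls_back_ring *ring = &priv->ring;
        struct xen_pvcalls_request req;
        int more = 1;

        while (more) {
                while (RING_HAS_UNCONSUMED_REQUESTS(ring)) {
                        /* Copy the request out before acting on it. */
                        RING_COPY_REQUEST(ring, ring->req_cons, &req);
                        ring->req_cons++;
                        handle_request(priv, &req);
                }
                /* Catch requests that arrived as we were going idle. */
                RING_FINAL_CHECK_FOR_REQUESTS(ring, more);
        }
}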
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 7bce750..bfea25f 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -33,9 +33,105 @@ struct pvcalls_back_global {
struct semaphore frontends_lock;
} pvcalls_back_global;
+/*
+ * Per-frontend data structure. It contains pointers to the command
+ * ring, its event channel, a list of active sockets and a tree of
+ * passive sockets.
+ */
+struct pvcalls_fedata {
+ struct list_head list;
+ struct xenbus_device *dev;
+ struct xen_pvcalls_sring *sring;
+ struct xen_pvcalls_back_ring ring;
+ int irq;
+ struct list_head socket_mappings;
+ struct radix_tree_root socketpass_mappings;
+ struct semaphore socket_lock;
+ struct workqueue_struct *wq;
+ struct work_struct register_work;
+};
+
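+/* Stub for now: a later patch fills in parsing and executing commands. */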
+static void pvcalls_back_work(struct work_struct *work)
+{
+}
+
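+/* Stub for now: bound to the frontend's event channel below. */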
+static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
static int backend_connect(struct xenbus_device *dev)
{
+ int err, evtchn;
+ grant_ref_t ring_ref;
+ struct pvcalls_fedata *priv = NULL;
+
+ priv = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->irq = -1;
+
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
+ &evtchn);
+ if (err != 1) {
+ err = -EINVAL;
+ xenbus_dev_fatal(dev, err, "reading %s/port",
+ dev->otherend);
+ goto error;
+ }
+
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
+ if (err != 1) {
+ err = -EINVAL;
+ xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
+ dev->otherend);
+ goto error;
+ }
+
+ err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id, evtchn,
+ pvcalls_back_event, 0,
+ "pvcalls-backend", dev);
+ if (err < 0)
+ goto error;
+ priv->irq = err;
+
+ priv->wq = alloc_workqueue("pvcalls_back_wq", WQ_UNBOUND, 1);
+ if (!priv->wq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ err = xenbus_map_ring_valloc(dev, &ring_ref, 1, (void **)&priv->sring);
+ if (err < 0)
+ goto error;
+
+ BACK_RING_INIT(&priv->ring, priv->sring, XEN_PAGE_SIZE * 1);
+ priv->dev = dev;
+
+ INIT_WORK(&priv->register_work, pvcalls_back_work);
+ INIT_LIST_HEAD(&priv->socket_mappings);
+ INIT_RADIX_TREE(&priv->socketpass_mappings, GFP_KERNEL);
+ sema_init(&priv->socket_lock, 1);
+ dev_set_drvdata(&dev->dev, priv);
+
+ down(&pvcalls_back_global.frontends_lock);
+ list_add_tail(&priv->list, &pvcalls_back_global.frontends);
+ up(&pvcalls_back_global.frontends_lock);
+ queue_work(priv->wq, &priv->register_work);
+
return 0;
+
+ error:
+ if (priv->irq >= 0)
+ unbind_from_irqhandler(priv->irq, dev);
+ if (priv->sring != NULL)
+ xenbus_unmap_ring_vfree(dev, priv->sring);
+ if (priv->wq)
+ destroy_workqueue(priv->wq);
+ kfree(priv);
+ return err;
}
static int backend_disconnect(struct xenbus_device *dev)
--
1.9.1