Message-ID: <alpine.DEB.2.10.1706011402450.18759@sstabellini-ThinkPad-X260>
Date: Thu, 1 Jun 2017 14:06:41 -0700 (PDT)
From: Stefano Stabellini <sstabellini@...nel.org>
To: Boris Ostrovsky <boris.ostrovsky@...cle.com>
cc: Stefano Stabellini <sstabellini@...nel.org>,
xen-devel@...ts.xen.org, linux-kernel@...r.kernel.org,
jgross@...e.com, Stefano Stabellini <stefano@...reto.com>
Subject: Re: [PATCH v2 05/18] xen/pvcalls: connect to a frontend
On Fri, 26 May 2017, Boris Ostrovsky wrote:
> On 05/19/2017 07:22 PM, Stefano Stabellini wrote:
> > Introduce a per-frontend data structure named pvcalls_back_priv. It
> > contains pointers to the command ring, its event channel, a list of
> > active sockets and a tree of passive sockets (passive sockets need to be
> > looked up from the id on listen, accept and poll commands, while active
> > sockets only on release).
> >
> > It also has an unbound workqueue to schedule the work of parsing and
> > executing commands on the command ring. socket_lock protects both the
> > active socket list and the passive socket tree. In pvcalls_back_global,
> > keep a list of connected frontends.
> >
> > Signed-off-by: Stefano Stabellini <stefano@...reto.com>
> > CC: boris.ostrovsky@...cle.com
> > CC: jgross@...e.com
> > ---
> > drivers/xen/pvcalls-back.c | 95 ++++++++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 95 insertions(+)
> >
> > diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
> > index b4da138..a48b0d9 100644
> > --- a/drivers/xen/pvcalls-back.c
> > +++ b/drivers/xen/pvcalls-back.c
> > @@ -33,9 +33,104 @@ struct pvcalls_back_global {
> > struct semaphore frontends_lock;
> > } pvcalls_back_global;
> >
> > +/*
> > + * Per-frontend data structure. It contains pointers to the command
> > + * ring, its event channel, a list of active sockets and a tree of
> > + * passive sockets.
> > + */
> > +struct pvcalls_back_priv {
>
> pvcalls_fedata or pvcalls_feinfo maybe (or pvcalls_back_fedata)?
I'll go with pvcalls_fedata.
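I.e. locally in backend_connect it would read (same allocation as in the
patch below, only the name changes):

	struct pvcalls_fedata *fedata;

	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
	if (!fedata)
		return -ENOMEM;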
> > + struct list_head list;
> > + struct xenbus_device *dev;
> > + struct xen_pvcalls_sring *sring;
> > + struct xen_pvcalls_back_ring ring;
> > + int irq;
> > + struct list_head socket_mappings;
> > + struct radix_tree_root socketpass_mappings;
> > + struct semaphore socket_lock;
> > + atomic_t work;
> > + struct workqueue_struct *wq;
> > + struct work_struct register_work;
> > +};
> > +
> > +static void pvcalls_back_work(struct work_struct *work)
> > +{
> > +}
> > +
> > +static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
> > +{
> > + return IRQ_HANDLED;
> > +}
> > +
> > static int backend_connect(struct xenbus_device *dev)
> > {
> > + int err, evtchn;
> > + grant_ref_t ring_ref;
> > + void *addr = NULL;
> > + struct pvcalls_back_priv *priv = NULL;
> > +
> > + priv = kzalloc(sizeof(struct pvcalls_back_priv), GFP_KERNEL);
> > + if (!priv)
> > + return -ENOMEM;
> > +
> > + err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
> > + &evtchn);
> > + if (err != 1) {
> > + err = -EINVAL;
> > + xenbus_dev_fatal(dev, err, "reading %s/event-channel",
> > + dev->otherend);
> > + goto error;
> > + }
> > +
> > + err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
> > + if (err != 1) {
> > + err = -EINVAL;
> > + xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
> > + dev->otherend);
> > + goto error;
> > + }
> > +
> > + err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id, evtchn,
> > + pvcalls_back_event, 0,
> > + "pvcalls-backend", dev);
> > + if (err < 0)
> > + goto error;
> > + priv->irq = err;
> > +
> > + priv->wq = alloc_workqueue("pvcalls_back_wq", WQ_UNBOUND, 1);
> > + if (!priv->wq) {
> > + err = -ENOMEM;
> > + goto error;
> > + }
> > +
> > + err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &addr);
> > + if (err < 0)
> > + goto error;
> > + priv->sring = addr;
>
> You don't really need addr, since priv is kzalloc'd (and you can deal
> with it in the error path).
OK
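Something like this then (rough sketch, only the mapping and the relevant
part of the error path shown; irq/wq teardown omitted):

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
				     (void **)&priv->sring);
	if (err < 0)
		goto error;

	/* ... rest of the setup ... */

	return 0;

error:
	/*
	 * priv is kzalloc'd, so priv->sring stays NULL until the mapping
	 * succeeds; use that to decide whether to unmap.
	 */
	if (priv->sring != NULL)
		xenbus_unmap_ring_vfree(dev, priv->sring);
	kfree(priv);
	return err;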