Message-ID: <5351d729-6b53-aa30-55e8-dd3f55324831@oracle.com>
Date: Tue, 7 Mar 2017 10:27:18 -0500
From: Boris Ostrovsky <boris.ostrovsky@...cle.com>
To: Stefano Stabellini <sstabellini@...nel.org>,
xen-devel@...ts.xenproject.org
Cc: linux-kernel@...r.kernel.org,
Stefano Stabellini <stefano@...reto.com>, jgross@...e.com,
Eric Van Hensbergen <ericvh@...il.com>,
Ron Minnich <rminnich@...dia.gov>,
Latchesar Ionkov <lucho@...kov.net>,
v9fs-developer@...ts.sourceforge.net
Subject: Re: [PATCH 5/7] xen/9pfs: send requests to the backend
On 03/06/2017 03:01 PM, Stefano Stabellini wrote:
> Implement struct p9_trans_module create and close functions by looking
> at the available Xen 9pfs frontend-backend connections. We don't expect
> many frontend-backend connections, thus walking a list is OK.
>
> Send requests to the backend by copying each request to one of the
> available rings (each frontend-backend connection comes with multiple
> rings). Handle the ring and notifications following the 9pfs
> specification. If there are not enough free bytes on the ring for the
> request, wait on the wait_queue: the backend will send a notification
> after consuming more requests.
>
> Signed-off-by: Stefano Stabellini <stefano@...reto.com>
> CC: boris.ostrovsky@...cle.com
> CC: jgross@...e.com
> CC: Eric Van Hensbergen <ericvh@...il.com>
> CC: Ron Minnich <rminnich@...dia.gov>
> CC: Latchesar Ionkov <lucho@...kov.net>
> CC: v9fs-developer@...ts.sourceforge.net
> ---
> net/9p/trans_xen.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 82 insertions(+), 1 deletion(-)
>
> diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
> index 9f6cf8d..4e26556 100644
> --- a/net/9p/trans_xen.c
> +++ b/net/9p/trans_xen.c
> @@ -47,22 +47,103 @@ struct xen_9pfs_front_priv {
> };
> static LIST_HEAD(xen_9pfs_devs);
>
> +/* We don't currently allow canceling of requests */
> static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
> {
> - return 0;
> + return 1;
> }
>
> static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
> {
> + struct xen_9pfs_front_priv *priv = NULL;
> +
> + list_for_each_entry(priv, &xen_9pfs_devs, list) {
> + if (!strcmp(priv->tag, addr))
> + break;
> + }
You could simplify this (and p9_xen_close()) by assigning the client and
returning from inside the 'if' statement.
I am also not sure you need to initialize priv.
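E.g. something like this (untested sketch, keeping the rest of the logic
as in the patch):

	static int p9_xen_create(struct p9_client *client, const char *addr,
				 char *args)
	{
		struct xen_9pfs_front_priv *priv;

		list_for_each_entry(priv, &xen_9pfs_devs, list) {
			if (!strcmp(priv->tag, addr)) {
				priv->client = client;
				return 0;
			}
		}
		return -EINVAL;
	}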
> + if (!priv || strcmp(priv->tag, addr))
> + return -EINVAL;
> +
> + priv->client = client;
> return 0;
> }
>
> static void p9_xen_close(struct p9_client *client)
> {
> + struct xen_9pfs_front_priv *priv = NULL;
> +
> + list_for_each_entry(priv, &xen_9pfs_devs, list) {
> + if (priv->client == client)
> + break;
> + }
> + if (!priv || priv->client != client)
> + return;
> +
> + priv->client = NULL;
> + return;
> +}
> +
> +static int p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
> +{
> + RING_IDX cons, prod;
> +
> + cons = ring->intf->out_cons;
> + prod = ring->intf->out_prod;
> + mb();
> +
> + if (XEN_9PFS_RING_SIZE - xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size)
> + return 1;
> + else
> + return 0;
> }
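Minor: the if/else above could be collapsed into a single return, e.g.

	return XEN_9PFS_RING_SIZE -
	       xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size;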
>
> static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
> {
> + struct xen_9pfs_front_priv *priv = NULL;
> + RING_IDX cons, prod, masked_cons, masked_prod;
> + unsigned long flags;
> + uint32_t size = p9_req->tc->size;
> + struct xen_9pfs_dataring *ring;
> + int num;
> +
> + list_for_each_entry(priv, &xen_9pfs_devs, list) {
> + if (priv->client == client)
> + break;
> + }
> + if (priv == NULL || priv->client != client)
> + return -EINVAL;
> +
> + num = p9_req->tc->tag % priv->num_rings;
> + ring = &priv->rings[num];
> +
> +again:
> + while (wait_event_interruptible(ring->wq,
> + p9_xen_write_todo(ring, size) > 0) != 0);
> +
> + spin_lock_irqsave(&ring->lock, flags);
> + cons = ring->intf->out_cons;
> + prod = ring->intf->out_prod;
> + mb();
> +
> + if (XEN_9PFS_RING_SIZE - xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < size) {
This looks like p9_xen_write_todo(). BTW, where is xen_9pfs_queued()
defined? I couldn't find it. Same for xen_9pfs_mask() and
xen_9pfs_write_packet().
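Presumably this check could just call p9_xen_write_todo() again, e.g.
(untested):

	if (!p9_xen_write_todo(ring, size)) {
		spin_unlock_irqrestore(&ring->lock, flags);
		goto again;
	}

And if the missing helpers are the usual power-of-two ring arithmetic,
I would expect something along these lines (my guess, not taken from
this series):

	/* Assumed shapes, for illustration only. */
	static inline RING_IDX xen_9pfs_mask(RING_IDX idx, RING_IDX ring_size)
	{
		/* ring_size must be a power of two */
		return idx & (ring_size - 1);
	}

	static inline RING_IDX xen_9pfs_queued(RING_IDX prod, RING_IDX cons,
					       RING_IDX ring_size)
	{
		/* unsigned arithmetic handles index wrap-around */
		return prod - cons;
	}

but they should be defined (or imported) somewhere in the series.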
-boris
> + spin_unlock_irqrestore(&ring->lock, flags);
> + goto again;
> + }
> +
> + masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> + masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> +
> + xen_9pfs_write_packet(ring->ring.out,
> + &masked_prod, masked_cons,
> + XEN_9PFS_RING_SIZE, p9_req->tc->sdata, size);
> +
> + p9_req->status = REQ_STATUS_SENT;
> + wmb(); /* write ring before updating pointer */
> + prod += size;
> + ring->intf->out_prod = prod;
> + spin_unlock_irqrestore(&ring->lock, flags);
> + notify_remote_via_irq(ring->irq);
> +
> return 0;
> }
>
>