[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1490209429-5542-6-git-send-email-sstabellini@kernel.org>
Date: Wed, 22 Mar 2017 12:03:48 -0700
From: Stefano Stabellini <sstabellini@...nel.org>
To: xen-devel@...ts.xenproject.org
Cc: sstabellini@...nel.org, linux-kernel@...r.kernel.org,
jgross@...e.com, Stefano Stabellini <stefano@...reto.com>,
groug@...d.org, Eric Van Hensbergen <ericvh@...il.com>,
Ron Minnich <rminnich@...dia.gov>,
Latchesar Ionkov <lucho@...kov.net>,
v9fs-developer@...ts.sourceforge.net
Subject: [PATCH v6 6/7] xen/9pfs: receive responses
Upon receiving a notification from the backend, schedule the
p9_xen_response work_struct. p9_xen_response checks whether any responses are
available; if so, it reads them one by one, calling p9_client_cb to send
them up to the 9p layer (p9_client_cb completes the request). Handle the
ring following the Xen 9pfs specification.
Signed-off-by: Stefano Stabellini <stefano@...reto.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@...cle.com>
Reviewed-by: Juergen Gross <jgross@...e.com>
CC: groug@...d.org
CC: jgross@...e.com
CC: Eric Van Hensbergen <ericvh@...il.com>
CC: Ron Minnich <rminnich@...dia.gov>
CC: Latchesar Ionkov <lucho@...kov.net>
CC: v9fs-developer@...ts.sourceforge.net
---
net/9p/trans_xen.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 57 insertions(+)
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index c0cb719..c0c5eef 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -182,6 +182,63 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
static void p9_xen_response(struct work_struct *work)
{
+ struct xen_9pfs_front_priv *priv;
+ struct xen_9pfs_dataring *ring;
+ RING_IDX cons, prod, masked_cons, masked_prod;
+ struct xen_9pfs_header h;
+ struct p9_req_t *req;
+ int status;
+
+ ring = container_of(work, struct xen_9pfs_dataring, work);
+ priv = ring->priv;
+
+ while (1) {
+ cons = ring->intf->in_cons;
+ prod = ring->intf->in_prod;
+ virt_rmb();
+
+ if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) <
+ sizeof(h)) {
+ notify_remote_via_irq(ring->irq);
+ return;
+ }
+
+ masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+ masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
+ /* First, read just the header */
+ xen_9pfs_read_packet(ring->data.in,
+ masked_prod, &masked_cons,
+ XEN_9PFS_RING_SIZE, &h, sizeof(h));
+
+ req = p9_tag_lookup(priv->client, h.tag);
+ if (!req || req->status != REQ_STATUS_SENT) {
+ dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
+ cons += h.size;
+ virt_mb();
+ ring->intf->in_cons = cons;
+ continue;
+ }
+
+ memcpy(req->rc, &h, sizeof(h));
+ req->rc->offset = 0;
+
+ masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+ /* Then, read the whole packet (including the header) */
+ xen_9pfs_read_packet(ring->data.in,
+ masked_prod, &masked_cons,
+ XEN_9PFS_RING_SIZE,
+ req->rc->sdata, h.size);
+
+ virt_mb();
+ cons += h.size;
+ ring->intf->in_cons = cons;
+
+ status = (req->status != REQ_STATUS_ERROR) ?
+ REQ_STATUS_RCVD : REQ_STATUS_ERROR;
+
+ p9_client_cb(priv->client, req, status);
+ }
}
static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
--
1.9.1
Powered by blists - more mailing lists