Message-Id: <1497553787-3709-12-git-send-email-sstabellini@kernel.org>
Date:   Thu, 15 Jun 2017 12:09:41 -0700
From:   Stefano Stabellini <sstabellini@...nel.org>
To:     xen-devel@...ts.xen.org
Cc:     linux-kernel@...r.kernel.org, sstabellini@...nel.org,
        jgross@...e.com, boris.ostrovsky@...cle.com,
        Stefano Stabellini <stefano@...reto.com>
Subject: [PATCH v4 12/18] xen/pvcalls: implement poll command

Implement poll on passive sockets by stashing a copy of the request in
mappass->reqcopy and delaying the response: the reply is only sent once
there is data on the passive socket.
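
As a rough illustration of this delayed-response idea, here is a small
standalone userspace sketch (not the kernel code below; names such as
toy_mapping and toy_data_ready are invented for illustration). The
data-ready callback answers a previously parked poll request instead of
the poll handler replying immediately:

#include <stdio.h>
#include <string.h>

enum { TOY_CMD_NONE = 0, TOY_CMD_POLL = 1 };

struct toy_request {
        int cmd;
        int req_id;
};

/* Per-passive-socket state; plays the role of sockpass_mapping. */
struct toy_mapping {
        struct toy_request reqcopy;   /* parked poll request, if any */
};

/* Models the data-ready callback: reply now if a poll is parked. */
static void toy_data_ready(struct toy_mapping *m)
{
        if (m->reqcopy.cmd == TOY_CMD_POLL) {
                printf("reply to req_id %d: data is available\n",
                       m->reqcopy.req_id);
                m->reqcopy.cmd = TOY_CMD_NONE;   /* slot is free again */
        } else {
                printf("no poll parked, run the normal accept work\n");
        }
}

int main(void)
{
        struct toy_mapping m;
        struct toy_request poll_req = { .cmd = TOY_CMD_POLL, .req_id = 42 };

        memset(&m, 0, sizeof(m));
        m.reqcopy = poll_req;   /* the poll handler parked the request */
        toy_data_ready(&m);     /* data arrives later, reply only now */
        return 0;
}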

Poll on active sockets is intentionally not implemented, as per the
spec: the frontend should just wait for events and check the indexes on
the indexes page.

Only one outstanding poll (or accept) request is supported per passive
socket at any given time.
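
The single-outstanding-request rule can be sketched in the same toy
terms (again a simplified standalone model, not the patch itself;
toy_poll and the return values are illustrative, with TOY_EINTR
mirroring the -EINTR the patch returns when a request is already
parked): a second poll while one is parked is rejected, and a poll that
finds data already queued is answered immediately rather than parked.

#include <stdbool.h>
#include <stdio.h>

enum { TOY_CMD_NONE = 0, TOY_CMD_POLL = 1 };
enum { TOY_REPLY_NOW = 0, TOY_REPLY_LATER = 1, TOY_EINTR = -1 };

struct toy_request {
        int cmd;
        int req_id;
};

struct toy_mapping {
        struct toy_request reqcopy;   /* parked poll request, if any */
        bool data_queued;             /* models a non-empty accept queue */
};

/* Models the poll handler: at most one parked poll/accept per socket. */
static int toy_poll(struct toy_mapping *m, const struct toy_request *req)
{
        if (m->reqcopy.cmd != TOY_CMD_NONE)
                return TOY_EINTR;       /* one outstanding request only */

        if (m->data_queued)
                return TOY_REPLY_NOW;   /* data already there, answer now */

        m->reqcopy = *req;              /* park it, answer on data_ready */
        return TOY_REPLY_LATER;
}

int main(void)
{
        struct toy_mapping m = { .data_queued = false };
        struct toy_request a = { .cmd = TOY_CMD_POLL, .req_id = 1 };
        struct toy_request b = { .cmd = TOY_CMD_POLL, .req_id = 2 };

        printf("first poll  -> %d (expect %d, reply later)\n",
               toy_poll(&m, &a), TOY_REPLY_LATER);
        printf("second poll -> %d (expect %d, rejected)\n",
               toy_poll(&m, &b), TOY_EINTR);
        return 0;
}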

Signed-off-by: Stefano Stabellini <stefano@...reto.com>
CC: boris.ostrovsky@...cle.com
CC: jgross@...e.com
---
 drivers/xen/pvcalls-back.c | 73 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 72 insertions(+), 1 deletion(-)

diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 701f1fc..be85977 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -348,11 +348,33 @@ static void __pvcalls_back_accept(struct work_struct *work)
 static void pvcalls_pass_sk_data_ready(struct sock *sock)
 {
 	struct sockpass_mapping *mappass = sock->sk_user_data;
+	struct pvcalls_fedata *fedata;
+	struct xen_pvcalls_response *rsp;
+	unsigned long flags;
+	int notify;
 
 	if (mappass == NULL)
 		return;
 
-	queue_work(mappass->wq, &mappass->register_work);
+	fedata = mappass->fedata;
+	spin_lock_irqsave(&mappass->copy_lock, flags);
+	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
+		rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+		rsp->req_id = mappass->reqcopy.req_id;
+		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
+		rsp->cmd = mappass->reqcopy.cmd;
+		rsp->ret = 0;
+
+		mappass->reqcopy.cmd = 0;
+		spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
+		if (notify)
+			notify_remote_via_irq(mappass->fedata->irq);
+	} else {
+		spin_unlock_irqrestore(&mappass->copy_lock, flags);
+		queue_work(mappass->wq, &mappass->register_work);
+	}
 }
 
 static int pvcalls_back_bind(struct xenbus_device *dev,
@@ -499,6 +521,55 @@ static int pvcalls_back_accept(struct xenbus_device *dev,
 static int pvcalls_back_poll(struct xenbus_device *dev,
 			     struct xen_pvcalls_request *req)
 {
+	struct pvcalls_fedata *fedata;
+	struct sockpass_mapping *mappass;
+	struct xen_pvcalls_response *rsp;
+	struct inet_connection_sock *icsk;
+	struct request_sock_queue *queue;
+	unsigned long flags;
+	int ret;
+	bool data;
+
+	fedata = dev_get_drvdata(&dev->dev);
+
+	mappass = radix_tree_lookup(&fedata->socketpass_mappings, req->u.poll.id);
+	if (mappass == NULL)
+		return -EINVAL;
+
+	/*
+	 * Limitation of the current implementation: only support one
+	 * concurrent accept or poll call on one socket.
+	 */
+	spin_lock_irqsave(&mappass->copy_lock, flags);
+	if (mappass->reqcopy.cmd != 0) {
+		ret = -EINTR;
+		goto out;
+	}
+
+	mappass->reqcopy = *req;
+	icsk = inet_csk(mappass->sock->sk);
+	queue = &icsk->icsk_accept_queue;
+	spin_lock(&queue->rskq_lock);
+	data = queue->rskq_accept_head != NULL;
+	spin_unlock(&queue->rskq_lock);
+	if (data) {
+		mappass->reqcopy.cmd = 0;
+		ret = 0;
+		goto out;
+	}
+	spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+	/* Tell the caller we don't need to send back a notification yet */
+	return -1;
+
+out:
+	spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.poll.id = req->u.poll.id;
+	rsp->ret = ret;
 	return 0;
 }
 
-- 
1.9.1
