Message-Id: <1497553787-3709-11-git-send-email-sstabellini@kernel.org>
Date:   Thu, 15 Jun 2017 12:09:40 -0700
From:   Stefano Stabellini <sstabellini@...nel.org>
To:     xen-devel@...ts.xen.org
Cc:     linux-kernel@...r.kernel.org, sstabellini@...nel.org,
        jgross@...e.com, boris.ostrovsky@...cle.com,
        Stefano Stabellini <stefano@...reto.com>
Subject: [PATCH v4 11/18] xen/pvcalls: implement accept command

Implement the accept command by calling inet_accept. To avoid blocking
in the kernel, call inet_accept(O_NONBLOCK) from a workqueue, which gets
scheduled on sk_data_ready (on a passive socket, sk_data_ready means
that there are connections to accept).
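
The sk_data_ready hook itself is installed when the passive socket is
set up, earlier in this series; a minimal sketch of that wiring (with
mappass standing for the sockpass_mapping, matching the sk_user_data
lookup in pvcalls_pass_sk_data_ready below):

	/* sk_user_data carries the sockpass_mapping back into the
	 * callback; pvcalls_pass_sk_data_ready just queues the accept
	 * work item on the per-socket workqueue. */
	sock->sk->sk_user_data = mappass;
	sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;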

Use the reqcopy field to store the request. Accept the new socket from
the delayed work function, create a new sock_mapping for it, map
the indexes page and data ring, and reply to the other end. Allocate an
ioworker for the socket.

Only support one outstanding blocking accept request per socket at any
given time.
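
Concretely, reqcopy acts as a single-slot mailbox guarded by copy_lock;
a condensed view of the handshake implemented below:

	/* pvcalls_back_accept():   slot free (reqcopy.cmd == 0)?
	 *                          copy *req in and queue register_work;
	 *                          otherwise reply -EINTR.
	 * __pvcalls_back_accept(): consume reqcopy, push the response,
	 *                          then set reqcopy.cmd = 0 to free the
	 *                          slot. */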

Add a field to sock_mapping to remember the passive socket from which an
active socket was created.

Signed-off-by: Stefano Stabellini <stefano@...reto.com>
CC: boris.ostrovsky@...cle.com
CC: jgross@...e.com
---
 drivers/xen/pvcalls-back.c | 110 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)

diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index e5c535d..701f1fc 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -64,6 +64,7 @@ struct pvcalls_ioworker {
 struct sock_mapping {
 	struct list_head list;
 	struct pvcalls_fedata *fedata;
+	struct sockpass_mapping *sockpass;
 	struct socket *sock;
 	uint64_t id;
 	grant_ref_t ref;
@@ -276,10 +277,82 @@ static int pvcalls_back_release(struct xenbus_device *dev,
 
 static void __pvcalls_back_accept(struct work_struct *work)
 {
+	struct sockpass_mapping *mappass = container_of(
+		work, struct sockpass_mapping, register_work);
+	struct sock_mapping *map;
+	struct pvcalls_ioworker *iow;
+	struct pvcalls_fedata *fedata;
+	struct socket *sock;
+	struct xen_pvcalls_response *rsp;
+	struct xen_pvcalls_request *req;
+	int notify;
+	int ret = -EINVAL;
+	unsigned long flags;
+
+	fedata = mappass->fedata;
+	/*
+	 * __pvcalls_back_accept can race against pvcalls_back_accept.
+	 * We only need to check the value of "cmd" on read. It could be
+	 * done atomically, but to simplify the code on the write side, we
+	 * use a spinlock.
+	 */
+	spin_lock_irqsave(&mappass->copy_lock, flags);
+	req = &mappass->reqcopy;
+	if (req->cmd != PVCALLS_ACCEPT) {
+		spin_unlock_irqrestore(&mappass->copy_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+	sock = sock_alloc();
+	if (sock == NULL)
+		goto out_error;
+	sock->type = mappass->sock->type;
+	sock->ops = mappass->sock->ops;
+
+	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
+	if (ret == -EAGAIN) {
+		sock_release(sock);
+		goto out_error;
+	}
+
+	map = pvcalls_new_active_socket(fedata,
+					req->u.accept.id_new,
+					req->u.accept.ref,
+					req->u.accept.evtchn,
+					sock);
+	if (!map) {
+		sock_release(sock);
+		goto out_error;
+	}
+
+	map->sockpass = mappass;
+	iow = &map->ioworker;
+	atomic_inc(&map->read);
+	atomic_inc(&map->io);
+	queue_work(iow->wq, &iow->register_work);
+
+out_error:
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.accept.id = req->u.accept.id;
+	rsp->ret = ret;
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
+	if (notify)
+		notify_remote_via_irq(fedata->irq);
+
+	mappass->reqcopy.cmd = 0;
 }
 
 static void pvcalls_pass_sk_data_ready(struct sock *sock)
 {
+	struct sockpass_mapping *mappass = sock->sk_user_data;
+
+	if (mappass == NULL)
+		return;
+
+	queue_work(mappass->wq, &mappass->register_work);
 }
 
 static int pvcalls_back_bind(struct xenbus_device *dev,
@@ -383,6 +456,43 @@ static int pvcalls_back_listen(struct xenbus_device *dev,
 static int pvcalls_back_accept(struct xenbus_device *dev,
 			       struct xen_pvcalls_request *req)
 {
+	struct pvcalls_fedata *fedata;
+	struct sockpass_mapping *mappass;
+	int ret = -EINVAL;
+	struct xen_pvcalls_response *rsp;
+	unsigned long flags;
+
+	fedata = dev_get_drvdata(&dev->dev);
+
+	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
+		req->u.accept.id);
+	if (mappass == NULL)
+		goto out_error;
+
+	/*
+	 * Limitation of the current implementation: only support one
+	 * concurrent accept or poll call on one socket.
+	 */
+	spin_lock_irqsave(&mappass->copy_lock, flags);
+	if (mappass->reqcopy.cmd != 0) {
+		spin_unlock_irqrestore(&mappass->copy_lock, flags);
+		ret = -EINTR;
+		goto out_error;
+	}
+
+	mappass->reqcopy = *req;
+	spin_unlock_irqrestore(&mappass->copy_lock, flags);
+	queue_work(mappass->wq, &mappass->register_work);
+
+	/* Tell the caller we don't need to send back a notification yet */
+	return -1;
+
+out_error:
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.accept.id = req->u.accept.id;
+	rsp->ret = ret;
 	return 0;
 }
 
-- 
1.9.1
