Message-Id: <1494880570-14209-17-git-send-email-sstabellini@kernel.org>
Date:   Mon, 15 May 2017 13:36:09 -0700
From:   Stefano Stabellini <sstabellini@...nel.org>
To:     xen-devel@...ts.xen.org
Cc:     linux-kernel@...r.kernel.org, sstabellini@...nel.org,
        jgross@...e.com, boris.ostrovsky@...cle.com,
        Stefano Stabellini <stefano@...reto.com>
Subject: [PATCH 17/18] xen/pvcalls: implement write

When the other end notifies us that there is data to be written
(pvcalls_back_conn_event), add the corresponding sock_mapping to the
ioworker list, increment the io and write counters, and schedule the
ioworker.
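
For readers looking at this patch on its own: the per-worker state used
here is introduced by earlier patches in this series. The sketch below is
inferred purely from how the fields are used in the hunks that follow, so
treat the names and layout as assumptions rather than the series' literal
definitions:

	struct pvcalls_ioworker {
		struct work_struct register_work;  /* runs pvcalls_back_ioworker() */
		spinlock_t lock;                   /* protects the wqs list */
		struct list_head wqs;              /* sock_mappings with pending work */
		atomic_t io;                       /* outstanding I/O requests */
	};

	struct pvcalls_back_global {
		struct workqueue_struct *wq;        /* backend workqueue */
		struct pvcalls_ioworker *ioworkers; /* indexed by map->data_worker */
	} pvcalls_back_global;

Each sock_mapping additionally carries a data_worker index, a queue list
node and an atomic write counter, which pvcalls_back_conn_event() updates
to flag pending output.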

Implement the write function called by the ioworker: read the data from
the data ring and write it to the socket by calling inet_sendmsg.
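
The index arithmetic in pvcalls_conn_back_write() relies on the
pvcalls_mask() and pvcalls_queued() helpers introduced earlier in the
series. As a rough sketch only, assuming the usual power-of-two ring
convention (the authoritative definitions live in the earlier patches):

	static inline RING_IDX pvcalls_mask(RING_IDX idx, RING_IDX ring_size)
	{
		/* ring_size is a power of two, so masking wraps the index */
		return idx & (ring_size - 1);
	}

	static inline RING_IDX pvcalls_queued(RING_IDX prod, RING_IDX cons,
					      RING_IDX ring_size)
	{
		/* bytes queued between cons and prod, accounting for wrap */
		RING_IDX size;

		if (prod == cons)
			return 0;

		prod = pvcalls_mask(prod, ring_size);
		cons = pvcalls_mask(cons, ring_size);

		if (prod == cons)
			return ring_size;

		if (prod > cons)
			size = prod - cons;
		else
			size = ring_size + prod - cons;
		return size;
	}

With helpers of this shape, the single-kvec branch below covers data that
does not wrap past the end of the ring, while the two-kvec branch splits a
wrapped region into its tail and head segments.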

Set out_error on error.

Signed-off-by: Stefano Stabellini <stefano@...reto.com>
CC: boris.ostrovsky@...cle.com
CC: jgross@...e.com
---
 drivers/xen/pvcalls-back.c | 80 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 79 insertions(+), 1 deletion(-)

diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 0f715a8..2de43c3 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -179,7 +179,67 @@ static void pvcalls_conn_back_read(unsigned long opaque)
 
 static int pvcalls_conn_back_write(struct sock_mapping *map)
 {
-	return 0;
+	struct pvcalls_data_intf *intf = map->ring;
+	struct pvcalls_data *data = &map->data;
+	struct msghdr msg;
+	struct kvec vec[2];
+	RING_IDX cons, prod, size, ring_size;
+	int ret;
+
+	cons = intf->out_cons;
+	prod = intf->out_prod;
+	/* read the indexes before dealing with the data */
+	virt_mb();
+
+	ring_size = XEN_FLEX_RING_SIZE(map->ring_order);
+	size = pvcalls_queued(prod, cons, ring_size);
+	if (size == 0)
+		return 0;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_flags |= MSG_DONTWAIT;
+	msg.msg_iter.type = ITER_KVEC|READ;
+	msg.msg_iter.count = size;
+	if (pvcalls_mask(prod, ring_size) > pvcalls_mask(cons, ring_size)) {
+		vec[0].iov_base = data->out + pvcalls_mask(cons, ring_size);
+		vec[0].iov_len = size;
+		msg.msg_iter.kvec = vec;
+		msg.msg_iter.nr_segs = 1;
+	} else {
+		vec[0].iov_base = data->out + pvcalls_mask(cons, ring_size);
+		vec[0].iov_len = ring_size -
+			pvcalls_mask(cons, ring_size);
+		vec[1].iov_base = data->out;
+		vec[1].iov_len = size - vec[0].iov_len;
+		msg.msg_iter.kvec = vec;
+		msg.msg_iter.nr_segs = 2;
+	}
+
+	atomic_set(&map->write, 0);
+	ret = inet_sendmsg(map->sock, &msg, size);
+	if (ret == -EAGAIN || ret < size) {
+		atomic_inc(&map->write);
+		atomic_inc(&pvcalls_back_global.ioworkers[map->data_worker].io);
+	}
+	if (ret == -EAGAIN)
+		return ret;
+
+	/* read the data, then update the indexes */
+	virt_wmb();
+	if (ret < 0) {
+		intf->out_error = ret;
+	} else {
+		intf->out_error = 0;
+		intf->out_cons = cons + ret;
+		prod = intf->out_prod;
+	}
+	/* update the indexes, then notify the other end */
+	virt_wmb();
+	if (prod != cons + ret)
+		atomic_inc(&map->write);
+	notify_remote_via_irq(map->irq);
+
+	return ret;
 }
 
 static void pvcalls_back_ioworker(struct work_struct *work)
@@ -914,6 +974,24 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
 
 static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
 {
+	struct sock_mapping *map = sock_map;
+	struct pvcalls_ioworker *iow;
+	unsigned long flags;
+
+	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
+		map->sock->sk->sk_user_data != map)
+		return IRQ_HANDLED;
+
+	iow = &pvcalls_back_global.ioworkers[map->data_worker];
+	spin_lock_irqsave(&iow->lock, flags);
+	atomic_inc(&map->write);
+	if (list_empty(&map->queue))
+		list_add_tail(&map->queue, &iow->wqs);
+	spin_unlock_irqrestore(&iow->lock, flags);
+	atomic_inc(&iow->io);
+	queue_work_on(map->data_worker, pvcalls_back_global.wq,
+		&iow->register_work);
+
 	return IRQ_HANDLED;
 }
 
-- 
1.9.1
