Message-ID: <1452426622-4471-34-git-send-email-mst@redhat.com>
Date: Sun, 10 Jan 2016 16:21:07 +0200
From: "Michael S. Tsirkin" <mst@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: Peter Zijlstra <peterz@...radead.org>,
Arnd Bergmann <arnd@...db.de>, linux-arch@...r.kernel.org,
Andrew Cooper <andrew.cooper3@...rix.com>,
Russell King - ARM Linux <linux@....linux.org.uk>,
virtualization@...ts.linux-foundation.org,
Stefano Stabellini <stefano.stabellini@...citrix.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...e.hu>, "H. Peter Anvin" <hpa@...or.com>,
Joe Perches <joe@...ches.com>,
David Miller <davem@...emloft.net>, linux-ia64@...r.kernel.org,
linuxppc-dev@...ts.ozlabs.org, linux-s390@...r.kernel.org,
sparclinux@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
linux-metag@...r.kernel.org, linux-mips@...ux-mips.org,
x86@...nel.org, user-mode-linux-devel@...ts.sourceforge.net,
adi-buildroot-devel@...ts.sourceforge.net,
linux-sh@...r.kernel.org, linux-xtensa@...ux-xtensa.org,
xen-devel@...ts.xenproject.org
Subject: [PATCH v3 33/41] virtio_ring: use virt_store_mb
We need a full barrier after writing out the event index; using
virt_store_mb there seems better than open-coding it. As usual, we need a
wrapper to account for strong barriers.

It's tempting to use this in vhost as well; for that, we'll
need a variant of smp_store_mb that works on __user pointers.
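For illustration only (not part of this patch), such a __user variant
might look roughly like the sketch below; the helper name
vhost_store_mb_user and the __put_user()/smp_mb() combination are
assumptions, not an existing kernel API:

/* Hypothetical sketch of a store-with-full-barrier to userspace memory,
 * as vhost would need.  The caller is assumed to have validated the
 * pointer and to be running in the right mm context.
 */
static inline int vhost_store_mb_user(__virtio16 __user *p, __virtio16 v)
{
	if (__put_user(v, p))
		return -EFAULT;	/* fault writing the guest-visible index */
	smp_mb();		/* full barrier, as smp_store_mb() would provide */
	return 0;
}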
Signed-off-by: Michael S. Tsirkin <mst@...hat.com>
---
include/linux/virtio_ring.h | 11 +++++++++++
drivers/virtio/virtio_ring.c | 15 +++++++++------
2 files changed, 20 insertions(+), 6 deletions(-)
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index f3fa55b..a156e2b 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -45,6 +45,17 @@ static inline void virtio_wmb(bool weak_barriers)
 		wmb();
 }
 
+static inline void virtio_store_mb(bool weak_barriers,
+				   __virtio16 *p, __virtio16 v)
+{
+	if (weak_barriers) {
+		virt_store_mb(*p, v);
+	} else {
+		WRITE_ONCE(*p, v);
+		mb();
+	}
+}
+
 struct virtio_device;
 struct virtqueue;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ee663c4..e12e385 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -517,10 +517,10 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	/* If we expect an interrupt for the next entry, tell host
 	 * by writing event index and flush out the write before
 	 * the read in the next get_buf call. */
-	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
-		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
-		virtio_mb(vq->weak_barriers);
-	}
+	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
+		virtio_store_mb(vq->weak_barriers,
+				&vring_used_event(&vq->vring),
+				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
 
 #ifdef DEBUG
 	vq->last_add_time_valid = false;
@@ -653,8 +653,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	}
 	/* TODO: tune this threshold */
 	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
-	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
-	virtio_mb(vq->weak_barriers);
+
+	virtio_store_mb(vq->weak_barriers,
+			&vring_used_event(&vq->vring),
+			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
+
 	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
 		END_USE(vq);
 		return false;
--
MST