Message-ID: <20240328002149.1141302-4-gshan@redhat.com>
Date: Thu, 28 Mar 2024 10:21:49 +1000
From: Gavin Shan <gshan@...hat.com>
To: virtualization@...ts.linux.dev
Cc: linux-kernel@...r.kernel.org,
mst@...hat.com,
jasowang@...hat.com,
will@...nel.org,
davem@...emloft.net,
stefanha@...hat.com,
sgarzare@...hat.com,
keirf@...gle.com,
yihyu@...hat.com,
shan.gavin@...il.com
Subject: [PATCH v3 3/3] vhost: Improve vhost_get_avail_idx() with smp_rmb()

All callers of vhost_get_avail_idx() are concerned with the memory
barrier, imposed by smp_rmb(), that orders the read of vq->avail_idx
before the subsequent reads of available ring entries.

Improve vhost_get_avail_idx() so that smp_rmb() is executed whenever
avail_idx has advanced. With that, the callers no longer need to worry
about the memory barrier.
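
For context, the smp_rmb() pairs with the guest's publishing sequence:
the guest writes the available ring entry first and only then advances
avail_idx, with a write barrier in between, so the host must order its
avail_idx read before any ring entry reads. A minimal sketch of the
pairing (illustrative pseudo-code, not the actual driver or vhost
code):

    /* Guest (driver) side: publish a new buffer */
    avail->ring[avail_idx % num] = head;    /* expose the ring entry */
    smp_wmb();                              /* entry before index    */
    avail->idx = avail_idx + 1;             /* advance avail_idx     */

    /* Host (vhost) side: consume, mirrored by vhost_get_avail_idx() */
    avail_idx = avail->idx;                 /* read the index        */
    smp_rmb();                              /* index before entries  */
    head = avail->ring[last_avail_idx % num];
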
Suggested-by: Michael S. Tsirkin <mst@...hat.com>
Signed-off-by: Gavin Shan <gshan@...hat.com>
---
 drivers/vhost/vhost.c | 75 +++++++++++++++----------------------------
 1 file changed, 26 insertions(+), 49 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 32686c79c41d..e6882f4f6ce2 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1290,10 +1290,28 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
                 mutex_unlock(&d->vqs[i]->mutex);
 }
 
-static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
-                                      __virtio16 *idx)
+static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq)
 {
-        return vhost_get_avail(vq, *idx, &vq->avail->idx);
+        __virtio16 avail_idx;
+        int r;
+
+        r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
+        if (unlikely(r)) {
+                vq_err(vq, "Failed to access avail idx at %p\n",
+                       &vq->avail->idx);
+                return r;
+        }
+
+        vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+        if (vq->avail_idx != vq->last_avail_idx) {
+                /* Since we have updated vq->avail_idx, subsequent
+                 * reads of available ring entries must be ordered
+                 * after the avail_idx read.
+                 */
+                smp_rmb();
+        }
+
+        return 0;
 }
 
 static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
@@ -2499,7 +2517,6 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
         struct vring_desc desc;
         unsigned int i, head, found = 0;
         u16 last_avail_idx;
-        __virtio16 avail_idx;
         __virtio16 ring_head;
         int ret, access;
 
@@ -2507,12 +2524,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
         last_avail_idx = vq->last_avail_idx;
 
         if (vq->avail_idx == vq->last_avail_idx) {
-                if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
-                        vq_err(vq, "Failed to access avail idx at %p\n",
-                               &vq->avail->idx);
+                if (unlikely(vhost_get_avail_idx(vq)))
                         return -EFAULT;
-                }
-                vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
 
                 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
                         vq_err(vq, "Guest moved used index from %u to %u",
@@ -2525,11 +2538,6 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
                  */
                 if (vq->avail_idx == last_avail_idx)
                         return vq->num;
-
-                /* Only get avail ring entries after they have been
-                 * exposed by guest.
-                 */
-                smp_rmb();
         }
 
         /* Grab the next descriptor number they're advertising, and increment
@@ -2790,35 +2798,19 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
 /* return true if we're sure that avaiable ring is empty */
 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
-        __virtio16 avail_idx;
-        int r;
-
         if (vq->avail_idx != vq->last_avail_idx)
                 return false;
 
-        r = vhost_get_avail_idx(vq, &avail_idx);
-        if (unlikely(r))
-                return false;
-
-        vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
-        if (vq->avail_idx != vq->last_avail_idx) {
-                /* Since we have updated avail_idx, the following
-                 * call to vhost_get_vq_desc() will read available
-                 * ring entries. Make sure that read happens after
-                 * the avail_idx read.
-                 */
-                smp_rmb();
+        if (unlikely(vhost_get_avail_idx(vq)))
                 return false;
-        }
 
-        return true;
+        return vq->avail_idx == vq->last_avail_idx;
 }
 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
 
 /* OK, now we need to know about added descriptors. */
 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
-        __virtio16 avail_idx;
         int r;
 
         if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
@@ -2842,25 +2834,10 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
         /* They could have slipped one in as we were doing that: make
          * sure it's written, then check again. */
         smp_mb();
-        r = vhost_get_avail_idx(vq, &avail_idx);
-        if (r) {
-                vq_err(vq, "Failed to check avail idx at %p: %d\n",
-                       &vq->avail->idx, r);
+        if (unlikely(vhost_get_avail_idx(vq)))
                 return false;
-        }
-
-        vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
-        if (vq->avail_idx != vq->last_avail_idx) {
-                /* Since we have updated avail_idx, the following
-                 * call to vhost_get_vq_desc() will read available
-                 * ring entries. Make sure that read happens after
-                 * the avail_idx read.
-                 */
-                smp_rmb();
-                return true;
-        }
-        return false;
+        return vq->avail_idx != vq->last_avail_idx;
 }
 EXPORT_SYMBOL_GPL(vhost_enable_notify);
--
2.44.0