Message-ID: <20220607005951.2221-1-liubo03@inspur.com>
Date: Mon, 6 Jun 2022 20:59:51 -0400
From: Bo Liu <liubo03@...pur.com>
To: <mst@...hat.com>, <jasowang@...hat.com>
CC: <virtualization@...ts.linux-foundation.org>,
<linux-kernel@...r.kernel.org>, Bo Liu <liubo03@...pur.com>
Subject: [PATCH] virtio_ring: remove unnecessary to_vvq call
Many of the internal split/packed helpers take the public handle "_vq" and
immediately call to_vvq() to obtain "vq", even though their callers have
already performed that conversion. Pass "vq" directly instead, avoiding the
redundant to_vvq() calls.
Signed-off-by: Bo Liu <liubo03@...pur.com>
---
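Note for readers unfamiliar with the pattern being removed: to_vvq() in
virtio_ring.c recovers the containing struct vring_virtqueue from the public
struct virtqueue handle embedded inside it (the usual container_of() idiom).
The sketch below shows the before/after shape of the refactor; the type
layout is simplified and the helper names are hypothetical, used only to
make the change concrete.

    #include <stddef.h>

    /* Simplified stand-ins; the real structs live in virtio headers. */
    struct virtqueue { void *vdev; };
    struct vring_virtqueue { struct virtqueue vq; /* ... private state ... */ };

    /* The usual container_of() pattern: public handle -> private struct. */
    #define to_vvq(_vq) \
            ((struct vring_virtqueue *)((char *)(_vq) - \
                    offsetof(struct vring_virtqueue, vq)))

    /* Before: each internal helper re-derives vq from the public handle,
     * even though the exported wrapper already did the same conversion. */
    static void helper_before(struct virtqueue *_vq)
    {
            struct vring_virtqueue *vq = to_vvq(_vq);
            (void)vq;          /* ... uses vq->..., plus _vq->vdev ... */
    }

    /* After: the caller passes vq directly, and the device pointer is
     * reached through the embedded handle as vq->vq.vdev. */
    static void helper_after(struct vring_virtqueue *vq)
    {
            (void)vq;          /* ... uses vq->..., plus vq->vq.vdev ... */
    }

    int main(void)
    {
            struct vring_virtqueue vvq = { .vq = { .vdev = NULL } };
            helper_before(&vvq.vq);   /* old style: public handle */
            helper_after(&vvq);       /* new style: private struct */
            return 0;
    }

The exported entry points (virtqueue_kick_prepare() etc.) still call
to_vvq() once, as they must; the patch only drops the second conversion
inside the static helpers they dispatch to.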
drivers/virtio/virtio_ring.c | 100 ++++++++++++++---------------------
1 file changed, 41 insertions(+), 59 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 13a7348cedff..f82db59fdbdc 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -648,9 +648,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
return -ENOMEM;
}
-static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
+static bool virtqueue_kick_prepare_split(struct vring_virtqueue *vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
u16 new, old;
bool needs_kick;
@@ -667,12 +666,12 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
LAST_ADD_TIME_INVALID(vq);
if (vq->event) {
- needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
+ needs_kick = vring_need_event(virtio16_to_cpu(vq->vq.vdev,
vring_avail_event(&vq->split.vring)),
new, old);
} else {
needs_kick = !(vq->split.vring.used->flags &
- cpu_to_virtio16(_vq->vdev,
+ cpu_to_virtio16(vq->vq.vdev,
VRING_USED_F_NO_NOTIFY));
}
END_USE(vq);
@@ -735,11 +734,10 @@ static inline bool more_used_split(const struct vring_virtqueue *vq)
vq->split.vring.used->idx);
}
-static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
+static void *virtqueue_get_buf_ctx_split(struct vring_virtqueue *vq,
unsigned int *len,
void **ctx)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
void *ret;
unsigned int i;
u16 last_used;
@@ -761,9 +759,9 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
virtio_rmb(vq->weak_barriers);
last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
- i = virtio32_to_cpu(_vq->vdev,
+ i = virtio32_to_cpu(vq->vq.vdev,
vq->split.vring.used->ring[last_used].id);
- *len = virtio32_to_cpu(_vq->vdev,
+ *len = virtio32_to_cpu(vq->vq.vdev,
vq->split.vring.used->ring[last_used].len);
if (unlikely(i >= vq->split.vring.num)) {
@@ -785,7 +783,7 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
virtio_store_mb(vq->weak_barriers,
&vring_used_event(&vq->split.vring),
- cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
+ cpu_to_virtio16(vq->vq.vdev, vq->last_used_idx));
LAST_ADD_TIME_INVALID(vq);
@@ -793,10 +791,8 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
return ret;
}
-static void virtqueue_disable_cb_split(struct virtqueue *_vq)
+static void virtqueue_disable_cb_split(struct vring_virtqueue *vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
if (vq->event)
@@ -804,14 +800,13 @@ static void virtqueue_disable_cb_split(struct virtqueue *_vq)
vring_used_event(&vq->split.vring) = 0x0;
else
vq->split.vring.avail->flags =
- cpu_to_virtio16(_vq->vdev,
+ cpu_to_virtio16(vq->vq.vdev,
vq->split.avail_flags_shadow);
}
}
-static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
+static unsigned int virtqueue_enable_cb_prepare_split(struct vring_virtqueue *vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
u16 last_used_idx;
START_USE(vq);
@@ -825,26 +820,23 @@ static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
if (!vq->event)
vq->split.vring.avail->flags =
- cpu_to_virtio16(_vq->vdev,
+ cpu_to_virtio16(vq->vq.vdev,
vq->split.avail_flags_shadow);
}
- vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
+ vring_used_event(&vq->split.vring) = cpu_to_virtio16(vq->vq.vdev,
last_used_idx = vq->last_used_idx);
END_USE(vq);
return last_used_idx;
}
-static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
+static bool virtqueue_poll_split(struct vring_virtqueue *vq, unsigned int last_used_idx)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
+ return (u16)last_used_idx != virtio16_to_cpu(vq->vq.vdev,
vq->split.vring.used->idx);
}
-static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
+static bool virtqueue_enable_cb_delayed_split(struct vring_virtqueue *vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
u16 bufs;
START_USE(vq);
@@ -858,7 +850,7 @@ static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
if (!vq->event)
vq->split.vring.avail->flags =
- cpu_to_virtio16(_vq->vdev,
+ cpu_to_virtio16(vq->vq.vdev,
vq->split.avail_flags_shadow);
}
/* TODO: tune this threshold */
@@ -866,9 +858,9 @@ static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
virtio_store_mb(vq->weak_barriers,
&vring_used_event(&vq->split.vring),
- cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
+ cpu_to_virtio16(vq->vq.vdev, vq->last_used_idx + bufs));
- if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
+ if (unlikely((u16)(virtio16_to_cpu(vq->vq.vdev, vq->split.vring.used->idx)
- vq->last_used_idx) > bufs)) {
END_USE(vq);
return false;
@@ -878,9 +870,8 @@ static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
return true;
}
-static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
+static void *virtqueue_detach_unused_buf_split(struct vring_virtqueue *vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i;
void *buf;
@@ -893,7 +884,7 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
buf = vq->split.desc_state[i].data;
detach_buf_split(vq, i, NULL);
vq->split.avail_idx_shadow--;
- vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
+ vq->split.vring.avail->idx = cpu_to_virtio16(vq->vq.vdev,
vq->split.avail_idx_shadow);
END_USE(vq);
return buf;
@@ -1296,9 +1287,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
return -EIO;
}
-static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
+static bool virtqueue_kick_prepare_packed(struct vring_virtqueue *vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
u16 new, old, off_wrap, flags, wrap_counter, event_idx;
bool needs_kick;
union {
@@ -1410,11 +1400,10 @@ static inline bool more_used_packed(const struct vring_virtqueue *vq)
vq->packed.used_wrap_counter);
}
-static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
+static void *virtqueue_get_buf_ctx_packed(struct vring_virtqueue *vq,
unsigned int *len,
void **ctx)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
u16 last_used, id;
void *ret;
@@ -1475,10 +1464,8 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
return ret;
}
-static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
+static void virtqueue_disable_cb_packed(struct vring_virtqueue *vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
vq->packed.vring.driver->flags =
@@ -1486,10 +1473,8 @@ static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
}
}
-static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
+static unsigned int virtqueue_enable_cb_prepare_packed(struct vring_virtqueue *vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
START_USE(vq);
/*
@@ -1522,9 +1507,8 @@ static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
VRING_PACKED_EVENT_F_WRAP_CTR);
}
-static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
+static bool virtqueue_poll_packed(struct vring_virtqueue *vq, u16 off_wrap)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
bool wrap_counter;
u16 used_idx;
@@ -1534,9 +1518,8 @@ static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
return is_used_desc_packed(vq, used_idx, wrap_counter);
}
-static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
+static bool virtqueue_enable_cb_delayed_packed(struct vring_virtqueue *vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
u16 used_idx, wrap_counter;
u16 bufs;
@@ -1593,9 +1576,8 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
return true;
}
-static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
+static void *virtqueue_detach_unused_buf_packed(struct vring_virtqueue *vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i;
void *buf;
@@ -1906,8 +1888,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
- virtqueue_kick_prepare_split(_vq);
+ return vq->packed_ring ? virtqueue_kick_prepare_packed(vq) :
+ virtqueue_kick_prepare_split(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
@@ -1977,8 +1959,8 @@ void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
- virtqueue_get_buf_ctx_split(_vq, len, ctx);
+ return vq->packed_ring ? virtqueue_get_buf_ctx_packed(vq, len, ctx) :
+ virtqueue_get_buf_ctx_split(vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
@@ -2007,9 +1989,9 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
return;
if (vq->packed_ring)
- virtqueue_disable_cb_packed(_vq);
+ virtqueue_disable_cb_packed(vq);
else
- virtqueue_disable_cb_split(_vq);
+ virtqueue_disable_cb_split(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
@@ -2032,8 +2014,8 @@ unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
if (vq->event_triggered)
vq->event_triggered = false;
- return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
- virtqueue_enable_cb_prepare_split(_vq);
+ return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(vq) :
+ virtqueue_enable_cb_prepare_split(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
@@ -2054,8 +2036,8 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
return false;
virtio_mb(vq->weak_barriers);
- return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
- virtqueue_poll_split(_vq, last_used_idx);
+ return vq->packed_ring ? virtqueue_poll_packed(vq, last_used_idx) :
+ virtqueue_poll_split(vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
@@ -2098,8 +2080,8 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
if (vq->event_triggered)
vq->event_triggered = false;
- return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
- virtqueue_enable_cb_delayed_split(_vq);
+ return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(vq) :
+ virtqueue_enable_cb_delayed_split(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
@@ -2115,8 +2097,8 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
- virtqueue_detach_unused_buf_split(_vq);
+ return vq->packed_ring ? virtqueue_detach_unused_buf_packed(vq) :
+ virtqueue_detach_unused_buf_split(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
--
2.27.0