[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <ba91cc01-4e48-2743-f6ef-4aad4b821eb6@redhat.com>
Date: Thu, 7 Mar 2019 10:38:30 +0800
From: Jason Wang <jasowang@...hat.com>
To: Christophe de Dinechin <christophe.de.dinechin@...il.com>
Cc: "Michael S. Tsirkin" <mst@...hat.com>,
KVM list <kvm@...r.kernel.org>,
"open list:VIRTIO GPU DRIVER"
<virtualization@...ts.linux-foundation.org>,
netdev@...r.kernel.org, open list <linux-kernel@...r.kernel.org>,
Peter Xu <peterx@...hat.com>, linux-mm@...ck.org,
aarcange@...hat.com
Subject: Re: [RFC PATCH V2 2/5] vhost: fine grain userspace memory accessors
On 2019/3/6 下午6:45, Christophe de Dinechin wrote:
>
>> On 6 Mar 2019, at 08:18, Jason Wang <jasowang@...hat.com> wrote:
>>
>> This is used to hide the metadata address from virtqueue helpers. This
>> will allow to implement a vmap based fast accessing to metadata.
>>
>> Signed-off-by: Jason Wang <jasowang@...hat.com>
>> ---
>> drivers/vhost/vhost.c | 94 +++++++++++++++++++++++++++++++++++++++++----------
>> 1 file changed, 77 insertions(+), 17 deletions(-)
>>
>> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
>> index 400aa78..29709e7 100644
>> --- a/drivers/vhost/vhost.c
>> +++ b/drivers/vhost/vhost.c
>> @@ -869,6 +869,34 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
>> ret; \
>> })
>>
>> +static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
>> +{
>> + return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
>> + vhost_avail_event(vq));
>> +}
>> +
>> +static inline int vhost_put_used(struct vhost_virtqueue *vq,
>> + struct vring_used_elem *head, int idx,
>> + int count)
>> +{
>> + return vhost_copy_to_user(vq, vq->used->ring + idx, head,
>> + count * sizeof(*head));
>> +}
>> +
>> +static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
>> +
>> +{
>> + return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
>> + &vq->used->flags);
>> +}
>> +
>> +static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
>> +
>> +{
>> + return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
>> + &vq->used->idx);
>> +}
>> +
>> #define vhost_get_user(vq, x, ptr, type) \
>> ({ \
>> int ret; \
>> @@ -907,6 +935,43 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
>> mutex_unlock(&d->vqs[i]->mutex);
>> }
>>
>> +static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
>> + __virtio16 *idx)
>> +{
>> + return vhost_get_avail(vq, *idx, &vq->avail->idx);
>> +}
>> +
>> +static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
>> + __virtio16 *head, int idx)
>> +{
>> + return vhost_get_avail(vq, *head,
>> + &vq->avail->ring[idx & (vq->num - 1)]);
>> +}
>> +
>> +static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
>> + __virtio16 *flags)
>> +{
>> + return vhost_get_avail(vq, *flags, &vq->avail->flags);
>> +}
>> +
>> +static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
>> + __virtio16 *event)
>> +{
>> + return vhost_get_avail(vq, *event, vhost_used_event(vq));
>> +}
>> +
>> +static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
>> + __virtio16 *idx)
>> +{
>> + return vhost_get_used(vq, *idx, &vq->used->idx);
>> +}
>> +
>> +static inline int vhost_get_desc(struct vhost_virtqueue *vq,
>> + struct vring_desc *desc, int idx)
>> +{
>> + return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
>> +}
>> +
>> static int vhost_new_umem_range(struct vhost_umem *umem,
>> u64 start, u64 size, u64 end,
>> u64 userspace_addr, int perm)
>> @@ -1840,8 +1905,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
>> static int vhost_update_used_flags(struct vhost_virtqueue *vq)
>> {
>> void __user *used;
>> - if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
>> - &vq->used->flags) < 0)
>> + if (vhost_put_used_flags(vq))
>> return -EFAULT;
>> if (unlikely(vq->log_used)) {
>> /* Make sure the flag is seen before log. */
>> @@ -1858,8 +1922,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
>>
>> static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
>> {
>> - if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
>> - vhost_avail_event(vq)))
>> + if (vhost_put_avail_event(vq))
>> return -EFAULT;
>> if (unlikely(vq->log_used)) {
>> void __user *used;
>> @@ -1895,7 +1958,7 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
>> r = -EFAULT;
>> goto err;
>> }
>> - r = vhost_get_used(vq, last_used_idx, &vq->used->idx);
>> + r = vhost_get_used_idx(vq, &last_used_idx);
>> if (r) {
>> vq_err(vq, "Can't access used idx at %p\n",
>> &vq->used->idx);
> From the error case, it looks like you are not entirely encapsulating
> knowledge of what the accessor uses, i.e. it’s not:
>
> vq_err(vq, "Can't access used idx at %p\n",
> &last_used_idx);
>
> Maybe move error message within accessor?
Good catch. Will fix, but I still prefer to keep vq_err() where it is.
Moving the error message into the accessor (if needed) can be done in the future.
Thanks
>
>> @@ -2094,7 +2157,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
>> last_avail_idx = vq->last_avail_idx;
>>
>> if (vq->avail_idx == vq->last_avail_idx) {
>> - if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) {
>> + if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
>> vq_err(vq, "Failed to access avail idx at %p\n",
>> &vq->avail->idx);
>> return -EFAULT;
> Same here.
>
>> @@ -2121,8 +2184,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
>>
>> /* Grab the next descriptor number they're advertising, and increment
>> * the index we've seen. */
>> - if (unlikely(vhost_get_avail(vq, ring_head,
>> - &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
>> + if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
>> vq_err(vq, "Failed to read head: idx %d address %p\n",
>> last_avail_idx,
>> &vq->avail->ring[last_avail_idx % vq->num]);
>> @@ -2157,8 +2219,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
>> i, vq->num, head);
>> return -EINVAL;
>> }
>> - ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
>> - sizeof desc);
>> + ret = vhost_get_desc(vq, &desc, i);
>> if (unlikely(ret)) {
>> vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
>> i, vq->desc + i);
>> @@ -2251,7 +2312,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
>>
>> start = vq->last_used_idx & (vq->num - 1);
>> used = vq->used->ring + start;
>> - if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
>> + if (vhost_put_used(vq, heads, start, count)) {
>> vq_err(vq, "Failed to write used");
>> return -EFAULT;
>> }
>> @@ -2293,8 +2354,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
>>
>> /* Make sure buffer is written before we update index. */
>> smp_wmb();
>> - if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
>> - &vq->used->idx)) {
>> + if (vhost_put_used_idx(vq)) {
>> vq_err(vq, "Failed to increment used idx");
>> return -EFAULT;
>> }
>> @@ -2327,7 +2387,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>>
>> if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
>> __virtio16 flags;
>> - if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
>> + if (vhost_get_avail_flags(vq, &flags)) {
>> vq_err(vq, "Failed to get flags");
>> return true;
>> }
>> @@ -2341,7 +2401,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>> if (unlikely(!v))
>> return true;
>>
>> - if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
>> + if (vhost_get_used_event(vq, &event)) {
>> vq_err(vq, "Failed to get used event idx");
>> return true;
>> }
>> @@ -2386,7 +2446,7 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>> if (vq->avail_idx != vq->last_avail_idx)
>> return false;
>>
>> - r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
>> + r = vhost_get_avail_idx(vq, &avail_idx);
>> if (unlikely(r))
>> return false;
>> vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
>> @@ -2422,7 +2482,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>> /* They could have slipped one in as we were doing that: make
>> * sure it's written, then check again. */
>> smp_mb();
>> - r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
>> + r = vhost_get_avail_idx(vq, &avail_idx);
>> if (r) {
>> vq_err(vq, "Failed to check avail idx at %p: %d\n",
>> &vq->avail->idx, r);
>> --
>> 1.8.3.1
>>
Powered by blists - more mailing lists