Message-ID: <3afd0747-d058-61b6-7818-f3c6993ef728@redhat.com>
Date:   Fri, 9 Oct 2020 11:49:31 +0800
From:   Jason Wang <jasowang@...hat.com>
To:     Eugenio Perez Martin <eperezma@...hat.com>
Cc:     Michael Tsirkin <mst@...hat.com>, Cindy Lu <lulu@...hat.com>,
        kvm list <kvm@...r.kernel.org>,
        virtualization@...ts.linux-foundation.org, netdev@...r.kernel.org,
        linux-kernel@...r.kernel.org, Rob Miller <rob.miller@...adcom.com>,
        lingshan.zhu@...el.com, Harpreet Singh Anand <hanand@...inx.com>,
        mhabets@...arflare.com, eli@...lanox.com,
        Adrian Moreno Zapata <amorenoz@...hat.com>,
        Maxime Coquelin <maxime.coquelin@...hat.com>,
        Stefan Hajnoczi <stefanha@...hat.com>,
        Stefano Garzarella <sgarzare@...hat.com>
Subject: Re: [RFC PATCH 08/24] vdpa: introduce virtqueue groups


On 2020/9/28 11:44 PM, Eugenio Perez Martin wrote:
> On Thu, Sep 24, 2020 at 5:23 AM Jason Wang <jasowang@...hat.com> wrote:
>> This patch introduces virtqueue groups to vDPA devices. A virtqueue
>> group is the minimal set of virtqueues that must share an address
>> space, and an address space identifier can only be attached to a
>> specific virtqueue group.
>>
>> A new mandatory bus operation is introduced to get the virtqueue group
>> ID for a specific virtqueue.
>>
>> All the vDPA device drivers were converted to simply support a single
>> virtqueue group.
>>
>> Signed-off-by: Jason Wang <jasowang@...hat.com>
>> ---
>>   drivers/vdpa/ifcvf/ifcvf_main.c   |  9 ++++++++-
>>   drivers/vdpa/mlx5/net/mlx5_vnet.c |  8 +++++++-
>>   drivers/vdpa/vdpa.c               |  4 +++-
>>   drivers/vdpa/vdpa_sim/vdpa_sim.c  | 11 ++++++++++-
>>   include/linux/vdpa.h              | 12 +++++++++---
>>   5 files changed, 37 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
>> index 076d7ac5e723..e6a0be374e51 100644
>> --- a/drivers/vdpa/ifcvf/ifcvf_main.c
>> +++ b/drivers/vdpa/ifcvf/ifcvf_main.c
>> @@ -327,6 +327,11 @@ static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
>>          return IFCVF_QUEUE_ALIGNMENT;
>>   }
>>
>> +static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
>> +{
>> +       return 0;
>> +}
>> +
>>   static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
>>                                    unsigned int offset,
>>                                    void *buf, unsigned int len)
>> @@ -387,6 +392,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
>>          .get_device_id  = ifcvf_vdpa_get_device_id,
>>          .get_vendor_id  = ifcvf_vdpa_get_vendor_id,
>>          .get_vq_align   = ifcvf_vdpa_get_vq_align,
>> +       .get_vq_group   = ifcvf_vdpa_get_vq_group,
>>          .get_config     = ifcvf_vdpa_get_config,
>>          .set_config     = ifcvf_vdpa_set_config,
>>          .set_config_cb  = ifcvf_vdpa_set_config_cb,
>> @@ -434,7 +440,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
>>
>>          adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
>>                                      dev, &ifc_vdpa_ops,
>> -                                   IFCVF_MAX_QUEUE_PAIRS * 2);
>> +                                   IFCVF_MAX_QUEUE_PAIRS * 2, 1);
>> +
>>          if (adapter == NULL) {
>>                  IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
>>                  return -ENOMEM;
>> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> index 9df69d5efe8c..4e480f4f754e 100644
>> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> @@ -1428,6 +1428,11 @@ static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
>>          return PAGE_SIZE;
>>   }
>>
>> +static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
>> +{
>> +       return 0;
>> +}
>> +
>>   enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
>>          MLX5_VIRTIO_NET_F_CSUM = 1 << 10,
>>          MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11,
>> @@ -1838,6 +1843,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
>>          .get_vq_notification = mlx5_get_vq_notification,
>>          .get_vq_irq = mlx5_get_vq_irq,
>>          .get_vq_align = mlx5_vdpa_get_vq_align,
>> +       .get_vq_group = mlx5_vdpa_get_vq_group,
>>          .get_features = mlx5_vdpa_get_features,
>>          .set_features = mlx5_vdpa_set_features,
>>          .set_config_cb = mlx5_vdpa_set_config_cb,
>> @@ -1925,7 +1931,7 @@ void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev)
>>          max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
>>
>>          ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
>> -                                2 * mlx5_vdpa_max_qps(max_vqs));
>> +                                2 * mlx5_vdpa_max_qps(max_vqs), 1);
>>          if (IS_ERR(ndev))
>>                  return ndev;
>>
>> diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
>> index a69ffc991e13..46399746ec7c 100644
>> --- a/drivers/vdpa/vdpa.c
>> +++ b/drivers/vdpa/vdpa.c
>> @@ -62,6 +62,7 @@ static void vdpa_release_dev(struct device *d)
>>    * @parent: the parent device
>>    * @config: the bus operations that is supported by this device
>>    * @nvqs: number of virtqueues supported by this device
>> + * @ngroups: number of groups supported by this device
> Hi!
>
> Maybe the description of "ngroups" could be "number of *virtqueue*
> groups supported by this device"? I think that would help in some
> contexts when reading the code.


Exactly.

Will fix.

Thanks


>
> Thanks!
>
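For readers following the series in the archive, below is a minimal sketch of
what a driver that actually exposes more than one virtqueue group could look
like on top of this patch. The demo_* names, the queue layout, and the choice
to put the control virtqueue in its own group are illustrative assumptions,
not part of this series; only the extra ngroups argument to vdpa_alloc_device()
and the get_vq_group() callback come from the patch above.

/*
 * Hypothetical vDPA driver exposing two virtqueue groups:
 * group 0 for the data virtqueues, group 1 for the control virtqueue.
 * All demo_* identifiers are illustrative, not from this series.
 */
#include <linux/err.h>
#include <linux/vdpa.h>

#define DEMO_CTRL_VQ    2	/* index of the control virtqueue */
#define DEMO_NUM_VQS    3	/* one rx/tx pair plus the control vq */
#define DEMO_NUM_GROUPS 2

struct demo_adapter {
	struct vdpa_device vdpa;	/* embedded device, allocated by the macro */
};

static u32 demo_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/*
	 * The control virtqueue lives in its own group, so a separate
	 * address space identifier could later be attached to it.
	 */
	return idx == DEMO_CTRL_VQ ? 1 : 0;
}

static const struct vdpa_config_ops demo_vdpa_ops = {
	/* ... the other mandatory config ops are elided here ... */
	.get_vq_group = demo_get_vq_group,
};

static int demo_probe(struct device *parent)
{
	struct demo_adapter *adapter;

	/* Per this patch, the group count is passed at allocation time. */
	adapter = vdpa_alloc_device(struct demo_adapter, vdpa, parent,
				    &demo_vdpa_ops, DEMO_NUM_VQS,
				    DEMO_NUM_GROUPS);
	if (IS_ERR(adapter))
		return PTR_ERR(adapter);

	return vdpa_register_device(&adapter->vdpa);
}

With such a split, a later patch in the series could bind group 1 to a
different address space identifier so the control virtqueue is translated
independently of the data path.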
