Message-ID: <814df96d-8796-44f6-d5e5-09f36862c497@arm.com>
Date: Thu, 30 Sep 2021 14:47:54 +0530
From: Vivek Kumar Gautam <vivek.gautam@....com>
To: Jean-Philippe Brucker <jean-philippe@...aro.org>
Cc: linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
iommu@...ts.linux-foundation.org,
virtualization@...ts.linux-foundation.org, joro@...tes.org,
will.deacon@....com, mst@...hat.com, robin.murphy@....com,
eric.auger@...hat.com, kevin.tian@...el.com,
jacob.jun.pan@...ux.intel.com, yi.l.liu@...el.com,
Lorenzo.Pieralisi@....com, shameerali.kolothum.thodi@...wei.com
Subject: Re: [PATCH RFC v1 02/11] iommu/virtio: Maintain a list of endpoints
served by viommu_dev
On 9/21/21 9:29 PM, Jean-Philippe Brucker wrote:
> On Fri, Apr 23, 2021 at 03:21:38PM +0530, Vivek Gautam wrote:
>> Keeping a record of the endpoints served by the virtio-iommu device
>> helps redirect page fault requests to the correct endpoint device for
>> handling.
>>
>> Signed-off-by: Vivek Gautam <vivek.gautam@....com>
>> ---
>> drivers/iommu/virtio-iommu.c | 21 +++++++++++++++++++++
>> 1 file changed, 21 insertions(+)
>>
>> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
>> index 50039070e2aa..c970f386f031 100644
>> --- a/drivers/iommu/virtio-iommu.c
>> +++ b/drivers/iommu/virtio-iommu.c
>> @@ -48,6 +48,7 @@ struct viommu_dev {
>> spinlock_t request_lock;
>> struct list_head requests;
>> void *evts;
>> + struct list_head endpoints;
>
> As we're going to search by ID, an xarray or rb_tree would be more
> appropriate than a list.
Sure, I will update this with an rb_tree.
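
Something like the below, perhaps - an untested sketch of the lookup and
insert helpers, assuming viommu->endpoints becomes a struct rb_root and
viommu_endpoint grows an eid and an rb_node (helper names are made up):

static struct viommu_endpoint *
viommu_find_endpoint(struct viommu_dev *viommu, u32 eid)
{
	struct rb_node *node = viommu->endpoints.rb_node;

	while (node) {
		struct viommu_endpoint *vdev =
			rb_entry(node, struct viommu_endpoint, node);

		if (eid < vdev->eid)
			node = node->rb_left;
		else if (eid > vdev->eid)
			node = node->rb_right;
		else
			return vdev;
	}
	return NULL;
}

static int viommu_insert_endpoint(struct viommu_dev *viommu,
				  struct viommu_endpoint *vdev)
{
	struct rb_node **new = &viommu->endpoints.rb_node, *parent = NULL;

	while (*new) {
		struct viommu_endpoint *this =
			rb_entry(*new, struct viommu_endpoint, node);

		parent = *new;
		if (vdev->eid < this->eid)
			new = &(*new)->rb_left;
		else if (vdev->eid > this->eid)
			new = &(*new)->rb_right;
		else
			return -EEXIST;	/* endpoint already registered */
	}

	rb_link_node(&vdev->node, parent, new);
	rb_insert_color(&vdev->node, &viommu->endpoints);
	return 0;
}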
>
>>
>> /* Device configuration */
>> struct iommu_domain_geometry geometry;
>> @@ -115,6 +116,12 @@ struct viommu_endpoint {
>> void *pgtf;
>> };
>>
>> +struct viommu_ep_entry {
>> + u32 eid;
>> + struct viommu_endpoint *vdev;
>> + struct list_head list;
>> +};
>
> No need for a separate struct, I think you can just add the list head and
> id into viommu_endpoint.
Yeah, right. I will update it.
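
So viommu_endpoint would just gain the ID and the tree linkage directly,
along these lines (sketch only, existing fields elided):

struct viommu_endpoint {
	/* ... existing fields ... */
	u32			eid;
	struct rb_node		node;	/* indexed by eid in viommu->endpoints */
};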
>
>> +
>> struct viommu_request {
>> struct list_head list;
>> void *writeback;
>> @@ -573,6 +580,7 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
>> size_t probe_len;
>> struct virtio_iommu_req_probe *probe;
>> struct virtio_iommu_probe_property *prop;
>> + struct viommu_ep_entry *ep;
>> struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
>> struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
>>
>> @@ -640,6 +648,18 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
>> prop = (void *)probe->properties + cur;
>> type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
>> }
>> + if (ret)
>> + goto out_free;
>> +
>> + ep = kzalloc(sizeof(*ep), GFP_KERNEL);
>> + if (!ep) {
>> + ret = -ENOMEM;
>> + goto out_free;
>> + }
>> + ep->eid = probe->endpoint;
>> + ep->vdev = vdev;
>> +
>> + list_add(&ep->list, &viommu->endpoints);
>
> This should be in viommu_probe_device() (viommu_probe_endpoint() is only
> called if F_PROBE is negotiated). I think we need a lock for this
> list/xarray.
Sure, I will fix this and add the needed locking around it.
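
I am thinking of something along these lines in viommu_probe_device(), so
that the endpoint is also registered when F_PROBE is not negotiated
(untested; the mutex name is made up, and I am only taking the first
fwspec ID here for simplicity):

	/* in viommu_probe_device(), once vdev is initialized */
	vdev->eid = fwspec->ids[0];

	mutex_lock(&viommu->endpoints_mutex);
	ret = viommu_insert_endpoint(viommu, vdev);
	mutex_unlock(&viommu->endpoints_mutex);
	if (ret)
		goto err_free_dev;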
Thanks & regards
Vivek
>
> Thanks,
> Jean
>
>>
>> out_free:
>> kfree(probe);
>> @@ -1649,6 +1669,7 @@ static int viommu_probe(struct virtio_device *vdev)
>> viommu->dev = dev;
>> viommu->vdev = vdev;
>> INIT_LIST_HEAD(&viommu->requests);
>> + INIT_LIST_HEAD(&viommu->endpoints);
>>
>> ret = viommu_init_vqs(viommu);
>> if (ret)
>> --
>> 2.17.1
>>