Message-ID: <0da83c5c-0e35-4087-aacf-6831060cab8e@linux.intel.com>
Date: Mon, 19 Feb 2024 14:54:22 +0800
From: Ethan Zhao <haifeng.zhao@...ux.intel.com>
To: Lu Baolu <baolu.lu@...ux.intel.com>, Joerg Roedel <joro@...tes.org>,
 Will Deacon <will@...nel.org>, Robin Murphy <robin.murphy@....com>,
 Jason Gunthorpe <jgg@...pe.ca>, Kevin Tian <kevin.tian@...el.com>
Cc: Huang Jiaqing <jiaqing.huang@...el.com>, iommu@...ts.linux.dev,
 linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/2] iommu/vt-d: Use device rbtree in iopf reporting path

On 2/15/2024 3:22 PM, Lu Baolu wrote:
> The existing IO page fault handler currently locates the PCI device by
> calling pci_get_domain_bus_and_slot(). This function searches the list
> of all PCI devices until the desired device is found. To improve lookup
> efficiency, a helper function named device_rbtree_find() is introduced
> to search for the device within the rbtree. Replace
> pci_get_domain_bus_and_slot() in the IO page fault handling path.
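(For context: the 16-bit source ID packs the bus number into bits 15:8 and the
devfn into bits 7:0, which the old path had to split apart before linearly
scanning every PCI device, while the new helper keys the rbtree on the RID
directly. A rough sketch of the two lookups side by side, not part of the
patch itself:

	/* Old: decompose the RID, then walk the global PCI device list. */
	pdev = pci_get_domain_bus_and_slot(iommu->segment,
					   PCI_BUS_NUM(req->rid), /* bus, bits 15:8 */
					   req->rid & 0xff);      /* devfn, bits 7:0 */

	/* New: O(log n) rbtree lookup keyed on the 16-bit RID itself. */
	dev = device_rbtree_find(iommu, req->rid);
)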
>
> Co-developed-by: Huang Jiaqing <jiaqing.huang@...el.com>
> Signed-off-by: Huang Jiaqing <jiaqing.huang@...el.com>
> Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
> ---
>   drivers/iommu/intel/iommu.h |  1 +
>   drivers/iommu/intel/iommu.c | 29 +++++++++++++++++++++++++++++
>   drivers/iommu/intel/svm.c   | 14 ++++++--------
>   3 files changed, 36 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
> index 54eeaa8e35a9..f13c228924f8 100644
> --- a/drivers/iommu/intel/iommu.h
> +++ b/drivers/iommu/intel/iommu.h
> @@ -1081,6 +1081,7 @@ void free_pgtable_page(void *vaddr);
>   void iommu_flush_write_buffer(struct intel_iommu *iommu);
>   struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
>   					       const struct iommu_user_data *user_data);
> +struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid);
>   
>   #ifdef CONFIG_INTEL_IOMMU_SVM
>   void intel_svm_check(struct intel_iommu *iommu);
> diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
> index 09009d96e553..d92c680bcc96 100644
> --- a/drivers/iommu/intel/iommu.c
> +++ b/drivers/iommu/intel/iommu.c
> @@ -120,6 +120,35 @@ static int device_rid_cmp(struct rb_node *lhs, const struct rb_node *rhs)
>   	return device_rid_cmp_key(&key, rhs);
>   }
>   
> +/*
> + * Looks up an IOMMU-probed device using its source ID.
> + *
> + * If the device is found:
> + *  - Increments its reference count.
> + *  - Returns a pointer to the device.
> + *  - The caller must call put_device() after using the pointer.
> + *
> + * If the device is not found, returns NULL.
> + */
> +struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid)
> +{
> +	struct device_domain_info *info;
> +	struct device *dev = NULL;
> +	struct rb_node *node;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&iommu->device_rbtree_lock, flags);

The per-IOMMU device rbtree isn't a big tree, but given that the lookup
already holds the spinlock, why does it also need interrupts off?
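If, say, the rbtree is never touched from hardirq context, a plain
spin_lock() would seem to be enough -- purely a hypothetical alternative,
assuming no hardirq-path callers:

	spin_lock(&iommu->device_rbtree_lock);
	node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
	...
	spin_unlock(&iommu->device_rbtree_lock);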


Thanks,
Ethan

> +	node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
> +	if (node) {
> +		info = rb_entry(node, struct device_domain_info, node);
> +		dev = info->dev;
> +		get_device(dev);
> +	}
> +	spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
> +
> +	return dev;
> +}
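(The comment above spells out the refcount contract; a hypothetical caller
would look like:

	dev = device_rbtree_find(iommu, rid);
	if (dev) {
		/* ... use dev while holding the reference ... */
		put_device(dev);
	}

which is exactly the pattern the svm.c hunk below follows.)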
> +
>   static int device_rbtree_insert(struct intel_iommu *iommu,
>   				struct device_domain_info *info)
>   {
> diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
> index b644d57da841..717b7041973c 100644
> --- a/drivers/iommu/intel/svm.c
> +++ b/drivers/iommu/intel/svm.c
> @@ -645,7 +645,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
>   	struct intel_iommu *iommu = d;
>   	struct page_req_dsc *req;
>   	int head, tail, handled;
> -	struct pci_dev *pdev;
> +	struct device *dev;
>   	u64 address;
>   
>   	/*
> @@ -691,21 +691,19 @@ static irqreturn_t prq_event_thread(int irq, void *d)
>   		if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
>   			goto prq_advance;
>   
> -		pdev = pci_get_domain_bus_and_slot(iommu->segment,
> -						   PCI_BUS_NUM(req->rid),
> -						   req->rid & 0xff);
>   		/*
>   		 * If prq is to be handled outside iommu driver via receiver of
>   		 * the fault notifiers, we skip the page response here.
>   		 */
> -		if (!pdev)
> +		dev = device_rbtree_find(iommu, req->rid);
> +		if (!dev)
>   			goto bad_req;
>   
> -		intel_svm_prq_report(iommu, &pdev->dev, req);
> -		trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
> +		intel_svm_prq_report(iommu, dev, req);
> +		trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
>   				 req->priv_data[0], req->priv_data[1],
>   				 iommu->prq_seq_number++);
> -		pci_dev_put(pdev);
> +		put_device(dev);
>   prq_advance:
>   		head = (head + sizeof(*req)) & PRQ_RING_MASK;
>   	}
