Message-ID: <202006240426.OxB1Ov3k%lkp@intel.com>
Date: Wed, 24 Jun 2020 04:12:19 +0800
From: kernel test robot <lkp@...el.com>
To: Jacob Pan <jacob.jun.pan@...ux.intel.com>,
iommu@...ts.linux-foundation.org,
LKML <linux-kernel@...r.kernel.org>,
Lu Baolu <baolu.lu@...ux.intel.com>,
Joerg Roedel <joro@...tes.org>,
David Woodhouse <dwmw2@...radead.org>
Cc: kbuild-all@...ts.01.org, Yi Liu <yi.l.liu@...el.com>,
"Tian, Kevin" <kevin.tian@...el.com>,
Raj Ashok <ashok.raj@...el.com>,
Eric Auger <eric.auger@...hat.com>,
Jacob Pan <jacob.jun.pan@...ux.intel.com>
Subject: Re: [PATCH 5/7] iommu/vt-d: Fix devTLB flush for vSVA

Hi Jacob,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on iommu/next]
[also build test WARNING on linux/master linus/master v5.8-rc2 next-20200623]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Jacob-Pan/iommu-vt-d-Misc-tweaks-and-fixes-for-vSVA/20200623-233905
base: https://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git next
config: i386-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-13) 9.3.0
reproduce (this is a W=1 build):
        # save the attached .config to the linux build tree
        make W=1 ARCH=i386
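
For a quicker local check, one possible sequence is sketched below; the
paths are illustrative and it assumes the attached config has been saved
at the top of the kernel tree as .config:

        # illustrative path; adjust to wherever the attachment was saved
        cp /path/to/attached/.config .config
        make ARCH=i386 olddefconfig
        # rebuild only the affected object with the extra warnings enabled
        make W=1 ARCH=i386 drivers/iommu/intel/iommu.o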
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@...el.com>
All warnings (new ones prefixed by >>):
drivers/iommu/intel/iommu.c: In function 'intel_iommu_sva_invalidate':
>> drivers/iommu/intel/iommu.c:5420:7: warning: variable 'addr' set but not used [-Wunused-but-set-variable]
5420 | u64 addr = 0;
| ^~~~
vim +/addr +5420 drivers/iommu/intel/iommu.c
  5370
  5371  #ifdef CONFIG_INTEL_IOMMU_SVM
  5372  static int
  5373  intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
  5374                             struct iommu_cache_invalidate_info *inv_info)
  5375  {
  5376          struct dmar_domain *dmar_domain = to_dmar_domain(domain);
  5377          struct device_domain_info *info;
  5378          struct intel_iommu *iommu;
  5379          unsigned long flags;
  5380          int cache_type;
  5381          u8 bus, devfn;
  5382          u16 did, sid;
  5383          int ret = 0;
  5384          u64 size = 0;
  5385
  5386          if (!inv_info || !dmar_domain ||
  5387              inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
  5388                  return -EINVAL;
  5389
  5390          if (!dev || !dev_is_pci(dev))
  5391                  return -ENODEV;
  5392
  5393          iommu = device_to_iommu(dev, &bus, &devfn);
  5394          if (!iommu)
  5395                  return -ENODEV;
  5396
  5397          if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
  5398                  return -EINVAL;
  5399
  5400          spin_lock_irqsave(&device_domain_lock, flags);
  5401          spin_lock(&iommu->lock);
  5402          info = get_domain_info(dev);
  5403          if (!info) {
  5404                  ret = -EINVAL;
  5405                  goto out_unlock;
  5406          }
  5407          did = dmar_domain->iommu_did[iommu->seq_id];
  5408          sid = PCI_DEVID(bus, devfn);
  5409
  5410          /* Size is only valid in address selective invalidation */
  5411          if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
  5412                  size = to_vtd_size(inv_info->addr_info.granule_size,
  5413                                     inv_info->addr_info.nb_granules);
  5414
  5415          for_each_set_bit(cache_type,
  5416                           (unsigned long *)&inv_info->cache,
  5417                           IOMMU_CACHE_INV_TYPE_NR) {
  5418                  int granu = 0;
  5419                  u64 pasid = 0;
> 5420                  u64 addr = 0;
  5421
  5422                  granu = to_vtd_granularity(cache_type, inv_info->granularity);
  5423                  if (granu == -EINVAL) {
  5424                          pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
  5425                                             cache_type, inv_info->granularity);
  5426                          break;
  5427                  }
  5428
  5429                  /*
  5430                   * PASID is stored in different locations based on the
  5431                   * granularity.
  5432                   */
  5433                  if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
  5434                      (inv_info->pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
  5435                          pasid = inv_info->pasid_info.pasid;
  5436                  else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
  5437                      (inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
  5438                          pasid = inv_info->addr_info.pasid;
  5439
  5440                  switch (BIT(cache_type)) {
  5441                  case IOMMU_CACHE_INV_TYPE_IOTLB:
  5442                          if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
  5443                              size &&
  5444                              (inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
  5445                                  pr_err_ratelimited("Address out of range, 0x%llx, size order %llu\n",
  5446                                                     inv_info->addr_info.addr, size);
  5447                                  ret = -ERANGE;
  5448                                  goto out_unlock;
  5449                          }
  5450
  5451                          /*
  5452                           * If granu is PASID-selective, address is ignored.
  5453                           * We use npages = -1 to indicate that.
  5454                           */
  5455                          qi_flush_piotlb(iommu, did, pasid,
  5456                                          mm_to_dma_pfn(inv_info->addr_info.addr),
  5457                                          (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
  5458                                          inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
  5459
  5460                          if (!info->ats_enabled)
  5461                                  break;
  5462                          /*
  5463                           * Always flush device IOTLB if ATS is enabled. vIOMMU
  5464                           * in the guest may assume IOTLB flush is inclusive,
  5465                           * which is more efficient.
  5466                           */
  5467                          fallthrough;
  5468                  case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
  5469                          /*
  5470                           * There is no PASID selective flush for device TLB, so
  5471                           * the equivalent of that is we set the size to be the
  5472                           * entire range of 64 bit. User only provides PASID info
  5473                           * without address info. So we set addr to 0.
  5474                           */
  5475                          if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
  5476                                  size = 64 - VTD_PAGE_SHIFT;
  5477                                  addr = 0;
  5478                          } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
  5479                                  addr = inv_info->addr_info.addr;
  5480
  5481                          if (info->ats_enabled)
  5482                                  qi_flush_dev_iotlb_pasid(iommu, sid,
  5483                                                  info->pfsid, pasid,
  5484                                                  info->ats_qdep,
  5485                                                  inv_info->addr_info.addr,
  5486                                                  size);
  5487                          else
  5488                                  pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
  5489                          break;
  5490                  default:
  5491                          dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
  5492                                              cache_type);
  5493                          ret = -EINVAL;
  5494                  }
  5495          }
  5496  out_unlock:
  5497          spin_unlock(&iommu->lock);
  5498          spin_unlock_irqrestore(&device_domain_lock, flags);
  5499
  5500          return ret;
  5501  }
  5502  #endif
  5503
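
For what it's worth, the diagnostic points at the IOMMU_CACHE_INV_TYPE_DEV_IOTLB
path: addr is computed at lines 5475-5479 but the call at line 5485 still passes
inv_info->addr_info.addr directly, so the local variable is never read. A minimal
sketch of one way the call site could use the local variable instead (illustrative
only, not necessarily the intended fix) is:

                        /*
                         * Sketch: pass the locally computed addr, which the
                         * code above sets to 0 for PASID-selective
                         * invalidation and to the user-supplied address
                         * otherwise, rather than reading addr_info.addr
                         * unconditionally.
                         */
                        if (info->ats_enabled)
                                qi_flush_dev_iotlb_pasid(iommu, sid,
                                                info->pfsid, pasid,
                                                info->ats_qdep, addr,
                                                size);
                        else
                                pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");

With a change along those lines, addr would be consumed and the
-Wunused-but-set-variable warning should no longer trigger.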
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org