Date: Tue, 23 Apr 2019 16:31:18 -0700
From: Jacob Pan <jacob.jun.pan@...ux.intel.com>
To: iommu@...ts.linux-foundation.org,
	LKML <linux-kernel@...r.kernel.org>,
	Joerg Roedel <joro@...tes.org>,
	David Woodhouse <dwmw2@...radead.org>,
	Eric Auger <eric.auger@...hat.com>,
	Alex Williamson <alex.williamson@...hat.com>,
	Jean-Philippe Brucker <jean-philippe.brucker@....com>
Cc: "Yi Liu" <yi.l.liu@...el.com>,
	"Tian, Kevin" <kevin.tian@...el.com>,
	Raj Ashok <ashok.raj@...el.com>,
	"Christoph Hellwig" <hch@...radead.org>,
	"Lu Baolu" <baolu.lu@...ux.intel.com>,
	Andriy Shevchenko <andriy.shevchenko@...ux.intel.com>,
	Jacob Pan <jacob.jun.pan@...ux.intel.com>
Subject: [PATCH v2 18/19] iommu/vt-d: Support flushing more translation cache types

When Shared Virtual Memory is exposed to a guest via vIOMMU, extended
IOTLB invalidation may be passed down from outside the IOMMU subsystem.
This patch adds invalidation functions that can be used for the
additional translation cache types.

Signed-off-by: Jacob Pan <jacob.jun.pan@...ux.intel.com>
---
 drivers/iommu/dmar.c        | 48 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/intel-iommu.h | 21 ++++++++++++++++----
 2 files changed, 65 insertions(+), 4 deletions(-)

diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 9c49300..680894e 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1357,6 +1357,20 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	qi_submit_sync(&desc, iommu);
 }
 
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u64 addr, u32 pasid,
+		unsigned int size_order, u64 granu)
+{
+	struct qi_desc desc;
+
+	desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
+		QI_EIOTLB_GRAN(granu) | QI_EIOTLB_TYPE;
+	desc.qw1 = QI_EIOTLB_ADDR(addr) | QI_EIOTLB_IH(0) |
+		QI_EIOTLB_AM(size_order);
+	desc.qw2 = 0;
+	desc.qw3 = 0;
+	qi_submit_sync(&desc, iommu);
+}
+
 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 			u16 qdep, u64 addr, unsigned mask)
 {
@@ -1380,6 +1394,40 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 	qi_submit_sync(&desc, iommu);
 }
 
+void qi_flush_dev_piotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+		u32 pasid, u16 qdep, u64 addr, unsigned size, u64 granu)
+{
+	struct qi_desc desc = {};
+
+	desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
+		QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
+		QI_DEV_IOTLB_PFSID(pfsid);
+	desc.qw1 = QI_DEV_EIOTLB_GLOB(granu);
+
+	/* If S bit is 0, we only flush a single page. If S bit is set,
+	 * the least significant zero bit indicates the size. VT-d spec
+	 * 6.5.2.6
+	 */
+	if (!size)
+		desc.qw1 |= QI_DEV_EIOTLB_ADDR(addr) & ~QI_DEV_EIOTLB_SIZE;
+	else {
+		u64 mask = GENMASK_ULL(VTD_PAGE_SHIFT + size - 1, VTD_PAGE_SHIFT);
+
+		desc.qw1 |= QI_DEV_EIOTLB_ADDR((addr & ~mask) | (mask >> 1)) | QI_DEV_EIOTLB_SIZE;
+	}
+	qi_submit_sync(&desc, iommu);
+}
+
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, int pasid)
+{
+	struct qi_desc desc;
+
+	desc.qw0 = QI_PC_TYPE | QI_PC_DID(did) | QI_PC_GRAN(granu) | QI_PC_PASID(pasid);
+	desc.qw1 = 0;
+	desc.qw2 = 0;
+	desc.qw3 = 0;
+	qi_submit_sync(&desc, iommu);
+}
 /*
  * Disable Queued Invalidation interface.
 */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 5d67d0d4..38e5efb 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -339,7 +339,7 @@ enum {
 #define QI_IOTLB_GRAN(gran)	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
 #define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
 #define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
-#define QI_IOTLB_AM(am)		(((u8)am))
+#define QI_IOTLB_AM(am)		(((u8)am) & 0x3f)
 
 #define QI_CC_FM(fm)		(((u64)fm) << 48)
 #define QI_CC_SID(sid)		(((u64)sid) << 32)
@@ -357,17 +357,22 @@ enum {
 #define QI_PC_DID(did)		(((u64)did) << 16)
 #define QI_PC_GRAN(gran)	(((u64)gran) << 4)
 
-#define QI_PC_ALL_PASIDS	(QI_PC_TYPE | QI_PC_GRAN(0))
-#define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))
+/* PASID cache invalidation granu */
+#define QI_PC_ALL_PASIDS	0
+#define QI_PC_PASID_SEL		1
 
 #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
 #define QI_EIOTLB_GL(gl)	(((u64)gl) << 7)
 #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
-#define QI_EIOTLB_AM(am)	(((u64)am))
+#define QI_EIOTLB_AM(am)	(((u64)am) & 0x3f)
 #define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
 #define QI_EIOTLB_DID(did)	(((u64)did) << 16)
 #define QI_EIOTLB_GRAN(gran)	(((u64)gran) << 4)
 
+/* QI Dev-IOTLB inv granu */
+#define QI_DEV_IOTLB_GRAN_ALL		1
+#define QI_DEV_IOTLB_GRAN_PASID_SEL	0
+
 #define QI_DEV_EIOTLB_ADDR(a)	((u64)(a) & VTD_PAGE_MASK)
 #define QI_DEV_EIOTLB_SIZE	(((u64)1) << 11)
 #define QI_DEV_EIOTLB_GLOB(g)	((u64)g)
@@ -658,8 +663,16 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
 			     u8 fm, u64 type);
 extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			  unsigned int size_order, u64 type);
+extern void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+			u32 pasid, unsigned int size_order, u64 granu);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 			       u16 qdep, u64 addr, unsigned mask);
+
+extern void qi_flush_dev_piotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+			u32 pasid, u16 qdep, u64 addr, unsigned size, u64 granu);
+
+extern void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, int pasid);
+
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern int dmar_ir_support(void);
-- 
2.7.4
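
For context, here is a minimal caller sketch showing how the three new
helpers fit together when servicing a guest-initiated invalidation. It
is illustration only, not part of the patch: the function
example_flush_guest_pasid() and every ID, queue depth, and address in
it are made-up placeholders, while QI_GRAN_PSI_PASID is the
page-selective-within-PASID granularity that
include/linux/intel-iommu.h already defines.

#include <linux/intel-iommu.h>
#include <linux/pci.h>

/* Hypothetical caller, for illustration only: flush one 4KB page for a
 * guest-owned PASID, then drop the PASID cache entry on teardown.
 */
static void example_flush_guest_pasid(struct intel_iommu *iommu)
{
	u16 did = 1;				/* placeholder domain ID */
	u16 sid = PCI_DEVID(0, PCI_DEVFN(2, 0));	/* placeholder requester ID */
	u16 pfsid = sid;			/* PF source ID (no VFs assumed) */
	u16 qdep = 16;				/* placeholder dev inv queue depth */
	u32 pasid = 0x20;			/* placeholder PASID */
	u64 addr = 0x7f0000000000ULL;		/* placeholder I/O virtual address */

	/* PASID-granular IOTLB flush: size_order 0 means a single page,
	 * QI_GRAN_PSI_PASID means page-selective within the PASID.
	 */
	qi_flush_piotlb(iommu, did, addr, pasid, 0, QI_GRAN_PSI_PASID);

	/* Matching device TLB (ATS) flush for the same PASID and page */
	qi_flush_dev_piotlb(iommu, sid, pfsid, pasid, qdep, addr, 0,
			    QI_DEV_IOTLB_GRAN_PASID_SEL);

	/* On PASID teardown, invalidate its PASID cache entry as well */
	qi_flush_pasid_cache(iommu, did, QI_PC_PASID_SEL, pasid);
}

The ordering follows what existing qi_flush_dev_iotlb() callers already
do: flush the IOTLB first, then the device TLB that may cache
translations behind it.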
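
The address/size encoding that qi_flush_dev_piotlb() builds for the S
bit comes from VT-d spec 6.5.2.6: to invalidate 2^size pages, address
bits [VTD_PAGE_SHIFT, VTD_PAGE_SHIFT + size - 2] are set, bit
[VTD_PAGE_SHIFT + size - 1] is cleared, and the S bit (bit 11) is set,
so the least significant zero bit marks the range. Below is a
standalone sketch of that arithmetic, assuming VTD_PAGE_SHIFT is 12 as
on x86; it is plain userspace C for illustration, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT	12
#define DEV_IOTLB_S_BIT	(1ULL << 11)	/* S bit of the address field */

/* Encode a 2^size_order page invalidation range at addr: set bits
 * [12, 12+size_order-2], clear bit [12+size_order-1], so the least
 * significant 0 bit indicates the size (VT-d 6.5.2.6).
 */
static uint64_t encode_dev_iotlb_range(uint64_t addr, unsigned int size_order)
{
	uint64_t page = addr & ~((1ULL << VTD_PAGE_SHIFT) - 1);

	if (!size_order)
		return page;	/* S bit clear: single 4KB page */

	page &= ~(((1ULL << size_order) - 1) << VTD_PAGE_SHIFT);	/* align down */
	page |= ((1ULL << (size_order - 1)) - 1) << VTD_PAGE_SHIFT;	/* low 1s */
	return page | DEV_IOTLB_S_BIT;
}

int main(void)
{
	/* An 8-page (32KB) range containing address 0x12345000 */
	printf("%#llx\n",
	       (unsigned long long)encode_dev_iotlb_range(0x12345000ULL, 3));
	return 0;
}

This prints 0x12343800: bits 12 and 13 set, bit 14 clear, S bit set,
i.e. an 8-page range based at 0x12340000, the same value the
GENMASK_ULL-based masking in qi_flush_dev_piotlb() above produces.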