Date:   Wed, 20 Mar 2019 09:37:27 -0700
From:   Jacob Pan <jacob.jun.pan@...ux.intel.com>
To:     Eric Auger <eric.auger@...hat.com>
Cc:     eric.auger.pro@...il.com, iommu@...ts.linux-foundation.org,
        linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
        kvmarm@...ts.cs.columbia.edu, joro@...tes.org,
        alex.williamson@...hat.com, yi.l.liu@...ux.intel.com,
        jean-philippe.brucker@....com, will.deacon@....com,
        robin.murphy@....com, kevin.tian@...el.com, ashok.raj@...el.com,
        marc.zyngier@....com, christoffer.dall@....com,
        peter.maydell@...aro.org, vincent.stehle@....com,
        jacob.jun.pan@...ux.intel.com
Subject: Re: [PATCH v6 05/22] iommu: Introduce cache_invalidate API

On Sun, 17 Mar 2019 18:22:15 +0100
Eric Auger <eric.auger@...hat.com> wrote:

> From: "Liu, Yi L" <yi.l.liu@...ux.intel.com>
> 
> In any virtualization use case, when the first translation stage
> is "owned" by the guest OS, the host IOMMU driver has no knowledge
> of caching structure updates unless the guest invalidation activities
> are trapped by the virtualizer and passed down to the host.
> 
> Since the invalidation data are obtained from user space and will be
> written into the physical IOMMU, we must allow security checks at
> various layers. Therefore, a generic invalidation data format is
> proposed here; model-specific IOMMU drivers need to convert it into
> their own format.
> 
> Signed-off-by: Liu, Yi L <yi.l.liu@...ux.intel.com>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@....com>
> Signed-off-by: Jacob Pan <jacob.jun.pan@...ux.intel.com>
> Signed-off-by: Ashok Raj <ashok.raj@...el.com>
> Signed-off-by: Eric Auger <eric.auger@...hat.com>
> 
> ---
> v5 -> v6:
> - fix merge issue
> 
> v3 -> v4:
> - full reshape of the API following Alex' comments
> 
> v1 -> v2:
> - add arch_id field
> - renamed tlb_invalidate into cache_invalidate as this API allows
>   context caches to be invalidated on top of IOTLBs
> 
> v1:
> renamed sva_invalidate into tlb_invalidate and added the iommu_ prefix
> in the header. Commit message reworded.
> ---
>  drivers/iommu/iommu.c      | 14 ++++++++
>  include/linux/iommu.h      | 15 ++++++++
>  include/uapi/linux/iommu.h | 71 ++++++++++++++++++++++++++++++++++++++
>  3 files changed, 100 insertions(+)
> 
> diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
> index 7d9285cea100..b72e326ddd41 100644
> --- a/drivers/iommu/iommu.c
> +++ b/drivers/iommu/iommu.c
> @@ -1544,6 +1544,20 @@ void iommu_detach_pasid_table(struct iommu_domain *domain)
>  }
>  EXPORT_SYMBOL_GPL(iommu_detach_pasid_table);
>  
> +int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
> +			   struct iommu_cache_invalidate_info *inv_info)
> +{
> +	int ret = 0;
> +
> +	if (unlikely(!domain->ops->cache_invalidate))
> +		return -ENODEV;
> +
> +	ret = domain->ops->cache_invalidate(domain, dev, inv_info);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
> +
>  static void __iommu_detach_device(struct iommu_domain *domain,
>  				  struct device *dev)
>  {
> diff --git a/include/linux/iommu.h b/include/linux/iommu.h
> index fb9b7a8de25f..7c7c6bad1420 100644
> --- a/include/linux/iommu.h
> +++ b/include/linux/iommu.h
> @@ -191,6 +191,7 @@ struct iommu_resv_region {
>   *                      driver init to device driver init (default no)
>   * @attach_pasid_table: attach a pasid table
>   * @detach_pasid_table: detach the pasid table
> + * @cache_invalidate: invalidate translation caches
>   * @pgsize_bitmap: bitmap of all possible supported page sizes
>   */
>  struct iommu_ops {
> @@ -239,6 +240,9 @@ struct iommu_ops {
>  				  struct iommu_pasid_table_config *cfg);
>  	void (*detach_pasid_table)(struct iommu_domain *domain);
>  
> +	int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
> +				struct iommu_cache_invalidate_info *inv_info);
> +
>  	unsigned long pgsize_bitmap;
>  };
>  
> @@ -349,6 +353,9 @@ extern void iommu_detach_device(struct iommu_domain *domain,
>  extern int iommu_attach_pasid_table(struct iommu_domain *domain,
>  				    struct iommu_pasid_table_config *cfg);
>  extern void iommu_detach_pasid_table(struct iommu_domain *domain);
> +extern int iommu_cache_invalidate(struct iommu_domain *domain,
> +				  struct device *dev,
> +				  struct iommu_cache_invalidate_info *inv_info);
>  extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
>  extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
>  extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
> @@ -797,6 +804,14 @@ int iommu_attach_pasid_table(struct iommu_domain *domain,
>  static inline void iommu_detach_pasid_table(struct iommu_domain *domain) {}
>  
> +static inline int
> +iommu_cache_invalidate(struct iommu_domain *domain,
> +		       struct device *dev,
> +		       struct iommu_cache_invalidate_info *inv_info)
> +{
> +	return -ENODEV;
> +}
> +
>  #endif /* CONFIG_IOMMU_API */
>  
>  #ifdef CONFIG_IOMMU_DEBUGFS
> diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h
> index 532a64075f23..e4c6a447e85a 100644
> --- a/include/uapi/linux/iommu.h
> +++ b/include/uapi/linux/iommu.h
> @@ -159,4 +159,75 @@ struct iommu_pasid_table_config {
>  	};
>  };
>  
> +/* defines the granularity of the invalidation */
> +enum iommu_inv_granularity {
> +	IOMMU_INV_GRANU_DOMAIN,	/* domain-selective invalidation */
> +	IOMMU_INV_GRANU_PASID,	/* pasid-selective invalidation */
> +	IOMMU_INV_GRANU_ADDR,	/* page-selective invalidation */
> +};
> +
> +/**
> + * Address Selective Invalidation Structure
> + *
> + * @flags indicates the granularity of the address-selective invalidation
> + * - if PASID bit is set, @pasid field is populated and the invalidation
> + *   relates to cache entries tagged with this PASID and matching the
> + *   address range.
> + * - if ARCHID bit is set, @archid is populated and the invalidation relates
> + *   to cache entries tagged with this architecture specific id and matching
> + *   the address range.
> + * - Both PASID and ARCHID can be set as they may tag different caches.
> + * - if neither PASID nor ARCHID is set, global addr invalidation applies
> + * - LEAF flag indicates whether only the leaf PTE caching needs to be
> + *   invalidated and other paging structure caches can be preserved.
> + * @pasid: process address space id
> + * @archid: architecture-specific id
> + * @addr: first stage/level input address
> + * @granule_size: page/block size of the mapping in bytes
> + * @nb_granules: number of contiguous granules to be invalidated
> + */
> +struct iommu_inv_addr_info {
> +#define IOMMU_INV_ADDR_FLAGS_PASID	(1 << 0)
> +#define IOMMU_INV_ADDR_FLAGS_ARCHID	(1 << 1)
> +#define IOMMU_INV_ADDR_FLAGS_LEAF	(1 << 2)
> +	__u32	flags;
> +	__u32	archid;
> +	__u64	pasid;
> +	__u64	addr;
> +	__u64	granule_size;
> +	__u64	nb_granules;
> +};
> +
> +/**
> + * First level/stage invalidation information
> + * @cache: bitfield that allows to select which caches to invalidate
> + * @granularity: defines the lowest granularity used for the invalidation:
> + *     domain > pasid > addr
> + *
> + * Not all the combinations of cache/granularity make sense:
> + *
> + *         type |   DEV_IOTLB   |     IOTLB     |      PASID    |
> + * granularity	|		|		|      cache	|
> + * -------------+---------------+---------------+---------------+
> + * DOMAIN	|	N/A	|	Y	|	Y	|
> + * PASID	|	Y	|	Y	|	Y	|
> + * ADDR		|	Y	|	Y	|      N/A	|
> + */
> +struct iommu_cache_invalidate_info {
> +#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
> +	__u32	version;
> +/* IOMMU paging structure cache */
> +#define IOMMU_CACHE_INV_TYPE_IOTLB	(1 << 0) /* IOMMU IOTLB */
> +#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB	(1 << 1) /* Device IOTLB */
> +#define IOMMU_CACHE_INV_TYPE_PASID	(1 << 2) /* PASID cache */
Just a clarification: this used to be an enum. Do you intend to issue a
single invalidation request on multiple cache types, perhaps for
virtio-IOMMU? I only see a single cache type in your patch #14. For VT-d
we plan to issue one cache type at a time for now, so this format works
for us.
However, if multiple cache types are issued in a single invalidation,
they must share a single granularity, and not all combinations are
valid, e.g. the device IOTLB does not support domain granularity. Just a
reminder, not an issue; the driver could filter out invalid combinations.
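
For illustration only (not part of this series): a vendor driver's
cache_invalidate() callback could reject the combinations marked N/A in
the table above before touching hardware. The helper below is a made-up
sketch that only uses the constants defined in this patch:

static int check_inv_combination(struct iommu_cache_invalidate_info *info)
{
	/* the device IOTLB has no domain-selective invalidation */
	if ((info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB) &&
	    info->granularity == IOMMU_INV_GRANU_DOMAIN)
		return -EINVAL;

	/* the PASID cache is not address selective */
	if ((info->cache & IOMMU_CACHE_INV_TYPE_PASID) &&
	    info->granularity == IOMMU_INV_GRANU_ADDR)
		return -EINVAL;

	return 0;
}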

> +	__u8	cache;
> +	__u8	granularity;
> +	__u8	padding[2];
> +	union {
> +		__u64	pasid;
> +		struct iommu_inv_addr_info addr_info;
> +	};
> +};
> +
> +
>  #endif /* _UAPI_IOMMU_H */
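
As a usage sketch (again not part of the patch, and the values below are
invented for illustration): a caller such as the virtualizer-facing layer
would fill the uAPI structure and go through the new wrapper, e.g. to
invalidate IOTLB entries tagged with a PASID over 16 contiguous 4KiB
pages:

static int example_invalidate(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_cache_invalidate_info inv_info = {
		.version	= IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
		.cache		= IOMMU_CACHE_INV_TYPE_IOTLB,
		.granularity	= IOMMU_INV_GRANU_ADDR,
		.addr_info	= {
			.flags		= IOMMU_INV_ADDR_FLAGS_PASID,
			.pasid		= 1,		/* example PASID */
			.addr		= 0x100000,	/* example input address */
			.granule_size	= 0x1000,	/* 4KiB granule */
			.nb_granules	= 16,		/* 16 contiguous pages */
		},
	};

	/* returns -ENODEV when the driver does not implement the op */
	return iommu_cache_invalidate(domain, dev, &inv_info);
}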

[Jacob Pan]
