Message-ID: <20201023113939.GB2265982@myrica>
Date:   Fri, 23 Oct 2020 13:39:39 +0200
From:   Jean-Philippe Brucker <jean-philippe@...aro.org>
To:     Jacob Pan <jacob.pan.linux@...il.com>
Cc:     iommu@...ts.linux-foundation.org,
        LKML <linux-kernel@...r.kernel.org>,
        Joerg Roedel <joro@...tes.org>,
        Alex Williamson <alex.williamson@...hat.com>,
        Lu Baolu <baolu.lu@...ux.intel.com>,
        David Woodhouse <dwmw2@...radead.org>,
        Jonathan Corbet <corbet@....net>, linux-api@...r.kernel.org,
        Jean-Philippe Brucker <jean-philippe@...aro.com>,
        Eric Auger <eric.auger@...hat.com>,
        Jacob Pan <jacob.jun.pan@...ux.intel.com>,
        Yi Liu <yi.l.liu@...el.com>,
        "Tian, Kevin" <kevin.tian@...el.com>,
        Raj Ashok <ashok.raj@...el.com>, Wu Hao <hao.wu@...el.com>,
        Yi Sun <yi.y.sun@...el.com>, Dave Jiang <dave.jiang@...el.com>,
        Randy Dunlap <rdunlap@...radead.org>
Subject: Re: [PATCH v3 09/14] iommu/ioasid: Introduce ioasid_set private ID

On Mon, Sep 28, 2020 at 02:38:36PM -0700, Jacob Pan wrote:
> When an IOASID set is used for guest SVA, each VM will acquire its
> ioasid_set for IOASID allocations. IOASIDs within the VM must have
> host/physical IOASID backing, and the mapping between guest and host
> IOASIDs can be non-identical. This patch introduces the IOASID set
> private ID (SPID) to be used as the guest IOASID. However, the concept
> of an ioasid_set-specific namespace is generic, hence the name SPID.
> 
> As the SPID namespace is within the IOASID set, the IOASID core can
> provide lookup services in both directions. An SPID may not be
> available when its IOASID is allocated; the mapping between SPID and
> IOASID is usually established when a guest page table is bound to a
> host PASID.
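For my own understanding, roughly the usage flow I read into this
(vm_set, guest_pasid and the bind/unbind sequence are my assumptions,
not taken from the patch):

	ioasid_t pasid;

	/* Host PASID allocated from the VM's ioasid_set (placeholder range) */
	pasid = ioasid_alloc(vm_set, 1, 1000, NULL);

	/* When the guest binds a page table, record the guest PASID as SPID */
	ioasid_attach_spid(pasid, guest_pasid);

	/* Later, translate the guest PASID back to the host one */
	pasid = ioasid_find_by_spid(vm_set, guest_pasid);

	/* ioasid_find_by_spid() took a reference, drop it when done */
	ioasid_put(vm_set, pasid);

	/* On unbind, drop the SPID mapping */
	ioasid_detach_spid(pasid);

Please correct me if that's not the intended use.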
> 
> Signed-off-by: Jacob Pan <jacob.jun.pan@...ux.intel.com>
> ---
>  drivers/iommu/ioasid.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++++
>  include/linux/ioasid.h |  19 +++++++++
>  2 files changed, 121 insertions(+)
> 
> diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
> index 828cc44b1b1c..378fef8f23d9 100644
> --- a/drivers/iommu/ioasid.c
> +++ b/drivers/iommu/ioasid.c
> @@ -26,6 +26,7 @@ enum ioasid_state {
>   * struct ioasid_data - Meta data about ioasid
>   *
>   * @id:		Unique ID
> + * @spid:	Private ID unique within a set
>   * @users:	Number of active users
>   * @state:	Track state of the IOASID
>   * @set:	ioasid_set of the IOASID belongs to
> @@ -34,6 +35,7 @@ enum ioasid_state {
>   */
>  struct ioasid_data {
>  	ioasid_t id;
> +	ioasid_t spid;
>  	refcount_t users;
>  	enum ioasid_state state;
>  	struct ioasid_set *set;
> @@ -363,6 +365,105 @@ void ioasid_detach_data(ioasid_t ioasid)
>  }
>  EXPORT_SYMBOL_GPL(ioasid_detach_data);
>  
> +static ioasid_t ioasid_find_by_spid_locked(struct ioasid_set *set, ioasid_t spid)
> +{
> +	ioasid_t ioasid = INVALID_IOASID;
> +	struct ioasid_data *entry;
> +	unsigned long index;
> +
> +	if (!xa_load(&ioasid_sets, set->id)) {
> +		pr_warn("Invalid set\n");

Could use ioasid_set_is_valid(), and perhaps a WARN_ON() instead of
pr_warn() since this is a programming error.

> +		goto done;

Or just return INVALID_IOASID
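i.e. something like:

	if (WARN_ON(!ioasid_set_is_valid(set)))
		return INVALID_IOASID;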

> +	}
> +
> +	xa_for_each(&set->xa, index, entry) {
> +		if (spid == entry->spid) {
> +			refcount_inc(&entry->users);
> +			ioasid = index;

break

> +		}
> +	}
> +done:
> +	return ioasid;
> +}
> +
> +/**
> + * ioasid_attach_spid - Attach ioasid_set private ID to an IOASID
> + *
> + * @ioasid: the system-wide IOASID to attach
> + * @spid:   the ioasid_set private ID of @ioasid
> + *
> + * After attching SPID, future lookup can be done via ioasid_find_by_spid().

            attaching

> + */
> +int ioasid_attach_spid(ioasid_t ioasid, ioasid_t spid)
> +{
> +	struct ioasid_data *data;
> +	int ret = 0;
> +
> +	if (spid == INVALID_IOASID)
> +		return -EINVAL;
> +
> +	spin_lock(&ioasid_allocator_lock);
> +	data = xa_load(&active_allocator->xa, ioasid);
> +
> +	if (!data) {
> +		pr_err("No IOASID entry %d to attach SPID %d\n",
> +			ioasid, spid);
> +		ret = -ENOENT;
> +		goto done_unlock;
> +	}
> +	/* Check if SPID is unique within the set */
> +	if (ioasid_find_by_spid_locked(data->set, spid) != INVALID_IOASID) {

We need an additional parameter to ioasid_find_by_spid_locked(), telling
it not to take a reference to the conflicting entry. Here we return with
the reference, which will never be released.
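Something like this maybe, also folding in the comments above (the
'get' flag name is just a suggestion):

static ioasid_t ioasid_find_by_spid_locked(struct ioasid_set *set,
					   ioasid_t spid, bool get)
{
	ioasid_t ioasid = INVALID_IOASID;
	struct ioasid_data *entry;
	unsigned long index;

	if (WARN_ON(!ioasid_set_is_valid(set)))
		return INVALID_IOASID;

	xa_for_each(&set->xa, index, entry) {
		if (spid == entry->spid) {
			/* Only take a reference when the caller wants one */
			if (get)
				refcount_inc(&entry->users);
			ioasid = index;
			break;
		}
	}
	return ioasid;
}

and the duplicate check here becomes:

	/* Check for a conflict without leaking a reference */
	if (ioasid_find_by_spid_locked(data->set, spid, false) != INVALID_IOASID) {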

> +		ret = -EINVAL;
> +		goto done_unlock;
> +	}
> +	data->spid = spid;
> +
> +done_unlock:
> +	spin_unlock(&ioasid_allocator_lock);
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(ioasid_attach_spid);
> +
> +void ioasid_detach_spid(ioasid_t ioasid)

Could add a small comment to this public function
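For example (wording only a suggestion):

/**
 * ioasid_detach_spid - Remove the set private ID from an IOASID
 *
 * @ioasid: the system-wide IOASID whose SPID is cleared
 *
 * After detaching, the IOASID can no longer be found via
 * ioasid_find_by_spid().
 */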

Thanks,
Jean

> +{
> +	struct ioasid_data *data;
> +
> +	spin_lock(&ioasid_allocator_lock);
> +	data = xa_load(&active_allocator->xa, ioasid);
> +
> +	if (!data || data->spid == INVALID_IOASID) {
> +		pr_err("Invalid IOASID entry %d to detach\n", ioasid);
> +		goto done_unlock;
> +	}
> +	data->spid = INVALID_IOASID;
> +
> +done_unlock:
> +	spin_unlock(&ioasid_allocator_lock);
> +}
> +EXPORT_SYMBOL_GPL(ioasid_detach_spid);
> +
> +/**
> + * ioasid_find_by_spid - Find the system-wide IOASID by a set private ID and
> + * its set.
> + *
> + * @set:	the ioasid_set to search within
> + * @spid:	the set private ID
> + *
> + * Given a set private ID and its IOASID set, find the system-wide IOASID. Take
> + * a reference upon finding the matching IOASID. Return INVALID_IOASID if the
> + * IOASID is not found in the set or the set is not valid.
> + */
> +ioasid_t ioasid_find_by_spid(struct ioasid_set *set, ioasid_t spid)
> +{
> +	ioasid_t ioasid;
> +
> +	spin_lock(&ioasid_allocator_lock);
> +	ioasid = ioasid_find_by_spid_locked(set, spid);
> +	spin_unlock(&ioasid_allocator_lock);
> +	return ioasid;
> +}
> +EXPORT_SYMBOL_GPL(ioasid_find_by_spid);
> +
>  static inline bool ioasid_set_is_valid(struct ioasid_set *set)
>  {
>  	return xa_load(&ioasid_sets, set->id) == set;
> @@ -529,6 +630,7 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
>  		goto exit_free;
>  	}
>  	data->id = id;
> +	data->spid = INVALID_IOASID;
>  	data->state = IOASID_STATE_ACTIVE;
>  	refcount_set(&data->users, 1);
>  
> diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h
> index 16d421357173..2dfe85e6cb7e 100644
> --- a/include/linux/ioasid.h
> +++ b/include/linux/ioasid.h
> @@ -79,6 +79,10 @@ bool ioasid_put(struct ioasid_set *set, ioasid_t ioasid);
>  bool ioasid_put_locked(struct ioasid_set *set, ioasid_t ioasid);
>  int ioasid_attach_data(ioasid_t ioasid, void *data);
>  void ioasid_detach_data(ioasid_t ioasid);
> +int ioasid_attach_spid(ioasid_t ioasid, ioasid_t spid);
> +void ioasid_detach_spid(ioasid_t ioasid);
> +ioasid_t ioasid_find_by_spid(struct ioasid_set *set, ioasid_t spid);
> +
>  void ioasid_set_for_each_ioasid(struct ioasid_set *sdata,
>  				void (*fn)(ioasid_t id, void *data),
>  				void *data);
> @@ -159,6 +163,21 @@ static inline void ioasid_detach_data(ioasid_t ioasid)
>  {
>  }
>  
> +static inline int ioasid_attach_spid(ioasid_t ioasid, ioasid_t spid)
> +{
> +	return -ENOTSUPP;
> +}
> +
> +static inline void ioasid_detach_spid(ioasid_t ioasid)
> +{
> +}
> +
> +static inline ioasid_t ioasid_find_by_spid(struct ioasid_set *set,
> +					   ioasid_t spid)
> +{
> +	return INVALID_IOASID;
> +}
> +
>  static inline void ioasid_set_for_each_ioasid(struct ioasid_set *sdata,
>  					      void (*fn)(ioasid_t id, void *data),
>  					      void *data)
> -- 
> 2.7.4
> 
