Date:   Tue, 18 Oct 2022 14:00:38 -0700
From:   Dave Hansen <dave.hansen@...el.com>
To:     "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        Andy Lutomirski <luto@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>
Cc:     x86@...nel.org, Kostya Serebryany <kcc@...gle.com>,
        Andrey Ryabinin <ryabinin.a.a@...il.com>,
        Andrey Konovalov <andreyknvl@...il.com>,
        Alexander Potapenko <glider@...gle.com>,
        Taras Madan <tarasmadan@...gle.com>,
        Dmitry Vyukov <dvyukov@...gle.com>,
        "H . J . Lu" <hjl.tools@...il.com>,
        Andi Kleen <ak@...ux.intel.com>,
        Rick Edgecombe <rick.p.edgecombe@...el.com>,
        Bharata B Rao <bharata@....com>,
        Jacob Pan <jacob.jun.pan@...ux.intel.com>,
        Ashok Raj <ashok.raj@...el.com>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCHv10 10/15] x86/mm, iommu/sva: Make LAM and SVM mutually
 exclusive

On 10/18/22 04:33, Kirill A. Shutemov wrote:
> IOMMU and SVM-capable devices know nothing about LAM and only expect
> canonical addresses. Attempt to pass down tagged pointer will lead to

		      ^ An attempt...

> address translation failure.
> 
> By default do not allow to enable both LAM and use SVM in the same
> process.
> 
> The new ARCH_FORCE_TAGGED_SVM arch_prctl() overrides the limitation.
> By using the arch_prctl() userspace takes responsibility to never pass
> tagged address to the device.
> 
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
> ---
>  arch/x86/include/asm/mmu.h         |  6 ++++--
>  arch/x86/include/asm/mmu_context.h |  2 ++
>  arch/x86/include/uapi/asm/prctl.h  |  1 +
>  arch/x86/kernel/process_64.c       | 13 +++++++++++++
>  drivers/iommu/iommu-sva-lib.c      | 12 ++++++++++++
>  include/linux/mmu_context.h        |  4 ++++
>  6 files changed, 36 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
> index 2fdb390040b5..cce9b32b0d6d 100644
> --- a/arch/x86/include/asm/mmu.h
> +++ b/arch/x86/include/asm/mmu.h
> @@ -9,9 +9,11 @@
>  #include <linux/bits.h>
>  
>  /* Uprobes on this MM assume 32-bit code */
> -#define MM_CONTEXT_UPROBE_IA32	BIT(0)
> +#define MM_CONTEXT_UPROBE_IA32		BIT(0)
>  /* vsyscall page is accessible on this MM */
> -#define MM_CONTEXT_HAS_VSYSCALL	BIT(1)
> +#define MM_CONTEXT_HAS_VSYSCALL		BIT(1)
> +/* Allow LAM and SVM coexisting */
> +#define MM_CONTEXT_FORCE_TAGGED_SVM	BIT(2)
>  
>  /*
>   * x86 has arch-specific MMU state beyond what lives in mm_struct.
> diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
> index b0e9ea23758b..6b9ac2c60cec 100644
> --- a/arch/x86/include/asm/mmu_context.h
> +++ b/arch/x86/include/asm/mmu_context.h
> @@ -113,6 +113,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
>  	mm->context.untag_mask = -1UL;
>  }
>  
> +#define arch_pgtable_dma_compat(mm)	\
> +	(!mm_lam_cr3_mask(mm) || (mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM))
>  #else

This needs to be a 'static inline' unless there's a compelling and
documented reason that it can't be.
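
Something along these lines, perhaps (untested sketch, reusing the helper and
flag names from this patch):

static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
{
	/* Devices only see untagged addresses if LAM is off or userspace opted in. */
	return !mm_lam_cr3_mask(mm) ||
	       (mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM);
}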

>  static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
> diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
> index a31e27b95b19..7bd22defb558 100644
> --- a/arch/x86/include/uapi/asm/prctl.h
> +++ b/arch/x86/include/uapi/asm/prctl.h
> @@ -23,5 +23,6 @@
>  #define ARCH_GET_UNTAG_MASK		0x4001
>  #define ARCH_ENABLE_TAGGED_ADDR		0x4002
>  #define ARCH_GET_MAX_TAG_BITS		0x4003
> +#define ARCH_FORCE_TAGGED_SVM		0x4004
>  
>  #endif /* _ASM_X86_PRCTL_H */
> diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
> index 9952e9f517ec..8faa8774bb93 100644
> --- a/arch/x86/kernel/process_64.c
> +++ b/arch/x86/kernel/process_64.c
> @@ -783,6 +783,13 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
>  		goto out;
>  	}
>  
> +#ifdef CONFIG_IOMMU_SVA
> +	if (pasid_valid(mm->pasid) &&
> +	    !(mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM)) {
> +		ret = -EBUSY;
> +		goto out;
> +	}
> +#endif

Is this #ifdef really necessary?  CONFIG_IOMMU_SVA selects IOASID,
without which pasid_valid() is just stubbed out to 0.
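
In other words, the bare check should build on all configs, something like
(untested; relies on that stub returning 0 when CONFIG_IOASID is off):

	if (pasid_valid(mm->pasid) &&
	    !(mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM)) {
		ret = -EBUSY;
		goto out;
	}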


>  	if (!nr_bits) {
>  		ret = -EINVAL;
>  		goto out;
> @@ -893,6 +900,12 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
>  				(unsigned long __user *)arg2);
>  	case ARCH_ENABLE_TAGGED_ADDR:
>  		return prctl_enable_tagged_addr(task->mm, arg2);
> +	case ARCH_FORCE_TAGGED_SVM:
> +		if (mmap_write_lock_killable(task->mm))
> +			return -EINTR;
> +		task->mm->context.flags |= MM_CONTEXT_FORCE_TAGGED_SVM;
> +		mmap_write_unlock(task->mm);
> +		return 0;
>  	case ARCH_GET_MAX_TAG_BITS:
>  		if (!cpu_feature_enabled(X86_FEATURE_LAM))
>  			return put_user(0, (unsigned long __user *)arg2);
> diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
> index 106506143896..593ae2472e2c 100644
> --- a/drivers/iommu/iommu-sva-lib.c
> +++ b/drivers/iommu/iommu-sva-lib.c
> @@ -2,6 +2,8 @@
>  /*
>   * Helpers for IOMMU drivers implementing SVA
>   */
> +#include <linux/mm.h>
> +#include <linux/mmu_context.h>
>  #include <linux/mutex.h>
>  #include <linux/sched/mm.h>
>  
> @@ -31,6 +33,15 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
>  	    min == 0 || max < min)
>  		return -EINVAL;
>  
> +	/* Serialize against address tagging enabling */
> +	if (mmap_write_lock_killable(mm))
> +		return -EINTR;
> +
> +	if (!arch_pgtable_dma_compat(mm)) {
> +		mmap_write_unlock(mm);
> +		return -EBUSY;
> +	}
> +
>  	mutex_lock(&iommu_sva_lock);
>  	/* Is a PASID already associated with this mm? */
>  	if (pasid_valid(mm->pasid)) {
> @@ -46,6 +57,7 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
>  		mm_pasid_set(mm, pasid);
>  out:
>  	mutex_unlock(&iommu_sva_lock);
> +	mmap_write_unlock(mm);
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
> diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
> index b9b970f7ab45..115e2b518079 100644
> --- a/include/linux/mmu_context.h
> +++ b/include/linux/mmu_context.h
> @@ -28,4 +28,8 @@ static inline void leave_mm(int cpu) { }
>  # define task_cpu_possible(cpu, p)	cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
>  #endif
>  
> +#ifndef arch_pgtable_dma_compat
> +#define arch_pgtable_dma_compat(mm)	true
> +#endif
> +
>  #endif
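
FWIW, here is roughly how I read the intended userspace flow (minimal,
untested sketch; the ARCH_* values are copied from the uapi change above and
are not in any released header, and 6 tag bits is assumed to correspond to
LAM_U57 in this series):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Values from this series' arch/x86/include/uapi/asm/prctl.h change. */
#define ARCH_ENABLE_TAGGED_ADDR	0x4002
#define ARCH_FORCE_TAGGED_SVM	0x4004

int main(void)
{
	/* Promise to never pass tagged pointers to an SVM-capable device. */
	if (syscall(SYS_arch_prctl, ARCH_FORCE_TAGGED_SVM, 0))
		perror("ARCH_FORCE_TAGGED_SVM");

	/* Enable LAM with 6 tag bits; SVA binds remain allowed afterwards. */
	if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 6))
		perror("ARCH_ENABLE_TAGGED_ADDR");

	return 0;
}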
