lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Wed, 12 Dec 2018 18:27:50 +0100
From:   Ard Biesheuvel <ard.biesheuvel@...aro.org>
To:     Julien Thierry <julien.thierry@....com>
Cc:     linux-arm-kernel <linux-arm-kernel@...ts.infradead.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Daniel Thompson <daniel.thompson@...aro.org>,
        joel@...lfernandes.org, Marc Zyngier <marc.zyngier@....com>,
        Christoffer Dall <christoffer.dall@....com>,
        James Morse <james.morse@....com>,
        Catalin Marinas <catalin.marinas@....com>,
        Will Deacon <will.deacon@....com>,
        Mark Rutland <mark.rutland@....com>, oleg@...hat.com
Subject: Re: [PATCH v7 11/25] arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking

On Wed, 12 Dec 2018 at 17:48, Julien Thierry <julien.thierry@....com> wrote:
>
> Instead of disabling interrupts by setting the PSR.I bit, use a priority
> higher than the one used for interrupts to mask them via PMR.
>
> When using PMR to disable interrupts, the value of PMR will be used
> instead of PSR.[DAIF] for the irqflags.
>
> Signed-off-by: Julien Thierry <julien.thierry@....com>
> Suggested-by: Daniel Thompson <daniel.thompson@...aro.org>
> Cc: Catalin Marinas <catalin.marinas@....com>
> Cc: Will Deacon <will.deacon@....com>
> Cc: Ard Biesheuvel <ard.biesheuvel@...aro.org>
> Cc: Oleg Nesterov <oleg@...hat.com>
> ---
>  arch/arm64/include/asm/efi.h      |   5 +-
>  arch/arm64/include/asm/irqflags.h | 123 +++++++++++++++++++++++++++++---------
>  2 files changed, 99 insertions(+), 29 deletions(-)
>
> diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
> index 7ed3208..a9d3ebc 100644
> --- a/arch/arm64/include/asm/efi.h
> +++ b/arch/arm64/include/asm/efi.h
> @@ -42,7 +42,10 @@
>
>  efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
>
> -#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
> +#define ARCH_EFI_IRQ_FLAGS_MASK                                                \
> +       (system_uses_irq_prio_masking() ?                               \
> +               GIC_PRIO_IRQON :                                        \
> +               (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT))
>

This mask is used to determine whether we return from a firmware call
with a different value for the I flag than we entered it with. So
instead of changing the mask, we should change the way we record DAIF,
given that the firmware is still going to poke the I bit if it
misbehaves, regardless of whether the OS happens to use priorities for
interrupt masking.

It also means that NMIs are a best-effort thing only, given that
uncooperative firmware could prevent them from being delivered.



>  /* arch specific definitions used by the stub code */
>
> diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
> index 24692ed..fa3b06f 100644
> --- a/arch/arm64/include/asm/irqflags.h
> +++ b/arch/arm64/include/asm/irqflags.h
> @@ -18,7 +18,9 @@
>
>  #ifdef __KERNEL__
>
> +#include <asm/alternative.h>
>  #include <asm/ptrace.h>
> +#include <asm/sysreg.h>
>
>  /*
>   * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
> @@ -36,47 +38,96 @@
>  /*
>   * CPU interrupt mask handling.
>   */
> -static inline unsigned long arch_local_irq_save(void)
> -{
> -       unsigned long flags;
> -       asm volatile(
> -               "mrs    %0, daif                // arch_local_irq_save\n"
> -               "msr    daifset, #2"
> -               : "=r" (flags)
> -               :
> -               : "memory");
> -       return flags;
> -}
> -
>  static inline void arch_local_irq_enable(void)
>  {
> -       asm volatile(
> -               "msr    daifclr, #2             // arch_local_irq_enable"
> -               :
> +       unsigned long unmasked = GIC_PRIO_IRQON;
> +
> +       asm volatile(ALTERNATIVE(
> +               "msr    daifclr, #2             // arch_local_irq_enable\n"
> +               "nop",
> +               "msr_s  " __stringify(SYS_ICC_PMR_EL1) ",%0\n"
> +               "dsb    sy",
> +               ARM64_HAS_IRQ_PRIO_MASKING)
>                 :
> +               : "r" (unmasked)
>                 : "memory");
>  }
>
>  static inline void arch_local_irq_disable(void)
>  {
> -       asm volatile(
> -               "msr    daifset, #2             // arch_local_irq_disable"
> -               :
> +       unsigned long masked = GIC_PRIO_IRQOFF;
> +
> +       asm volatile(ALTERNATIVE(
> +               "msr    daifset, #2             // arch_local_irq_disable",
> +               "msr_s  " __stringify(SYS_ICC_PMR_EL1) ", %0",
> +               ARM64_HAS_IRQ_PRIO_MASKING)
>                 :
> +               : "r" (masked)
>                 : "memory");
>  }
>
>  /*
> + * Having two ways to control interrupt status is a bit complicated. Some
> + * locations like exception entries will have PSR.I bit set by the architecture
> + * while PMR is unmasked.
> + * We need the irqflags to represent that interrupts are disabled in such cases.
> + *
> + * For this, we lower the value read from PMR when the I bit is set so it is
> + * considered as an irq masking priority. (With PMR, lower value means masking
> + * more interrupts).
> + */
> +#define _get_irqflags(daif_bits, pmr)                                  \
> +({                                                                     \
> +       unsigned long flags;                                            \
> +                                                                       \
> +       BUILD_BUG_ON(GIC_PRIO_IRQOFF < (GIC_PRIO_IRQON & ~PSR_I_BIT));  \
> +       asm volatile(ALTERNATIVE(                                       \
> +               "mov    %0, %1\n"                                       \
> +               "nop\n"                                                 \
> +               "nop",                                                  \
> +               "and    %0, %1, #" __stringify(PSR_I_BIT) "\n"          \
> +               "mvn    %0, %0\n"                                       \
> +               "and    %0, %0, %2",                                    \
> +               ARM64_HAS_IRQ_PRIO_MASKING)                             \
> +               : "=&r" (flags)                                         \
> +               : "r" (daif_bits), "r" (pmr)                            \
> +               : "memory");                                            \
> +                                                                       \
> +       flags;                                                          \
> +})
> +
> +/*
>   * Save the current interrupt enable state.
>   */
>  static inline unsigned long arch_local_save_flags(void)
>  {
> -       unsigned long flags;
> -       asm volatile(
> -               "mrs    %0, daif                // arch_local_save_flags"
> -               : "=r" (flags)
> +       unsigned long daif_bits;
> +       unsigned long pmr; // Only used if alternative is on
> +
> +       daif_bits = read_sysreg(daif);
> +
> +       // Get PMR
> +       asm volatile(ALTERNATIVE(
> +                       "nop",
> +                       "mrs_s  %0, " __stringify(SYS_ICC_PMR_EL1),
> +                       ARM64_HAS_IRQ_PRIO_MASKING)
> +               : "=&r" (pmr)
>                 :
>                 : "memory");
> +
> +       return _get_irqflags(daif_bits, pmr);
> +}
> +
> +#undef _get_irqflags
> +
> +static inline unsigned long arch_local_irq_save(void)
> +{
> +       unsigned long flags;
> +
> +       flags = arch_local_save_flags();
> +
> +       arch_local_irq_disable();
> +
>         return flags;
>  }
>
> @@ -85,16 +136,32 @@ static inline unsigned long arch_local_save_flags(void)
>   */
>  static inline void arch_local_irq_restore(unsigned long flags)
>  {
> -       asm volatile(
> -               "msr    daif, %0                // arch_local_irq_restore"
> -       :
> -       : "r" (flags)
> -       : "memory");
> +       asm volatile(ALTERNATIVE(
> +                       "msr    daif, %0\n"
> +                       "nop",
> +                       "msr_s  " __stringify(SYS_ICC_PMR_EL1) ", %0\n"
> +                       "dsb    sy",
> +                       ARM64_HAS_IRQ_PRIO_MASKING)
> +               : "+r" (flags)
> +               :
> +               : "memory");
>  }
>
>  static inline int arch_irqs_disabled_flags(unsigned long flags)
>  {
> -       return flags & PSR_I_BIT;
> +       int res;
> +
> +       asm volatile(ALTERNATIVE(
> +                       "and    %w0, %w1, #" __stringify(PSR_I_BIT) "\n"
> +                       "nop",
> +                       "cmp    %w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
> +                       "cset   %w0, ls",
> +                       ARM64_HAS_IRQ_PRIO_MASKING)
> +               : "=&r" (res)
> +               : "r" ((int) flags)
> +               : "memory");
> +
> +       return res;
>  }
>  #endif
>  #endif
> --
> 1.9.1
>

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ