Date:   Tue, 31 Aug 2021 10:42:22 -0700
From:   Guenter Roeck <linux@...ck-us.net>
To:     Thomas Gleixner <tglx@...utronix.de>
Cc:     Xiaoming Ni <nixiaoming@...wei.com>, linux-kernel@...r.kernel.org,
        peterz@...radead.org, mingo@...hat.com, will@...nel.org,
        longman@...hat.com, boqun.feng@...il.com, wangle6@...wei.com,
        xiaoqian9@...wei.com, shaolexi@...wei.com,
        linux-acpi@...r.kernel.org,
        Lorenzo Pieralisi <lorenzo.pieralisi@....com>,
        Hanjun Guo <guohanjun@...wei.com>,
        Sudeep Holla <sudeep.holla@....com>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Subject: Re: [PATCH] semaphore: Add might_sleep() to down_*() family

On Tue, Aug 31, 2021 at 02:13:08PM +0200, Thomas Gleixner wrote:
> On Tue, Aug 31 2021 at 04:13, Guenter Roeck wrote:
> 
> > Hi,
> >
> > On Mon, Aug 09, 2021 at 10:12:15AM +0800, Xiaoming Ni wrote:
> >> Semaphore is a sleeping lock. Add might_sleep() to the down_*() family
> >> (with the exception of down_trylock()) to detect sleeping in atomic context.
> >> 
> >> Previously discussed with Peter Zijlstra, see link:
> >>  https://lore.kernel.org/lkml/20210806082320.GD22037@worktop.programming.kicks-ass.net
> >> 
> >> Signed-off-by: Xiaoming Ni <nixiaoming@...wei.com>
> >> Acked-by: Will Deacon <will@...nel.org>
> >
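[Context: a minimal sketch of the change under discussion, assuming the current
shape of kernel/locking/semaphore.c; illustrative, not the literal hunk. The
patch adds a might_sleep() annotation at the top of each sleeping down_*()
entry point, while down_trylock() is left untouched because it never sleeps:]

	void down(struct semaphore *sem)
	{
		unsigned long flags;

		might_sleep();	/* new: emits "BUG: sleeping function called
				 * from invalid context" when invoked from
				 * atomic context, even if the fast path below
				 * would not actually sleep */
		raw_spin_lock_irqsave(&sem->lock, flags);
		if (likely(sem->count > 0))
			sem->count--;
		else
			__down(sem);
		raw_spin_unlock_irqrestore(&sem->lock, flags);
	}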
> > This patch results in the following traceback on all arm64 boots with
> > EFI BIOS.
> 
> That's what this change was supposed to catch :)
> 
> > The problem is only seen with CONFIG_ACPI_PPTT=y, and thus only on arm64.
> 
> The below should fix this.
> 
> Thanks,
> 
>         tglx
> ---
> Subject: drivers: base: cacheinfo: Get rid of DEFINE_SMP_CALL_CACHE_FUNCTION()
> From: Thomas Gleixner <tglx@...utronix.de>
> Date: Tue, 31 Aug 2021 13:48:34 +0200
> 
> DEFINE_SMP_CALL_CACHE_FUNCTION() was useful before the CPU hotplug rework
> to ensure that the cache-related functions are called on the upcoming CPU
> because the notifier itself could run on any online CPU.
> 
> The hotplug state machine guarantees that the callbacks are invoked on the
> upcoming CPU. So there is no need to have this SMP function call
> obfuscation. That indirection was missed when the hotplug notifiers were
> converted.
> 
> This also solves the problem of ARM64's init_cache_level() invoking ACPI
> functions which take a semaphore in that context. That's invalid, as SMP
> function calls run with interrupts disabled. Running it directly from the
> callback, in the context of the CPU hotplug thread, solves this.
>  
> Reported-by: Guenter Roeck <linux@...ck-us.net>
> Fixes: 8571890e1513 ("arm64: Add support for ACPI based firmware tables")
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
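[Context: a sketch of the hotplug pattern the commit message relies on; the
names are hedged, and the registration shown (CPUHP_AP_ONLINE_DYN plus a
hypothetical cacheinfo_hotplug_init()) is illustrative rather than the actual
code in drivers/base/cacheinfo.c. AP "online" callbacks are run by the per-CPU
hotplug thread of the CPU that is coming up, so they already execute on the
right CPU and in preemptible context, which is why init_cache_level() and
populate_cache_leaves() can be called directly instead of going through
smp_call_function_single():]

	/*
	 * Runs on @cpu, in its hotplug thread: sleeping (e.g. on an ACPI
	 * semaphore during PPTT parsing) is legal here.
	 */
	static int cacheinfo_cpu_online(unsigned int cpu)
	{
		/* ends up calling init_cache_level()/populate_cache_leaves() */
		return detect_cache_attributes(cpu);
	}

	static int __init cacheinfo_hotplug_init(void)	/* hypothetical name */
	{
		return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
					 cacheinfo_cpu_online, NULL);
	}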

The warning is no longer seen with this patch applied on top of
v5.14-1100-gb91db6a0b52e, and I don't see any new problems on riscv,
x86/x86_64, or mips.

Tested-by: Guenter Roeck <linux@...ck-us.net>

Thanks,
Guenter

> ---
>  arch/arm64/kernel/cacheinfo.c   |    7 ++-----
>  arch/mips/kernel/cacheinfo.c    |    7 ++-----
>  arch/riscv/kernel/cacheinfo.c   |    7 ++-----
>  arch/x86/kernel/cpu/cacheinfo.c |    7 ++-----
>  include/linux/cacheinfo.h       |   18 ------------------
>  5 files changed, 8 insertions(+), 38 deletions(-)
> 
> --- a/arch/arm64/kernel/cacheinfo.c
> +++ b/arch/arm64/kernel/cacheinfo.c
> @@ -43,7 +43,7 @@ static void ci_leaf_init(struct cacheinf
>  	this_leaf->type = type;
>  }
>  
> -static int __init_cache_level(unsigned int cpu)
> +int init_cache_level(unsigned int cpu)
>  {
>  	unsigned int ctype, level, leaves, fw_level;
>  	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
> @@ -78,7 +78,7 @@ static int __init_cache_level(unsigned i
>  	return 0;
>  }
>  
> -static int __populate_cache_leaves(unsigned int cpu)
> +int populate_cache_leaves(unsigned int cpu)
>  {
>  	unsigned int level, idx;
>  	enum cache_type type;
> @@ -97,6 +97,3 @@ static int __populate_cache_leaves(unsig
>  	}
>  	return 0;
>  }
> -
> -DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
> -DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
> --- a/arch/mips/kernel/cacheinfo.c
> +++ b/arch/mips/kernel/cacheinfo.c
> @@ -17,7 +17,7 @@ do {								\
>  	leaf++;							\
>  } while (0)
>  
> -static int __init_cache_level(unsigned int cpu)
> +int init_cache_level(unsigned int cpu)
>  {
>  	struct cpuinfo_mips *c = &current_cpu_data;
>  	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
> @@ -74,7 +74,7 @@ static void fill_cpumask_cluster(int cpu
>  			cpumask_set_cpu(cpu1, cpu_map);
>  }
>  
> -static int __populate_cache_leaves(unsigned int cpu)
> +int populate_cache_leaves(unsigned int cpu)
>  {
>  	struct cpuinfo_mips *c = &current_cpu_data;
>  	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
> @@ -114,6 +114,3 @@ static int __populate_cache_leaves(unsig
>  
>  	return 0;
>  }
> -
> -DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
> -DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
> --- a/arch/riscv/kernel/cacheinfo.c
> +++ b/arch/riscv/kernel/cacheinfo.c
> @@ -113,7 +113,7 @@ static void fill_cacheinfo(struct cachei
>  	}
>  }
>  
> -static int __init_cache_level(unsigned int cpu)
> +int init_cache_level(unsigned int cpu)
>  {
>  	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
>  	struct device_node *np = of_cpu_device_node_get(cpu);
> @@ -155,7 +155,7 @@ static int __init_cache_level(unsigned i
>  	return 0;
>  }
>  
> -static int __populate_cache_leaves(unsigned int cpu)
> +int populate_cache_leaves(unsigned int cpu)
>  {
>  	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
>  	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
> @@ -187,6 +187,3 @@ static int __populate_cache_leaves(unsig
>  
>  	return 0;
>  }
> -
> -DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
> -DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
> --- a/arch/x86/kernel/cpu/cacheinfo.c
> +++ b/arch/x86/kernel/cpu/cacheinfo.c
> @@ -985,7 +985,7 @@ static void ci_leaf_init(struct cacheinf
>  	this_leaf->priv = base->nb;
>  }
>  
> -static int __init_cache_level(unsigned int cpu)
> +int init_cache_level(unsigned int cpu)
>  {
>  	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
>  
> @@ -1014,7 +1014,7 @@ static void get_cache_id(int cpu, struct
>  	id4_regs->id = c->apicid >> index_msb;
>  }
>  
> -static int __populate_cache_leaves(unsigned int cpu)
> +int populate_cache_leaves(unsigned int cpu)
>  {
>  	unsigned int idx, ret;
>  	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
> @@ -1033,6 +1033,3 @@ static int __populate_cache_leaves(unsig
>  
>  	return 0;
>  }
> -
> -DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
> -DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
> --- a/include/linux/cacheinfo.h
> +++ b/include/linux/cacheinfo.h
> @@ -79,24 +79,6 @@ struct cpu_cacheinfo {
>  	bool cpu_map_populated;
>  };
>  
> -/*
> - * Helpers to make sure "func" is executed on the cpu whose cache
> - * attributes are being detected
> - */
> -#define DEFINE_SMP_CALL_CACHE_FUNCTION(func)			\
> -static inline void _##func(void *ret)				\
> -{								\
> -	int cpu = smp_processor_id();				\
> -	*(int *)ret = __##func(cpu);				\
> -}								\
> -								\
> -int func(unsigned int cpu)					\
> -{								\
> -	int ret;						\
> -	smp_call_function_single(cpu, _##func, &ret, true);	\
> -	return ret;						\
> -}
> -
>  struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
>  int init_cache_level(unsigned int cpu);
>  int populate_cache_leaves(unsigned int cpu);
