Date:   Sat, 8 Oct 2016 15:59:57 +0200 (CEST)
From:   Thomas Gleixner <tglx@...utronix.de>
To:     Linus Torvalds <torvalds@...ux-foundation.org>
cc:     LKML <linux-kernel@...r.kernel.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Ingo Molnar <mingo@...nel.org>,
        "H. Peter Anvin" <hpa@...or.com>
Subject: [GIT pull] x86 updates for 4.9

Linus,

please pull the latest x86-urgent-for-linus git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86-urgent-for-linus

A pile of regression fixes and updates:

  - Address the fallout of the patches which made the cpuid - nodeid
    relation permanent: handle invalid APIC ids and prevent pointless
    warning messages.

  - Force eager FPU when protection keys are enabled. Protection keys do
    not generate FPU exceptions, so they cannot work with the lazy FPU
    mechanism.

  - Prevent force migration of interrupts which are not part of the CPU
    vector domain.

  - Handle the fact that APIC ids are not updated in the ACPI/MADT tables
    on physical CPU hotplug.

  - Remove bash-isms from the syscall table generator script.

  - Use the hypervisor-supplied APIC frequency when running on VMware.


Thanks,

	tglx

------------------>
Dave Hansen (1):
      x86/pkeys: Make protection keys an "eager" feature

Josh Poimboeuf (1):
      x86/unwind: Fix oprofile module link error

Mika Westerberg (1):
      x86/irq: Prevent force migration of irqs which are not in the vector domain

Prarit Bhargava (1):
      arch/x86: Handle non enumerated CPU after physical hotplug

Renat Valiullin (1):
      x86/vmware: Skip lapic calibration on VMware

Thomas Gleixner (2):
      x86/acpi: Prevent LAPIC id 0xff from being accounted
      x86/apic: Prevent pointless warning messages

sylvain.bertrand@...il.com (1):
      x86/syscalls: Remove bash-isms in syscall table generator


 arch/x86/entry/syscalls/syscalltbl.sh | 15 +++++++++------
 arch/x86/include/asm/fpu/xstate.h     |  7 ++++---
 arch/x86/include/asm/unwind.h         | 14 ++------------
 arch/x86/kernel/acpi/boot.c           |  4 ++++
 arch/x86/kernel/apic/apic.c           |  8 +++++---
 arch/x86/kernel/apic/vector.c         | 23 ++++++++++++++++++++---
 arch/x86/kernel/cpu/vmware.c          | 12 ++++++++++--
 arch/x86/kernel/smpboot.c             | 18 +++++++++++++++---
 arch/x86/kernel/unwind_guess.c        | 10 ++++++++++
 9 files changed, 79 insertions(+), 32 deletions(-)

diff --git a/arch/x86/entry/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh
index cd3d3015d7df..751d1f992630 100644
--- a/arch/x86/entry/syscalls/syscalltbl.sh
+++ b/arch/x86/entry/syscalls/syscalltbl.sh
@@ -10,8 +10,11 @@ syscall_macro() {
 
     # Entry can be either just a function name or "function/qualifier"
     real_entry="${entry%%/*}"
-    qualifier="${entry:${#real_entry}}"		# Strip the function name
-    qualifier="${qualifier:1}"			# Strip the slash, if any
+    if [ "$entry" = "$real_entry" ]; then
+        qualifier=
+    else
+        qualifier=${entry#*/}
+    fi
 
     echo "__SYSCALL_${abi}($nr, $real_entry, $qualifier)"
 }
@@ -22,7 +25,7 @@ emit() {
     entry="$3"
     compat="$4"
 
-    if [ "$abi" == "64" -a -n "$compat" ]; then
+    if [ "$abi" = "64" -a -n "$compat" ]; then
 	echo "a compat entry for a 64-bit syscall makes no sense" >&2
 	exit 1
     fi
@@ -45,17 +48,17 @@ emit() {
 grep '^[0-9]' "$in" | sort -n | (
     while read nr abi name entry compat; do
 	abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
-	if [ "$abi" == "COMMON" -o "$abi" == "64" ]; then
+	if [ "$abi" = "COMMON" -o "$abi" = "64" ]; then
 	    # COMMON is the same as 64, except that we don't expect X32
 	    # programs to use it.  Our expectation has nothing to do with
 	    # any generated code, so treat them the same.
 	    emit 64 "$nr" "$entry" "$compat"
-	elif [ "$abi" == "X32" ]; then
+	elif [ "$abi" = "X32" ]; then
 	    # X32 is equivalent to 64 on an X32-compatible kernel.
 	    echo "#ifdef CONFIG_X86_X32_ABI"
 	    emit 64 "$nr" "$entry" "$compat"
 	    echo "#endif"
-	elif [ "$abi" == "I386" ]; then
+	elif [ "$abi" = "I386" ]; then
 	    emit "$abi" "$nr" "$entry" "$compat"
 	else
 	    echo "Unknown abi $abi" >&2
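
For illustration only (not part of the patch): the point of the change is
that ${entry:offset} slicing and "==" inside [ ] are bash extensions, while
the %% and # prefix/suffix stripping used above are plain POSIX sh. The two
expansions split "function/qualifier" at the first slash, with an empty
qualifier when there is no slash. A minimal standalone C sketch of the same
split logic, with made-up table entries:

#include <stdio.h>
#include <string.h>

/* Split "name" or "name/qualifier" at the first slash, as the script does. */
static void split_entry(const char *entry)
{
	const char *slash = strchr(entry, '/');
	size_t name_len = slash ? (size_t)(slash - entry) : strlen(entry);

	printf("entry=%-24s real_entry=%.*s qualifier=%s\n",
	       entry, (int)name_len, entry, slash ? slash + 1 : "");
}

int main(void)
{
	split_entry("sys_read");			/* no slash: empty qualifier */
	split_entry("sys_rt_sigreturn/ptregs");		/* qualifier "ptregs" */
	return 0;
}
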
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index d4957ac72b48..430bacf73074 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -27,11 +27,12 @@
 				 XFEATURE_MASK_YMM | \
 				 XFEATURE_MASK_OPMASK | \
 				 XFEATURE_MASK_ZMM_Hi256 | \
-				 XFEATURE_MASK_Hi16_ZMM	 | \
-				 XFEATURE_MASK_PKRU)
+				 XFEATURE_MASK_Hi16_ZMM)
 
 /* Supported features which require eager state saving */
-#define XFEATURE_MASK_EAGER	(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
+#define XFEATURE_MASK_EAGER	(XFEATURE_MASK_BNDREGS | \
+				 XFEATURE_MASK_BNDCSR | \
+				 XFEATURE_MASK_PKRU)
 
 /* All currently supported features */
 #define XCNTXT_MASK	(XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
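
For illustration only (not part of the patch): the hunk above moves
XFEATURE_MASK_PKRU from the lazy set into the eager set, so a kernel with
protection keys enabled always takes the eager FPU path. A minimal userspace
sketch of how the masks compose; the bit positions are assumptions following
the XSAVE feature ordering, not values taken from the kernel headers:

#include <stdio.h>

#define XFEATURE_MASK_FP	(1 << 0)
#define XFEATURE_MASK_SSE	(1 << 1)
#define XFEATURE_MASK_YMM	(1 << 2)
#define XFEATURE_MASK_BNDREGS	(1 << 3)
#define XFEATURE_MASK_BNDCSR	(1 << 4)
#define XFEATURE_MASK_PKRU	(1 << 9)

/* After the patch: PKRU lives in the eager set, not the lazy one. */
#define XFEATURE_MASK_LAZY	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE | \
				 XFEATURE_MASK_YMM)
#define XFEATURE_MASK_EAGER	(XFEATURE_MASK_BNDREGS | \
				 XFEATURE_MASK_BNDCSR | \
				 XFEATURE_MASK_PKRU)
#define XCNTXT_MASK		(XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)

int main(void)
{
	/* Any supported feature in the eager mask forces eager FPU state
	 * switching; per the pull request, protection keys generate no FPU
	 * exceptions, so lazy restore would never kick in for PKRU. */
	if (XCNTXT_MASK & XFEATURE_MASK_EAGER & XFEATURE_MASK_PKRU)
		printf("PKRU is an eager xfeature -> eager FPU required\n");
	return 0;
}
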
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index c4b6d1cafa46..46de9ac4b990 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -23,6 +23,8 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 
 bool unwind_next_frame(struct unwind_state *state);
 
+unsigned long unwind_get_return_address(struct unwind_state *state);
+
 static inline bool unwind_done(struct unwind_state *state)
 {
 	return state->stack_info.type == STACK_TYPE_UNKNOWN;
@@ -48,8 +50,6 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
 	return state->bp + 1;
 }
 
-unsigned long unwind_get_return_address(struct unwind_state *state);
-
 #else /* !CONFIG_FRAME_POINTER */
 
 static inline
@@ -58,16 +58,6 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
 	return NULL;
 }
 
-static inline
-unsigned long unwind_get_return_address(struct unwind_state *state)
-{
-	if (unwind_done(state))
-		return 0;
-
-	return ftrace_graph_ret_addr(state->task, &state->graph_idx,
-				     *state->sp, state->sp);
-}
-
 #endif /* CONFIG_FRAME_POINTER */
 
 #endif /* _ASM_X86_UNWIND_H */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 32a7d70913ac..8a5abaa7d453 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -233,6 +233,10 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
 
 	acpi_table_print_madt_entry(header);
 
+	/* Ignore invalid ID */
+	if (processor->id == 0xff)
+		return 0;
+
 	/*
 	 * We need to register disabled CPU as well to permit
 	 * counting disabled CPUs. This allows us to size
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f266b8a92a9e..88c657b057e2 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2128,9 +2128,11 @@ int __generic_processor_info(int apicid, int version, bool enabled)
 	if (num_processors >= nr_cpu_ids) {
 		int thiscpu = max + disabled_cpus;
 
-		pr_warning(
-			"APIC: NR_CPUS/possible_cpus limit of %i reached."
-			"  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
+		if (enabled) {
+			pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+				   "reached. Processor %d/0x%x ignored.\n",
+				   max, thiscpu, apicid);
+		}
 
 		disabled_cpus++;
 		return -EINVAL;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 6066d945c40e..5d30c5e42bb1 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -661,11 +661,28 @@ void irq_complete_move(struct irq_cfg *cfg)
  */
 void irq_force_complete_move(struct irq_desc *desc)
 {
-	struct irq_data *irqdata = irq_desc_get_irq_data(desc);
-	struct apic_chip_data *data = apic_chip_data(irqdata);
-	struct irq_cfg *cfg = data ? &data->cfg : NULL;
+	struct irq_data *irqdata;
+	struct apic_chip_data *data;
+	struct irq_cfg *cfg;
 	unsigned int cpu;
 
+	/*
+	 * The function is called for all descriptors regardless of which
+	 * irqdomain they belong to. For example if an IRQ is provided by
+	 * an irq_chip as part of a GPIO driver, the chip data for that
+	 * descriptor is specific to the irq_chip in question.
+	 *
+	 * Check first that the chip_data is what we expect
+	 * (apic_chip_data) before touching it any further.
+	 */
+	irqdata = irq_domain_get_irq_data(x86_vector_domain,
+					  irq_desc_get_irq(desc));
+	if (!irqdata)
+		return;
+
+	data = apic_chip_data(irqdata);
+	cfg = data ? &data->cfg : NULL;
+
 	if (!cfg)
 		return;
 
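For illustration only (not part of the patch): the comment added above
describes a defensive ownership check before interpreting opaque
per-descriptor data. The kernel side looks the descriptor up via
irq_domain_get_irq_data(x86_vector_domain, ...) and bails out on NULL rather
than casting desc->chip_data blindly. A generic userspace sketch of the same
pattern, with hypothetical domain/descriptor types that are not kernel APIs:

#include <stdio.h>
#include <stddef.h>

struct domain {
	const char *name;
};

struct desc {
	const struct domain *owner;	/* domain whose data hangs off this desc */
	void *chip_data;		/* opaque: layout depends on the owner */
};

/* Hand back the per-domain data only if @d is really owned by @dom. */
static void *domain_get_data(const struct domain *dom, struct desc *d)
{
	return (d && d->owner == dom) ? d->chip_data : NULL;
}

int main(void)
{
	static char gpio_priv[] = "gpio-chip private data";
	struct domain vector_domain = { "x86-vector" };
	struct domain gpio_domain = { "gpio" };
	int cfg = 42;
	struct desc vec_desc = { &vector_domain, &cfg };
	struct desc gpio_desc = { &gpio_domain, gpio_priv };
	struct desc *descs[] = { &vec_desc, &gpio_desc };

	for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
		int *data = domain_get_data(&vector_domain, descs[i]);

		if (!data)
			continue;	/* foreign descriptor: leave it alone */
		printf("vector-domain descriptor, cfg = %d\n", *data);
	}
	return 0;
}
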
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 1ff0598d309c..81160578b91a 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -27,6 +27,7 @@
 #include <asm/div64.h>
 #include <asm/x86_init.h>
 #include <asm/hypervisor.h>
+#include <asm/apic.h>
 
 #define CPUID_VMWARE_INFO_LEAF	0x40000000
 #define VMWARE_HYPERVISOR_MAGIC	0x564D5868
@@ -82,10 +83,17 @@ static void __init vmware_platform_setup(void)
 
 	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
 
-	if (ebx != UINT_MAX)
+	if (ebx != UINT_MAX) {
 		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
-	else
+#ifdef CONFIG_X86_LOCAL_APIC
+		/* Skip lapic calibration since we know the bus frequency. */
+		lapic_timer_frequency = ecx / HZ;
+		pr_info("Host bus clock speed read from hypervisor : %u Hz\n",
+			ecx);
+#endif
+	} else {
 		pr_warn("Failed to get TSC freq from the hypervisor\n");
+	}
 }
 
 /*
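
For illustration only (not part of the patch): VMWARE_PORT(GETHZ, ...)
reports the host bus clock speed in Hz in ECX, and the hunk above divides it
by HZ to seed lapic_timer_frequency (bus cycles per kernel tick), which is
what lets the lapic calibration loop be skipped. A tiny worked example with
assumed numbers; neither value is something the hypervisor guarantees:

#include <stdio.h>

int main(void)
{
	unsigned int ecx = 66000000;	/* assumed 66 MHz bus clock, in Hz */
	unsigned int hz = 250;		/* assumed CONFIG_HZ */
	unsigned int lapic_timer_frequency = ecx / hz;

	/* 66000000 / 250 = 264000 bus cycles per tick */
	printf("lapic_timer_frequency = %u\n", lapic_timer_frequency);
	return 0;
}
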
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 42a93621f5b0..951f093a96fe 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1407,9 +1407,21 @@ __init void prefill_possible_map(void)
 {
 	int i, possible;
 
-	/* no processor from mptable or madt */
-	if (!num_processors)
-		num_processors = 1;
+	/* No boot processor was found in mptable or ACPI MADT */
+	if (!num_processors) {
+		int apicid = boot_cpu_physical_apicid;
+		int cpu = hard_smp_processor_id();
+
+		pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
+
+		/* Make sure boot cpu is enumerated */
+		if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
+		    apic->apic_id_valid(apicid))
+			generic_processor_info(apicid, boot_cpu_apic_version);
+
+		if (!num_processors)
+			num_processors = 1;
+	}
 
 	i = setup_max_cpus ?: 1;
 	if (setup_possible_cpus == -1) {
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index b5a834c93065..9298993dc8b7 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -5,6 +5,16 @@
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+	if (unwind_done(state))
+		return 0;
+
+	return ftrace_graph_ret_addr(state->task, &state->graph_idx,
+				     *state->sp, state->sp);
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
 bool unwind_next_frame(struct unwind_state *state)
 {
 	struct stack_info *info = &state->stack_info;
