Message-ID: <2622717.o1c258ItSq@vostro.rjw.lan>
Date:	Wed, 11 Feb 2015 05:04:57 +0100
From:	"Rafael J. Wysocki" <rjw@...ysocki.net>
To:	Thomas Gleixner <tglx@...utronix.de>
Cc:	Peter Zijlstra <peterz@...radead.org>,
	Alan Cox <alan@...ux.intel.com>,
	"Li, Aubrey" <aubrey.li@...ux.intel.com>,
	LKML <linux-kernel@...r.kernel.org>,
	Linux PM list <linux-pm@...r.kernel.org>,
	ACPI Devel Mailing List <linux-acpi@...r.kernel.org>,
	Kristen Carlson Accardi <kristen@...ux.intel.com>,
	John Stultz <john.stultz@...aro.org>,
	Len Brown <len.brown@...el.com>
Subject: [PATCH 6/6] ACPI / idle: Implement ->enter_freeze callback routine

From: Rafael J. Wysocki <rafael.j.wysocki@...el.com>

Add an ->enter_freeze callback routine, acpi_idle_enter_freeze(), to
the ACPI cpuidle driver and point ->enter_freeze to it for all the
C2-type and C3-type states that don't need to fall back to C1.  The
C1 fallback may be halt-induced, and halt re-enables interrupts on
exit from idle, which ->enter_freeze callbacks must not do.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
---
 drivers/acpi/processor_idle.c |   48 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 40 insertions(+), 8 deletions(-)

Index: linux-pm/drivers/acpi/processor_idle.c
===================================================================
--- linux-pm.orig/drivers/acpi/processor_idle.c
+++ linux-pm/drivers/acpi/processor_idle.c
@@ -732,9 +732,8 @@ static int acpi_idle_play_dead(struct cp
 
 static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 {
-	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
-		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
-		!pr->flags.has_cst;
+	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
+		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
 }
 
 static int c3_cpu_count;
@@ -744,9 +743,10 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
  * acpi_idle_enter_bm - enters C3 with proper BM handling
  * @pr: Target processor
  * @cx: Target state context
+ * @timer_bc: Whether or not to change timer mode to broadcast
  */
 static void acpi_idle_enter_bm(struct acpi_processor *pr,
-			       struct acpi_processor_cx *cx)
+			       struct acpi_processor_cx *cx, bool timer_bc)
 {
 	acpi_unlazy_tlb(smp_processor_id());
 
@@ -754,7 +754,8 @@ static void acpi_idle_enter_bm(struct ac
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 */
-	lapic_timer_state_broadcast(pr, cx, 1);
+	if (timer_bc)
+		lapic_timer_state_broadcast(pr, cx, 1);
 
 	/*
 	 * disable bus master
@@ -784,7 +785,8 @@ static void acpi_idle_enter_bm(struct ac
 		raw_spin_unlock(&c3_lock);
 	}
 
-	lapic_timer_state_broadcast(pr, cx, 0);
+	if (timer_bc)
+		lapic_timer_state_broadcast(pr, cx, 0);
 }
 
 static int acpi_idle_enter(struct cpuidle_device *dev,
@@ -798,12 +800,12 @@ static int acpi_idle_enter(struct cpuidl
 		return -EINVAL;
 
 	if (cx->type != ACPI_STATE_C1) {
-		if (acpi_idle_fallback_to_c1(pr)) {
+		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
 			index = CPUIDLE_DRIVER_STATE_START;
 			cx = per_cpu(acpi_cstate[index], dev->cpu);
 		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
 			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
-				acpi_idle_enter_bm(pr, cx);
+				acpi_idle_enter_bm(pr, cx, true);
 				return index;
 			} else if (drv->safe_state_index >= 0) {
 				index = drv->safe_state_index;
@@ -827,6 +829,27 @@ static int acpi_idle_enter(struct cpuidl
 	return index;
 }
 
+static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
+				   struct cpuidle_driver *drv, int index)
+{
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+
+	if (cx->type == ACPI_STATE_C3) {
+		struct acpi_processor *pr = __this_cpu_read(processors);
+
+		if (unlikely(!pr))
+			return;
+
+		if (pr->flags.bm_check) {
+			acpi_idle_enter_bm(pr, cx, false);
+			return;
+		} else {
+			ACPI_FLUSH_CPU_CACHE();
+		}
+	}
+	acpi_idle_do_entry(cx);
+}
+
 struct cpuidle_driver acpi_idle_driver = {
 	.name =		"acpi_idle",
 	.owner =	THIS_MODULE,
@@ -925,6 +948,15 @@ static int acpi_processor_setup_cpuidle_
 			state->enter_dead = acpi_idle_play_dead;
 			drv->safe_state_index = count;
 		}
+		/*
+		 * Halt-induced C1 is not good for ->enter_freeze, because it
+		 * re-enables interrupts on exit.  Moreover, C1 is generally not
+		 * particularly interesting from the suspend-to-idle angle, so
+		 * avoid C1 and the situations in which we may need to fall back
+		 * to it altogether.
+		 */
+		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
+			state->enter_freeze = acpi_idle_enter_freeze;
 
 		count++;
 		if (count == CPUIDLE_STATE_MAX)
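
Schematically (a restatement for readability, not code added on top of
the patch), the new freeze path above reduces to the following, leaving
the unlikely !pr check aside:

	/*
	 * Simplified restatement of acpi_idle_enter_freeze() for
	 * illustration.  C3 with bus-master checking reuses the BM
	 * path but skips the LAPIC broadcast switch (timer_bc false);
	 * everything else goes straight to acpi_idle_do_entry(), with
	 * interrupts left disabled throughout.
	 */
	if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
		acpi_idle_enter_bm(pr, cx, false);
	} else {
		if (cx->type == ACPI_STATE_C3)
			ACPI_FLUSH_CPU_CACHE();
		acpi_idle_do_entry(cx);
	}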
