lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Thu, 14 Mar 2019 16:42:12 +0800
From:   Zhenzhong Duan <zhenzhong.duan@...cle.com>
To:     linux-kernel@...r.kernel.org
Cc:     Zhenzhong Duan <zhenzhong.duan@...cle.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        "H. Peter Anvin" <hpa@...or.com>, Waiman Long <longman@...hat.com>,
        Srinivas Eeda <srinivas.eeda@...cle.com>, x86@...nel.org
Subject: [PATCH 2/2] Revert "x86/hpet: Reduce HPET counter read contention"

This reverts commit f99fd22e4d4bc84880a8a3117311bbf0e3a6a9dc.

It's unnecessary after commit "acpi_pm: Fix bootup softlockup due to PMTMR
counter read contention", so the simple HPET access code can be restored.

On a general system with a good TSC, TSC is the final default clocksource.
So the potential performance loss is limited to the bootup stage, before
TSC replaces HPET; we didn't observe any obvious bootup delay.

Signed-off-by: Zhenzhong Duan <zhenzhong.duan@...cle.com>
Tested-by: Kin Cho <kin.cho@...cle.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Borislav Petkov <bp@...en8.de>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Waiman Long <longman@...hat.com>
Cc: Srinivas Eeda <srinivas.eeda@...cle.com>
Cc: x86@...nel.org
---
 arch/x86/kernel/hpet.c | 94 --------------------------------------------------
 1 file changed, 94 deletions(-)

diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index dfd3aca..b4fdee6a 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -749,104 +749,10 @@ static void hpet_reserve_msi_timers(struct hpet_data *hd)
 /*
  * Clock source related code
  */
-#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
-/*
- * Reading the HPET counter is a very slow operation. If a large number of
- * CPUs are trying to access the HPET counter simultaneously, it can cause
- * massive delay and slow down system performance dramatically. This may
- * happen when HPET is the default clock source instead of TSC. For a
- * really large system with hundreds of CPUs, the slowdown may be so
- * severe that it may actually crash the system because of a NMI watchdog
- * soft lockup, for example.
- *
- * If multiple CPUs are trying to access the HPET counter at the same time,
- * we don't actually need to read the counter multiple times. Instead, the
- * other CPUs can use the counter value read by the first CPU in the group.
- *
- * This special feature is only enabled on x86-64 systems. It is unlikely
- * that 32-bit x86 systems will have enough CPUs to require this feature
- * with its associated locking overhead. And we also need 64-bit atomic
- * read.
- *
- * The lock and the hpet value are stored together and can be read in a
- * single atomic 64-bit read. It is explicitly assumed that arch_spinlock_t
- * is 32 bits in size.
- */
-union hpet_lock {
-	struct {
-		arch_spinlock_t lock;
-		u32 value;
-	};
-	u64 lockval;
-};
-
-static union hpet_lock hpet __cacheline_aligned = {
-	{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
-};
-
-static u64 read_hpet(struct clocksource *cs)
-{
-	unsigned long flags;
-	union hpet_lock old, new;
-
-	BUILD_BUG_ON(sizeof(union hpet_lock) != 8);
-
-	/*
-	 * Read HPET directly if in NMI.
-	 */
-	if (in_nmi())
-		return (u64)hpet_readl(HPET_COUNTER);
-
-	/*
-	 * Read the current state of the lock and HPET value atomically.
-	 */
-	old.lockval = READ_ONCE(hpet.lockval);
-
-	if (arch_spin_is_locked(&old.lock))
-		goto contended;
-
-	local_irq_save(flags);
-	if (arch_spin_trylock(&hpet.lock)) {
-		new.value = hpet_readl(HPET_COUNTER);
-		/*
-		 * Use WRITE_ONCE() to prevent store tearing.
-		 */
-		WRITE_ONCE(hpet.value, new.value);
-		arch_spin_unlock(&hpet.lock);
-		local_irq_restore(flags);
-		return (u64)new.value;
-	}
-	local_irq_restore(flags);
-
-contended:
-	/*
-	 * Contended case
-	 * --------------
-	 * Wait until the HPET value change or the lock is free to indicate
-	 * its value is up-to-date.
-	 *
-	 * It is possible that old.value has already contained the latest
-	 * HPET value while the lock holder was in the process of releasing
-	 * the lock. Checking for lock state change will enable us to return
-	 * the value immediately instead of waiting for the next HPET reader
-	 * to come along.
-	 */
-	do {
-		cpu_relax();
-		new.lockval = READ_ONCE(hpet.lockval);
-	} while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
-
-	return (u64)new.value;
-}
-#else
-/*
- * For UP or 32-bit.
- */
 static u64 read_hpet(struct clocksource *cs)
 {
 	return (u64)hpet_readl(HPET_COUNTER);
 }
-#endif
 
 static struct clocksource clocksource_hpet = {
 	.name		= "hpet",
-- 
1.8.3.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ