Message-ID: <158539837851.28353.12926101418295280116.tip-bot2@tip-bot2>
Date: Sat, 28 Mar 2020 12:26:18 -0000
From: "tip-bot2 for Clark Williams" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Clark Williams <williams@...hat.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Thomas Gleixner <tglx@...utronix.de>, x86 <x86@...nel.org>,
LKML <linux-kernel@...r.kernel.org>
Subject: [tip: locking/core] thermal/x86_pkg_temp: Make pkg_temp_lock a raw_spinlock_t

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     fc32150e6f43d6cb93ea75937bb6a88a1764cc37
Gitweb:        https://git.kernel.org/tip/fc32150e6f43d6cb93ea75937bb6a88a1764cc37
Author:        Clark Williams <williams@...hat.com>
AuthorDate:    Tue, 08 Oct 2019 13:00:21 +02:00
Committer:     Thomas Gleixner <tglx@...utronix.de>
CommitterDate: Sat, 28 Mar 2020 13:21:08 +01:00

thermal/x86_pkg_temp: Make pkg_temp_lock a raw_spinlock_t

The pkg_temp_lock spinlock is acquired in the thermal vector handler, which
runs in truly atomic context even on PREEMPT_RT kernels.

The critical sections are tiny, so change it to a raw spinlock.
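
For illustration only, a minimal sketch of the locking pattern this change
relies on (not part of the patch; all names below are hypothetical). On
PREEMPT_RT, spinlock_t is substituted by a sleeping lock, so a lock taken
from a hard interrupt path such as the thermal vector handler has to be a
raw_spinlock_t, which remains a true spinning lock:

    #include <linux/spinlock.h>

    /* Stays a real spinning lock even on PREEMPT_RT */
    static DEFINE_RAW_SPINLOCK(example_lock);
    static unsigned long example_event_cnt;

    /* Called from hard interrupt context; must never sleep */
    static void example_irq_notify(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&example_lock, flags);
            example_event_cnt++;            /* tiny critical section */
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }

    /* Called from process context, e.g. a work function */
    static void example_work_fn(void)
    {
            raw_spin_lock_irq(&example_lock);
            /* short, non-sleeping work under the lock */
            raw_spin_unlock_irq(&example_lock);
    }

Because the critical sections are tiny and never sleep, the latency cost of
keeping the lock raw is negligible even on PREEMPT_RT.
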
Signed-off-by: Clark Williams <williams@...hat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Link: https://lore.kernel.org/r/20191008110021.2j44ayunal7fkb7i@linutronix.de
---
drivers/thermal/intel/x86_pkg_temp_thermal.c | 24 +++++++++----------
1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index ddb4a97..e18758a 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -63,7 +63,7 @@ static int max_id __read_mostly;
/* Array of zone pointers */
static struct zone_device **zones;
/* Serializes interrupt notification, work and hotplug */
-static DEFINE_SPINLOCK(pkg_temp_lock);
+static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
/* Protects zone operation in the work function against hotplug removal */
static DEFINE_MUTEX(thermal_zone_mutex);
@@ -266,12 +266,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
u64 msr_val, wr_val;
mutex_lock(&thermal_zone_mutex);
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);
++pkg_work_cnt;
zonedev = pkg_temp_thermal_get_dev(cpu);
if (!zonedev) {
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
mutex_unlock(&thermal_zone_mutex);
return;
}
@@ -285,7 +285,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
}
enable_pkg_thres_interrupt();
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
/*
* If tzone is not NULL, then thermal_zone_mutex will prevent the
@@ -310,7 +310,7 @@ static int pkg_thermal_notify(u64 msr_val)
struct zone_device *zonedev;
unsigned long flags;
- spin_lock_irqsave(&pkg_temp_lock, flags);
+ raw_spin_lock_irqsave(&pkg_temp_lock, flags);
++pkg_interrupt_cnt;
disable_pkg_thres_interrupt();
@@ -322,7 +322,7 @@ static int pkg_thermal_notify(u64 msr_val)
pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
}
- spin_unlock_irqrestore(&pkg_temp_lock, flags);
+ raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
return 0;
}
@@ -368,9 +368,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
zonedev->msr_pkg_therm_high);
cpumask_set_cpu(cpu, &zonedev->cpumask);
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);
zones[id] = zonedev;
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
return 0;
}
@@ -407,7 +407,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
}
/* Protect against work and interrupts */
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);
/*
* Check whether this cpu was the current target and store the new
@@ -439,9 +439,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
* To cancel the work we need to drop the lock, otherwise
* we might deadlock if the work needs to be flushed.
*/
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
cancel_delayed_work_sync(&zonedev->work);
- spin_lock_irq(&pkg_temp_lock);
+ raw_spin_lock_irq(&pkg_temp_lock);
/*
* If this is not the last cpu in the package and the work
* did not run after we dropped the lock above, then we
@@ -452,7 +452,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
pkg_thermal_schedule_work(target, &zonedev->work);
}
- spin_unlock_irq(&pkg_temp_lock);
+ raw_spin_unlock_irq(&pkg_temp_lock);
/* Final cleanup if this is the last cpu */
if (lastcpu)