[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250730082725.183133-21-daniel.lezcano@linaro.org>
Date: Wed, 30 Jul 2025 10:27:22 +0200
From: Daniel Lezcano <daniel.lezcano@...aro.org>
To: daniel.lezcano@...aro.org,
tglx@...utronix.de
Cc: S32@....com,
linux-kernel@...r.kernel.org,
ghennadi.procopciuc@....nxp.com
Subject: [PATCH v2 20/20] clocksource/drivers/nxp-pit: Add NXP Automotive s32g2 / s32g3 support
The previous changes put in place the encapsulation of the code in
order to allow multiple instances of the driver.
The S32G platform has two Periodic Interrupt Timers (PIT). The IP is
exactly the same as on the VF platform.
Each PIT has four channels which are 32 bits wide and count
down. The first two channels can be chained to implement a 64-bit
counter. The channel usage is kept unchanged from the original driver:
channel 2 is used as a clocksource, channel 3 is used as a
clockevent. The other channels are unused.
In order to support the S32G platform which has two PITs, we initialize
each timer and bind it to a CPU. The S32G platforms can have 2, 4 or 8
CPUs and this kind of configuration can appear unusual as we may end up
with the two PITs used as clockevents for the first two CPUs while the
other CPUs use the architected timers. However, in the automotive
context, the platform can be partitioned to assign 2 CPUs to Linux
and the other CPUs to a third-party OS. The PITs are then used with their
specificities, like the ability to freeze the time, which is needed for
instance for debugging purposes.
In the setup found for this platform, the two timer instances are bound
to CPU0 and CPU1 respectively.
A counter is incremented when a timer is successfully initialized and
assigned to a CPU. This counter is used as an index for the CPU number
and to detect when we reach the maximum possible instances for the
platform. That in turn triggers the CPU hotplug callbacks to achieve
the per CPU setup. It is the exact same mechanism found in the NXP STM
driver.
If the timers must be bound to different CPUs, an additional mechanism
would be required, which is not part of these changes.
Tested on a s32g274a-rdb2.
Signed-off-by: Daniel Lezcano <daniel.lezcano@...aro.org>
---
drivers/clocksource/timer-nxp-pit.c | 115 +++++++++++++++++++++++-----
1 file changed, 97 insertions(+), 18 deletions(-)
diff --git a/drivers/clocksource/timer-nxp-pit.c b/drivers/clocksource/timer-nxp-pit.c
index 2a0ee4109ead..f2534172b9d4 100644
--- a/drivers/clocksource/timer-nxp-pit.c
+++ b/drivers/clocksource/timer-nxp-pit.c
@@ -1,14 +1,16 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2018,2021-2025 NXP
*/
-
#include <linux/interrupt.h>
#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#include <linux/platform_device.h>
/*
* Each pit takes 0x10 Bytes register space
@@ -37,11 +39,23 @@
struct pit_timer {
void __iomem *clksrc_base;
void __iomem *clkevt_base;
- unsigned long cycle_per_jiffy;
struct clock_event_device ced;
struct clocksource cs;
+ int rate;
+};
+
+struct pit_timer_data {
+ int max_pit_instances;
};
+static DEFINE_PER_CPU(struct pit_timer *, pit_timers);
+
+/*
+ * Global structure for multiple PITs initialization
+ */
+static int pit_instances;
+static int max_pit_instances = 1;
+
static void __iomem *sched_clock_base;
static inline struct pit_timer *ced_to_pit(struct clock_event_device *ced)
@@ -98,8 +112,8 @@ static u64 pit_timer_clocksource_read(struct clocksource *cs)
return (u64)~readl(PITCVAL(pit->clksrc_base));
}
-static int __init pit_clocksource_init(struct pit_timer *pit, const char *name,
- void __iomem *base, unsigned long rate)
+static int pit_clocksource_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate)
{
/*
* The channels 0 and 1 can be chained to build a 64-bit
@@ -155,7 +169,7 @@ static int pit_set_periodic(struct clock_event_device *ced)
{
struct pit_timer *pit = ced_to_pit(ced);
- pit_set_next_event(pit->cycle_per_jiffy, ced);
+ pit_set_next_event(pit->rate / HZ, ced);
return 0;
}
@@ -181,24 +195,28 @@ static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init pit_clockevent_init(struct pit_timer *pit, const char *name,
- void __iomem *base, unsigned long rate,
- int irq, unsigned int cpu)
+static int pit_clockevent_per_cpu_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate,
+ int irq, unsigned int cpu)
{
+ int ret;
+
/*
* The channels 0 and 1 can be chained to build a 64-bit
* timer. Let's use the channel 3 as a clockevent and leave
* the channels 0 and 1 unused for anyone else who needs them
*/
pit->clkevt_base = base + PIT_CH(3);
- pit->cycle_per_jiffy = rate / (HZ);
+ pit->rate = rate;
pit_timer_disable(pit->clkevt_base);
pit_timer_irqack(pit);
- BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
- name, &pit->ced));
+ ret = request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_NOBALANCING,
+ name, &pit->ced);
+ if (ret)
+ return ret;
pit->ced.cpumask = cpumask_of(cpu);
pit->ced.irq = irq;
@@ -210,6 +228,23 @@ static int __init pit_clockevent_init(struct pit_timer *pit, const char *name,
pit->ced.set_next_event = pit_set_next_event;
pit->ced.rating = 300;
+ per_cpu(pit_timers, cpu) = pit;
+
+ return 0;
+}
+
+static int pit_clockevent_starting_cpu(unsigned int cpu)
+{
+ struct pit_timer *pit = per_cpu(pit_timers, cpu);
+ int ret;
+
+ if (!pit)
+ return 0;
+
+ ret = irq_force_affinity(pit->ced.irq, cpumask_of(cpu));
+ if (ret)
+ return ret;
+
/*
* The value for the LDVAL register trigger is calculated as:
* LDVAL trigger = (period / clock period) - 1
@@ -218,12 +253,12 @@ static int __init pit_clockevent_init(struct pit_timer *pit, const char *name,
* LDVAL trigger value is 1. And then the min_delta is
* minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.
*/
- clockevents_config_and_register(&pit->ced, rate, 2, 0xffffffff);
+ clockevents_config_and_register(&pit->ced, pit->rate, 2, 0xffffffff);
return 0;
}
-static int __init pit_timer_init(struct device_node *np)
+static int pit_timer_init(struct device_node *np)
{
struct pit_timer *pit;
struct clk *pit_clk;
@@ -262,16 +297,31 @@ static int __init pit_timer_init(struct device_node *np)
clk_rate = clk_get_rate(pit_clk);
- /* enable the pit module */
- pit_module_enable(timer_base);
+ pit_module_disable(timer_base);
ret = pit_clocksource_init(pit, name, timer_base, clk_rate);
- if (ret)
+ if (ret) {
+ pr_err("Failed to initialize clocksource '%pOF'\n", np);
goto out_pit_module_disable;
+ }
- ret = pit_clockevent_init(pit, name, timer_base, clk_rate, irq, 0);
- if (ret)
+ ret = pit_clockevent_per_cpu_init(pit, name, timer_base, clk_rate, irq, pit_instances);
+ if (ret) {
+ pr_err("Failed to initialize clockevent '%pOF'\n", np);
goto out_pit_clocksource_unregister;
+ }
+
+ /* enable the pit module */
+ pit_module_enable(timer_base);
+
+ pit_instances++;
+
+ if (pit_instances == max_pit_instances) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "PIT timer:starting",
+ pit_clockevent_starting_cpu, NULL);
+ if (ret < 0)
+ goto out_pit_clocksource_unregister;
+ }
return 0;
@@ -289,4 +339,33 @@ static int __init pit_timer_init(struct device_node *np)
return ret;
}
+
+static int pit_timer_probe(struct platform_device *pdev)
+{
+ const struct pit_timer_data *pit_timer_data;
+
+ pit_timer_data = of_device_get_match_data(&pdev->dev);
+ if (pit_timer_data)
+ max_pit_instances = pit_timer_data->max_pit_instances;
+
+ return pit_timer_init(pdev->dev.of_node);
+}
+
+static struct pit_timer_data s32g2_data = { .max_pit_instances = 2 };
+
+static const struct of_device_id pit_timer_of_match[] = {
+ { .compatible = "nxp,s32g2-pit", .data = &s32g2_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pit_timer_of_match);
+
+static struct platform_driver nxp_pit_driver = {
+ .driver = {
+ .name = "nxp-pit",
+ .of_match_table = pit_timer_of_match,
+ },
+ .probe = pit_timer_probe,
+};
+module_platform_driver(nxp_pit_driver);
+
TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
--
2.43.0
Powered by blists - more mailing lists