[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20230829105936.GJ1694454@linux.vnet.ibm.com>
Date: Tue, 29 Aug 2023 16:29:36 +0530
From: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
To: Aboorva Devarajan <aboorvad@...ux.vnet.ibm.com>
Cc: mpe@...erman.id.au, npiggin@...il.com, rmclure@...ux.ibm.com,
arnd@...db.de, joel@....id.au, shuah@...nel.org,
linux-kselftest@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
linux-kernel@...r.kernel.org, pratik.r.sampat@...il.com,
sshegde@...ux.vnet.ibm.com
Subject: Re: [RFC v2 1/2] powerpc/cpuidle: cpuidle wakeup latency based on
IPI and timer events
* Aboorva Devarajan <aboorvad@...ux.vnet.ibm.com> [2023-08-28 11:45:29]:
> From: Pratik R. Sampat <psampat@...ux.ibm.com>
>
> Introduce a mechanism to fire directed IPIs from a source CPU to a
> specified target CPU and measure the time incurred on waking up the
> target CPU in response.
>
> Also, introduce a mechanism to queue a hrtimer on a specified CPU and
> subsequently measure the time taken to wakeup the CPU.
>
> Define a simple debugfs interface that allows for adjusting the
> settings to trigger IPI and timer events on a designated CPU, and to
> observe the resulting cpuidle wakeup latencies.
>
> Signed-off-by: Pratik R. Sampat <psampat@...ux.ibm.com>
> Signed-off-by: Aboorva Devarajan <aboorvad@...ux.vnet.ibm.com>
> Reviewed-by: Shrikanth Hegde <sshegde@...ux.vnet.ibm.com>
> ---
> arch/powerpc/Kconfig.debug | 10 ++
> arch/powerpc/kernel/Makefile | 1 +
> arch/powerpc/kernel/test_cpuidle_latency.c | 156 +++++++++++++++++++++
> 3 files changed, 167 insertions(+)
> create mode 100644 arch/powerpc/kernel/test_cpuidle_latency.c
>
> diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
> index 2a54fadbeaf5..e175fc3028ac 100644
> --- a/arch/powerpc/Kconfig.debug
> +++ b/arch/powerpc/Kconfig.debug
> @@ -391,3 +391,13 @@ config KASAN_SHADOW_OFFSET
> default 0xe0000000 if PPC32
> default 0xa80e000000000000 if PPC_BOOK3S_64
> default 0xa8001c0000000000 if PPC_BOOK3E_64
> +
> +config CPUIDLE_LATENCY_SELFTEST
> + tristate "Cpuidle latency selftests"
> + depends on CPU_IDLE
> + help
> + Provides a kernel module that runs tests using the IPI and
> + timers to measure cpuidle latency.
> +
> + Say M if you want these self tests to build as a module.
> + Say N if you are unsure.
> diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
> index 2919433be355..3205ecbd9d8f 100644
> --- a/arch/powerpc/kernel/Makefile
> +++ b/arch/powerpc/kernel/Makefile
> @@ -87,6 +87,7 @@ obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
> obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
> obj-$(CONFIG_PPC_DAWR) += dawr.o
> obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
> +obj-$(CONFIG_CPUIDLE_LATENCY_SELFTEST) += test_cpuidle_latency.o
This line is now introduced in between the CONFIG_PPC_BOOK3S_64 objects.
Maybe it would be better to add it such that the CONFIG_PPC_BOOK3S_64
objects stay grouped together.
> obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
> obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
> obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_64e.o
> diff --git a/arch/powerpc/kernel/test_cpuidle_latency.c b/arch/powerpc/kernel/test_cpuidle_latency.c
> new file mode 100644
> index 000000000000..3c3c119389c1
> --- /dev/null
> +++ b/arch/powerpc/kernel/test_cpuidle_latency.c
> @@ -0,0 +1,156 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/*
> + * Module-based API test facility for cpuidle latency using IPIs and timers
> + */
> +
> +#include <linux/debugfs.h>
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +
> +/*
> + * IPI based wakeup latencies
> + * Measure time taken for a CPU to wakeup on an IPI sent from another CPU
> + * The latency measured also includes the latency of sending the IPI
> + */
> +struct latency {
> + unsigned int src_cpu;
> + unsigned int dest_cpu;
> + ktime_t time_start;
> + ktime_t time_end;
> + u64 latency_ns;
> +} ipi_wakeup;
> +
> +static void measure_latency(void *info)
> +{
> + struct latency *v;
> + ktime_t time_diff;
> +
> + v = (struct latency *)info;
NIT: The above line could have been part of the declaration itself.
> + v->time_end = ktime_get();
> + time_diff = ktime_sub(v->time_end, v->time_start);
> + v->latency_ns = ktime_to_ns(time_diff);
> +}
> +
> +void run_smp_call_function_test(unsigned int cpu)
> +{
> + ipi_wakeup.src_cpu = smp_processor_id();
> + ipi_wakeup.dest_cpu = cpu;
> + ipi_wakeup.time_start = ktime_get();
> + smp_call_function_single(cpu, measure_latency, &ipi_wakeup, 1);
> +}
> +
> +/*
> + * Timer based wakeup latencies
> + * Measure time taken for a CPU to wakeup on a timer being armed and fired
> + */
> +struct timer_data {
> + unsigned int src_cpu;
> + u64 timeout;
> + ktime_t time_start;
> + ktime_t time_end;
> + struct hrtimer timer;
> + u64 timeout_diff_ns;
> +} timer_wakeup;
> +
> +static enum hrtimer_restart hrtimer_callback(struct hrtimer *hrtimer)
> +{
> + struct timer_data *w;
> + ktime_t time_diff;
> +
> + w = container_of(hrtimer, struct timer_data, timer);
> + w->time_end = ktime_get();
> +
> + time_diff = ktime_sub(w->time_end, w->time_start);
> + time_diff = ktime_sub(time_diff, ns_to_ktime(w->timeout));
> + w->timeout_diff_ns = ktime_to_ns(time_diff);
> + return HRTIMER_NORESTART;
> +}
> +
> +static void run_timer_test(unsigned int ns)
> +{
> + hrtimer_init(&timer_wakeup.timer, CLOCK_MONOTONIC,
> + HRTIMER_MODE_REL);
No need to break the above line.
> + timer_wakeup.timer.function = hrtimer_callback;
> + timer_wakeup.src_cpu = smp_processor_id();
> + timer_wakeup.timeout = ns;
> + timer_wakeup.time_start = ktime_get();
> +
> + hrtimer_start(&timer_wakeup.timer, ns_to_ktime(ns),
> + HRTIMER_MODE_REL_PINNED);
> +}
> +
> +static struct dentry *dir;
> +
> +static int cpu_read_op(void *data, u64 *dest_cpu)
> +{
> + *dest_cpu = ipi_wakeup.dest_cpu;
> + return 0;
> +}
> +
> +/*
> + * Send a directed IPI from the current CPU (source) to the destination CPU and
> + * measure the latency on wakeup.
> + */
> +static int cpu_write_op(void *data, u64 value)
> +{
> + run_smp_call_function_test(value);
> + return 0;
> +}
> +DEFINE_SIMPLE_ATTRIBUTE(ipi_ops, cpu_read_op, cpu_write_op, "%llu\n");
> +
> +static int timeout_read_op(void *data, u64 *timeout)
> +{
> + *timeout = timer_wakeup.timeout;
> + return 0;
> +}
> +
> +/* Queue a hrtimer on a specified destination CPU and measure the time taken to
> + * wakeup the CPU.
> + */
> +static int timeout_write_op(void *data, u64 value)
> +{
> + run_timer_test(value);
> + return 0;
> +}
> +DEFINE_SIMPLE_ATTRIBUTE(timeout_ops, timeout_read_op, timeout_write_op, "%llu\n");
> +
> +static int __init latency_init(void)
> +{
> + struct dentry *temp;
> +
> + dir = debugfs_create_dir("latency_test", arch_debugfs_dir);
> + if (!dir) {
> + pr_alert("latency_test: failed to create /sys/kernel/debug/powerpc/latency_test\n");
> + return -1;
> + }
> + temp = debugfs_create_file("ipi_cpu_dest", 0644, dir, NULL, &ipi_ops);
> + if (!temp) {
> + pr_alert("latency_test: failed to create /sys/kernel/debug/powerpc/ipi_cpu_dest\n");
> + return -1;
> + }
> + debugfs_create_u64("ipi_latency_ns", 0444, dir, &ipi_wakeup.latency_ns);
> + debugfs_create_u32("ipi_cpu_src", 0444, dir, &ipi_wakeup.src_cpu);
> +
> + temp = debugfs_create_file("timeout_expected_ns", 0644, dir, NULL, &timeout_ops);
> + if (!temp) {
> + pr_alert("latency_test: failed to create /sys/kernel/debug/powerpc/timeout_expected_ns\n");
> + return -1;
> + }
> + debugfs_create_u64("timeout_diff_ns", 0444, dir, &timer_wakeup.timeout_diff_ns);
> + debugfs_create_u32("timeout_cpu_src", 0444, dir, &timer_wakeup.src_cpu);
> + pr_info("Latency Test module loaded\n");
> + return 0;
> +}
> +
> +static void __exit latency_cleanup(void)
> +{
> + pr_info("Cleaning up Latency Test module.\n");
> + debugfs_remove_recursive(dir);
> +}
> +
> +module_init(latency_init);
> +module_exit(latency_cleanup);
> +
> +MODULE_LICENSE("GPL");
> +MODULE_AUTHOR("IBM Corporation");
> +MODULE_DESCRIPTION("Measuring idle latency for IPIs and Timers");
> --
> 2.25.1
>
Otherwise looks good to me.
Reviewed-by: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
--
Thanks and Regards
Srikar Dronamraju
Powered by blists - more mailing lists