[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250224121353.98697-10-vdonnefort@google.com>
Date: Mon, 24 Feb 2025 12:13:51 +0000
From: Vincent Donnefort <vdonnefort@...gle.com>
To: rostedt@...dmis.org, mhiramat@...nel.org, mathieu.desnoyers@...icios.com,
linux-trace-kernel@...r.kernel.org, maz@...nel.org, oliver.upton@...ux.dev,
joey.gouly@....com, suzuki.poulose@....com, yuzenghui@...wei.com
Cc: kvmarm@...ts.linux.dev, linux-arm-kernel@...ts.infradead.org,
jstultz@...gle.com, qperret@...gle.com, will@...nel.org,
kernel-team@...roid.com, linux-kernel@...r.kernel.org,
Vincent Donnefort <vdonnefort@...gle.com>
Subject: [PATCH 09/11] KVM: arm64: Add trace interface for hyp tracefs
The "trace" file interface is solely here to reset tracing. Non-consuming
read is not yet supported, due to the lack of support for it in the
ring-buffer meta page.
Signed-off-by: Vincent Donnefort <vdonnefort@...gle.com>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 87d3e0e73b68..74f10847a55e 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -91,6 +91,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_load_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_teardown_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_enable_tracing,
+ __KVM_HOST_SMCCC_FUNC___pkvm_reset_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_swap_reader_tracing,
};
diff --git a/arch/arm64/kvm/hyp/include/nvhe/trace.h b/arch/arm64/kvm/hyp/include/nvhe/trace.h
index 6f1cc571b47a..28bbb54b7a0b 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/trace.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/trace.h
@@ -20,6 +20,7 @@ void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cy
int __pkvm_load_tracing(unsigned long desc_va, size_t desc_size);
void __pkvm_teardown_tracing(void);
int __pkvm_enable_tracing(bool enable);
+int __pkvm_reset_tracing(unsigned int cpu);
int __pkvm_swap_reader_tracing(unsigned int cpu);
#else
static inline void *tracing_reserve_entry(unsigned long length) { return NULL; }
@@ -30,6 +31,7 @@ void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cy
static inline int __pkvm_load_tracing(unsigned long desc_va, size_t desc_size) { return -ENODEV; }
static inline void __pkvm_teardown_tracing(void) { }
static inline int __pkvm_enable_tracing(bool enable) { return -ENODEV; }
+static inline int __pkvm_reset_tracing(unsigned int cpu) { return -ENODEV; }
static inline int __pkvm_swap_reader_tracing(unsigned int cpu) { return -ENODEV; }
#endif
#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index a8b497b22407..e2419c97c57d 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -605,6 +605,13 @@ static void handle___pkvm_enable_tracing(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_enable_tracing(enable);
}
+static void handle___pkvm_reset_tracing(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned int, cpu, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_reset_tracing(cpu);
+}
+
static void handle___pkvm_swap_reader_tracing(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned int, cpu, host_ctxt, 1);
@@ -655,6 +662,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_load_tracing),
HANDLE_FUNC(__pkvm_teardown_tracing),
HANDLE_FUNC(__pkvm_enable_tracing),
+ HANDLE_FUNC(__pkvm_reset_tracing),
HANDLE_FUNC(__pkvm_swap_reader_tracing),
};
diff --git a/arch/arm64/kvm/hyp/nvhe/trace.c b/arch/arm64/kvm/hyp/nvhe/trace.c
index d79b6539377e..bf935645ed91 100644
--- a/arch/arm64/kvm/hyp/nvhe/trace.c
+++ b/arch/arm64/kvm/hyp/nvhe/trace.c
@@ -262,7 +262,7 @@ void tracing_commit_entry(void)
smp_store_release(&cpu_buffer->status, HYP_RB_READY);
}
-static void hyp_rb_disable_writing(struct hyp_rb_per_cpu *cpu_buffer)
+static u32 hyp_rb_disable_writing(struct hyp_rb_per_cpu *cpu_buffer)
{
u32 prev_status;
@@ -272,6 +272,8 @@ static void hyp_rb_disable_writing(struct hyp_rb_per_cpu *cpu_buffer)
HYP_RB_READY,
HYP_RB_UNAVAILABLE);
} while (prev_status == HYP_RB_WRITING);
+
+ return prev_status;
}
static int hyp_rb_enable_writing(struct hyp_rb_per_cpu *cpu_buffer)
@@ -284,6 +286,44 @@ static int hyp_rb_enable_writing(struct hyp_rb_per_cpu *cpu_buffer)
return 0;
}
+static int hyp_rb_reset(struct hyp_rb_per_cpu *cpu_buffer)
+{
+ struct hyp_buffer_page *bpage;
+ u32 prev_status;
+
+ if (!hyp_rb_loaded(cpu_buffer))
+ return -ENODEV;
+
+ prev_status = hyp_rb_disable_writing(cpu_buffer);
+
+ while (!hyp_bpage_is_head(cpu_buffer->head_page))
+ cpu_buffer->head_page = hyp_bpage_next_page(cpu_buffer->head_page);
+
+ bpage = cpu_buffer->tail_page = cpu_buffer->head_page;
+ do {
+ hyp_bpage_reset(bpage);
+ bpage = hyp_bpage_next_page(bpage);
+ } while (bpage != cpu_buffer->head_page);
+
+ hyp_bpage_reset(cpu_buffer->reader_page);
+
+ cpu_buffer->last_overrun = 0;
+ cpu_buffer->write_stamp = 0;
+
+ cpu_buffer->meta->reader.read = 0;
+ cpu_buffer->meta->reader.lost_events = 0;
+ cpu_buffer->meta->entries = 0;
+ cpu_buffer->meta->overrun = 0;
+ cpu_buffer->meta->read = 0;
+ meta_pages_lost(cpu_buffer->meta) = 0;
+ meta_pages_touched(cpu_buffer->meta) = 0;
+
+ if (prev_status == HYP_RB_READY)
+ hyp_rb_enable_writing(cpu_buffer);
+
+ return 0;
+}
+
static void hyp_rb_teardown(struct hyp_rb_per_cpu *cpu_buffer)
{
int i;
@@ -572,3 +612,17 @@ int __pkvm_enable_tracing(bool enable)
return ret;
}
+
+int __pkvm_reset_tracing(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (cpu >= hyp_nr_cpus)
+ return -EINVAL;
+
+ hyp_spin_lock(&trace_rb_lock);
+ ret = hyp_rb_reset(per_cpu_ptr(&trace_rb, cpu));
+ hyp_spin_unlock(&trace_rb_lock);
+
+ return ret;
+}
diff --git a/arch/arm64/kvm/hyp_trace.c b/arch/arm64/kvm/hyp_trace.c
index 38d97e34eada..03a6813cbe66 100644
--- a/arch/arm64/kvm/hyp_trace.c
+++ b/arch/arm64/kvm/hyp_trace.c
@@ -200,6 +200,11 @@ static int __get_reader_page(int cpu)
return kvm_call_hyp_nvhe(__pkvm_swap_reader_tracing, cpu);
}
+static int __reset(int cpu)
+{
+ return kvm_call_hyp_nvhe(__pkvm_reset_tracing, cpu);
+}
+
static void hyp_trace_free_pages(struct hyp_trace_desc *desc)
{
struct rb_page_desc *rb_desc;
@@ -361,6 +366,7 @@ static int hyp_trace_buffer_load(struct hyp_trace_buffer *hyp_buffer, size_t siz
hyp_buffer->remote.pdesc = &desc->page_desc;
hyp_buffer->remote.get_reader_page = __get_reader_page;
+ hyp_buffer->remote.reset = __reset;
hyp_buffer->trace_buffer = ring_buffer_remote(&hyp_buffer->remote);
if (!hyp_buffer->trace_buffer) {
ret = -ENOMEM;
@@ -825,6 +831,49 @@ static const struct file_operations hyp_trace_raw_fops = {
.release = hyp_trace_raw_release,
};
+static void hyp_trace_reset(int cpu)
+{
+ struct hyp_trace_buffer *hyp_buffer = &hyp_trace_buffer;
+
+ mutex_lock(&hyp_buffer->lock);
+
+ if (!hyp_trace_buffer_loaded(hyp_buffer))
+ goto out;
+
+ if (cpu == RING_BUFFER_ALL_CPUS)
+ ring_buffer_reset(hyp_buffer->trace_buffer);
+ else
+ ring_buffer_reset_cpu(hyp_buffer->trace_buffer, cpu);
+
+out:
+ mutex_unlock(&hyp_buffer->lock);
+}
+
+static int hyp_trace_open(struct inode *inode, struct file *file)
+{
+ int cpu = (s64)inode->i_private;
+
+ if (file->f_mode & FMODE_WRITE) {
+ hyp_trace_reset(cpu);
+
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+static ssize_t hyp_trace_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ return count;
+}
+
+static const struct file_operations hyp_trace_fops = {
+ .open = hyp_trace_open,
+ .write = hyp_trace_write,
+ .release = NULL,
+};
+
static int hyp_trace_clock_show(struct seq_file *m, void *v)
{
seq_puts(m, "[boot]\n");
@@ -857,6 +906,9 @@ int hyp_trace_init_tracefs(void)
tracefs_create_file("trace_pipe", TRACEFS_MODE_WRITE, root,
(void *)RING_BUFFER_ALL_CPUS, &hyp_trace_pipe_fops);
+ tracefs_create_file("trace", TRACEFS_MODE_WRITE, root,
+ (void *)RING_BUFFER_ALL_CPUS, &hyp_trace_fops);
+
tracefs_create_file("trace_clock", TRACEFS_MODE_READ, root, NULL,
&hyp_trace_clock_fops);
@@ -882,6 +934,9 @@ int hyp_trace_init_tracefs(void)
tracefs_create_file("trace_pipe_raw", TRACEFS_MODE_READ, per_cpu_dir,
(void *)cpu, &hyp_trace_pipe_fops);
+
+ tracefs_create_file("trace", TRACEFS_MODE_READ, per_cpu_dir,
+ (void *)cpu, &hyp_trace_fops);
}
return 0;
--
2.48.1.601.g30ceb7b040-goog
Powered by blists - more mailing lists