Date:   Tue, 20 Jun 2023 15:55:14 +0000
From:   Yang Jihong <yangjihong1@...wei.com>
To:     <peterz@...radead.org>, <mingo@...hat.com>, <acme@...nel.org>,
        <mark.rutland@....com>, <alexander.shishkin@...ux.intel.com>,
        <jolsa@...nel.org>, <namhyung@...nel.org>, <irogers@...gle.com>,
        <adrian.hunter@...el.com>, <linux-perf-users@...r.kernel.org>,
        <linux-kernel@...r.kernel.org>
CC:     <yangjihong1@...wei.com>
Subject: [PATCH 1/2] perf/core: Let perf_iterate_sb_cpu() output side-band events to events on all online CPUs

Add a bool parameter `system_wide` to perf_iterate_sb_cpu() so that
side-band events can be output to events on all online CPUs rather than
only to events on the current CPU.
All existing callers pass false, so there is no functional change.
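
For illustration only (not a call site introduced by this patch), a
hypothetical caller that wants a side-band record delivered to the
matching events on every online CPU would pass true for the new
parameter, for example:

	/* hypothetical call site; every call site touched by this patch passes false */
	perf_iterate_sb(perf_event_mmap_output, mmap_event, NULL, true);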

Signed-off-by: Yang Jihong <yangjihong1@...wei.com>
---
 kernel/events/core.c | 58 +++++++++++++++++++++++++++++++-------------
 1 file changed, 41 insertions(+), 17 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index db016e418931..66dbca1ba577 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7886,9 +7886,9 @@ perf_iterate_ctx(struct perf_event_context *ctx,
 	}
 }
 
-static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
+static void perf_iterate_sb_pel(perf_iterate_f output, void *data,
+				 struct pmu_event_list *pel, bool system_wide)
 {
-	struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
 	struct perf_event *event;
 
 	list_for_each_entry_rcu(event, &pel->list, sb_list) {
@@ -7902,12 +7902,30 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
 
 		if (event->state < PERF_EVENT_STATE_INACTIVE)
 			continue;
-		if (!event_filter_match(event))
+		if (!system_wide && !event_filter_match(event))
 			continue;
 		output(event, data);
 	}
 }
 
+static void perf_iterate_sb_cpu(perf_iterate_f output, void *data,
+				bool system_wide)
+{
+	unsigned int i;
+
+	if (system_wide) {
+		for_each_online_cpu(i) {
+			perf_iterate_sb_pel(output, data,
+					    per_cpu_ptr(&pmu_sb_events, i),
+					    system_wide);
+		}
+	} else {
+		perf_iterate_sb_pel(output, data,
+				    this_cpu_ptr(&pmu_sb_events),
+				    system_wide);
+	}
+}
+
 /*
  * Iterate all events that need to receive side-band events.
  *
@@ -7916,7 +7934,8 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
  */
 static void
 perf_iterate_sb(perf_iterate_f output, void *data,
-	       struct perf_event_context *task_ctx)
+		struct perf_event_context *task_ctx,
+		bool system_wide)
 {
 	struct perf_event_context *ctx;
 
@@ -7933,7 +7952,7 @@ perf_iterate_sb(perf_iterate_f output, void *data,
 		goto done;
 	}
 
-	perf_iterate_sb_cpu(output, data);
+	perf_iterate_sb_cpu(output, data, system_wide);
 
 	ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
@@ -8174,8 +8193,9 @@ static void perf_event_task(struct task_struct *task,
 	};
 
 	perf_iterate_sb(perf_event_task_output,
-		       &task_event,
-		       task_ctx);
+			&task_event,
+			task_ctx,
+			false);
 }
 
 void perf_event_fork(struct task_struct *task)
@@ -8254,8 +8274,9 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
 
 	perf_iterate_sb(perf_event_comm_output,
-		       comm_event,
-		       NULL);
+			comm_event,
+			NULL,
+			false);
 }
 
 void perf_event_comm(struct task_struct *task, bool exec)
@@ -8410,7 +8431,8 @@ void perf_event_namespaces(struct task_struct *task)
 
 	perf_iterate_sb(perf_event_namespaces_output,
 			&namespaces_event,
-			NULL);
+			NULL,
+			false);
 }
 
 /*
@@ -8505,7 +8527,8 @@ static void perf_event_cgroup(struct cgroup *cgrp)
 
 	perf_iterate_sb(perf_event_cgroup_output,
 			&cgroup_event,
-			NULL);
+			NULL,
+			false);
 
 	kfree(pathname);
 }
@@ -8730,8 +8753,9 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 		build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size);
 
 	perf_iterate_sb(perf_event_mmap_output,
-		       mmap_event,
-		       NULL);
+			mmap_event,
+			NULL,
+			false);
 
 	kfree(buf);
 }
@@ -9020,7 +9044,7 @@ static void perf_event_switch(struct task_struct *task,
 				PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
 	}
 
-	perf_iterate_sb(perf_event_switch_output, &switch_event, NULL);
+	perf_iterate_sb(perf_event_switch_output, &switch_event, NULL, false);
 }
 
 /*
@@ -9149,7 +9173,7 @@ void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
 		},
 	};
 
-	perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL);
+	perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL, false);
 	return;
 err:
 	WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type);
@@ -9261,7 +9285,7 @@ void perf_event_bpf_event(struct bpf_prog *prog,
 	BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64));
 
 	memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE);
-	perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL);
+	perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL, false);
 }
 
 struct perf_text_poke_event {
@@ -9345,7 +9369,7 @@ void perf_event_text_poke(const void *addr, const void *old_bytes,
 		},
 	};
 
-	perf_iterate_sb(perf_event_text_poke_output, &text_poke_event, NULL);
+	perf_iterate_sb(perf_event_text_poke_output, &text_poke_event, NULL, false);
 }
 
 void perf_event_itrace_started(struct perf_event *event)
-- 
2.30.GIT
