Message-ID: <20240522215616.762195-2-namhyung@kernel.org>
Date: Wed, 22 May 2024 14:56:11 -0700
From: Namhyung Kim <namhyung@...nel.org>
To: Arnaldo Carvalho de Melo <acme@...nel.org>,
Ian Rogers <irogers@...gle.com>,
Kan Liang <kan.liang@...ux.intel.com>
Cc: Jiri Olsa <jolsa@...nel.org>,
Adrian Hunter <adrian.hunter@...el.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
LKML <linux-kernel@...r.kernel.org>,
linux-perf-users@...r.kernel.org,
KP Singh <kpsingh@...nel.org>,
Stephane Eranian <eranian@...gle.com>,
Song Liu <song@...nel.org>,
bpf@...r.kernel.org
Subject: [PATCH 1/6] perf bpf-filter: Make filters map a single entry hashmap
Make the value of the filters map an array of filter entries.  This is
to support multiple filter entries in the map later.

No functional changes intended.

Signed-off-by: Namhyung Kim <namhyung@...nel.org>
---
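For illustration, the new map shape boils down to the following
simplified sketch (stand-in names: filter_entry and the MAX_FILTERS
value here are placeholders, not the exact perf definitions):

  /* sketch.bpf.c - single-slot hash map holding a whole filter array */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  #define MAX_FILTERS  64                  /* placeholder value */

  struct filter_entry {                    /* stand-in for perf_bpf_filter_entry */
          __u32 op;
          __u32 part;
          __u64 flags;
          __u64 value;
  };

  struct {
          __uint(type, BPF_MAP_TYPE_HASH);
          __type(key, int);
          __type(value, struct filter_entry[MAX_FILTERS]);
          __uint(max_entries, 1);
  } filters SEC(".maps");

  char LICENSE[] SEC("license") = "Dual BSD/GPL";

User space builds the whole array locally and writes it with a single
update at key 0, e.g.:

  int key = 0;
  struct filter_entry entries[MAX_FILTERS] = {};

  /* fill entries[]; terminate with PBF_OP_DONE when fewer than MAX_FILTERS */
  bpf_map_update_elem(bpf_map__fd(skel->maps.filters), &key, entries, BPF_ANY);

which is what the bpf-filter.c hunk below does; the BPF program then
performs one lookup and walks the array until it hits PBF_OP_DONE.
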
tools/perf/util/bpf-filter.c | 81 ++++++++++++++------
tools/perf/util/bpf_skel/sample-filter.h | 3 +-
tools/perf/util/bpf_skel/sample_filter.bpf.c | 37 +++++----
3 files changed, 78 insertions(+), 43 deletions(-)
diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index b51544996046..12e9c7dbb4dd 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -87,71 +87,102 @@ static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *
int perf_bpf_filter__prepare(struct evsel *evsel)
{
- int i, x, y, fd;
+ int i, x, y, fd, ret;
struct sample_filter_bpf *skel;
struct bpf_program *prog;
struct bpf_link *link;
struct perf_bpf_filter_expr *expr;
+ struct perf_bpf_filter_entry *entry;
+
+ entry = calloc(MAX_FILTERS, sizeof(*entry));
+ if (entry == NULL)
+ return -1;
skel = sample_filter_bpf__open_and_load();
if (!skel) {
pr_err("Failed to load perf sample-filter BPF skeleton\n");
- return -1;
+ ret = -EPERM;
+ goto err;
}
i = 0;
fd = bpf_map__fd(skel->maps.filters);
list_for_each_entry(expr, &evsel->bpf_filters, list) {
- struct perf_bpf_filter_entry entry = {
- .op = expr->op,
- .part = expr->part,
- .flags = expr->sample_flags,
- .value = expr->val,
- };
+ if (check_sample_flags(evsel, expr) < 0) {
+ ret = -EINVAL;
+ goto err;
+ }
- if (check_sample_flags(evsel, expr) < 0)
- return -1;
+ if (i == MAX_FILTERS) {
+ ret = -E2BIG;
+ goto err;
+ }
- bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
+ entry[i].op = expr->op;
+ entry[i].part = expr->part;
+ entry[i].flags = expr->sample_flags;
+ entry[i].value = expr->val;
i++;
if (expr->op == PBF_OP_GROUP_BEGIN) {
struct perf_bpf_filter_expr *group;
list_for_each_entry(group, &expr->groups, list) {
- struct perf_bpf_filter_entry group_entry = {
- .op = group->op,
- .part = group->part,
- .flags = group->sample_flags,
- .value = group->val,
- };
- bpf_map_update_elem(fd, &i, &group_entry, BPF_ANY);
+ if (i == MAX_FILTERS) {
+ ret = -E2BIG;
+ goto err;
+ }
+
+ entry[i].op = group->op;
+ entry[i].part = group->part;
+ entry[i].flags = group->sample_flags;
+ entry[i].value = group->val;
i++;
}
- memset(&entry, 0, sizeof(entry));
- entry.op = PBF_OP_GROUP_END;
- bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
+ if (i == MAX_FILTERS) {
+ ret = -E2BIG;
+ goto err;
+ }
+
+ entry[i].op = PBF_OP_GROUP_END;
i++;
}
}
- if (i > MAX_FILTERS) {
- pr_err("Too many filters: %d (max = %d)\n", i, MAX_FILTERS);
- return -1;
+ if (i < MAX_FILTERS) {
+ /* to terminate the loop early */
+ entry[i].op = PBF_OP_DONE;
+ i++;
+ }
+
+ /* The filters map has only one entry for now */
+ i = 0;
+ if (bpf_map_update_elem(fd, &i, entry, BPF_ANY) < 0) {
+ ret = -errno;
+ pr_err("Failed to update the filter map\n");
+ goto err;
}
+
prog = skel->progs.perf_sample_filter;
for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
link = bpf_program__attach_perf_event(prog, FD(evsel, x, y));
if (IS_ERR(link)) {
pr_err("Failed to attach perf sample-filter program\n");
- return PTR_ERR(link);
+ ret = PTR_ERR(link);
+ goto err;
}
}
}
+ free(entry);
evsel->bpf_skel = skel;
return 0;
+
+err:
+ free(entry);
+ sample_filter_bpf__destroy(skel);
+ return ret;
}
int perf_bpf_filter__destroy(struct evsel *evsel)
diff --git a/tools/perf/util/bpf_skel/sample-filter.h b/tools/perf/util/bpf_skel/sample-filter.h
index 2e96e1ab084a..cf18f570eef4 100644
--- a/tools/perf/util/bpf_skel/sample-filter.h
+++ b/tools/perf/util/bpf_skel/sample-filter.h
@@ -14,6 +14,7 @@ enum perf_bpf_filter_op {
PBF_OP_AND,
PBF_OP_GROUP_BEGIN,
PBF_OP_GROUP_END,
+ PBF_OP_DONE,
};
/* BPF map entry for filtering */
@@ -24,4 +25,4 @@ struct perf_bpf_filter_entry {
__u64 value;
};
-#endif /* PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H */
\ No newline at end of file
+#endif /* PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H */
diff --git a/tools/perf/util/bpf_skel/sample_filter.bpf.c b/tools/perf/util/bpf_skel/sample_filter.bpf.c
index fb94f5280626..5f17cd6458b7 100644
--- a/tools/perf/util/bpf_skel/sample_filter.bpf.c
+++ b/tools/perf/util/bpf_skel/sample_filter.bpf.c
@@ -9,10 +9,10 @@
/* BPF map that will be filled by user space */
struct filters {
- __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
- __type(value, struct perf_bpf_filter_entry);
- __uint(max_entries, MAX_FILTERS);
+ __type(value, struct perf_bpf_filter_entry[MAX_FILTERS]);
+ __uint(max_entries, 1);
} filters SEC(".maps");
int dropped;
@@ -144,35 +144,35 @@ int perf_sample_filter(void *ctx)
kctx = bpf_cast_to_kern_ctx(ctx);
- for (i = 0; i < MAX_FILTERS; i++) {
- int key = i; /* needed for verifier :( */
+ i = 0;
+ entry = bpf_map_lookup_elem(&filters, &i);
+ if (entry == NULL)
+ goto drop;
- entry = bpf_map_lookup_elem(&filters, &key);
- if (entry == NULL)
- break;
- sample_data = perf_get_sample(kctx, entry);
+ for (i = 0; i < MAX_FILTERS; i++) {
+ sample_data = perf_get_sample(kctx, &entry[i]);
- switch (entry->op) {
+ switch (entry[i].op) {
case PBF_OP_EQ:
- CHECK_RESULT(sample_data, ==, entry->value)
+ CHECK_RESULT(sample_data, ==, entry[i].value)
break;
case PBF_OP_NEQ:
- CHECK_RESULT(sample_data, !=, entry->value)
+ CHECK_RESULT(sample_data, !=, entry[i].value)
break;
case PBF_OP_GT:
- CHECK_RESULT(sample_data, >, entry->value)
+ CHECK_RESULT(sample_data, >, entry[i].value)
break;
case PBF_OP_GE:
- CHECK_RESULT(sample_data, >=, entry->value)
+ CHECK_RESULT(sample_data, >=, entry[i].value)
break;
case PBF_OP_LT:
- CHECK_RESULT(sample_data, <, entry->value)
+ CHECK_RESULT(sample_data, <, entry[i].value)
break;
case PBF_OP_LE:
- CHECK_RESULT(sample_data, <=, entry->value)
+ CHECK_RESULT(sample_data, <=, entry[i].value)
break;
case PBF_OP_AND:
- CHECK_RESULT(sample_data, &, entry->value)
+ CHECK_RESULT(sample_data, &, entry[i].value)
break;
case PBF_OP_GROUP_BEGIN:
in_group = 1;
@@ -183,6 +183,9 @@ int perf_sample_filter(void *ctx)
goto drop;
in_group = 0;
break;
+ case PBF_OP_DONE:
+ /* no failures so far, accept it */
+ return 1;
}
}
/* generate sample data */
--
2.45.1.288.g0e0cd299f1-goog