Message-Id: <20230219061329.1001079-9-namhyung@kernel.org>
Date: Sat, 18 Feb 2023 22:13:29 -0800
From: Namhyung Kim <namhyung@...nel.org>
To: Arnaldo Carvalho de Melo <acme@...nel.org>,
Jiri Olsa <jolsa@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Andi Kleen <ak@...ux.intel.com>,
Kan Liang <kan.liang@...ux.intel.com>,
Song Liu <song@...nel.org>,
Stephane Eranian <eranian@...gle.com>,
Ravi Bangoria <ravi.bangoria@....com>,
Leo Yan <leo.yan@...aro.org>,
James Clark <james.clark@....com>, Hao Luo <haoluo@...gle.com>,
LKML <linux-kernel@...r.kernel.org>,
linux-perf-users@...r.kernel.org, bpf@...r.kernel.org
Subject: [PATCH 8/8] perf bpf filter: Add logical OR operator
It supports two or more expressions connected as a group; the group
result is considered true when at least one of them evaluates to true.
The new group operators (GROUP_BEGIN and GROUP_END) are added to set up
and check the group condition.  As nested groups are not allowed, the
group state is kept in local variables.

For example, the following records samples only if the data-source
memory level is the L2 cache or the weight value is greater than 30:
$ sudo ./perf record -adW -e cpu/mem-loads/pp \
> --filter 'mem_lvl == l2 || weight > 30' -- sleep 1
$ sudo ./perf script -F data_src,weight
10668100842 |OP LOAD|LVL L3 or L3 hit|SNP None|TLB L1 or L2 hit|LCK No|BLK N/A 47
11868100242 |OP LOAD|LVL LFB/MAB or LFB/MAB hit|SNP None|TLB L1 or L2 hit|LCK No|BLK N/A 57
10668100842 |OP LOAD|LVL L3 or L3 hit|SNP None|TLB L1 or L2 hit|LCK No|BLK N/A 56
10650100842 |OP LOAD|LVL L3 or L3 hit|SNP None|TLB L2 miss|LCK No|BLK N/A 144
10468100442 |OP LOAD|LVL L2 or L2 hit|SNP None|TLB L1 or L2 hit|LCK No|BLK N/A 16
10468100442 |OP LOAD|LVL L2 or L2 hit|SNP None|TLB L1 or L2 hit|LCK No|BLK N/A 20
11868100242 |OP LOAD|LVL LFB/MAB or LFB/MAB hit|SNP None|TLB L1 or L2 hit|LCK No|BLK N/A 189
1026a100142 |OP LOAD|LVL L1 or L1 hit|SNP None|TLB L1 or L2 hit|LCK Yes|BLK N/A 193
10468100442 |OP LOAD|LVL L2 or L2 hit|SNP None|TLB L1 or L2 hit|LCK No|BLK N/A 18
...
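
A rough sketch of how the parser lays this out (illustrative only: the
FAKE_* flag values below are made-up placeholders, not the real
PERF_SAMPLE_* bits, and the .value encodings are invented; only the op
sequence and the group count come from the grammar in bpf-filter.y and
the entry layout in perf_bpf_filter__prepare(), using
struct perf_bpf_filter_entry from sample-filter.h):

  #define FAKE_DATA_SRC  0x1   /* placeholder for the data_src sample flag */
  #define FAKE_WEIGHT    0x2   /* placeholder for the weight sample flag */

  /* 'mem_lvl == l2 || weight > 30' as four consecutive map entries */
  struct perf_bpf_filter_entry entries[] = {
          { .op = PBF_OP_GROUP_BEGIN, .value = 2 },  /* 2 terms in group */
          { .op = PBF_OP_EQ, .flags = FAKE_DATA_SRC, .value = 4 },
          { .op = PBF_OP_GT, .flags = FAKE_WEIGHT,   .value = 30 },
          { .op = PBF_OP_GROUP_END },
  };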
Signed-off-by: Namhyung Kim <namhyung@...nel.org>
---
tools/perf/util/bpf-filter.c | 25 +++++++++++++
tools/perf/util/bpf-filter.h | 1 +
tools/perf/util/bpf-filter.l | 1 +
tools/perf/util/bpf-filter.y | 25 +++++++++++--
tools/perf/util/bpf_skel/sample-filter.h | 6 ++--
tools/perf/util/bpf_skel/sample_filter.bpf.c | 38 +++++++++++++-------
6 files changed, 79 insertions(+), 17 deletions(-)
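
Not part of the patch, but for review convenience: a minimal userspace
model of the evaluation loop added to sample_filter.bpf.c below,
assuming a data[] array that stands in for the perf_get_sample()
results (only the EQ and GT ops are shown):

  enum op { OP_EQ, OP_GT, OP_GROUP_BEGIN, OP_GROUP_END };
  struct entry { enum op op; unsigned long long value; };

  /* returns 1 to keep the sample, 0 to drop it */
  static int keep_sample(const struct entry *e,
                         const unsigned long long *data, int n)
  {
          int in_group = 0, group_result = 0;

          for (int i = 0; i < n; i++) {
                  switch (e[i].op) {
                  case OP_GROUP_BEGIN:
                          in_group = 1;
                          group_result = 0;
                          break;
                  case OP_GROUP_END:
                          if (group_result == 0)
                                  return 0;  /* no term in the group matched */
                          in_group = 0;
                          break;
                  case OP_EQ:
                          if (data[i] == e[i].value) {
                                  if (in_group)
                                          group_result = 1;
                          } else if (!in_group) {
                                  return 0;  /* bare (ungrouped) term failed */
                          }
                          break;
                  case OP_GT:
                          if (data[i] > e[i].value) {
                                  if (in_group)
                                          group_result = 1;
                          } else if (!in_group) {
                                  return 0;
                          }
                          break;
                  }
          }
          return 1;
  }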
diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index 2e02dc965dd9..79d79992e608 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -49,8 +49,32 @@ int perf_bpf_filter__prepare(struct evsel *evsel)
};
bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
i++;
+
+ if (expr->op == PBF_OP_GROUP_BEGIN) {
+ struct perf_bpf_filter_expr *group;
+
+ list_for_each_entry(group, &expr->groups, list) {
+ struct perf_bpf_filter_entry group_entry = {
+ .op = group->op,
+ .part = group->part,
+ .flags = group->sample_flags,
+ .value = group->val,
+ };
+ bpf_map_update_elem(fd, &i, &group_entry, BPF_ANY);
+ i++;
+ }
+
+ memset(&entry, 0, sizeof(entry));
+ entry.op = PBF_OP_GROUP_END;
+ bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
+ i++;
+ }
}
+ if (i > MAX_FILTERS) {
+ pr_err("Too many filters: %d (max = %d)\n", i, MAX_FILTERS);
+ return -1;
+ }
prog = skel->progs.perf_sample_filter;
for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
@@ -96,6 +120,7 @@ struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(unsigned long sample_flag
expr->part = part;
expr->op = op;
expr->val = val;
+ INIT_LIST_HEAD(&expr->groups);
}
return expr;
}
diff --git a/tools/perf/util/bpf-filter.h b/tools/perf/util/bpf-filter.h
index 4fb33d296d9c..cf2b605e9121 100644
--- a/tools/perf/util/bpf-filter.h
+++ b/tools/perf/util/bpf-filter.h
@@ -8,6 +8,7 @@
struct perf_bpf_filter_expr {
struct list_head list;
+ struct list_head groups;
enum perf_bpf_filter_op op;
int part;
unsigned long sample_flags;
diff --git a/tools/perf/util/bpf-filter.l b/tools/perf/util/bpf-filter.l
index 3af9331302cf..817531428530 100644
--- a/tools/perf/util/bpf-filter.l
+++ b/tools/perf/util/bpf-filter.l
@@ -141,6 +141,7 @@ hops2 { return constant(PERF_MEM_HOPS_2); }
hops3 { return constant(PERF_MEM_HOPS_3); }
"," { return ','; }
+"||" { return BFT_LOGICAL_OR; }
. { }
%%
diff --git a/tools/perf/util/bpf-filter.y b/tools/perf/util/bpf-filter.y
index 0c6035937a6e..19de79131ad3 100644
--- a/tools/perf/util/bpf-filter.y
+++ b/tools/perf/util/bpf-filter.y
@@ -27,8 +27,8 @@ static void perf_bpf_filter_error(struct list_head *expr __maybe_unused,
struct perf_bpf_filter_expr *expr;
}
-%token BFT_SAMPLE BFT_OP BFT_ERROR BFT_NUM
-%type <expr> filter_term
+%token BFT_SAMPLE BFT_OP BFT_ERROR BFT_NUM BFT_LOGICAL_OR
+%type <expr> filter_term filter_expr
%destructor { free ($$); } <expr>
%type <sample> BFT_SAMPLE
%type <op> BFT_OP
@@ -48,6 +48,27 @@ filter_term
}
filter_term:
+filter_term BFT_LOGICAL_OR filter_expr
+{
+ struct perf_bpf_filter_expr *expr;
+
+ if ($1->op == PBF_OP_GROUP_BEGIN) {
+ expr = $1;
+ } else {
+ expr = perf_bpf_filter_expr__new(0, 0, PBF_OP_GROUP_BEGIN, 1);
+ list_add_tail(&$1->list, &expr->groups);
+ }
+ expr->val++;
+ list_add_tail(&$3->list, &expr->groups);
+ $$ = expr;
+}
+|
+filter_expr
+{
+ $$ = $1;
+}
+
+filter_expr:
BFT_SAMPLE BFT_OP BFT_NUM
{
$$ = perf_bpf_filter_expr__new($1.type, $1.part, $2, $3);
diff --git a/tools/perf/util/bpf_skel/sample-filter.h b/tools/perf/util/bpf_skel/sample-filter.h
index 6b9fd554ad7b..2e96e1ab084a 100644
--- a/tools/perf/util/bpf_skel/sample-filter.h
+++ b/tools/perf/util/bpf_skel/sample-filter.h
@@ -1,7 +1,7 @@
#ifndef PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H
#define PERF_UTIL_BPF_SKEL_SAMPLE_FILTER_H
-#define MAX_FILTERS 32
+#define MAX_FILTERS 64
/* supported filter operations */
enum perf_bpf_filter_op {
@@ -11,7 +11,9 @@ enum perf_bpf_filter_op {
PBF_OP_GE,
PBF_OP_LT,
PBF_OP_LE,
- PBF_OP_AND
+ PBF_OP_AND,
+ PBF_OP_GROUP_BEGIN,
+ PBF_OP_GROUP_END,
};
/* BPF map entry for filtering */
diff --git a/tools/perf/util/bpf_skel/sample_filter.bpf.c b/tools/perf/util/bpf_skel/sample_filter.bpf.c
index 0148b47de7b9..4b4269054ed8 100644
--- a/tools/perf/util/bpf_skel/sample_filter.bpf.c
+++ b/tools/perf/util/bpf_skel/sample_filter.bpf.c
@@ -91,6 +91,14 @@ static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
return 0;
}
+#define CHECK_RESULT(data, op, val) \
+ if (!(data op val)) { \
+ if (!in_group) \
+ goto drop; \
+ } else if (in_group) { \
+ group_result = 1; \
+ }
+
/* BPF program to be called from perf event overflow handler */
SEC("perf_event")
int perf_sample_filter(void *ctx)
@@ -98,6 +106,8 @@ int perf_sample_filter(void *ctx)
struct bpf_perf_event_data_kern *kctx;
struct perf_bpf_filter_entry *entry;
__u64 sample_data;
+ int in_group = 0;
+ int group_result = 0;
int i;
kctx = bpf_cast_to_kern_ctx(ctx);
@@ -112,32 +122,34 @@ int perf_sample_filter(void *ctx)
switch (entry->op) {
case PBF_OP_EQ:
- if (!(sample_data == entry->value))
- goto drop;
+ CHECK_RESULT(sample_data, ==, entry->value)
break;
case PBF_OP_NEQ:
- if (!(sample_data != entry->value))
- goto drop;
+ CHECK_RESULT(sample_data, !=, entry->value)
break;
case PBF_OP_GT:
- if (!(sample_data > entry->value))
- goto drop;
+ CHECK_RESULT(sample_data, >, entry->value)
break;
case PBF_OP_GE:
- if (!(sample_data >= entry->value))
- goto drop;
+ CHECK_RESULT(sample_data, >=, entry->value)
break;
case PBF_OP_LT:
- if (!(sample_data < entry->value))
- goto drop;
+ CHECK_RESULT(sample_data, <, entry->value)
break;
case PBF_OP_LE:
- if (!(sample_data <= entry->value))
- goto drop;
+ CHECK_RESULT(sample_data, <=, entry->value)
break;
case PBF_OP_AND:
- if (!(sample_data & entry->value))
+ CHECK_RESULT(sample_data, &, entry->value)
+ break;
+ case PBF_OP_GROUP_BEGIN:
+ in_group = 1;
+ group_result = 0;
+ break;
+ case PBF_OP_GROUP_END:
+ if (group_result == 0)
goto drop;
+ in_group = 0;
break;
}
}
--
2.39.2.637.g21b0678d19-goog