Date:   Thu, 22 Mar 2018 08:41:55 -0700
From:   Alexei Starovoitov <ast@...com>
To:     Daniel Borkmann <daniel@...earbox.net>, <davem@...emloft.net>
CC:     <torvalds@...ux-foundation.org>, <peterz@...radead.org>,
        <rostedt@...dmis.org>, <netdev@...r.kernel.org>,
        <kernel-team@...com>, <linux-api@...r.kernel.org>
Subject: Re: [PATCH v2 bpf-next 5/8] bpf: introduce BPF_RAW_TRACEPOINT

On 3/22/18 2:43 AM, Daniel Borkmann wrote:
> On 03/21/2018 07:54 PM, Alexei Starovoitov wrote:
> [...]
>> @@ -546,6 +556,53 @@ extern void ftrace_profile_free_filter(struct perf_event *event);
>>  void perf_trace_buf_update(void *record, u16 type);
>>  void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
>>
>> +void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
>> +void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
>> +void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3);
>> +void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4);
>> +void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4, u64 arg5);
>> +void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
>> +void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
>> +void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		    u64 arg8);
>> +void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		    u64 arg8, u64 arg9);
>> +void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10);
>> +void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
>> +void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
>> +void bpf_trace_run13(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12,
>> +		     u64 arg13);
>> +void bpf_trace_run14(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12,
>> +		     u64 arg13, u64 arg14);
>> +void bpf_trace_run15(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12,
>> +		     u64 arg13, u64 arg14, u64 arg15);
>> +void bpf_trace_run16(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12,
>> +		     u64 arg13, u64 arg14, u64 arg15, u64 arg16);
>> +void bpf_trace_run17(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12,
>> +		     u64 arg13, u64 arg14, u64 arg15, u64 arg16, u64 arg17);
>>  void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
>>  			       struct trace_event_call *call, u64 count,
>>  			       struct pt_regs *regs, struct hlist_head *head,
> [...]
>> @@ -896,3 +976,206 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
>>
>>  	return ret;
>>  }
>> +
>> +static __always_inline
>> +void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
>> +{
>> +	rcu_read_lock();
>> +	preempt_disable();
>> +	(void) BPF_PROG_RUN(prog, args);
>> +	preempt_enable();
>> +	rcu_read_unlock();
>> +}
>> +
>> +#define EVAL1(FN, X) FN(X)
>> +#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
>> +#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
>> +#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
>> +#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
>> +#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
>> +
>> +#define COPY(X) args[X - 1] = arg##X;
>> +
>> +void bpf_trace_run1(struct bpf_prog *prog, u64 arg1)
>> +{
>> +	u64 args[1];
>> +
>> +	EVAL1(COPY, 1);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run1);
>> +void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2)
>> +{
>> +	u64 args[2];
>> +
>> +	EVAL2(COPY, 1, 2);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run2);
>> +void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3)
>> +{
>> +	u64 args[3];
>> +
>> +	EVAL3(COPY, 1, 2, 3);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run3);
>> +void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4)
>> +{
>> +	u64 args[4];
>> +
>> +	EVAL4(COPY, 1, 2, 3, 4);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run4);
>> +void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4, u64 arg5)
>> +{
>> +	u64 args[5];
>> +
>> +	EVAL5(COPY, 1, 2, 3, 4, 5);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run5);
>> +void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4, u64 arg5, u64 arg6)
>> +{
>> +	u64 args[6];
>> +
>> +	EVAL6(COPY, 1, 2, 3, 4, 5, 6);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run6);
>> +void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7)
>> +{
>> +	u64 args[7];
>> +
>> +	EVAL6(COPY, 1, 2, 3, 4, 5, 6);
>> +	EVAL1(COPY, 7);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run7);
>> +void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		    u64 arg8)
>> +{
>> +	u64 args[8];
>> +
>> +	EVAL6(COPY, 1, 2, 3, 4, 5, 6);
>> +	EVAL2(COPY, 7, 8);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run8);
>> +void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		    u64 arg8, u64 arg9)
>> +{
>> +	u64 args[9];
>> +
>> +	EVAL6(COPY, 1, 2, 3, 4, 5, 6);
>> +	EVAL3(COPY, 7, 8, 9);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run9);
>> +void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10)
>> +{
>> +	u64 args[10];
>> +
>> +	EVAL6(COPY, 1, 2, 3, 4, 5, 6);
>> +	EVAL4(COPY, 7, 8, 9, 10);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run10);
>> +void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10, u64 arg11)
>> +{
>> +	u64 args[11];
>> +
>> +	EVAL6(COPY, 1, 2, 3, 4, 5, 6);
>> +	EVAL5(COPY, 7, 8, 9, 10, 11);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run11);
>> +void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12)
>> +{
>> +	u64 args[12];
>> +
>> +	EVAL6(COPY, 1, 2, 3, 4, 5, 6);
>> +	EVAL6(COPY, 7, 8, 9, 10, 11, 12);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run12);
>> +void bpf_trace_run17(struct bpf_prog *prog, u64 arg1, u64 arg2,
>> +		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
>> +		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12,
>> +		     u64 arg13, u64 arg14, u64 arg15, u64 arg16, u64 arg17)
>> +{
>> +	u64 args[17];
>> +
>> +	EVAL6(COPY, 1, 2, 3, 4, 5, 6);
>> +	EVAL6(COPY, 7, 8, 9, 10, 11, 12);
>> +	EVAL5(COPY, 13, 14, 15, 16, 17);
>> +	__bpf_trace_run(prog, args);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_trace_run17);
>
> It would be nice if we could generate all of these via a macro, e.g. since we
> define a hard upper limit for the max number of tracepoint args anyway, so this
> gets adjusted automatically as well. Maybe some of the logic from the BPF_CALL_*()
> macros could be borrowed for this purpose.

I've thought about it, but couldn't figure out how to do it.
Suggestions are welcome.
The preprocessor cannot expand a constant N into N statements.
There has to be something like:
...
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
for whatever maximum we will pick.
I picked 6 as a good compromise and used it twice in bpf_trace_run1x().
A similar thing is possible for u64 arg1, u64 arg2, ...,
but it would be harder to read.
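
For illustration, here is a stand-alone user-space sketch of how the expansion
composes for the 8-arg case. The EVALn/COPY macros mirror the patch; the
trace_run8()/main() harness is made up purely for demonstration and is not part
of the patch:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* same helper macros as in the patch */
#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

#define COPY(X) args[X - 1] = arg##X;

/* 8-arg case: one full EVAL6 plus an EVAL2 for the remainder; after
 * preprocessing this is args[0] = arg1; ... args[7] = arg8;
 */
static void trace_run8(u64 arg1, u64 arg2, u64 arg3, u64 arg4,
		       u64 arg5, u64 arg6, u64 arg7, u64 arg8)
{
	u64 args[8];
	int i;

	EVAL6(COPY, 1, 2, 3, 4, 5, 6);
	EVAL2(COPY, 7, 8);

	for (i = 0; i < 8; i++)
		printf("args[%d] = %llu\n", i, (unsigned long long)args[i]);
}

int main(void)
{
	trace_run8(1, 2, 3, 4, 5, 6, 7, 8);
	return 0;
}

Running gcc -E over it makes the expansion easy to see: the two EVAL lines
become the eight args[...] = arg...; assignments that the hand-written
bpf_trace_run8() above spells out.
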
Looking forward to what you can come up with.
