Message-Id: <1600436075-2961-6-git-send-email-alan.maguire@oracle.com>
Date:   Fri, 18 Sep 2020 14:34:34 +0100
From:   Alan Maguire <alan.maguire@...cle.com>
To:     ast@...nel.org, daniel@...earbox.net, andriin@...com, yhs@...com
Cc:     linux@...musvillemoes.dk, andriy.shevchenko@...ux.intel.com,
        pmladek@...e.com, kafai@...com, songliubraving@...com,
        john.fastabend@...il.com, kpsingh@...omium.org, shuah@...nel.org,
        rdna@...com, scott.branden@...adcom.com, quentin@...valent.com,
        cneirabustos@...il.com, jakub@...udflare.com, mingo@...hat.com,
        rostedt@...dmis.org, bpf@...r.kernel.org, netdev@...r.kernel.org,
        linux-kernel@...r.kernel.org, linux-kselftest@...r.kernel.org,
        acme@...nel.org, Alan Maguire <alan.maguire@...cle.com>
Subject: [PATCH v5 bpf-next 5/6] bpf: add bpf_seq_btf_write helper

A helper is added to allow writing kernel data structures to a
seq file using vmlinux BTF.  Its signature is

long bpf_seq_btf_write(struct seq_file *m, struct btf_ptr *ptr,
		       u32 btf_ptr_size, u64 flags);

The flags and struct btf_ptr definition/use are identical to those
of the bpf_btf_snprintf helper; the helper returns 0 on success
or a negative error value.
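
For reference, a minimal usage sketch (not part of this patch) of how
a BPF_TRACE_ITER program might call the helper to dump a kernel data
structure.  It assumes the struct btf_ptr and BTF_F_* definitions
added for bpf_btf_snprintf earlier in this series, plus the usual
vmlinux.h/libbpf task-iterator boilerplate; the program name is
illustrative:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    char LICENSE[] SEC("license") = "GPL";	/* helper is gpl_only */

    SEC("iter/task")
    int dump_task(struct bpf_iter__task *ctx)
    {
    	struct seq_file *seq = ctx->meta->seq;
    	struct task_struct *task = ctx->task;
    	struct btf_ptr ptr = {};

    	if (task == NULL)
    		return 0;

    	/* the type name is resolved via vmlinux BTF; ptr.type_id
    	 * could be used instead if the id is already known.
    	 */
    	ptr.type = "task_struct";
    	ptr.ptr = task;

    	/* returns 0 on success or a negative error */
    	return bpf_seq_btf_write(seq, &ptr, sizeof(ptr), BTF_F_COMPACT);
    }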

Suggested-by: Alexei Starovoitov <alexei.starovoitov@...il.com>
Signed-off-by: Alan Maguire <alan.maguire@...cle.com>
---
 include/linux/btf.h            |  3 ++
 include/uapi/linux/bpf.h       | 10 ++++++
 kernel/bpf/btf.c               | 17 +++++++---
 kernel/trace/bpf_trace.c       | 75 +++++++++++++++++++++++++++++++++---------
 tools/include/uapi/linux/bpf.h | 10 ++++++
 5 files changed, 96 insertions(+), 19 deletions(-)

diff --git a/include/linux/btf.h b/include/linux/btf.h
index 3e5cdc2..eed23a4 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -69,6 +69,9 @@ const struct btf_type *btf_type_id_size(const struct btf *btf,
 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
 		       struct seq_file *m);
 
+int btf_type_seq_show_flags(const struct btf *btf, u32 type_id, void *obj,
+			    struct seq_file *m, u64 flags);
+
 /*
  * Copy len bytes of string representation of obj of BTF type_id into buf.
  *
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 9b89b67..c0815f1 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3614,6 +3614,15 @@ struct bpf_stack_build_id {
  *		The number of bytes that were written (or would have been
  *		written if output had to be truncated due to string size),
  *		or a negative error in cases of failure.
+ *
+ * long bpf_seq_btf_write(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
+ *	Description
+ *		Use BTF to write to seq_write a string representation of
+ *		*ptr*->ptr, using *ptr*->type name or *ptr*->type_id as per
+ *		bpf_btf_snprintf() above.  *flags* are identical to those
+ *		used for bpf_btf_snprintf.
+ *	Return
+ *		0 on success or a negative error in case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -3766,6 +3775,7 @@ struct bpf_stack_build_id {
 	FN(d_path),			\
 	FN(copy_from_user),		\
 	FN(btf_snprintf),		\
+	FN(seq_btf_write),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 70f5b88..0902464 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -5328,17 +5328,26 @@ static void btf_seq_show(struct btf_show *show, const char *fmt, ...)
 	va_end(args);
 }
 
-void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
-			struct seq_file *m)
+int btf_type_seq_show_flags(const struct btf *btf, u32 type_id, void *obj,
+			struct seq_file *m, u64 flags)
 {
 	struct btf_show sseq;
 
 	sseq.target = m;
 	sseq.showfn = btf_seq_show;
-	sseq.flags = BTF_SHOW_NONAME | BTF_SHOW_COMPACT | BTF_SHOW_ZERO |
-		     BTF_SHOW_UNSAFE;
+	sseq.flags = flags;
 
 	btf_type_show(btf, type_id, obj, &sseq);
+
+	return sseq.state.status;
+}
+
+void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
+		       struct seq_file *m)
+{
+	(void) btf_type_seq_show_flags(btf, type_id, obj, m,
+				       BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
+				       BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
 }
 
 struct btf_show_snprintf {
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index f171e03..eee36a8 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -71,6 +71,10 @@ static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
+static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
+				  u64 flags, const struct btf **btf,
+				  s32 *btf_id);
+
 /**
  * trace_call_bpf - invoke BPF program
  * @call: tracepoint event
@@ -780,6 +784,30 @@ struct bpf_seq_printf_buf {
 	.btf_id		= bpf_seq_write_btf_ids,
 };
 
+BPF_CALL_4(bpf_seq_btf_write, struct seq_file *, m, struct btf_ptr *, ptr,
+	   u32, btf_ptr_size, u64, flags)
+{
+	const struct btf *btf;
+	s32 btf_id;
+	int ret;
+
+	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
+	if (ret)
+		return ret;
+
+	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
+}
+
+static const struct bpf_func_proto bpf_seq_btf_write_proto = {
+	.func		= bpf_seq_btf_write,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_BTF_ID,
+	.arg2_type	= ARG_PTR_TO_MEM,
+	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
+	.btf_id		= bpf_seq_write_btf_ids,
+};
+
 static __always_inline int
 get_map_perf_counter(struct bpf_map *map, u64 flags,
 		     u64 *value, u64 *enabled, u64 *running)
@@ -1151,15 +1179,14 @@ static bool bpf_d_path_allowed(const struct bpf_prog *prog)
 #define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
 			 BTF_F_PTR_RAW | BTF_F_ZERO)
 
-BPF_CALL_5(bpf_btf_snprintf, char *, str, u32, str_size, struct btf_ptr *, ptr,
-	   u32, btf_ptr_size, u64, flags)
+static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
+				  u64 flags, const struct btf **btf,
+				  s32 *btf_id)
 {
 	u8 btf_kind = BTF_KIND_TYPEDEF;
 	char type_name[KSYM_NAME_LEN];
 	const struct btf_type *t;
-	const struct btf *btf;
 	const char *btf_type;
-	s32 btf_id;
 	int ret;
 
 	if (unlikely(flags & ~(BTF_F_ALL)))
@@ -1168,10 +1195,10 @@ static bool bpf_d_path_allowed(const struct bpf_prog *prog)
 	if (btf_ptr_size != sizeof(struct btf_ptr))
 		return -EINVAL;
 
-	btf = bpf_get_btf_vmlinux();
+	*btf = bpf_get_btf_vmlinux();
 
-	if (IS_ERR_OR_NULL(btf))
-		return PTR_ERR(btf);
+	if (IS_ERR_OR_NULL(*btf))
+		return PTR_ERR(*btf);
 
 	if (ptr->type != NULL) {
 		ret = copy_from_kernel_nofault(type_name, ptr->type,
@@ -1201,20 +1228,34 @@ static bool bpf_d_path_allowed(const struct bpf_prog *prog)
 		 *
 		 * Fall back to BTF_KIND_INT if this fails.
 		 */
-		btf_id = btf_find_by_name_kind(btf, btf_type, btf_kind);
-		if (btf_id < 0)
-			btf_id = btf_find_by_name_kind(btf, btf_type,
-						       BTF_KIND_INT);
+		*btf_id = btf_find_by_name_kind(*btf, btf_type, btf_kind);
+		if (*btf_id < 0)
+			*btf_id = btf_find_by_name_kind(*btf, btf_type,
+							BTF_KIND_INT);
 	} else if (ptr->type_id > 0)
-		btf_id = ptr->type_id;
+		*btf_id = ptr->type_id;
 	else
 		return -EINVAL;
 
-	if (btf_id > 0)
-		t = btf_type_by_id(btf, btf_id);
-	if (btf_id <= 0 || !t)
+	if (*btf_id > 0)
+		t = btf_type_by_id(*btf, *btf_id);
+	if (*btf_id <= 0 || !t)
 		return -ENOENT;
 
+	return 0;
+}
+
+BPF_CALL_5(bpf_btf_snprintf, char *, str, u32, str_size, struct btf_ptr *, ptr,
+	   u32, btf_ptr_size, u64, flags)
+{
+	const struct btf *btf;
+	s32 btf_id;
+	int ret;
+
+	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
+	if (ret)
+		return ret;
+
 	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
 				      flags);
 }
@@ -1715,6 +1756,10 @@ static void put_bpf_raw_tp_regs(void)
 		return prog->expected_attach_type == BPF_TRACE_ITER ?
 		       &bpf_seq_write_proto :
 		       NULL;
+	case BPF_FUNC_seq_btf_write:
+		return prog->expected_attach_type == BPF_TRACE_ITER ?
+		       &bpf_seq_btf_write_proto :
+		       NULL;
 	case BPF_FUNC_d_path:
 		return &bpf_d_path_proto;
 	default:
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 9b89b67..c0815f1 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3614,6 +3614,15 @@ struct bpf_stack_build_id {
  *		The number of bytes that were written (or would have been
  *		written if output had to be truncated due to string size),
  *		or a negative error in cases of failure.
+ *
+ * long bpf_seq_btf_write(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
+ *	Description
+ *		Use BTF to write to seq_write a string representation of
+ *		*ptr*->ptr, using *ptr*->type name or *ptr*->type_id as per
+ *		bpf_btf_snprintf() above.  *flags* are identical to those
+ *		used for bpf_btf_snprintf.
+ *	Return
+ *		0 on success or a negative error in case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -3766,6 +3775,7 @@ struct bpf_stack_build_id {
 	FN(d_path),			\
 	FN(copy_from_user),		\
 	FN(btf_snprintf),		\
+	FN(seq_btf_write),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
-- 
1.8.3.1
