Message-Id: <20230113090935.1763477-7-benjamin.tissoires@redhat.com>
Date:   Fri, 13 Jan 2023 10:09:32 +0100
From:   Benjamin Tissoires <benjamin.tissoires@...hat.com>
To:     Greg KH <gregkh@...uxfoundation.org>,
        Jiri Kosina <jikos@...nel.org>,
        Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Andrii Nakryiko <andrii@...nel.org>,
        Dmitry Torokhov <dmitry.torokhov@...il.com>
Cc:     Tero Kristo <tero.kristo@...ux.intel.com>,
        linux-kernel@...r.kernel.org, linux-input@...r.kernel.org,
        netdev@...r.kernel.org, bpf@...r.kernel.org,
        linux-kselftest@...r.kernel.org,
        Benjamin Tissoires <benjamin.tissoires@...hat.com>
Subject: [PATCH HID for-next v2 6/9] HID: bpf: rework how programs are attached and stored in the kernel

Previously, HID-BPF relied on a bpf tracing program to be notified
when a program was released by userspace. This was error prone, as
LLVM sometimes inlines the traced function and sometimes does not.

So instead of messing with the bpf prog refcount, we can use the
bpf_link concept, which matches exactly what we want:
- a bpf_link represents the fact that a given program is attached to a
  given HID device
- as long as the bpf_link has an open fd (either because the userspace
  program is still around or because the bpf object is pinned in the
  bpffs), the program stays attached to the HID device
- once every user has closed the fd, hid_bpf_link_release() tells us
  that there are no users left, and we can disconnect the program from
  the device in 2 passes: first atomically clear the bit saying that
  the link is active, then run release_work in a scheduled work item.

This entirely solves the problem of the BPF tracing program not being
invoked, and is definitely cleaner.
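
As an illustration of what the returned bpf_link fd buys us, here is a
minimal userspace sketch (not part of this patch): attach_hid_prog() is
a hypothetical wrapper around the attach_prog test-run shown in the
hid-bpf.rst snippet below, and the pin path is arbitrary:

	#include <unistd.h>
	#include <bpf/bpf.h>

	/* hypothetical wrapper: runs attach_prog via
	 * bpf_prog_test_run_opts() and returns args.retval, i.e. the fd
	 * of the freshly created bpf_link (or a negative error code)
	 */
	extern int attach_hid_prog(void);

	int keep_attached_and_exit(void)
	{
		int link_fd = attach_hid_prog();

		if (link_fd < 0)
			return link_fd;

		/* pin the bpf_link so the program stays attached to the
		 * HID device even after this process exits and link_fd
		 * is closed
		 */
		if (bpf_obj_pin(link_fd, "/sys/fs/bpf/my_hid_link"))
			return -1;

		/* without the pin above, this close() would drop the
		 * last reference on the bpf_link: hid_bpf_link_release()
		 * clears the enabled bit and schedules release_work,
		 * detaching the program from the device
		 */
		close(link_fd);

		return 0;
	}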

Signed-off-by: Benjamin Tissoires <benjamin.tissoires@...hat.com>

---

changes in v2:
- renamed struct hid_bpf_link->index into struct hid_bpf_link->hid_table_index
- renamed prog_idx into prog_table_idx in __hid_bpf_attach_prog()
---
 Documentation/hid/hid-bpf.rst       |  12 ++-
 drivers/hid/bpf/hid_bpf_dispatch.c  |  18 ++--
 drivers/hid/bpf/hid_bpf_jmp_table.c | 126 +++++++++++++++-------------
 include/linux/hid_bpf.h             |   7 ++
 4 files changed, 95 insertions(+), 68 deletions(-)

diff --git a/Documentation/hid/hid-bpf.rst b/Documentation/hid/hid-bpf.rst
index a55f191d49a3..c205f92b7bcc 100644
--- a/Documentation/hid/hid-bpf.rst
+++ b/Documentation/hid/hid-bpf.rst
@@ -427,12 +427,22 @@ program first::
 	prog_fd = bpf_program__fd(hid_skel->progs.attach_prog);
 
 	err = bpf_prog_test_run_opts(prog_fd, &tattrs);
-	return err;
+	if (err)
+		return err;
+
+	return args.retval; /* the fd of the created bpf_link */
   }
 
 Our userspace program can now listen to notifications on the ring buffer, and
 is awaken only when the value changes.
 
+When the userspace program doesn't need to listen to events anymore, it can just
+close the returned fd from :c:func:`attach_filter`, which will tell the kernel to
+detach the program from the HID device.
+
+Of course, in other use cases, the userspace program can also pin the fd to the
+BPF filesystem through a call to :c:func:`bpf_obj_pin`, as with any bpf_link.
+
 Controlling the device
 ----------------------
 
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 58e608ebf0fa..8d29fd361ab9 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -249,7 +249,9 @@ int hid_bpf_reconnect(struct hid_device *hdev)
  * @prog_fd: an fd in the user process representing the program to attach
  * @flags: any logical OR combination of &enum hid_bpf_attach_flags
  *
- * @returns %0 on success, an error code otherwise.
+ * @returns an fd of a bpf_link object on success (> %0), an error code otherwise.
+ * Closing this fd will detach the program from the HID device (unless the bpf_link
+ * is pinned to the BPF file system).
  */
 /* called from syscall */
 noinline int
@@ -257,7 +259,7 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
 {
 	struct hid_device *hdev;
 	struct device *dev;
-	int err, prog_type = hid_bpf_get_prog_attach_type(prog_fd);
+	int fd, err, prog_type = hid_bpf_get_prog_attach_type(prog_fd);
 
 	if (!hid_bpf_ops)
 		return -EINVAL;
@@ -283,17 +285,19 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
 			return err;
 	}
 
-	err = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags);
-	if (err)
-		return err;
+	fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags);
+	if (fd < 0)
+		return fd;
 
 	if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
 		err = hid_bpf_reconnect(hdev);
-		if (err)
+		if (err) {
+			close_fd(fd);
 			return err;
+		}
 	}
 
-	return 0;
+	return fd;
 }
 
 /**
diff --git a/drivers/hid/bpf/hid_bpf_jmp_table.c b/drivers/hid/bpf/hid_bpf_jmp_table.c
index 207972b028d9..b7eb5c5e435f 100644
--- a/drivers/hid/bpf/hid_bpf_jmp_table.c
+++ b/drivers/hid/bpf/hid_bpf_jmp_table.c
@@ -322,16 +322,6 @@ static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
 	if (err)
 		goto out;
 
-	/*
-	 * The program has been safely inserted, decrement the reference count
-	 * so it doesn't interfere with the number of actual user handles.
-	 * This is safe to do because:
-	 * - we overrite the put_ptr in the prog fd map
-	 * - we also have a cleanup function that monitors when a program gets
-	 *   released and we manually do the cleanup in the prog fd map
-	 */
-	bpf_prog_sub(prog, 1);
-
 	/* return the index */
 	err = index;
 
@@ -365,14 +355,46 @@ int hid_bpf_get_prog_attach_type(int prog_fd)
 	return prog_type;
 }
 
+static void hid_bpf_link_release(struct bpf_link *link)
+{
+	struct hid_bpf_link *hid_link =
+		container_of(link, struct hid_bpf_link, link);
+
+	__clear_bit(hid_link->hid_table_index, jmp_table.enabled);
+	schedule_work(&release_work);
+}
+
+static void hid_bpf_link_dealloc(struct bpf_link *link)
+{
+	struct hid_bpf_link *hid_link =
+		container_of(link, struct hid_bpf_link, link);
+
+	kfree(hid_link);
+}
+
+static void hid_bpf_link_show_fdinfo(const struct bpf_link *link,
+					 struct seq_file *seq)
+{
+	seq_printf(seq,
+		   "attach_type:\tHID-BPF\n");
+}
+
+static const struct bpf_link_ops hid_bpf_link_lops = {
+	.release = hid_bpf_link_release,
+	.dealloc = hid_bpf_link_dealloc,
+	.show_fdinfo = hid_bpf_link_show_fdinfo,
+};
+
 /* called from syscall */
 noinline int
 __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
 		      int prog_fd, __u32 flags)
 {
+	struct bpf_link_primer link_primer;
+	struct hid_bpf_link *link;
 	struct bpf_prog *prog = NULL;
 	struct hid_bpf_prog_entry *prog_entry;
-	int cnt, err = -EINVAL, prog_idx = -1;
+	int cnt, err = -EINVAL, prog_table_idx = -1;
 
 	/* take a ref on the prog itself */
 	prog = bpf_prog_get(prog_fd);
@@ -381,23 +403,32 @@ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
 
 	mutex_lock(&hid_bpf_attach_lock);
 
+	link = kzalloc(sizeof(*link), GFP_USER);
+	if (!link) {
+		err = -ENOMEM;
+		goto err_unlock;
+	}
+
+	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC,
+		      &hid_bpf_link_lops, prog);
+
 	/* do not attach too many programs to a given HID device */
 	cnt = hid_bpf_program_count(hdev, NULL, prog_type);
 	if (cnt < 0) {
 		err = cnt;
-		goto out_unlock;
+		goto err_unlock;
 	}
 
 	if (cnt >= hid_bpf_max_programs(prog_type)) {
 		err = -E2BIG;
-		goto out_unlock;
+		goto err_unlock;
 	}
 
-	prog_idx = hid_bpf_insert_prog(prog_fd, prog);
+	prog_table_idx = hid_bpf_insert_prog(prog_fd, prog);
 	/* if the jmp table is full, abort */
-	if (prog_idx < 0) {
-		err = prog_idx;
-		goto out_unlock;
+	if (prog_table_idx < 0) {
+		err = prog_table_idx;
+		goto err_unlock;
 	}
 
 	if (flags & HID_BPF_FLAG_INSERT_HEAD) {
@@ -412,22 +443,32 @@ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
 
 	/* we steal the ref here */
 	prog_entry->prog = prog;
-	prog_entry->idx = prog_idx;
+	prog_entry->idx = prog_table_idx;
 	prog_entry->hdev = hdev;
 	prog_entry->type = prog_type;
 
 	/* finally store the index in the device list */
 	err = hid_bpf_populate_hdev(hdev, prog_type);
+	if (err) {
+		hid_bpf_release_prog_at(prog_table_idx);
+		goto err_unlock;
+	}
+
+	link->hid_table_index = prog_table_idx;
+
+	err = bpf_link_prime(&link->link, &link_primer);
 	if (err)
-		hid_bpf_release_prog_at(prog_idx);
+		goto err_unlock;
 
- out_unlock:
 	mutex_unlock(&hid_bpf_attach_lock);
 
-	/* we only use prog as a key in the various tables, so we don't need to actually
-	 * increment the ref count.
-	 */
+	return bpf_link_settle(&link_primer);
+
+ err_unlock:
+	mutex_unlock(&hid_bpf_attach_lock);
+
 	bpf_prog_put(prog);
+	kfree(link);
 
 	return err;
 }
@@ -460,36 +501,10 @@ void __hid_bpf_destroy_device(struct hid_device *hdev)
 
 void call_hid_bpf_prog_put_deferred(struct work_struct *work)
 {
-	struct bpf_prog_aux *aux;
-	struct bpf_prog *prog;
-	bool found = false;
-	int i;
-
-	aux = container_of(work, struct bpf_prog_aux, work);
-	prog = aux->prog;
-
-	/* we don't need locking here because the entries in the progs table
-	 * are stable:
-	 * if there are other users (and the progs entries might change), we
-	 * would simply not have been called.
-	 */
-	for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
-		if (jmp_table.progs[i] == prog) {
-			__clear_bit(i, jmp_table.enabled);
-			found = true;
-		}
-	}
-
-	if (found)
-		/* schedule release of all detached progs */
-		schedule_work(&release_work);
-}
-
-static void hid_bpf_prog_fd_array_put_ptr(void *ptr)
-{
+	/* kept around for patch readability, to be dropped in the next commit */
 }
 
-#define HID_BPF_PROGS_COUNT 2
+#define HID_BPF_PROGS_COUNT 1
 
 static struct bpf_link *links[HID_BPF_PROGS_COUNT];
 static struct entrypoints_bpf *skel;
@@ -528,8 +543,6 @@ void hid_bpf_free_links_and_skel(void)
 	idx++;									\
 } while (0)
 
-static struct bpf_map_ops hid_bpf_prog_fd_maps_ops;
-
 int hid_bpf_preload_skel(void)
 {
 	int err, idx = 0;
@@ -548,14 +561,7 @@ int hid_bpf_preload_skel(void)
 		goto out;
 	}
 
-	/* our jump table is stealing refs, so we should not decrement on removal of elements */
-	hid_bpf_prog_fd_maps_ops = *jmp_table.map->ops;
-	hid_bpf_prog_fd_maps_ops.map_fd_put_ptr = hid_bpf_prog_fd_array_put_ptr;
-
-	jmp_table.map->ops = &hid_bpf_prog_fd_maps_ops;
-
 	ATTACH_AND_STORE_LINK(hid_tail_call);
-	ATTACH_AND_STORE_LINK(hid_bpf_prog_put_deferred);
 
 	return 0;
 out:
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index 3ca85ab91325..e9afb61e6ee0 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -3,6 +3,7 @@
 #ifndef __HID_BPF_H
 #define __HID_BPF_H
 
+#include <linux/bpf.h>
 #include <linux/spinlock.h>
 #include <uapi/linux/hid.h>
 
@@ -138,6 +139,12 @@ struct hid_bpf {
 	spinlock_t progs_lock;		/* protects RCU update of progs */
 };
 
+/* specific HID-BPF link when a program is attached to a device */
+struct hid_bpf_link {
+	struct bpf_link link;
+	int hid_table_index;
+};
+
 #ifdef CONFIG_HID_BPF
 u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data,
 				  u32 *size, int interrupt);
-- 
2.38.1
