[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250805155627.1605911-2-ysk@kzalloc.com>
Date: Tue, 5 Aug 2025 15:56:28 +0000
From: Yunseong Kim <ysk@...lloc.com>
To: Namjae Jeon <linkinjeon@...nel.org>,
Steve French <smfrench@...il.com>
Cc: Sergey Senozhatsky <senozhatsky@...omium.org>,
Tom Talpey <tom@...pey.com>,
linux-cifs@...r.kernel.org,
syzkaller@...glegroups.com,
linux-kernel@...r.kernel.org,
Yunseong Kim <ysk@...lloc.com>,
notselwyn@...ing.tech
Subject: [PATCH] ksmbd: add kcov remote coverage support via ksmbd_conn
KSMBD processes SMB requests on per-connection threads and then hands
off work items to a kworker pool for actual command processing by
handle_ksmbd_work(). Because each connection may enqueue multiple
struct ksmbd_work instances, attaching the kcov handle to the work
itself is not sufficient: we need a stable, per-connection handle.
Introduce a kcov_handle field on struct ksmbd_conn (under CONFIG_KCOV)
and initialize it when the connection is set up. In both
ksmbd_conn_handler_loop(), which only receives a struct ksmbd_conn *,
and handle_ksmbd_work(), which receives a struct ksmbd_work *, start
kcov_remote with the per-connection handle before processing and stop
it afterward. This ensures coverage collection remains active across
the entire asynchronous path of each SMB request.
The kcov context is tied to the connection itself, correctly supporting
multiple outstanding work items per connection.
The related work for syzkaller support is currently being developed
in the following GitHub PR:
Link: https://github.com/google/syzkaller/pull/5524
Based on earlier work by Lau.
Link: https://pwning.tech/ksmbd-syzkaller/
Cc: linux-cifs@...r.kernel.org
Cc: notselwyn@...ing.tech
Signed-off-by: Yunseong Kim <ysk@...lloc.com>
---
fs/smb/server/connection.c | 4 +++-
fs/smb/server/connection.h | 14 ++++++++++++++
fs/smb/server/server.c | 4 ++++
3 files changed, 21 insertions(+), 1 deletion(-)
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index 3f04a2977ba8..6ce20aee8cc1 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -322,6 +322,8 @@ int ksmbd_conn_handler_loop(void *p)
if (t->ops->prepare && t->ops->prepare(t))
goto out;
+ kcov_remote_start_common(ksmbd_conn_get_kcov_handle(conn));
+
max_req = server_conf.max_inflight_req;
conn->last_active = jiffies;
set_freezable();
@@ -412,7 +414,7 @@ int ksmbd_conn_handler_loop(void *p)
break;
}
}
-
+ kcov_remote_stop();
out:
ksmbd_conn_set_releasing(conn);
/* Wait till all reference dropped to the Server object*/
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index dd3e0e3f7bf0..07cd0d27ac77 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -15,6 +15,7 @@
#include <linux/kthread.h>
#include <linux/nls.h>
#include <linux/unicode.h>
+#include <linux/kcov.h>
#include "smb_common.h"
#include "ksmbd_work.h"
@@ -109,6 +110,9 @@ struct ksmbd_conn {
bool binding;
atomic_t refcnt;
bool is_aapl;
+#ifdef CONFIG_KCOV
+ u64 kcov_handle;
+#endif
};
struct ksmbd_conn_ops {
@@ -246,4 +250,14 @@ static inline void ksmbd_conn_set_releasing(struct ksmbd_conn *conn)
}
void ksmbd_all_conn_set_status(u64 sess_id, u32 status);
+
+static inline u64 ksmbd_conn_get_kcov_handle(struct ksmbd_conn *conn)
+{
+#ifdef CONFIG_KCOV
+ return conn->kcov_handle;
+#else
+ return 0;
+#endif
+}
+
#endif /* __CONNECTION_H__ */
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index 8c9c49c3a0a4..0757cd6ef4f7 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -264,6 +264,8 @@ static void handle_ksmbd_work(struct work_struct *wk)
struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
struct ksmbd_conn *conn = work->conn;
+ kcov_remote_start_common(ksmbd_conn_get_kcov_handle(conn));
+
atomic64_inc(&conn->stats.request_served);
__handle_ksmbd_work(work, conn);
@@ -271,6 +273,8 @@ static void handle_ksmbd_work(struct work_struct *wk)
ksmbd_conn_try_dequeue_request(work);
ksmbd_free_work_struct(work);
ksmbd_conn_r_count_dec(conn);
+
+ kcov_remote_stop();
}
/**
--
2.50.0
Powered by blists - more mailing lists