Message-ID: <672c89fd.050a0220.49393.0175.GAE@google.com>
Date: Thu, 07 Nov 2024 01:35:57 -0800
From: syzbot <syzbot+1fc6f64c40a9d143cfb6@...kaller.appspotmail.com>
To: linux-kernel@...r.kernel.org
Subject: Re: [syzbot] Re: [syzbot] [mm?] BUG: stack guard page was hit in v9fs_file_read_iter
For archival purposes, forwarding an incoming command email to
linux-kernel@...r.kernel.org.
***
Subject: Re: [syzbot] [mm?] BUG: stack guard page was hit in v9fs_file_read_iter
Author: lizhi.xu@...driver.com
Add a limit to avoid retrying too frequently when the rreq needs to retry.
#syz test
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index b1a66a6e6bc2..1863258cd9db 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -87,6 +87,7 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
netfs_prepare_dio_read_iterator(subreq);
slice = subreq->len;
+ printk("subrq: %p, %s\n", subreq, __func__);
rreq->netfs_ops->issue_read(subreq);
size -= slice;
diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c
index 72a435e5fc6d..ac9ca11b091f 100644
--- a/fs/netfs/iterator.c
+++ b/fs/netfs/iterator.c
@@ -63,6 +63,7 @@ ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
pg_size = array_size(max_pages, sizeof(*pages));
pages = (void *)bv + bv_size - pg_size;
+ printk("bvsize: %lu, pg_size: %lu, cnt: %lu, np: %u, max_p: %u, %s\n", bv_size, pg_size, count, npages, max_pages, __func__);
while (count && npages < max_pages) {
ret = iov_iter_extract_pages(orig, &pages, count,
max_pages - npages, extraction_flags,
@@ -98,6 +99,7 @@ ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
}
iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);
+ printk("ret: %d, npages: %u, orig len: %lu, count: %lu, %s\n", ret, npages, orig_len, count, __func__);
return npages;
}
EXPORT_SYMBOL_GPL(netfs_extract_user_iter);
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index b18c65ba5580..4e244dfb23bf 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -465,6 +465,7 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
int error, bool was_async)
{
struct netfs_io_request *rreq = subreq->rreq;
+ static int rtt = 0;
switch (subreq->source) {
case NETFS_READ_FROM_CACHE:
@@ -506,12 +507,18 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
if (!error && subreq->transferred < subreq->len) {
if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) {
trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof);
+ rtt = 0;
} else {
trace_netfs_sreq(subreq, netfs_sreq_trace_short);
if (subreq->transferred > subreq->consumed) {
+ rtt++;
+ if (rtt < 50) {
__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
+ }
+ printk("subreq: %p, 1async: %d, r: %p, transed: %lu, sub req length: %lu, retry times: %d, subreq consume: %d, subreq list empty: %d, %s\n",
+ subreq, was_async, rreq, subreq->transferred, subreq->len, rtt, subreq->consumed, list_empty(&rreq->subrequests), __func__);
} else if (!__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 819c75233235..b7d22f04593c 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -83,6 +83,7 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
if (!err)
subreq->transferred += total;
+ printk("subreq: %p, err: %d, total: %d, transfed: %d, %s\n", subreq, err, total, subreq->transferred, __func__);
netfs_read_subreq_terminated(subreq, err, false);
}
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 0b8086f58ad5..d80af1aa74e4 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -714,7 +714,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
mutex_unlock(&virtio_9p_lock);
if (!found) {
- pr_err("no channels available for device %s\n", devname);
+ pr_err_ratelimited("no channels available for device %s\n", devname);
return ret;
}
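
For reference, here is a minimal standalone sketch (plain userspace C, not kernel code) of the
bounded-retry pattern the read_collect.c hunk applies: a counter allows a short read to be
retried only a limited number of times and is reset once progress is made, so a subrequest that
never progresses cannot keep retrying forever. MAX_RETRIES and short_read() below are
hypothetical names used only to illustrate the idea.

/*
 * Standalone illustration of the bounded-retry idea from the patch:
 * keep a retry counter, only signal "need retry" while the counter is
 * below a cap, and reset the counter whenever progress is made.
 * MAX_RETRIES and short_read() are made up for this sketch.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 50			/* mirrors the "rtt < 50" check above */

static bool short_read(void)
{
	/* Pretend every attempt comes back short in this demo. */
	return true;
}

int main(void)
{
	static int rtt;			/* retry counter, like "rtt" in the patch */
	bool need_retry = true;

	while (need_retry) {
		need_retry = false;

		if (short_read()) {
			rtt++;
			if (rtt < MAX_RETRIES)
				need_retry = true;	/* retry, but bounded */
			else
				printf("giving up after %d short reads\n", rtt);
		} else {
			rtt = 0;			/* progress made: reset the cap */
		}
	}
	return 0;
}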