[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <584e60dd-8a3b-46d5-87ec-aa7032d3a572@huawei.com>
Date: Wed, 16 Oct 2024 20:34:44 +0800
From: Kefeng Wang <wangkefeng.wang@...wei.com>
To: Baolin Wang <baolin.wang@...ux.alibaba.com>, <akpm@...ux-foundation.org>,
<hughd@...gle.com>
CC: <willy@...radead.org>, <david@...hat.com>, <linux-mm@...ck.org>,
<linux-kernel@...r.kernel.org>
Subject: Re: [PATCH 1/2] mm: shmem: update iocb->ki_pos directly to simplify
tmpfs read logic
On 2024/10/16 18:09, Baolin Wang wrote:
> Use iocb->ki_pos to check if the bytes read exceed the file size and to
> calculate the bytes to be read, which can help simplify the code logic. Meanwhile,
> this is also a preparation for improving tmpfs large folios read performance
> in the following patch.
>
> Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
> ---
> mm/shmem.c | 36 ++++++++++++------------------------
> 1 file changed, 12 insertions(+), 24 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 66eae800ffab..edab02a26aac 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -3106,26 +3106,18 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
> unsigned long offset;
> int error = 0;
> ssize_t retval = 0;
> - loff_t *ppos = &iocb->ki_pos;
>
> - index = *ppos >> PAGE_SHIFT;
> - offset = *ppos & ~PAGE_MASK;
> + index = iocb->ki_pos >> PAGE_SHIFT;
The index calculation could be moved before shmem_get_folio(), then...
> + offset = iocb->ki_pos & ~PAGE_MASK;
>
> for (;;) {
> struct folio *folio = NULL;
> struct page *page = NULL;
> - pgoff_t end_index;
> unsigned long nr, ret;
> - loff_t i_size = i_size_read(inode);
> + loff_t end_offset, i_size = i_size_read(inode);
>
> - end_index = i_size >> PAGE_SHIFT;
> - if (index > end_index)
> + if (unlikely(iocb->ki_pos >= i_size))
> break;
> - if (index == end_index) {
> - nr = i_size & ~PAGE_MASK;
> - if (nr <= offset)
> - break;
> - }
>
> error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
> if (error) {
> @@ -3148,18 +3140,14 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
> * We must evaluate after, since reads (unlike writes)
> * are called without i_rwsem protection against truncate
> */
> - nr = PAGE_SIZE;
> i_size = i_size_read(inode);
> - end_index = i_size >> PAGE_SHIFT;
> - if (index == end_index) {
> - nr = i_size & ~PAGE_MASK;
> - if (nr <= offset) {
> - if (folio)
> - folio_put(folio);
> - break;
> - }
> + if (unlikely(iocb->ki_pos >= i_size)) {
> + if (folio)
> + folio_put(folio);
> + break;
> }
> - nr -= offset;
> + end_offset = min_t(loff_t, i_size, iocb->ki_pos + to->count);
> + nr = min_t(loff_t, end_offset - iocb->ki_pos, PAGE_SIZE - offset);
>
> if (folio) {
> /*
> @@ -3199,8 +3187,9 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
>
> retval += ret;
> offset += ret;
> - index += offset >> PAGE_SHIFT;
> offset &= ~PAGE_MASK;
> + iocb->ki_pos += ret;
> + index = iocb->ki_pos >> PAGE_SHIFT;
remove this line.
>
> if (!iov_iter_count(to))
> break;
> @@ -3211,7 +3200,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
> cond_resched();
> }
>
> - *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
> file_accessed(file);
> return retval ? retval : error;
> }
Powered by blists - more mailing lists