[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <9aa8aa19-b3ba-ab7e-abcb-78b8a65577ce@huawei.com>
Date: Wed, 6 Jul 2022 14:47:52 +0800
From: Liu Shixin <liushixin2@...wei.com>
To: Matthew Wilcox <willy@...radead.org>
CC: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Jan Kara <jack@...e.cz>,
William Kucharski <william.kucharski@...cle.com>,
Christoph Hellwig <hch@....de>, <linux-kernel@...r.kernel.org>,
<stable@...r.kernel.org>
Subject: Re: [PATCH 5.15] mm/filemap: fix UAF in find_lock_entries
On 2022/7/6 11:21, Matthew Wilcox wrote:
> On Wed, Jul 06, 2022 at 11:24:34AM +0800, Liu Shixin wrote:
>> Release refcount after xas_set to fix UAF which may cause panic like this:
> I think we can do better. How about this?
>
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 00e391e75880..11ae38cc4fd3 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -2090,7 +2090,9 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
>
> rcu_read_lock();
> while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
> + unsigned long next_idx = xas.xa_index + 1;
> if (!xa_is_value(page)) {
> + next_idx = page->index + thp_nr_pages(page);
I noticed that there used to be a VM_BUG_ON_PAGE here, which was deleted by commit 6560e8cd869b ("mm/filemap.c: remove bogus VM_BUG_ON").
It seems that page->index and xas.xa_index are not guaranteed to be equal. Therefore, I conservatively retained
the PageTransHuge check to stay consistent with the original logic :)
@@ -2090,7 +2090,11 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
rcu_read_lock();
while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
+ unsigned long next_idx = xas.xa_index;
+
if (!xa_is_value(page)) {
+ if (PageTransHuge(page))
+ next_idx = page->index + thp_nr_pages(page);
if (page->index < start)
goto put;
if (page->index + thp_nr_pages(page) - 1 > end)
> if (page->index < start)
> goto put;
> if (page->index + thp_nr_pages(page) - 1 > end)
> @@ -2111,14 +2113,11 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
> put:
> put_page(page);
> next:
> - if (!xa_is_value(page) && PageTransHuge(page)) {
> - unsigned int nr_pages = thp_nr_pages(page);
> -
> - /* Final THP may cross MAX_LFS_FILESIZE on 32-bit */
> - xas_set(&xas, page->index + nr_pages);
> - if (xas.xa_index < nr_pages)
> - break;
> - }
> + /* Final THP may cross MAX_LFS_FILESIZE on 32-bit */
> + if (next_idx < xas.xa_index)
> + break;
> + if (next_idx != xas.xa_index + 1)
> + xas_set(&xas, next_idx);
> }
> rcu_read_unlock();
>
>
> .
>
Powered by blists - more mailing lists