Message-Id: <96675869-3815-4E98-8492-1D84F5B42AED@lca.pw>
Date:   Mon, 20 Jan 2020 08:56:46 -0500
From:   Qian Cai <cai@....pw>
To:     David Hildenbrand <david@...hat.com>
Cc:     Andrew Morton <akpm@...ux-foundation.org>, mhocko@...nel.org,
        linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH -mm v2] mm/page_isolation: fix potential warning from user



> On Jan 20, 2020, at 8:38 AM, David Hildenbrand <david@...hat.com> wrote:
> 
> On 20.01.20 14:30, David Hildenbrand wrote:
>> On 20.01.20 14:19, Qian Cai wrote:
>>> It makes sense to call the WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE)
>>> from start_isolate_page_range(), but we should avoid triggering it from
>>> userspace, i.e., from is_mem_section_removable(), because it could be a
>>> DoS vector if panic_on_warn is set.
>>> 
>>> While at it, simplify the code a bit by removing an unnecessary jump
>>> label and a local variable, so that set_migratetype_isolate() can
>>> simply return a bool.
>>> 
>>> Suggested-by: Michal Hocko <mhocko@...nel.org>
>>> Signed-off-by: Qian Cai <cai@....pw>
>>> ---
>>> 
>>> v2: Improve the commit log.
>>>    Warn for all start_isolate_page_range() users, not just offlining.
>>> 
>>> mm/page_alloc.c     | 11 ++++-------
>>> mm/page_isolation.c | 30 +++++++++++++++++-------------
>>> 2 files changed, 21 insertions(+), 20 deletions(-)
>>> 
>>> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>>> index 621716a25639..3c4eb750a199 100644
>>> --- a/mm/page_alloc.c
>>> +++ b/mm/page_alloc.c
>>> @@ -8231,7 +8231,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
>>> 		if (is_migrate_cma(migratetype))
>>> 			return NULL;
>>> 
>>> -		goto unmovable;
>>> +		return page;
>>> 	}
>>> 
>>> 	for (; iter < pageblock_nr_pages; iter++) {
>>> @@ -8241,7 +8241,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
>>> 		page = pfn_to_page(pfn + iter);
>>> 
>>> 		if (PageReserved(page))
>>> -			goto unmovable;
>>> +			return page;
>>> 
>>> 		/*
>>> 		 * If the zone is movable and we have ruled out all reserved
>>> @@ -8261,7 +8261,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
>>> 			unsigned int skip_pages;
>>> 
>>> 			if (!hugepage_migration_supported(page_hstate(head)))
>>> -				goto unmovable;
>>> +				return page;
>>> 
>>> 			skip_pages = compound_nr(head) - (page - head);
>>> 			iter += skip_pages - 1;
>>> @@ -8303,12 +8303,9 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
>>> 		 * is set to both of a memory hole page and a _used_ kernel
>>> 		 * page at boot.
>>> 		 */
>>> -		goto unmovable;
>>> +		return page;
>>> 	}
>>> 	return NULL;
>>> -unmovable:
>>> -	WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
>>> -	return pfn_to_page(pfn + iter);
>>> }
>>> 
>>> #ifdef CONFIG_CONTIG_ALLOC
>>> diff --git a/mm/page_isolation.c b/mm/page_isolation.c
>>> index e70586523ca3..31f5516f5d54 100644
>>> --- a/mm/page_isolation.c
>>> +++ b/mm/page_isolation.c
>>> @@ -15,12 +15,12 @@
>>> #define CREATE_TRACE_POINTS
>>> #include <trace/events/page_isolation.h>
>>> 
>>> -static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
>>> +static bool set_migratetype_isolate(struct page *page, int migratetype,
>>> +				    int isol_flags)
>> 
>> Why this change?
>> 
>>> {
>>> -	struct page *unmovable = NULL;
>>> +	struct page *unmovable = ERR_PTR(-EBUSY);
>> 
>> Also, why this change?
>> 
>>> 	struct zone *zone;
>>> 	unsigned long flags;
>>> -	int ret = -EBUSY;
>>> 
>>> 	zone = page_zone(page);
>>> 
>>> @@ -49,21 +49,25 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
>>> 									NULL);
>>> 
>>> 		__mod_zone_freepage_state(zone, -nr_pages, mt);
>>> -		ret = 0;
>>> 	}
>>> 
>>> out:
>>> 	spin_unlock_irqrestore(&zone->lock, flags);
>>> -	if (!ret)
>>> +
>>> +	if (!unmovable) {
>>> 		drain_all_pages(zone);
>>> -	else if ((isol_flags & REPORT_FAILURE) && unmovable)
>>> -		/*
>>> -		 * printk() with zone->lock held will guarantee to trigger a
>>> -		 * lockdep splat, so defer it here.
>>> -		 */
>>> -		dump_page(unmovable, "unmovable page");
>>> -
>>> -	return ret;
>>> +	} else {
>>> +		WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
>>> +
>>> +		if ((isol_flags & REPORT_FAILURE) && !IS_ERR(unmovable))
>>> +			/*
>> 
>> Why this change? (!IS_ERR)
>> 
>> 
>> Some things here look unrelated - or I am missing something :)
>> 
> 
> FWIW, I'd prefer this change without any such cleanups (e.g., I don't
> like returning a bool from this function or the IS_ERR handling; they
> make the function harder to read than before).

What are Michal's and Andrew's opinions? BTW, a bonus point of returning
a bool is that it helps code robustness in general, as UBSAN can catch
any abuse at runtime; a minimal sketch of that follows.
