Message-ID: <33fdaa38-6204-bef0-b12f-0416f16dc165@redhat.com>
Date: Tue, 29 Jan 2019 11:03:56 +0100
From: David Hildenbrand <david@...hat.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Oscar Salvador <osalvador@...e.de>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, mhocko@...e.com
Subject: Re: [PATCH] mm,memory_hotplug: Fix scan_movable_pages for gigantic hugepages

On 28.01.19 23:56, Andrew Morton wrote:
> On Mon, 28 Jan 2019 14:53:09 -0800 Andrew Morton <akpm@...ux-foundation.org> wrote:
>
>> On Fri, 25 Jan 2019 08:58:33 +0100 Oscar Salvador <osalvador@...e.de> wrote:
>>
>>> On Wed, Jan 23, 2019 at 11:33:56AM +0100, David Hildenbrand wrote:
>>>> If you use {} for the else case, please also do so for the if case.
>>>
>>> Diff on top:
>>>
>>> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
>>> index 25aee4f04a72..d5810e522b72 100644
>>> --- a/mm/memory_hotplug.c
>>> +++ b/mm/memory_hotplug.c
>>> @@ -1338,9 +1338,9 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
>>>  				struct page *head = compound_head(page);
>>>
>>>  				if (hugepage_migration_supported(page_hstate(head)) &&
>>> -				    page_huge_active(head))
>>> +				    page_huge_active(head)) {
>>>  					return pfn;
>>> -				else {
>>> +				} else {
>>>  					unsigned long skip;
>>>
>>>  					skip = (1 << compound_order(head)) - (page - head);
>>>
>>
>> The indenting is getting a bit deep also, so how about this?
>>
>> static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
>> {
>> 	unsigned long pfn;
>>
>> 	for (pfn = start; pfn < end; pfn++) {
>> 		struct page *page, *head;
>>
>> 		if (!pfn_valid(pfn))
>> 			continue;
>> 		page = pfn_to_page(pfn);
>> 		if (PageLRU(page))
>> 			return pfn;
>> 		if (__PageMovable(page))
>> 			return pfn;
>>
>> 		if (!PageHuge(page))
>> 			continue;
>> 		head = compound_head(page);
>> 		if (hugepage_migration_supported(page_hstate(head)) &&
>> 		    page_huge_active(head)) {
>> 			return pfn;
>
> checkpatch pointed out that else-after-return isn't needed so we can do
>
> static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
> {
> 	unsigned long pfn;
>
> 	for (pfn = start; pfn < end; pfn++) {
> 		struct page *page, *head;
> 		unsigned long skip;
>
> 		if (!pfn_valid(pfn))
> 			continue;
> 		page = pfn_to_page(pfn);
> 		if (PageLRU(page))
> 			return pfn;
> 		if (__PageMovable(page))
> 			return pfn;
>
> 		if (!PageHuge(page))
> 			continue;
> 		head = compound_head(page);
> 		if (hugepage_migration_supported(page_hstate(head)) &&
> 		    page_huge_active(head))
> 			return pfn;
> 		skip = (1 << compound_order(head)) - (page - head);
> 		pfn += skip - 1;
> 	}
> 	return 0;
> }
>
> --- a/mm/memory_hotplug.c~mmmemory_hotplug-fix-scan_movable_pages-for-gigantic-hugepages-fix
> +++ a/mm/memory_hotplug.c
> @@ -1305,28 +1305,27 @@ int test_pages_in_a_zone(unsigned long s
>  static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
>  {
>  	unsigned long pfn;
> -	struct page *page;
> +
>  	for (pfn = start; pfn < end; pfn++) {
> -		if (pfn_valid(pfn)) {
> -			page = pfn_to_page(pfn);
> -			if (PageLRU(page))
> -				return pfn;
> -			if (__PageMovable(page))
> -				return pfn;
> -			if (PageHuge(page)) {
> -				struct page *head = compound_head(page);
> +		struct page *page, *head;
> +		unsigned long skip;
>
> -				if (hugepage_migration_supported(page_hstate(head)) &&
> -				    page_huge_active(head))
> -					return pfn;
> -				else {
> -					unsigned long skip;
> +		if (!pfn_valid(pfn))
> +			continue;
> +		page = pfn_to_page(pfn);
> +		if (PageLRU(page))
> +			return pfn;
> +		if (__PageMovable(page))
> +			return pfn;
>
> -					skip = (1 << compound_order(head)) - (page - head);
> -					pfn += skip - 1;
> -				}
> -			}
> -		}
> +		if (!PageHuge(page))
> +			continue;
> +		head = compound_head(page);
> +		if (hugepage_migration_supported(page_hstate(head)) &&
> +		    page_huge_active(head))
> +			return pfn;
> +		skip = (1 << compound_order(head)) - (page - head);
> +		pfn += skip - 1;
Not sure if encoding the -1 in the previous line would be even better now
that we have more space (the increment then becomes a plain "pfn += skip;"):

	skip = (1 << compound_order(head)) - (page - head + 1);

Looks good to me.
>  	}
>  	return 0;
>  }
> _
>
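
FWIW, a quick userspace sketch of the skip arithmetic, just to convince
myself that both formulations land on the same pfn. The order and pfn
values below are made up for illustration and are not tied to any real
configuration:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Pretend gigantic page: order 18 (2^18 base pages), head at
	 * pfn 0, and the scan entered the compound page at pfn 5. */
	unsigned long order = 18;
	unsigned long head_pfn = 0, pfn = 5;
	unsigned long offset = pfn - head_pfn;	/* page - head */

	/* Variant from the patch: point pfn at the last tail page, the
	 * loop's pfn++ then moves past the hugepage. */
	unsigned long skip = (1UL << order) - offset;
	unsigned long last_tail = pfn + skip - 1;

	/* Variant with the -1 folded into skip. */
	unsigned long skip2 = (1UL << order) - (offset + 1);

	assert(last_tail == pfn + skip2);
	assert(last_tail + 1 == head_pfn + (1UL << order));
	printf("scan resumes at pfn %lu\n", last_tail + 1);
	return 0;
}

Either way the loop resumes at the first pfn after the compound page.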
--
Thanks,
David / dhildenb