Date:   Mon, 28 Jan 2019 14:56:17 -0800
From:   Andrew Morton <akpm@...ux-foundation.org>
To:     Oscar Salvador <osalvador@...e.de>,
        David Hildenbrand <david@...hat.com>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, mhocko@...e.com
Subject: Re: [PATCH] mm,memory_hotplug: Fix scan_movable_pages for gigantic
 hugepages

On Mon, 28 Jan 2019 14:53:09 -0800 Andrew Morton <akpm@...ux-foundation.org> wrote:

> On Fri, 25 Jan 2019 08:58:33 +0100 Oscar Salvador <osalvador@...e.de> wrote:
> 
> > On Wed, Jan 23, 2019 at 11:33:56AM +0100, David Hildenbrand wrote:
> > > If you use {} for the else case, please also do so for the if case.
> > 
> > Diff on top:
> > 
> > diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> > index 25aee4f04a72..d5810e522b72 100644
> > --- a/mm/memory_hotplug.c
> > +++ b/mm/memory_hotplug.c
> > @@ -1338,9 +1338,9 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
> >  				struct page *head = compound_head(page);
> >  
> >  				if (hugepage_migration_supported(page_hstate(head)) &&
> > -				    page_huge_active(head))
> > +				    page_huge_active(head)) {
> >  					return pfn;
> > -				else {
> > +				} else {
> >  					unsigned long skip;
> >  
> >  					skip = (1 << compound_order(head)) - (page - head);
> > 
> 
> The indenting is getting a bit deep also, so how about this?
> 
> static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
> {
> 	unsigned long pfn;
> 
> 	for (pfn = start; pfn < end; pfn++) {
> 		struct page *page, *head;
>
> 		if (!pfn_valid(pfn))
> 			continue;
> 		page = pfn_to_page(pfn);
> 		if (PageLRU(page))
> 			return pfn;
> 		if (__PageMovable(page))
> 			return pfn;
> 
> 		if (!PageHuge(page))
> 			continue;
> 		head = compound_head(page);
> 		if (hugepage_migration_supported(page_hstate(head)) &&
> 		    page_huge_active(head)) {
> 			return pfn;

checkpatch pointed out that else-after-return isn't needed, so we can do:

static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;

	for (pfn = start; pfn < end; pfn++) {
		struct page *page, *head;
		unsigned long skip;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (PageLRU(page))
			return pfn;
		if (__PageMovable(page))
			return pfn;

		if (!PageHuge(page))
			continue;
		head = compound_head(page);
		if (hugepage_migration_supported(page_hstate(head)) &&
		    page_huge_active(head))
			return pfn;
		skip = (1 << compound_order(head)) - (page - head);
		pfn += skip - 1;
	}
	return 0;
}
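
To spell out the skip arithmetic above: (1 << compound_order(head)) is
the number of base pages spanned by the hugepage, and (page - head) is
how far into it the scan already is, so skip is the number of pfns left
to jump; the trailing "- 1" compensates for the loop's own pfn++.  A
minimal userspace sketch of the same arithmetic, using assumed
illustrative values (an order-18 gigantic hugepage, i.e. 1 GB of 4 KB
base pages, entered 100 pages past the head):

#include <stdio.h>

int main(void)
{
	/* Assumed values, for illustration only. */
	unsigned long order = 18;		/* 1 GB hugepage, 4 KB base pages */
	unsigned long page_off = 100;		/* current pfn's offset from head */
	unsigned long head_pfn = 0x100000;
	unsigned long pfn = head_pfn + page_off;

	/* Same arithmetic as scan_movable_pages() above. */
	unsigned long skip = (1UL << order) - page_off;

	pfn += skip - 1;	/* -1: the for loop's pfn++ takes the last step */
	printf("loop resumes at pfn %#lx\n", pfn + 1);	/* head_pfn + 2^18 */
	return 0;
}

This prints 0x140000, the first pfn past the hugepage, so no base page
inside the gigantic page is rescanned and none is skipped.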

--- a/mm/memory_hotplug.c~mmmemory_hotplug-fix-scan_movable_pages-for-gigantic-hugepages-fix
+++ a/mm/memory_hotplug.c
@@ -1305,28 +1305,27 @@ int test_pages_in_a_zone(unsigned long s
 static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
-	struct page *page;
+
 	for (pfn = start; pfn < end; pfn++) {
-		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (PageLRU(page))
-				return pfn;
-			if (__PageMovable(page))
-				return pfn;
-			if (PageHuge(page)) {
-				struct page *head = compound_head(page);
+		struct page *page, *head;
+		unsigned long skip;
 
-				if (hugepage_migration_supported(page_hstate(head)) &&
-				    page_huge_active(head))
-					return pfn;
-				else {
-					unsigned long skip;
+		if (!pfn_valid(pfn))
+			continue;
+		page = pfn_to_page(pfn);
+		if (PageLRU(page))
+			return pfn;
+		if (__PageMovable(page))
+			return pfn;
 
-					skip = (1 << compound_order(head)) - (page - head);
-					pfn += skip - 1;
-				}
-			}
-		}
+		if (!PageHuge(page))
+			continue;
+		head = compound_head(page);
+		if (hugepage_migration_supported(page_hstate(head)) &&
+		    page_huge_active(head))
+			return pfn;
+		skip = (1 << compound_order(head)) - (page - head);
+		pfn += skip - 1;
 	}
 	return 0;
 }
_
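
For reference, the checkpatch complaint mentioned above is its
UNNECESSARY_ELSE check ("else is not generally useful after a break or
return").  It can be reproduced by running the script from a kernel
tree over the earlier fix (the patch filename here is hypothetical):

	./scripts/checkpatch.pl else-fix.patch

The final version can also be quickly compile-tested in isolation,
assuming a config with CONFIG_MEMORY_HOTREMOVE=y, which is what builds
scan_movable_pages():

	make mm/memory_hotplug.o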
