Message-ID: <b416b30c3ea48e4e97aa0ed1d89124cb@pinky>
Date:	Thu, 23 Nov 2006 16:49:40 +0000
From:	Andy Whitcroft <apw@...dowen.org>
To:	linux-mm@...ck.org
Cc:	Andrew Morton <akpm@...l.org>,
	Peter Zijlstra <a.p.zijlstra@...llo.nl>,
	Mel Gorman <mel@....ul.ie>, Andy Whitcroft <apw@...dowen.org>,
	linux-kernel@...r.kernel.org
Subject: [PATCH 2/4] lumpy: clean up a misplaced comment and simplify some code

lumpy: clean up a misplaced comment and simplify some code

Move the comment for isolate_lru_pages() back to its function
and add a comment for the new function.  Add some running commentary
on the area scan.  Clean up the indentation of the switch statements
to match the majority style in mm/*.  Finally, clarify the boundary
pfn calculations.

Signed-off-by: Andy Whitcroft <apw@...dowen.org>
---
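For reference, a minimal standalone sketch (not part of the patch) of the
clarified boundary calculation: the tag page's pfn is rounded down to the
start of its 2^order aligned block, and the scan runs to the end of that
block.  The pfn and order values below are made up for illustration.

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_pfn = 13;	/* hypothetical tag page pfn */
		int order = 3;			/* hypothetical reclaim order */

		/* round down to the order-aligned block containing page_pfn */
		unsigned long pfn = page_pfn & ~((1UL << order) - 1);
		unsigned long end_pfn = pfn + (1UL << order);

		/* here pfn = 8 and end_pfn = 16: the 8-page block around pfn 13 */
		for (; pfn < end_pfn; pfn++)
			printf("consider pfn %lu%s\n", pfn,
			       pfn == page_pfn ? " (tag page itself)" : "");
		return 0;
	}
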
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4645a3f..3b6ef79 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -609,21 +609,14 @@ keep:
 }
 
 /*
- * zone->lru_lock is heavily contended.  Some of the functions that
- * shrink the lists perform better by taking out a batch of pages
- * and working on them outside the LRU lock.
+ * Attempt to remove the specified page from its LRU.  Only take this
+ * page if it is of the appropriate PageActive status.  Pages which
+ * are being freed elsewhere are also ignored.
  *
- * For pagecache intensive workloads, this function is the hottest
- * spot in the kernel (apart from copy_*_user functions).
- *
- * Appropriate locks must be held before calling this function.
+ * @page:	page to consider
+ * @active:	active/inactive flag only take pages of this type
  *
- * @nr_to_scan:	The number of pages to look through on the list.
- * @src:	The LRU list to pull pages off.
- * @dst:	The temp list to put pages on to.
- * @scanned:	The number of pages that were scanned.
- *
- * returns how many pages were moved onto *@....
+ * returns 0 on success, -ve errno on failure.
  */
 int __isolate_lru_page(struct page *page, int active)
 {
@@ -645,6 +638,23 @@ int __isolate_lru_page(struct page *page
 	return ret;
 }
 
+/*
+ * zone->lru_lock is heavily contended.  Some of the functions that
+ * shrink the lists perform better by taking out a batch of pages
+ * and working on them outside the LRU lock.
+ *
+ * For pagecache intensive workloads, this function is the hottest
+ * spot in the kernel (apart from copy_*_user functions).
+ *
+ * Appropriate locks must be held before calling this function.
+ *
+ * @nr_to_scan:	The number of pages to look through on the list.
+ * @src:	The LRU list to pull pages off.
+ * @dst:	The temp list to put pages on to.
+ * @scanned:	The number of pages that were scanned.
+ *
+ * returns how many pages were moved onto *@....
+ */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		struct list_head *src, struct list_head *dst,
 		unsigned long *scanned, int order)
@@ -662,26 +672,31 @@ static unsigned long isolate_lru_pages(u
 
 		active = PageActive(page);
 		switch (__isolate_lru_page(page, active)) {
-			case 0:
-				list_move(&page->lru, dst);
-				nr_taken++;
-				break;
+		case 0:
+			list_move(&page->lru, dst);
+			nr_taken++;
+			break;
 
-			case -EBUSY:
-				/* else it is being freed elsewhere */
-				list_move(&page->lru, src);
-				continue;
+		case -EBUSY:
+			/* else it is being freed elsewhere */
+			list_move(&page->lru, src);
+			continue;
 
-			default:
-				BUG();
+		default:
+			BUG();
 		}
 
 		if (!order)
 			continue;
 
-		page_pfn = pfn = __page_to_pfn(page);
-		end_pfn = pfn &= ~((1 << order) - 1);
-		end_pfn += 1 << order;
+		/*
+		 * Attempt to take all pages in the order aligned region
+		 * surrounding the tag page.  Only take those pages of
+		 * the same active state as that tag page.
+		 */
+		page_pfn = __page_to_pfn(page);
+		pfn = page_pfn & ~((1 << order) - 1);
+		end_pfn = pfn + (1 << order);
 		for (; pfn < end_pfn; pfn++) {
 			if (unlikely(pfn == page_pfn))
 				continue;
@@ -691,17 +706,16 @@ static unsigned long isolate_lru_pages(u
 			scan++;
 			tmp = __pfn_to_page(pfn);
 			switch (__isolate_lru_page(tmp, active)) {
-				case 0:
-					list_move(&tmp->lru, dst);
-					nr_taken++;
-					continue;
-
-				case -EBUSY:
-					/* else it is being freed elsewhere */
-					list_move(&tmp->lru, src);
-				default:
-					break;
+			case 0:
+				list_move(&tmp->lru, dst);
+				nr_taken++;
+				continue;
 
+			case -EBUSY:
+				/* else it is being freed elsewhere */
+				list_move(&tmp->lru, src);
+			default:
+				break;
 			}
 			break;
 		}
-
