Message-Id: <200608021848.54374.rjw@sisk.pl>
Date: Wed, 2 Aug 2006 18:48:54 +0200
From: "Rafael J. Wysocki" <rjw@...k.pl>
To: Andrew Morton <akpm@...l.org>
Cc: LKML <linux-kernel@...r.kernel.org>, Pavel Machek <pavel@....cz>
Subject: [PATCH 1/3] swsusp: Fix mark_free_pages
Clean up mm/page_alloc.c#mark_free_pages() and make it avoid clearing
PageNosaveFree for PageNosave pages. This allows us to get rid of an ugly
hack in kernel/power/snapshot.c#copy_data_pages().
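
For illustration, the per-page check this change introduces amounts to the
following minimal sketch (not a separate patch; PageNosave and
PageNosaveFree are the existing swsusp page flags, and the pbe pages that
swsusp_free() needs are marked PageNosave, so they now survive the
clearing pass):

	/* sketch: what mark_free_pages() now does for each page in the zone */
	if (!PageNosave(page))
		ClearPageNosaveFree(page);
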
Additionally, the page-copying loop in copy_data_pages() is moved to an
inline function.
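
As a reference point, the new helper behaves like this standalone sketch
(PAGE_SIZE is hard-coded to 4096 here only so the snippet compiles outside
the kernel tree; in the kernel it comes from the usual page headers):

	#define PAGE_SIZE 4096

	/* copy one page word by word, as in the new copy_data_page() */
	static inline void copy_data_page(long *dst, long *src)
	{
		int n;

		for (n = PAGE_SIZE / sizeof(long); n; n--)
			*dst++ = *src++;
	}
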
Signed-off-by: Rafael J. Wysocki <rjw@...k.pl>
---
 kernel/power/snapshot.c |   27 +++++++++++++--------------
 mm/page_alloc.c         |   22 ++++++++++++++--------
 2 files changed, 27 insertions(+), 22 deletions(-)
Index: linux-2.6.18-rc2-mm1/mm/page_alloc.c
===================================================================
--- linux-2.6.18-rc2-mm1.orig/mm/page_alloc.c 2006-07-31 20:42:29.000000000 +0200
+++ linux-2.6.18-rc2-mm1/mm/page_alloc.c 2006-07-31 20:52:11.000000000 +0200
@@ -703,7 +703,8 @@ static void __drain_pages(unsigned int c
 
 void mark_free_pages(struct zone *zone)
 {
-	unsigned long zone_pfn, flags;
+	unsigned long pfn, max_zone_pfn;
+	unsigned long flags;
 	int order;
 	struct list_head *curr;
 
@@ -711,18 +712,23 @@ void mark_free_pages(struct zone *zone)
 		return;
 
 	spin_lock_irqsave(&zone->lock, flags);
-	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
-		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
+	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
+		struct page *page = pfn_to_page(pfn);
+
+		if (!PageNosave(page))
+			ClearPageNosaveFree(page);
+	}
 
 	for (order = MAX_ORDER - 1; order >= 0; --order)
 		list_for_each(curr, &zone->free_area[order].free_list) {
-			unsigned long start_pfn, i;
+			unsigned long i;
 
-			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
-			for (i=0; i < (1<<order); i++)
-				SetPageNosaveFree(pfn_to_page(start_pfn+i));
-			}
+			pfn = page_to_pfn(list_entry(curr, struct page, lru));
+			for (i = 0; i < (1UL << order); i++)
+				SetPageNosaveFree(pfn_to_page(pfn + i));
+		}
 
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
Index: linux-2.6.18-rc2-mm1/kernel/power/snapshot.c
===================================================================
--- linux-2.6.18-rc2-mm1.orig/kernel/power/snapshot.c 2006-07-31 20:47:26.000000000 +0200
+++ linux-2.6.18-rc2-mm1/kernel/power/snapshot.c 2006-07-31 21:01:22.000000000 +0200
@@ -208,37 +208,36 @@ unsigned int count_data_pages(void)
 	return n;
 }
 
+static inline void copy_data_page(long *dst, long *src)
+{
+	int n;
+
+	/* copy_page and memcpy are not usable for copying task structs. */
+	for (n = PAGE_SIZE / sizeof(long); n; n--)
+		*dst++ = *src++;
+}
+
 static void copy_data_pages(struct pbe *pblist)
 {
 	struct zone *zone;
 	unsigned long pfn, max_zone_pfn;
-	struct pbe *pbe, *p;
+	struct pbe *pbe;
 
 	pbe = pblist;
 	for_each_zone (zone) {
 		if (is_highmem(zone))
 			continue;
 
 		mark_free_pages(zone);
-		/* This is necessary for swsusp_free() */
-		for_each_pb_page (p, pblist)
-			SetPageNosaveFree(virt_to_page(p));
-		for_each_pbe (p, pblist)
-			SetPageNosaveFree(virt_to_page(p->address));
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
 			struct page *page = saveable_page(pfn);
 
 			if (page) {
-				long *src, *dst;
-				int n;
+				void *ptr = page_address(page);
 
 				BUG_ON(!pbe);
-				pbe->orig_address = (unsigned long)page_address(page);
-				/* copy_page and memcpy are not usable for copying task structs. */
-				dst = (long *)pbe->address;
-				src = (long *)pbe->orig_address;
-				for (n = PAGE_SIZE / sizeof(long); n; n--)
-					*dst++ = *src++;
+				copy_data_page((void *)pbe->address, ptr);
+				pbe->orig_address = (unsigned long)ptr;
 				pbe = pbe->next;
 			}
 		}
-