---
 mm/compaction.c |   19 ++++++++++---------
 mm/vmscan.c     |    8 ++------
 2 files changed, 12 insertions(+), 15 deletions(-)

--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -271,9 +271,19 @@ static unsigned long isolate_migratepage
 	}
 
 	/* Time to isolate some pages for migration */
+	cond_resched();
 	spin_lock_irq(&zone->lru_lock);
 	for (; low_pfn < end_pfn; low_pfn++) {
 		struct page *page;
+
+		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
+			if (fatal_signal_pending(current))
+				break;
+			spin_unlock_irq(&zone->lru_lock);
+			cond_resched();
+			spin_lock_irq(&zone->lru_lock);
+		}
+
 		if (!pfn_valid_within(low_pfn))
 			continue;
 		nr_scanned++;
@@ -413,15 +423,6 @@ static int compact_finished(struct zone
 	if (cc->order == -1)
 		return COMPACT_CONTINUE;
 
-	/*
-	 * Generating only one page of the right order is not enough
-	 * for kswapd, we must continue until we're above the high
-	 * watermark as a pool for high order GFP_ATOMIC allocations
-	 * too.
-	 */
-	if (cc->compact_mode == COMPACT_MODE_KSWAPD)
-		return COMPACT_CONTINUE;
-
 	/* Direct compactor: Is a suitable page free? */
 	for (order = cc->order; order < MAX_ORDER; order++) {
 		/* Job done if page is free of the right migratetype */
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2385,7 +2385,6 @@ loop_again:
 	 * cause too much scanning of the lower zones.
 	 */
 	for (i = 0; i <= end_zone; i++) {
-		int compaction;
 		struct zone *zone = pgdat->node_zones + i;
 		int nr_slab;
 
@@ -2416,24 +2415,21 @@ loop_again:
 		sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 		total_scanned += sc.nr_scanned;
 
-		compaction = 0;
 		if (order &&
 		    zone_watermark_ok(zone, 0,
 				      high_wmark_pages(zone),
 				      end_zone, 0) &&
 		    !zone_watermark_ok(zone, order,
 				       high_wmark_pages(zone),
-				       end_zone, 0)) {
+				       end_zone, 0))
 			compact_zone_order(zone,
 					   order,
 					   sc.gfp_mask, false,
 					   COMPACT_MODE_KSWAPD);
-			compaction = 1;
-		}
 
 		if (zone->all_unreclaimable)
 			continue;
-		if (!compaction && nr_slab == 0 &&
+		if (nr_slab == 0 &&
 		    !zone_reclaimable(zone))
			zone->all_unreclaimable = 1;
		/*