Free up swap space when swap is getting full (vm_swap_full()).

Pages that are re-activated during reclaim in shrink_page_list() are
not swap candidates, so their swap slot is released immediately via
remove_exclusive_swap_page().  Pages moved back onto the active list
in shrink_active_list() have their swap cache stripped in bulk through
a new helper, pagevec_swap_free(), which lock-trylocks each page and
re-checks PageSwapCache under the page lock before freeing the slot.

--- linux-2.6.20.x86_64/mm/vmscan.c.swapfull	2007-02-16 06:47:02.000000000 -0500
+++ linux-2.6.20.x86_64/mm/vmscan.c	2007-02-16 07:03:30.000000000 -0500
@@ -587,6 +587,9 @@ free_it:
 		continue;
 
 activate_locked:
+		/* Not a candidate for swapping, so reclaim swap space. */
+		if (PageSwapCache(page) && vm_swap_full())
+			remove_exclusive_swap_page(page);
 		SetPageActive(page);
 		pgactivate++;
 keep_locked:
@@ -875,6 +878,11 @@ force_reclaim_mapped:
 		pagevec_strip(&pvec);
 		spin_lock_irq(&zone->lru_lock);
 	}
+	if (vm_swap_full()) {
+		spin_unlock_irq(&zone->lru_lock);
+		pagevec_swap_free(&pvec);
+		spin_lock_irq(&zone->lru_lock);
+	}
 
 	pgmoved = 0;
 	while (!list_empty(&l_active)) {
--- linux-2.6.20.x86_64/mm/swap.c.swapfull	2007-02-16 07:09:38.000000000 -0500
+++ linux-2.6.20.x86_64/mm/swap.c	2007-02-16 07:05:00.000000000 -0500
@@ -420,6 +420,24 @@ void pagevec_strip(struct pagevec *pvec)
 	}
 }
 
+/*
+ * Try to free swap space from the pages in a pagevec
+ */
+void pagevec_swap_free(struct pagevec *pvec)
+{
+	int i;
+
+	for (i = 0; i < pagevec_count(pvec); i++) {
+		struct page *page = pvec->pages[i];
+
+		if (PageSwapCache(page) && !TestSetPageLocked(page)) {
+			if (PageSwapCache(page))
+				remove_exclusive_swap_page(page);
+			unlock_page(page);
+		}
+	}
+}
+
 /**
  * pagevec_lookup - gang pagecache lookup
  * @pvec: Where the resulting pages are placed
--- linux-2.6.20.x86_64/include/linux/pagevec.h.swapfull	2007-02-16 07:06:29.000000000 -0500
+++ linux-2.6.20.x86_64/include/linux/pagevec.h	2007-02-16 07:06:41.000000000 -0500
@@ -26,6 +26,7 @@ void __pagevec_free(struct pagevec *pvec
 void __pagevec_lru_add(struct pagevec *pvec);
 void __pagevec_lru_add_active(struct pagevec *pvec);
 void pagevec_strip(struct pagevec *pvec);
+void pagevec_swap_free(struct pagevec *pvec);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
 		pgoff_t start, unsigned nr_pages);
 unsigned pagevec_lookup_tag(struct pagevec *pvec,