The generic writeback routines are moving away from congestion_wait()
in favor of get_request_wait(), i.e. waiting on the block request
queues. Introduce the missing writeback wait queue for NFS; without
it, the number of NFS pages under writeback can grow out of control,
exhausting all PG_dirty pages.

CC: Jens Axboe
CC: Chris Mason
CC: Peter Zijlstra
CC: Trond Myklebust
Signed-off-by: Wu Fengguang
---
 fs/nfs/client.c           |    2 
 fs/nfs/write.c            |   93 +++++++++++++++++++++++++++++++-----
 include/linux/nfs_fs_sb.h |    1 
 3 files changed, 85 insertions(+), 11 deletions(-)
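
A worked example of the thresholds (review note, not part of the
changelog): take nfs_congestion_kb = 16384 purely for illustration
(the real value is scaled to available memory at init time and is
tunable via /proc/sys/fs/nfs/nfs_congestion_kb), and 4kB pages
(PAGE_SHIFT == 12):

	limit          = 16384 >> (12 - 10) = 4096 pages
	NFS_WAIT_PAGES = 1024 >> (12 - 10)  = 256 pages
	wakeup window  = min(4096 / 8, 256) = 256 pages

ASYNC writers then block once nfss->writeback exceeds 4096 pages and
are woken when it drops below 4096 - 256 = 3840 pages; SYNC writers
block above 2 * 4096 = 8192 pages and are woken below 8192 - 256 =
7936 pages.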
--- linux-next.orig/fs/nfs/write.c	2011-03-03 14:03:40.000000000 +0800
+++ linux-next/fs/nfs/write.c	2011-03-03 14:03:40.000000000 +0800
@@ -185,11 +185,68 @@ static int wb_priority(struct writeback_
  * NFS congestion control
  */
 
+#define NFS_WAIT_PAGES	(1024L >> (PAGE_SHIFT - 10))
 int nfs_congestion_kb;
 
-#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
-#define NFS_CONGESTION_OFF_THRESH \
-	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
+/*
+ * SYNC requests will block on (2*limit) and wakeup on (2*limit-NFS_WAIT_PAGES)
+ * ASYNC requests will block on (limit) and wakeup on (limit - NFS_WAIT_PAGES)
+ * In this way SYNC writes will never be blocked by ASYNC ones.
+ */
+
+static void nfs_set_congested(long nr, struct backing_dev_info *bdi)
+{
+	long limit = nfs_congestion_kb >> (PAGE_SHIFT - 10);
+
+	if (nr > limit && !test_bit(BDI_async_congested, &bdi->state))
+		set_bdi_congested(bdi, BLK_RW_ASYNC);
+	else if (nr > 2 * limit && !test_bit(BDI_sync_congested, &bdi->state))
+		set_bdi_congested(bdi, BLK_RW_SYNC);
+}
+
+static void nfs_wait_contested(int is_sync,
+			       struct backing_dev_info *bdi,
+			       wait_queue_head_t *wqh)
+{
+	int waitbit = is_sync ? BDI_sync_congested : BDI_async_congested;
+	DEFINE_WAIT(wait);
+
+	if (!test_bit(waitbit, &bdi->state))
+		return;
+
+	for (;;) {
+		prepare_to_wait(&wqh[is_sync], &wait, TASK_UNINTERRUPTIBLE);
+		if (!test_bit(waitbit, &bdi->state))
+			break;
+
+		io_schedule();
+	}
+	finish_wait(&wqh[is_sync], &wait);
+}
+
+static void nfs_wakeup_congested(long nr,
+				 struct backing_dev_info *bdi,
+				 wait_queue_head_t *wqh)
+{
+	long limit = nfs_congestion_kb >> (PAGE_SHIFT - 10);
+
+	if (nr < 2 * limit - min(limit / 8, NFS_WAIT_PAGES)) {
+		if (test_bit(BDI_sync_congested, &bdi->state)) {
+			clear_bdi_congested(bdi, BLK_RW_SYNC);
+			smp_mb__after_clear_bit();
+		}
+		if (waitqueue_active(&wqh[BLK_RW_SYNC]))
+			wake_up(&wqh[BLK_RW_SYNC]);
+	}
+	if (nr < limit - min(limit / 8, NFS_WAIT_PAGES)) {
+		if (test_bit(BDI_async_congested, &bdi->state)) {
+			clear_bdi_congested(bdi, BLK_RW_ASYNC);
+			smp_mb__after_clear_bit();
+		}
+		if (waitqueue_active(&wqh[BLK_RW_ASYNC]))
+			wake_up(&wqh[BLK_RW_ASYNC]);
+	}
+}
 
 static int nfs_set_page_writeback(struct page *page)
 {
@@ -200,11 +257,8 @@ static int nfs_set_page_writeback(struct
 		struct nfs_server *nfss = NFS_SERVER(inode);
 
 		page_cache_get(page);
-		if (atomic_long_inc_return(&nfss->writeback) >
-				NFS_CONGESTION_ON_THRESH) {
-			set_bdi_congested(&nfss->backing_dev_info,
-					BLK_RW_ASYNC);
-		}
+		nfs_set_congested(atomic_long_inc_return(&nfss->writeback),
+				  &nfss->backing_dev_info);
 	}
 	return ret;
 }
@@ -216,8 +270,10 @@ static void nfs_end_page_writeback(struc
 	end_page_writeback(page);
 	page_cache_release(page);
-	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
-		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
+
+	nfs_wakeup_congested(atomic_long_dec_return(&nfss->writeback),
+			     &nfss->backing_dev_info,
+			     nfss->writeback_wait);
 }
 
 static struct nfs_page
 *nfs_find_and_lock_request(struct page *page, bool nonblock)
@@ -318,19 +374,34 @@ static int nfs_writepage_locked(struct p
 
 int nfs_writepage(struct page *page, struct writeback_control *wbc)
 {
+	struct inode *inode = page->mapping->host;
+	struct nfs_server *nfss = NFS_SERVER(inode);
 	int ret;
 
 	ret = nfs_writepage_locked(page, wbc);
 	unlock_page(page);
+
+	nfs_wait_contested(wbc->sync_mode == WB_SYNC_ALL,
+			   &nfss->backing_dev_info,
+			   nfss->writeback_wait);
+
 	return ret;
 }
 
-static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
+static int nfs_writepages_callback(struct page *page,
+				   struct writeback_control *wbc, void *data)
 {
+	struct inode *inode = page->mapping->host;
+	struct nfs_server *nfss = NFS_SERVER(inode);
 	int ret;
 
 	ret = nfs_do_writepage(page, wbc, data);
 	unlock_page(page);
+
+	nfs_wait_contested(wbc->sync_mode == WB_SYNC_ALL,
+			   &nfss->backing_dev_info,
+			   nfss->writeback_wait);
+
 	return ret;
 }
 
--- linux-next.orig/include/linux/nfs_fs_sb.h	2011-03-03 14:03:38.000000000 +0800
+++ linux-next/include/linux/nfs_fs_sb.h	2011-03-03 14:03:40.000000000 +0800
@@ -102,6 +102,7 @@ struct nfs_server {
 	struct nfs_iostats __percpu *io_stats;	/* I/O statistics */
 	struct backing_dev_info	backing_dev_info;
 	atomic_long_t		writeback;	/* number of writeback pages */
+	wait_queue_head_t	writeback_wait[2];
 	int			flags;		/* various flags */
 	unsigned int		caps;		/* server capabilities */
 	unsigned int		rsize;		/* read size */
--- linux-next.orig/fs/nfs/client.c	2011-03-03 14:03:38.000000000 +0800
+++ linux-next/fs/nfs/client.c	2011-03-03 14:03:40.000000000 +0800
@@ -1042,6 +1042,8 @@ static struct nfs_server *nfs_alloc_serv
 	INIT_LIST_HEAD(&server->delegations);
 
 	atomic_set(&server->active, 0);
+	init_waitqueue_head(&server->writeback_wait[BLK_RW_SYNC]);
+	init_waitqueue_head(&server->writeback_wait[BLK_RW_ASYNC]);
 
 	server->io_stats = nfs_alloc_iostats();
 	if (!server->io_stats) {
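
Review note, not part of the patch: below is a minimal, self-contained
userspace sketch of the two-watermark scheme above, for anyone who
wants to see the threshold arithmetic in isolation. Plain bools stand
in for the BDI congestion bits, the 16384 value stands in for
nfs_congestion_kb, and locking, memory barriers and the wait queues
themselves are deliberately left out, so this only demonstrates when
the congested state is entered and left:

/* congest.c -- illustrative only, build with: cc -o congest congest.c */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT	12				/* assume 4kB pages */
#define WAIT_PAGES	(1024L >> (PAGE_SHIFT - 10))	/* cf. NFS_WAIT_PAGES */

static long congestion_kb = 16384;	/* stands in for nfs_congestion_kb */
static bool async_congested;		/* cf. BDI_async_congested */
static bool sync_congested;		/* cf. BDI_sync_congested */

static long min_long(long a, long b)
{
	return a < b ? a : b;
}

/* cf. nfs_set_congested(): nr is the writeback count after an increment */
static void set_congested(long nr)
{
	long limit = congestion_kb >> (PAGE_SHIFT - 10);

	if (nr > limit && !async_congested)
		async_congested = true;
	else if (nr > 2 * limit && !sync_congested)
		sync_congested = true;
}

/* cf. nfs_wakeup_congested(): nr is the writeback count after a decrement */
static void wakeup_congested(long nr)
{
	long limit = congestion_kb >> (PAGE_SHIFT - 10);
	long window = min_long(limit / 8, WAIT_PAGES);

	if (nr < 2 * limit - window)
		sync_congested = false;		/* kernel also wakes wqh[BLK_RW_SYNC] */
	if (nr < limit - window)
		async_congested = false;	/* kernel also wakes wqh[BLK_RW_ASYNC] */
}

int main(void)
{
	long limit = congestion_kb >> (PAGE_SHIFT - 10);
	long nr;

	/* ramp the writeback page count up, then back down, and report
	 * every state transition */
	for (nr = 1; nr <= 3 * limit; nr++) {
		bool a = async_congested, s = sync_congested;

		set_congested(nr);
		if (a != async_congested || s != sync_congested)
			printf("up   nr=%5ld async=%d sync=%d\n",
			       nr, async_congested, sync_congested);
	}
	for (nr = 3 * limit - 1; nr >= 0; nr--) {
		bool a = async_congested, s = sync_congested;

		wakeup_congested(nr);
		if (a != async_congested || s != sync_congested)
			printf("down nr=%5ld async=%d sync=%d\n",
			       nr, async_congested, sync_congested);
	}
	return 0;
}

With these illustrative numbers it prints transitions at nr=4097
(async on), nr=8193 (sync on), nr=7935 (sync off) and nr=3839 (async
off), which makes the two properties of the scheme visible: the sync
and async watermark pairs sit a full "limit" apart, so WB_SYNC_ALL
writers are never throttled behind background writeback, and each pair
has a hysteresis window that keeps the congestion state from flapping
right at the threshold.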