Message-Id: <1191577623.22357.69.camel@twins>
Date: Fri, 05 Oct 2007 11:47:03 +0200
From: Peter Zijlstra <a.p.zijlstra@...llo.nl>
To: Miklos Szeredi <miklos@...redi.hu>
Cc: akpm@...ux-foundation.org, wfg@...l.ustc.edu.cn,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] remove throttle_vm_writeout()
On Fri, 2007-10-05 at 11:22 +0200, Miklos Szeredi wrote:
> > So how do we end up with more writeback pages than that? should we teach
> > pdflush about these limits as well?
>
> Ugh.
>
> I think we should rather fix vmscan to not spin when all pages of a
> zone are already under writeout. Which is the _real_ problem,
> according to Andrew.
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 4ef4d22..eff2438 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -88,7 +88,7 @@ static inline void wait_on_inode(struct inode *inode)
 int wakeup_pdflush(long nr_pages);
 void laptop_io_completion(void);
 void laptop_sync_completion(void);
-void throttle_vm_writeout(gfp_t gfp_mask);
+void throttle_vm_writeout(struct zone *zone, gfp_t gfp_mask);
 
 /* These are exported to sysctl. */
 extern int dirty_background_ratio;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index eec1481..f949997 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -326,11 +326,8 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 }
 EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
-void throttle_vm_writeout(gfp_t gfp_mask)
+void throttle_vm_writeout(struct zone *zone, gfp_t gfp_mask)
 {
-        long background_thresh;
-        long dirty_thresh;
-
         if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) {
                 /*
                  * The caller might hold locks which can prevent IO completion
@@ -342,17 +339,16 @@ void throttle_vm_writeout(gfp_t gfp_mask)
         }
 
         for ( ; ; ) {
-                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+                unsigned long thresh = zone_page_state(zone, NR_ACTIVE) +
+                        zone_page_state(zone, NR_INACTIVE);
 
-                /*
-                 * Boost the allowable dirty threshold a bit for page
-                 * allocators so they don't get DoS'ed by heavy writers
-                 */
-                dirty_thresh += dirty_thresh / 10;      /* wheeee... */
+                /*
+                 * wait when 75% of the zone's pages are under writeback
+                 */
+                thresh -= thresh >> 2;
+                if (zone_page_state(zone, NR_WRITEBACK) < thresh)
+                        break;
 
-                if (global_page_state(NR_UNSTABLE_NFS) +
-                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
-                                break;
                 congestion_wait(WRITE, HZ/10);
         }
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1be5a63..7dd6bd9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -948,7 +948,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
                 }
         }
 
-        throttle_vm_writeout(sc->gfp_mask);
+        throttle_vm_writeout(zone, sc->gfp_mask);
 
         atomic_dec(&zone->reclaim_in_progress);
         return nr_reclaimed;
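
For reference, the new per-zone check in the loop above reduces to a small
piece of arithmetic: keep waiting while NR_WRITEBACK is at or above 75% of
the zone's NR_ACTIVE + NR_INACTIVE pages. A minimal userspace sketch of just
that condition (illustration only; writeback_should_throttle() and the sample
counters are made up and are not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical helper mirroring the check in the patched
 * throttle_vm_writeout(): throttle while the number of pages under
 * writeback is at least 75% of the zone's active + inactive pages.
 */
static bool writeback_should_throttle(unsigned long nr_active,
                                      unsigned long nr_inactive,
                                      unsigned long nr_writeback)
{
        unsigned long thresh = nr_active + nr_inactive;

        thresh -= thresh >> 2;          /* keep 75% of the total   */
        return nr_writeback >= thresh;  /* patch breaks out below  */
}

int main(void)
{
        /* Made-up zone counters, in pages. */
        unsigned long active = 60000, inactive = 20000;

        printf("writeback=50000 -> throttle: %d\n",
               writeback_should_throttle(active, inactive, 50000));
        printf("writeback=70000 -> throttle: %d\n",
               writeback_should_throttle(active, inactive, 70000));
        return 0;
}

Built with gcc, this prints 0 for the first case and 1 for the second,
corresponding to the break and congestion_wait() paths in the patched loop.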