Message-ID: <20080623230517.GA4564@csn.ul.ie>
Date: Tue, 24 Jun 2008 00:05:18 +0100
From: Mel Gorman <mel@....ul.ie>
To: Andy Whitcroft <apw@...dowen.org>
Cc: Jon Tollefson <kniht@...ux.vnet.ibm.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Nick Piggin <npiggin@...e.de>,
Nishanth Aravamudan <nacc@...ibm.com>,
Adam Litke <agl@...ux.vnet.ibm.com>,
linux-kernel@...r.kernel.org, kernel-testers@...r.kernel.org,
linux-mm@...ck.org
Subject: Re: [PATCH 1/2] hugetlb reservations: move region tracking earlier

On (23/06/08 18:35), Andy Whitcroft didst pronounce:
> Move the region tracking code much earlier so we can use it for page
> presence tracking later on. No code is changed, just its location.
>
> Signed-off-by: Andy Whitcroft <apw@...dowen.org>

Straightforward code move.

Acked-by: Mel Gorman <mel@....ul.ie>
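
As an aside for anyone reading the moved code for the first time, the value
of these helpers is the two-phase calling convention they implement:
region_chg() is called first and reports how many pages in [f, t) are not
yet covered, pre-allocating a zero-sized node where necessary so that the
later commit step can never fail, and region_add() then records the range,
merging any regions it now overlaps. A rough, self-contained userspace
sketch of that pattern (simplified names, a plain singly-linked list instead
of list_head, no locking -- an illustration, not the kernel implementation)
might look like:

	/*
	 * Userspace sketch of the reserve/commit pattern used by the hugetlb
	 * region tracking: region_chg() says how much a range would cost,
	 * region_add() commits it.  Illustration only.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct region {
		long from, to;		/* covers pages [from, to) */
		struct region *next;	/* sorted by 'from', non-overlapping */
	};

	/* How many pages in [f, t) are not already covered? */
	static long region_chg(const struct region *head, long f, long t)
	{
		const struct region *rg;
		long chg = t - f;

		for (rg = head; rg && rg->from < t; rg = rg->next) {
			long lo = rg->from > f ? rg->from : f;
			long hi = rg->to < t ? rg->to : t;

			if (hi > lo)
				chg -= hi - lo;	/* already reserved */
		}
		return chg;
	}

	/* Commit [f, t): insert it and merge with anything it touches. */
	static void region_add(struct region **head, long f, long t)
	{
		struct region **pprev = head;
		struct region *rg;

		/* Skip regions that end before we start. */
		while (*pprev && (*pprev)->to < f)
			pprev = &(*pprev)->next;

		/* Swallow every region that overlaps or abuts [f, t). */
		while ((rg = *pprev) && rg->from <= t) {
			if (rg->from < f)
				f = rg->from;
			if (rg->to > t)
				t = rg->to;
			*pprev = rg->next;
			free(rg);
		}

		rg = malloc(sizeof(*rg));
		if (!rg)
			return;	/* the kernel avoids this by preallocating in region_chg() */
		rg->from = f;
		rg->to = t;
		rg->next = *pprev;
		*pprev = rg;
	}

	int main(void)
	{
		struct region *resv = NULL;

		printf("charge %ld\n", region_chg(resv, 0, 4));	/* 4: nothing covered */
		region_add(&resv, 0, 4);

		printf("charge %ld\n", region_chg(resv, 2, 6));	/* 2: only [4, 6) is new */
		region_add(&resv, 2, 6);

		printf("charge %ld\n", region_chg(resv, 0, 8));	/* 2: only [6, 8) is new */
		return 0;
	}

The point the "Subtle" comment in region_chg() is making is visible here:
because the charge step does any allocation up front, the commit step never
needs to allocate and so cannot fail.
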
> ---
> mm/hugetlb.c | 246 +++++++++++++++++++++++++++++----------------------------
> 1 files changed, 125 insertions(+), 121 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 0f76ed1..d701e39 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -47,6 +47,131 @@ static unsigned long __initdata default_hstate_size;
> static DEFINE_SPINLOCK(hugetlb_lock);
>
> /*
> + * Region tracking -- allows tracking of reservations and instantiated pages
> + * across the pages in a mapping.
> + */
> +struct file_region {
> + struct list_head link;
> + long from;
> + long to;
> +};
> +
> +static long region_add(struct list_head *head, long f, long t)
> +{
> + struct file_region *rg, *nrg, *trg;
> +
> + /* Locate the region we are either in or before. */
> + list_for_each_entry(rg, head, link)
> + if (f <= rg->to)
> + break;
> +
> + /* Round our left edge to the current segment if it encloses us. */
> + if (f > rg->from)
> + f = rg->from;
> +
> + /* Check for and consume any regions we now overlap with. */
> + nrg = rg;
> + list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
> + if (&rg->link == head)
> + break;
> + if (rg->from > t)
> + break;
> +
> + /* If this area reaches higher then extend our area to
> + * include it completely. If this is not the first area
> + * which we intend to reuse, free it. */
> + if (rg->to > t)
> + t = rg->to;
> + if (rg != nrg) {
> + list_del(&rg->link);
> + kfree(rg);
> + }
> + }
> + nrg->from = f;
> + nrg->to = t;
> + return 0;
> +}
> +
> +static long region_chg(struct list_head *head, long f, long t)
> +{
> + struct file_region *rg, *nrg;
> + long chg = 0;
> +
> + /* Locate the region we are before or in. */
> + list_for_each_entry(rg, head, link)
> + if (f <= rg->to)
> + break;
> +
> + /* If we are below the current region then a new region is required.
> + * Subtle, allocate a new region at the position but make it zero
> + * size such that we can guarantee to record the reservation. */
> + if (&rg->link == head || t < rg->from) {
> + nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
> + if (!nrg)
> + return -ENOMEM;
> + nrg->from = f;
> + nrg->to = f;
> + INIT_LIST_HEAD(&nrg->link);
> + list_add(&nrg->link, rg->link.prev);
> +
> + return t - f;
> + }
> +
> + /* Round our left edge to the current segment if it encloses us. */
> + if (f > rg->from)
> + f = rg->from;
> + chg = t - f;
> +
> + /* Check for and consume any regions we now overlap with. */
> + list_for_each_entry(rg, rg->link.prev, link) {
> + if (&rg->link == head)
> + break;
> + if (rg->from > t)
> + return chg;
> +
> + /* We overlap with this area, if it extends futher than
> + * us then we must extend ourselves. Account for its
> + * existing reservation. */
> + if (rg->to > t) {
> + chg += rg->to - t;
> + t = rg->to;
> + }
> + chg -= rg->to - rg->from;
> + }
> + return chg;
> +}
> +
> +static long region_truncate(struct list_head *head, long end)
> +{
> + struct file_region *rg, *trg;
> + long chg = 0;
> +
> + /* Locate the region we are either in or before. */
> + list_for_each_entry(rg, head, link)
> + if (end <= rg->to)
> + break;
> + if (&rg->link == head)
> + return 0;
> +
> + /* If we are in the middle of a region then adjust it. */
> + if (end > rg->from) {
> + chg = rg->to - end;
> + rg->to = end;
> + rg = list_entry(rg->link.next, typeof(*rg), link);
> + }
> +
> + /* Drop any remaining regions. */
> + list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
> + if (&rg->link == head)
> + break;
> + chg += rg->to - rg->from;
> + list_del(&rg->link);
> + kfree(rg);
> + }
> + return chg;
> +}
> +
> +/*
> * Convert the address within this vma to the page offset within
> * the mapping, in base page units.
> */
> @@ -649,127 +774,6 @@ static void return_unused_surplus_pages(struct hstate *h,
> }
> }
>
> -struct file_region {
> - struct list_head link;
> - long from;
> - long to;
> -};
> -
> -static long region_add(struct list_head *head, long f, long t)
> -{
> - struct file_region *rg, *nrg, *trg;
> -
> - /* Locate the region we are either in or before. */
> - list_for_each_entry(rg, head, link)
> - if (f <= rg->to)
> - break;
> -
> - /* Round our left edge to the current segment if it encloses us. */
> - if (f > rg->from)
> - f = rg->from;
> -
> - /* Check for and consume any regions we now overlap with. */
> - nrg = rg;
> - list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
> - if (&rg->link == head)
> - break;
> - if (rg->from > t)
> - break;
> -
> - /* If this area reaches higher then extend our area to
> - * include it completely. If this is not the first area
> - * which we intend to reuse, free it. */
> - if (rg->to > t)
> - t = rg->to;
> - if (rg != nrg) {
> - list_del(&rg->link);
> - kfree(rg);
> - }
> - }
> - nrg->from = f;
> - nrg->to = t;
> - return 0;
> -}
> -
> -static long region_chg(struct list_head *head, long f, long t)
> -{
> - struct file_region *rg, *nrg;
> - long chg = 0;
> -
> - /* Locate the region we are before or in. */
> - list_for_each_entry(rg, head, link)
> - if (f <= rg->to)
> - break;
> -
> - /* If we are below the current region then a new region is required.
> - * Subtle, allocate a new region at the position but make it zero
> - * size such that we can guarantee to record the reservation. */
> - if (&rg->link == head || t < rg->from) {
> - nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
> - if (!nrg)
> - return -ENOMEM;
> - nrg->from = f;
> - nrg->to = f;
> - INIT_LIST_HEAD(&nrg->link);
> - list_add(&nrg->link, rg->link.prev);
> -
> - return t - f;
> - }
> -
> - /* Round our left edge to the current segment if it encloses us. */
> - if (f > rg->from)
> - f = rg->from;
> - chg = t - f;
> -
> - /* Check for and consume any regions we now overlap with. */
> - list_for_each_entry(rg, rg->link.prev, link) {
> - if (&rg->link == head)
> - break;
> - if (rg->from > t)
> - return chg;
> -
> - /* We overlap with this area, if it extends futher than
> - * us then we must extend ourselves. Account for its
> - * existing reservation. */
> - if (rg->to > t) {
> - chg += rg->to - t;
> - t = rg->to;
> - }
> - chg -= rg->to - rg->from;
> - }
> - return chg;
> -}
> -
> -static long region_truncate(struct list_head *head, long end)
> -{
> - struct file_region *rg, *trg;
> - long chg = 0;
> -
> - /* Locate the region we are either in or before. */
> - list_for_each_entry(rg, head, link)
> - if (end <= rg->to)
> - break;
> - if (&rg->link == head)
> - return 0;
> -
> - /* If we are in the middle of a region then adjust it. */
> - if (end > rg->from) {
> - chg = rg->to - end;
> - rg->to = end;
> - rg = list_entry(rg->link.next, typeof(*rg), link);
> - }
> -
> - /* Drop any remaining regions. */
> - list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
> - if (&rg->link == head)
> - break;
> - chg += rg->to - rg->from;
> - list_del(&rg->link);
> - kfree(rg);
> - }
> - return chg;
> -}
> -
> /*
> * Determine if the huge page at addr within the vma has an associated
> * reservation. Where it does not we will need to logically increase
> --
> 1.5.6.205.g7ca3a
>
--
Mel Gorman
Part-time Phd Student                          Linux Technology Center
University of Limerick                         IBM Dublin Software Lab