Message-ID: <20190403180026.GC6778@tower.DHCP.thefacebook.com>
Date: Wed, 3 Apr 2019 18:00:30 +0000
From: Roman Gushchin <guro@...com>
To: "Tobin C. Harding" <tobin@...nel.org>
CC: Andrew Morton <akpm@...ux-foundation.org>,
Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Matthew Wilcox <willy@...radead.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v5 2/7] slob: Respect list_head abstraction layer

On Wed, Apr 03, 2019 at 10:05:40AM +1100, Tobin C. Harding wrote:
> Currently we reach inside the list_head. This is a violation of the
> layer of abstraction provided by the list_head. It makes the code
> fragile. More importantly, it makes the code wicked hard to understand.
>
> The code reaches into the list_head structure to counteract the fact
> that the list _may_ have been changed during slob_page_alloc(). Instead
> of this we can add a return parameter to slob_page_alloc() to signal
> that the list was modified (list_del() called with page->lru to remove
> page from the freelist).
>
> This code is concerned with an optimisation that counters the tendency
> of the first-fit allocation algorithm to fragment memory into many
> small chunks at the front of the memory pool. Since the page is only
> removed from the list when an allocation uses _all_ the remaining
> memory in the page, in this special case fragmentation does not occur
> and we therefore do not need the optimisation.
>
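(Aside for readers: the tendency the commit message describes is easy to
see in a toy userspace simulation of first fit. This is entirely
hypothetical illustration code, nothing taken from slob itself:)

#include <stdio.h>

#define NBLOCKS 4

int main(void)
{
	/* Free units per block; requests are served first fit. */
	int avail[NBLOCKS] = { 64, 64, 64, 64 };
	int reqs[] = { 60, 3, 60, 3, 60, 3 };
	size_t r;
	int i;

	for (r = 0; r < sizeof(reqs) / sizeof(reqs[0]); r++) {
		for (i = 0; i < NBLOCKS; i++) {
			if (avail[i] >= reqs[r]) {	/* first fit */
				avail[i] -= reqs[r];
				break;
			}
		}
	}

	/* Prints 1 1 1 64: tiny leftover stubs pile up at the front,
	 * and every later search has to walk past them. */
	for (i = 0; i < NBLOCKS; i++)
		printf("block %d: %2d units free\n", i, avail[i]);

	return 0;
}

Rotating the list so the next search starts at the block that last
satisfied an allocation (as the patch does below) spreads the leftovers
across the pool instead of concentrating them at the front.
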
> Add a return parameter to slob_page_alloc() to signal that the
> allocation used up the whole page and that the page was removed from the
> free list. After calling slob_page_alloc() check the return value just
> added and only attempt optimisation if the page is still on the list.
>
> Use list_head API instead of reaching into the list_head structure to
> check if sp is at the front of the list.
>
> Signed-off-by: Tobin C. Harding <tobin@...nel.org>
> ---
> mm/slob.c | 51 +++++++++++++++++++++++++++++++++++++--------------
> 1 file changed, 37 insertions(+), 14 deletions(-)
>
> diff --git a/mm/slob.c b/mm/slob.c
> index 307c2c9feb44..07356e9feaaa 100644
> --- a/mm/slob.c
> +++ b/mm/slob.c
> @@ -213,13 +213,26 @@ static void slob_free_pages(void *b, int order)
> }
>
> /*
> - * Allocate a slob block within a given slob_page sp.
> + * slob_page_alloc() - Allocate a slob block within a given slob_page sp.
> + * @sp: Page to look in.
> + * @size: Size of the allocation.
> + * @align: Allocation alignment.
> + * @page_removed_from_list: Return parameter.
> + *
> + * Tries to find a chunk of memory at least @size bytes big within @sp.
> + *
> + * Return: Pointer to memory if allocated, %NULL otherwise. If the
> + * allocation fills up @sp then the page is removed from the
> + * freelist; in this case @page_removed_from_list will be set to
> + * true (it is set to false otherwise).
> */
> -static void *slob_page_alloc(struct page *sp, size_t size, int align)
> +static void *slob_page_alloc(struct page *sp, size_t size, int align,
> + bool *page_removed_from_list)
Hi Tobin!

Isn't it better to make slob_page_alloc() return a bool value?
Then it's easier to ignore the returned value, no need to introduce "_unused".
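
Something like this, I mean -- an untested sketch only, with most of the
body elided and "blockp"/"removed" being names I just made up:

static bool slob_page_alloc(struct page *sp, size_t size, int align,
			    void **blockp)
{
	bool removed = false;
	...
	sp->units -= units;
	if (!sp->units) {
		clear_slob_page_free(sp);
		removed = true;
	}
	*blockp = cur;		/* instead of "return cur;" */
	return removed;
	...
}

Then the fresh-page path at the end of slob_alloc() can just drop the
return value on the floor:

	slob_page_alloc(sp, size, align, &b);
	BUG_ON(!b);
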
Thanks!
> {
> slob_t *prev, *cur, *aligned = NULL;
> int delta = 0, units = SLOB_UNITS(size);
>
> + *page_removed_from_list = false;
> for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
> slobidx_t avail = slob_units(cur);
>
> @@ -254,8 +267,10 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align)
> }
>
> sp->units -= units;
> - if (!sp->units)
> + if (!sp->units) {
> clear_slob_page_free(sp);
> + *page_removed_from_list = true;
> + }
> return cur;
> }
> if (slob_last(cur))
> @@ -269,10 +284,10 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align)
> static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
> {
> struct page *sp;
> - struct list_head *prev;
> struct list_head *slob_list;
> slob_t *b = NULL;
> unsigned long flags;
> + bool _unused;
>
> if (size < SLOB_BREAK1)
> slob_list = &free_slob_small;
> @@ -284,6 +299,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
> spin_lock_irqsave(&slob_lock, flags);
> /* Iterate through each partially free page, try to find room */
> list_for_each_entry(sp, slob_list, lru) {
> + bool page_removed_from_list = false;
> #ifdef CONFIG_NUMA
> /*
> * If there's a node specification, search for a partial
> @@ -296,18 +312,25 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
> if (sp->units < SLOB_UNITS(size))
> continue;
>
> - /* Attempt to alloc */
> - prev = sp->lru.prev;
> - b = slob_page_alloc(sp, size, align);
> + b = slob_page_alloc(sp, size, align, &page_removed_from_list);
> if (!b)
> continue;
>
> - /* Improve fragment distribution and reduce our average
> - * search time by starting our next search here. (see
> - * Knuth vol 1, sec 2.5, pg 449) */
> - if (prev != slob_list->prev &&
> - slob_list->next != prev->next)
> - list_move_tail(slob_list, prev->next);
> + /*
> + * If slob_page_alloc() removed sp from the list then we
> + * cannot call list functions on sp. In that case the
> + * allocation did not fragment the page anyway, so the
> + * optimisation is unnecessary.
> + */
> + if (!page_removed_from_list) {
> + /*
> + * Improve fragment distribution and reduce our average
> + * search time by starting our next search here. (see
> + * Knuth vol 1, sec 2.5, pg 449)
> + */
> + if (!list_is_first(&sp->lru, slob_list))
> + list_rotate_to_front(&sp->lru, slob_list);
> + }
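
(Aside for anyone reading this without patch 1/7 of the series at hand:
list_rotate_to_front() is the new list.h helper introduced there. If I
remember it right, it boils down to a single O(1) move of the list head
sentinel, roughly:

static inline void list_rotate_to_front(struct list_head *list,
					struct list_head *head)
{
	/*
	 * Take the head sentinel out and re-add it just before @list;
	 * on a circular list that makes @list the new first entry.
	 */
	list_move_tail(head, list);
}

so the rotation costs no more than a list_del() plus list_add_tail().)
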
> break;
> }
> spin_unlock_irqrestore(&slob_lock, flags);
> @@ -326,7 +349,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
> INIT_LIST_HEAD(&sp->lru);
> set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
> set_slob_page_free(sp, slob_list);
> - b = slob_page_alloc(sp, size, align);
> + b = slob_page_alloc(sp, size, align, &_unused);
> BUG_ON(!b);
> spin_unlock_irqrestore(&slob_lock, flags);
> }
> --
> 2.21.0
>