Message-Id: <20150925075753.90ff10d13070717e3a6b10ca@gmail.com>
Date: Fri, 25 Sep 2015 07:57:53 +0200
From: Vitaly Wool <vitalywool@...il.com>
To: Seth Jennings <sjennings@...iantweb.net>,
Dan Streetman <ddstreet@...e.org>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: Minchan Kim <minchan@...nel.org>,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
linux-kernel <linux-kernel@...r.kernel.org>,
Linux-MM <linux-mm@...ck.org>
Subject: [PATCH v3] zbud: allow up to PAGE_SIZE allocations
From e219a88f4cd68842e7e04e37461aba6e06555d6a Mon Sep 17 00:00:00 2001
From: Vitaly Vul <vitaly.vul@...ymobile.com>
Date: Tue, 22 Sep 2015 14:07:01 +0200
Subject: [PATCH] zbud: allow up to PAGE_SIZE allocations
Currently zbud cannot allocate more than
PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE bytes. This is fine as
long as zswap is the only user, but other users of zbud may
(and likely will) want to allocate up to PAGE_SIZE. This patch
addresses that by skipping the creation of the zbud internal
structure at the beginning of the allocated page. As a zbud page
is then no longer guaranteed to contain a zbud header, the
following changes are applied throughout the code:
* page->lru is used for the zbud page lists
* page->private holds the 'under_reclaim' flag
page->private is also used to indicate whether the page contains
a zbud header at the beginning ('headless' flag).

This patch incorporates minor fixups after Seth's comments.
Signed-off-by: Vitaly Wool <vitalywool@...il.com>
---
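
Not part of the patch: a minimal sketch of how the two page->private
bits introduced above are meant to be queried. The helper names are
hypothetical; the flag names and the test_bit()-on-&page->private
pattern mirror the diff below.

#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/bitops.h>

/* Illustrative helpers only -- not added by this patch. */
static inline bool zbud_page_is_headless(struct page *page)
{
	/* set in zbud_alloc() when no zbud_header was written */
	return test_bit(ZPF_HEADLESS, &page->private);
}

static inline bool zbud_page_under_reclaim(struct page *page)
{
	/* set in zbud_reclaim_page() to protect the page from free */
	return test_bit(ZPF_UNDER_RECLAIM, &page->private);
}
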
mm/zbud.c | 168 ++++++++++++++++++++++++++++++++++++++++++--------------------
1 file changed, 114 insertions(+), 54 deletions(-)
diff --git a/mm/zbud.c b/mm/zbud.c
index fa48bcdf..619beba 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -105,18 +105,20 @@ struct zbud_pool {
/*
* struct zbud_header - zbud page metadata occupying the first chunk of each
- * zbud page.
+ * zbud page, except for HEADLESS pages
* @buddy: links the zbud page into the unbuddied/buddied lists in the pool
- * @lru: links the zbud page into the lru list in the pool
* @first_chunks: the size of the first buddy in chunks, 0 if free
* @last_chunks: the size of the last buddy in chunks, 0 if free
*/
struct zbud_header {
struct list_head buddy;
- struct list_head lru;
unsigned int first_chunks;
unsigned int last_chunks;
- bool under_reclaim;
+};
+
+enum zbud_page_flags {
+ ZPF_UNDER_RECLAIM,
+ ZPF_HEADLESS,
};
/*****************
@@ -221,6 +223,7 @@ MODULE_ALIAS("zpool-zbud");
*****************/
/* Just to make the code easier to read */
enum buddy {
+ HEADLESS,
FIRST,
LAST
};
@@ -238,18 +241,26 @@ static int size_to_chunks(size_t size)
static struct zbud_header *init_zbud_page(struct page *page)
{
struct zbud_header *zhdr = page_address(page);
+
+ INIT_LIST_HEAD(&page->lru);
+ page->private = 0;
+
zhdr->first_chunks = 0;
zhdr->last_chunks = 0;
INIT_LIST_HEAD(&zhdr->buddy);
- INIT_LIST_HEAD(&zhdr->lru);
- zhdr->under_reclaim = 0;
return zhdr;
}
/* Resets the struct page fields and frees the page */
static void free_zbud_page(struct zbud_header *zhdr)
{
- __free_page(virt_to_page(zhdr));
+ struct page *page = virt_to_page(zhdr);
+
+ /* do some cleanup */
+ INIT_LIST_HEAD(&page->lru);
+ page->private = 0;
+
+ __free_page(page);
}
/*
@@ -267,11 +278,17 @@ static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
* over the zbud header in the first chunk.
*/
handle = (unsigned long)zhdr;
- if (bud == FIRST)
+ switch (bud) {
+ case FIRST:
/* skip over zbud header */
handle += ZHDR_SIZE_ALIGNED;
- else /* bud == LAST */
+ break;
+ case LAST:
handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
+ break;
+ case HEADLESS:
+ break;
+ }
return handle;
}
@@ -287,6 +304,7 @@ static int num_free_chunks(struct zbud_header *zhdr)
/*
* Rather than branch for different situations, just use the fact that
* free buddies have a length of zero to simplify everything.
+ * NB: can't be used with HEADLESS pages.
*/
return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
}
@@ -360,24 +378,33 @@ int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
if (!size || (gfp & __GFP_HIGHMEM))
return -EINVAL;
- if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
+
+ if (size > PAGE_SIZE)
return -ENOSPC;
- chunks = size_to_chunks(size);
- spin_lock(&pool->lock);
- /* First, try to find an unbuddied zbud page. */
- zhdr = NULL;
- for_each_unbuddied_list(i, chunks) {
- if (!list_empty(&pool->unbuddied[i])) {
- zhdr = list_first_entry(&pool->unbuddied[i],
- struct zbud_header, buddy);
- list_del(&zhdr->buddy);
- if (zhdr->first_chunks == 0)
- bud = FIRST;
- else
- bud = LAST;
- goto found;
+ if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) {
+ chunks = 0;
+ bud = HEADLESS;
+ } else {
+ chunks = size_to_chunks(size);
+ spin_lock(&pool->lock);
+
+ /* First, try to find an unbuddied zbud page. */
+ zhdr = NULL;
+ for_each_unbuddied_list(i, chunks) {
+ if (!list_empty(&pool->unbuddied[i])) {
+ zhdr = list_first_entry(&pool->unbuddied[i],
+ struct zbud_header, buddy);
+ list_del(&zhdr->buddy);
+ page = virt_to_page(zhdr);
+ if (zhdr->first_chunks == 0)
+ bud = FIRST;
+ else
+ bud = LAST;
+ goto found;
+ }
}
+ bud = FIRST;
}
/* Couldn't find unbuddied zbud page, create new one */
@@ -388,7 +415,11 @@ int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
spin_lock(&pool->lock);
pool->pages_nr++;
zhdr = init_zbud_page(page);
- bud = FIRST;
+
+ if (bud == HEADLESS) {
+ set_bit(ZPF_HEADLESS, &page->private);
+ goto headless;
+ }
found:
if (bud == FIRST)
@@ -405,10 +436,12 @@ found:
list_add(&zhdr->buddy, &pool->buddied);
}
+headless:
/* Add/move zbud page to beginning of LRU */
- if (!list_empty(&zhdr->lru))
- list_del(&zhdr->lru);
- list_add(&zhdr->lru, &pool->lru);
+ if (!list_empty(&page->lru))
+ list_del(&page->lru);
+
+ list_add(&page->lru, &pool->lru);
*handle = encode_handle(zhdr, bud);
spin_unlock(&pool->lock);
@@ -430,28 +463,39 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
{
struct zbud_header *zhdr;
int freechunks;
+ struct page *page;
+ enum buddy bud;
spin_lock(&pool->lock);
zhdr = handle_to_zbud_header(handle);
+ page = virt_to_page(zhdr);
- /* If first buddy, handle will be page aligned */
- if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
+ if (!(handle & ~PAGE_MASK)) /* HEADLESS page stored */
+ bud = HEADLESS;
+ else if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK) {
+ bud = LAST;
zhdr->last_chunks = 0;
- else
+ } else {
+ /* If first buddy, handle will be page aligned */
+ bud = FIRST;
zhdr->first_chunks = 0;
+ }
- if (zhdr->under_reclaim) {
+ if (test_bit(ZPF_UNDER_RECLAIM, &page->private)) {
/* zbud page is under reclaim, reclaim will free */
spin_unlock(&pool->lock);
return;
}
- /* Remove from existing buddy list */
- list_del(&zhdr->buddy);
+ if (bud != HEADLESS) {
+ /* Remove from existing buddy list */
+ list_del(&zhdr->buddy);
+ }
- if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
+ if (bud == HEADLESS ||
+ (zhdr->first_chunks == 0 && zhdr->last_chunks == 0)) {
/* zbud page is empty, free */
- list_del(&zhdr->lru);
+ list_del(&page->lru);
free_zbud_page(zhdr);
pool->pages_nr--;
} else {
@@ -503,8 +547,9 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
*/
int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
{
- int i, ret, freechunks;
+ int i, ret = 0, freechunks;
struct zbud_header *zhdr;
+ struct page *page;
unsigned long first_handle = 0, last_handle = 0;
spin_lock(&pool->lock);
@@ -514,21 +559,30 @@ int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
return -EINVAL;
}
for (i = 0; i < retries; i++) {
- zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru);
- list_del(&zhdr->lru);
- list_del(&zhdr->buddy);
+ page = list_tail_entry(&pool->lru, struct page, lru);
+ list_del(&page->lru);
+
/* Protect zbud page against free */
- zhdr->under_reclaim = true;
- /*
- * We need encode the handles before unlocking, since we can
- * race with free that will set (first|last)_chunks to 0
- */
- first_handle = 0;
- last_handle = 0;
- if (zhdr->first_chunks)
- first_handle = encode_handle(zhdr, FIRST);
- if (zhdr->last_chunks)
- last_handle = encode_handle(zhdr, LAST);
+ set_bit(ZPF_UNDER_RECLAIM, &page->private);
+ zhdr = page_address(page);
+ if (!test_bit(ZPF_HEADLESS, &page->private)) {
+ list_del(&zhdr->buddy);
+ /*
+			 * We need to encode the handles before unlocking, since
+ * we can race with free that will set
+ * (first|last)_chunks to 0
+ */
+ first_handle = 0;
+ last_handle = 0;
+ if (zhdr->first_chunks)
+ first_handle = encode_handle(zhdr, FIRST);
+ if (zhdr->last_chunks)
+ last_handle = encode_handle(zhdr, LAST);
+ } else {
+ first_handle = encode_handle(zhdr, HEADLESS);
+ last_handle = 0;
+ }
+
spin_unlock(&pool->lock);
/* Issue the eviction callback(s) */
@@ -544,8 +598,14 @@ int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
}
next:
spin_lock(&pool->lock);
- zhdr->under_reclaim = false;
- if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
+ clear_bit(ZPF_UNDER_RECLAIM, &page->private);
+ if (test_bit(ZPF_HEADLESS, &page->private)) {
+ if (ret == 0) {
+ free_zbud_page(zhdr);
+ pool->pages_nr--;
+ spin_unlock(&pool->lock);
+ }
+ } else if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
/*
* Both buddies are now free, free the zbud page and
* return success.
@@ -565,7 +625,7 @@ next:
}
/* add to beginning of LRU */
- list_add(&zhdr->lru, &pool->lru);
+ list_add(&page->lru, &pool->lru);
}
spin_unlock(&pool->lock);
return -EAGAIN;
--
2.4.2
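
For context, not part of the patch: a hypothetical caller sketch of
what the relaxed size check enables. It assumes the existing zbud API
(zbud_alloc/zbud_map/zbud_unmap from include/linux/zbud.h); the
function name and the caller itself are made up for illustration.

#include <linux/zbud.h>
#include <linux/gfp.h>
#include <linux/string.h>

/* Illustrative only: store one whole page in a zbud pool. */
static int store_whole_page(struct zbud_pool *pool, const void *src)
{
	unsigned long handle;
	void *dst;
	int ret;

	/* A PAGE_SIZE request returned -ENOSPC before this patch. */
	ret = zbud_alloc(pool, PAGE_SIZE, GFP_KERNEL, &handle);
	if (ret)
		return ret;

	dst = zbud_map(pool, handle);
	memcpy(dst, src, PAGE_SIZE);
	zbud_unmap(pool, handle);

	return 0;
}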