Message-ID: <CAMJBoFN_uP0AEANqM2UiOM0_9sp1g2aD6EwuP8jZhCG7Dtpj9A@mail.gmail.com>
Date: Tue, 1 Nov 2016 22:03:46 +0100
From: Vitaly Wool <vitalywool@...il.com>
To: Dan Streetman <ddstreet@...e.org>
Cc: Linux-MM <linux-mm@...ck.org>,
linux-kernel <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>
Subject: Re: [PATCHv3 1/3] z3fold: make counters atomic
On Tue, Nov 1, 2016 at 9:03 PM, Dan Streetman <ddstreet@...e.org> wrote:
> On Thu, Oct 27, 2016 at 7:08 AM, Vitaly Wool <vitalywool@...il.com> wrote:
>> This patch converts the per-pool pages_nr counter to atomic64_t.
>> It also introduces a new atomic64_t counter, unbuddied_nr, to
>> track the number of unbuddied (compactable) z3fold pages.
>
> so, with the use of workqueues, do we still need the unbuddied_nr
> counter? It doesn't seem to be used in later patches?
I'm going to add sysfs/debugfs accounting a bit later, so it won't stay unused.
Also, with a per-page lock (if I come up with something reasonable) it could
still be worth going back to the shrinker.
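For what it's worth, a minimal sketch of what that debugfs accounting
could look like, just exposing the two atomic counters read-only (the
"z3fold" directory name and the z3fold_debugfs_init() helper are
placeholders of mine, not part of this series):

/* Hypothetical debugfs export of the two pool counters. */
#include <linux/debugfs.h>

static struct dentry *z3fold_debugfs_root;

static int pages_nr_get(void *data, u64 *val)
{
	struct z3fold_pool *pool = data;

	*val = atomic64_read(&pool->pages_nr);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(pages_nr_fops, pages_nr_get, NULL, "%llu\n");

static int unbuddied_nr_get(void *data, u64 *val)
{
	struct z3fold_pool *pool = data;

	*val = atomic64_read(&pool->unbuddied_nr);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(unbuddied_nr_fops, unbuddied_nr_get, NULL, "%llu\n");

/* Call once per pool, e.g. from z3fold_create_pool(). */
static void z3fold_debugfs_init(struct z3fold_pool *pool)
{
	z3fold_debugfs_root = debugfs_create_dir("z3fold", NULL);
	debugfs_create_file("pages_nr", 0444, z3fold_debugfs_root,
			    pool, &pages_nr_fops);
	debugfs_create_file("unbuddied_nr", 0444, z3fold_debugfs_root,
			    pool, &unbuddied_nr_fops);
}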
> changing the pages_nr to atomic is a good idea though, so we can
> safely read it without needing the pool lock.
>
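Agreed. To spell out the win: before this patch, a reader that wanted a
tear-free value of the plain u64 (which matters on 32-bit, where a u64
load isn't atomic) would have had to take the pool lock, e.g.

	spin_lock(&pool->lock);
	size = pool->pages_nr;
	spin_unlock(&pool->lock);

whereas with atomic64_t a bare

	size = atomic64_read(&pool->pages_nr);

is safe on its own, which is exactly what z3fold_get_pool_size() ends
up doing below.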
>>
>> Signed-off-by: Vitaly Wool <vitalywool@...il.com>
>> ---
>> mm/z3fold.c | 33 +++++++++++++++++++++++++--------
>> 1 file changed, 25 insertions(+), 8 deletions(-)
>>
>> diff --git a/mm/z3fold.c b/mm/z3fold.c
>> index 8f9e89c..5ac325a 100644
>> --- a/mm/z3fold.c
>> +++ b/mm/z3fold.c
>> @@ -69,6 +69,7 @@ struct z3fold_ops {
>> * @lru: list tracking the z3fold pages in LRU order by most recently
>> * added buddy.
>> * @pages_nr: number of z3fold pages in the pool.
>> + * @unbuddied_nr: number of unbuddied z3fold pages in the pool.
>> * @ops: pointer to a structure of user defined operations specified at
>> * pool creation time.
>> *
>> @@ -80,7 +81,8 @@ struct z3fold_pool {
>> struct list_head unbuddied[NCHUNKS];
>> struct list_head buddied;
>> struct list_head lru;
>> - u64 pages_nr;
>> + atomic64_t pages_nr;
>> + atomic64_t unbuddied_nr;
>> const struct z3fold_ops *ops;
>> struct zpool *zpool;
>> const struct zpool_ops *zpool_ops;
>> @@ -234,7 +236,8 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
>> INIT_LIST_HEAD(&pool->unbuddied[i]);
>> INIT_LIST_HEAD(&pool->buddied);
>> INIT_LIST_HEAD(&pool->lru);
>> - pool->pages_nr = 0;
>> + atomic64_set(&pool->pages_nr, 0);
>> + atomic64_set(&pool->unbuddied_nr, 0);
>> pool->ops = ops;
>> return pool;
>> }
>> @@ -334,6 +337,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
>> continue;
>> }
>> list_del(&zhdr->buddy);
>> + atomic64_dec(&pool->unbuddied_nr);
>> goto found;
>> }
>> }
>> @@ -346,7 +350,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
>> if (!page)
>> return -ENOMEM;
>> spin_lock(&pool->lock);
>> - pool->pages_nr++;
>> + atomic64_inc(&pool->pages_nr);
>> zhdr = init_z3fold_page(page);
>>
>> if (bud == HEADLESS) {
>> @@ -369,6 +373,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
>> /* Add to unbuddied list */
>> freechunks = num_free_chunks(zhdr);
>> list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
>> + atomic64_inc(&pool->unbuddied_nr);
>> } else {
>> /* Add to buddied list */
>> list_add(&zhdr->buddy, &pool->buddied);
>> @@ -412,6 +417,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>> /* HEADLESS page stored */
>> bud = HEADLESS;
>> } else {
>> + bool is_unbuddied = zhdr->first_chunks == 0 ||
>> + zhdr->middle_chunks == 0 ||
>> + zhdr->last_chunks == 0;
>> +
>> bud = handle_to_buddy(handle);
>>
>> switch (bud) {
>> @@ -431,6 +440,8 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>> spin_unlock(&pool->lock);
>> return;
>> }
>> + if (is_unbuddied)
>> + atomic64_dec(&pool->unbuddied_nr);
>> }
>>
>> if (test_bit(UNDER_RECLAIM, &page->private)) {
>> @@ -451,12 +462,13 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>> list_del(&page->lru);
>> clear_bit(PAGE_HEADLESS, &page->private);
>> free_z3fold_page(zhdr);
>> - pool->pages_nr--;
>> + atomic64_dec(&pool->pages_nr);
>> } else {
>> z3fold_compact_page(zhdr);
>> /* Add to the unbuddied list */
>> freechunks = num_free_chunks(zhdr);
>> list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
>> + atomic64_inc(&pool->unbuddied_nr);
>> }
>>
>> spin_unlock(&pool->lock);
>> @@ -520,6 +532,11 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
>> zhdr = page_address(page);
>> if (!test_bit(PAGE_HEADLESS, &page->private)) {
>> list_del(&zhdr->buddy);
>> + if (zhdr->first_chunks == 0 ||
>> + zhdr->middle_chunks == 0 ||
>> + zhdr->last_chunks == 0)
>> + atomic64_dec(&pool->unbuddied_nr);
>> +
>> /*
>> * We need encode the handles before unlocking, since
>> * we can race with free that will set
>> @@ -569,7 +586,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
>> */
>> clear_bit(PAGE_HEADLESS, &page->private);
>> free_z3fold_page(zhdr);
>> - pool->pages_nr--;
>> + atomic64_dec(&pool->pages_nr);
>> spin_unlock(&pool->lock);
>> return 0;
>> } else if (!test_bit(PAGE_HEADLESS, &page->private)) {
>> @@ -584,6 +601,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
>> freechunks = num_free_chunks(zhdr);
>> list_add(&zhdr->buddy,
>> &pool->unbuddied[freechunks]);
>> + atomic64_inc(&pool->unbuddied_nr);
>> }
>> }
>>
>> @@ -672,12 +690,11 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
>> * z3fold_get_pool_size() - gets the z3fold pool size in pages
>> * @pool: pool whose size is being queried
>> *
>> - * Returns: size in pages of the given pool. The pool lock need not be
>> - * taken to access pages_nr.
>> + * Returns: size in pages of the given pool.
>> */
>> static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
>> {
>> - return pool->pages_nr;
>> + return atomic64_read(&pool->pages_nr);
>> }
>>
>> /*****************
>> --
>> 2.4.2