Message-ID: <baeab92c-d966-2dc2-d952-c7f3faf2a229@huawei.com>
Date: Mon, 21 Feb 2022 10:53:18 +0800
From: Miaohe Lin <linmiaohe@...wei.com>
To: David Laight <David.Laight@...LAB.COM>
CC: "vitaly.wool@...sulko.com" <vitaly.wool@...sulko.com>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>
Subject: Re: [PATCH 6/9] mm/z3fold: move decrement of pool->pages_nr into
__release_z3fold_page()
On 2022/2/20 0:33, David Laight wrote:
> From: Miaohe Lin
>> Sent: 19 February 2022 09:26
>>
>> z3fold always does atomic64_dec(&pool->pages_nr) when
>> __release_z3fold_page() is called. Thus we can move the decrement of
>> pool->pages_nr into __release_z3fold_page() to simplify the code.
>> This also reduces the size of z3fold.o by ~1k.
>> Without this patch:
>>    text    data     bss     dec     hex filename
>>   15444    1376       8   16828    41bc mm/z3fold.o
>> With this patch:
>>    text    data     bss     dec     hex filename
>>   15044    1248       8   16300    3fac mm/z3fold.o
>
> I can't see anything obvious in this patch that would reduce the size much.
> OTOH there are some large functions that are pointlessly marked 'inline'.
> Maybe the compiler made a better choice?
I think so too.
> Although it isn't at all obvious why the 'data' size changes.
I checked the section headers of z3fold.o. The size of .data itself is
unchanged, while its file offset changed from 00003818 to 00003688. Maybe
this is the reason the reported 'data' size changes.
Section Headers:
  [Nr] Name              Type             Address           Offset
       Size              EntSize          Flags  Link  Info  Align

with this patch:
  [ 3] .data             PROGBITS         0000000000000000  00003688
       00000000000000c0  0000000000000000  WA       0     0     8

without this patch:
  [ 3] .data             PROGBITS         0000000000000000  00003818
       00000000000000c0  0000000000000000  WA       0     0     8
>
>> Signed-off-by: Miaohe Lin <linmiaohe@...wei.com>
>> ---
>> mm/z3fold.c | 41 ++++++++++++-----------------------------
>> 1 file changed, 12 insertions(+), 29 deletions(-)
>>
>> diff --git a/mm/z3fold.c b/mm/z3fold.c
>> index adc0b3fa4906..18a697f6fe32 100644
>> --- a/mm/z3fold.c
>> +++ b/mm/z3fold.c
>> @@ -520,6 +520,8 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
>> list_add(&zhdr->buddy, &pool->stale);
>> queue_work(pool->release_wq, &pool->work);
>> spin_unlock(&pool->stale_lock);
>> +
>> + atomic64_dec(&pool->pages_nr);
>
> Looks like you can move the decrement inside the lock.
> If you can do the same for the increment you can avoid the
> expensive locked bus cycle.
>
atomic64_inc(&pool->pages_nr) is only done when a new or reused z3fold page is
initialized, and no lock is held around it. If we took pool->lock there, the
potential gain might be nullified. Or am I missing something?
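To make the trade-off concrete, here is a minimal user-space sketch (C11
atomics plus a pthread mutex standing in for pool->lock; the struct and
names are invented for illustration, this is not the kernel code). The
atomic version is a single locked RMW with no lock traffic, while the
lock-protected version only pays off if the caller already holds the lock
for other work, which is the case on the release path but not on the
allocation path:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_pool {
	pthread_mutex_t lock;   /* stand-in for pool->lock */
	long pages_nr_plain;    /* plain counter, only touched under lock */
	atomic_long pages_nr;   /* lock-free counter, like atomic64_t pages_nr */
};

/* Allocation-path style increment: one atomic RMW, no lock taken. */
static void inc_atomic(struct fake_pool *pool)
{
	atomic_fetch_add_explicit(&pool->pages_nr, 1, memory_order_relaxed);
}

/*
 * The alternative: fold the update under the pool lock. This is only a win
 * if the caller already holds the lock anyway; otherwise it adds a full
 * lock/unlock round trip to the fast path.
 */
static void inc_under_lock(struct fake_pool *pool)
{
	pthread_mutex_lock(&pool->lock);
	pool->pages_nr_plain++;
	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct fake_pool pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.pages_nr_plain = 0,
	};

	atomic_init(&pool.pages_nr, 0);

	inc_atomic(&pool);      /* roughly what the allocation path does today */
	inc_under_lock(&pool);  /* what moving it under pool->lock would do */

	printf("atomic counter: %ld, locked counter: %ld\n",
	       atomic_load(&pool.pages_nr), pool.pages_nr_plain);
	return 0;
}
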
Many thanks for your review and reply.
> David
>
>> }
>>
>> static void release_z3fold_page(struct kref *ref)
>> @@ -737,13 +739,9 @@ static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
>> return new_zhdr;
>>
>> out_fail:
>> - if (new_zhdr) {
>> - if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
>> - atomic64_dec(&pool->pages_nr);
>> - else {
>> - add_to_unbuddied(pool, new_zhdr);
>> - z3fold_page_unlock(new_zhdr);
>> - }
>> + if (new_zhdr && !kref_put(&new_zhdr->refcount, release_z3fold_page_locked)) {
>> + add_to_unbuddied(pool, new_zhdr);
>> + z3fold_page_unlock(new_zhdr);
>> }
>> return NULL;
>>
>> @@ -816,10 +814,8 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
>> list_del_init(&zhdr->buddy);
>> spin_unlock(&pool->lock);
>>
>> - if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
>> - atomic64_dec(&pool->pages_nr);
>> + if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
>> return;
>> - }
>>
>> if (test_bit(PAGE_STALE, &page->private) ||
>> test_and_set_bit(PAGE_CLAIMED, &page->private)) {
>> @@ -829,9 +825,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
>>
>> if (!zhdr->foreign_handles && buddy_single(zhdr) &&
>> zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
>> - if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
>> - atomic64_dec(&pool->pages_nr);
>> - else {
>> + if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
>> clear_bit(PAGE_CLAIMED, &page->private);
>> z3fold_page_unlock(zhdr);
>> }
>> @@ -1089,10 +1083,8 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
>> if (zhdr) {
>> bud = get_free_buddy(zhdr, chunks);
>> if (bud == HEADLESS) {
>> - if (kref_put(&zhdr->refcount,
>> + if (!kref_put(&zhdr->refcount,
>> release_z3fold_page_locked))
>> - atomic64_dec(&pool->pages_nr);
>> - else
>> z3fold_page_unlock(zhdr);
>> pr_err("No free chunks in unbuddied\n");
>> WARN_ON(1);
>> @@ -1239,10 +1231,8 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
>>
>> if (!page_claimed)
>> free_handle(handle, zhdr);
>> - if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
>> - atomic64_dec(&pool->pages_nr);
>> + if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))
>> return;
>> - }
>> if (page_claimed) {
>> /* the page has not been claimed by us */
>> put_z3fold_header(zhdr);
>> @@ -1353,9 +1343,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
>> break;
>> }
>> if (!z3fold_page_trylock(zhdr)) {
>> - if (kref_put(&zhdr->refcount,
>> - release_z3fold_page))
>> - atomic64_dec(&pool->pages_nr);
>> + kref_put(&zhdr->refcount, release_z3fold_page);
>> zhdr = NULL;
>> continue; /* can't evict at this point */
>> }
>> @@ -1366,10 +1354,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
>> */
>> if (zhdr->foreign_handles ||
>> test_and_set_bit(PAGE_CLAIMED, &page->private)) {
>> - if (kref_put(&zhdr->refcount,
>> + if (!kref_put(&zhdr->refcount,
>> release_z3fold_page_locked))
>> - atomic64_dec(&pool->pages_nr);
>> - else
>> z3fold_page_unlock(zhdr);
>> zhdr = NULL;
>> continue; /* can't evict such page */
>> @@ -1447,7 +1433,6 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
>> if (kref_put(&zhdr->refcount,
>> release_z3fold_page_locked)) {
>> kmem_cache_free(pool->c_handle, slots);
>> - atomic64_dec(&pool->pages_nr);
>> return 0;
>> }
>> /*
>> @@ -1669,10 +1654,8 @@ static void z3fold_page_putback(struct page *page)
>> if (!list_empty(&zhdr->buddy))
>> list_del_init(&zhdr->buddy);
>> INIT_LIST_HEAD(&page->lru);
>> - if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
>> - atomic64_dec(&pool->pages_nr);
>> + if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
>> return;
>> - }
>> spin_lock(&pool->lock);
>> list_add(&page->lru, &pool->lru);
>> spin_unlock(&pool->lock);
>> --
>> 2.23.0
>
> -
> Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
> Registration No: 1397386 (Wales)
>
> .
>