Message-ID: <665ccd89-8434-fc45-4813-c6412ef80c10@bytedance.com>
Date: Wed, 26 Jul 2023 17:27:47 +0800
From: Qi Zheng <zhengqi.arch@...edance.com>
To: Muchun Song <muchun.song@...ux.dev>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org, x86@...nel.org,
kvm@...r.kernel.org, xen-devel@...ts.xenproject.org,
linux-erofs@...ts.ozlabs.org, linux-f2fs-devel@...ts.sourceforge.net,
cluster-devel@...hat.com, linux-nfs@...r.kernel.org,
linux-mtd@...ts.infradead.org, rcu@...r.kernel.org, netdev@...r.kernel.org,
dri-devel@...ts.freedesktop.org, linux-arm-msm@...r.kernel.org,
dm-devel@...hat.com, linux-raid@...r.kernel.org,
linux-bcache@...r.kernel.org, virtualization@...ts.linux-foundation.org,
linux-fsdevel@...r.kernel.org, linux-ext4@...r.kernel.org,
linux-xfs@...r.kernel.org, linux-btrfs@...r.kernel.org,
akpm@...ux-foundation.org, david@...morbit.com, tkhai@...ru, vbabka@...e.cz,
roman.gushchin@...ux.dev, djwong@...nel.org, brauner@...nel.org,
paulmck@...nel.org, tytso@....edu, steven.price@....com, cel@...nel.org,
senozhatsky@...omium.org, yujie.liu@...el.com, gregkh@...uxfoundation.org
Subject: Re: [PATCH v2 19/47] mm: thp: dynamically allocate the thp-related shrinkers
On 2023/7/26 15:10, Muchun Song wrote:
>
>
> On 2023/7/24 17:43, Qi Zheng wrote:
>> Use new APIs to dynamically allocate the thp-zero and thp-deferred_split
>> shrinkers.
>>
>> Signed-off-by: Qi Zheng <zhengqi.arch@...edance.com>
>> ---
>> mm/huge_memory.c | 69 +++++++++++++++++++++++++++++++-----------------
>> 1 file changed, 45 insertions(+), 24 deletions(-)
>>
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index 8c94b34024a2..4db5a1834d81 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -65,7 +65,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
>> (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
>> (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
>> -static struct shrinker deferred_split_shrinker;
>> +static struct shrinker *deferred_split_shrinker;
>> +static unsigned long deferred_split_count(struct shrinker *shrink,
>> + struct shrink_control *sc);
>> +static unsigned long deferred_split_scan(struct shrinker *shrink,
>> + struct shrink_control *sc);
>> static atomic_t huge_zero_refcount;
>> struct page *huge_zero_page __read_mostly;
>> @@ -229,11 +233,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
>> return 0;
>> }
>> -static struct shrinker huge_zero_page_shrinker = {
>> - .count_objects = shrink_huge_zero_page_count,
>> - .scan_objects = shrink_huge_zero_page_scan,
>> - .seeks = DEFAULT_SEEKS,
>> -};
>> +static struct shrinker *huge_zero_page_shrinker;
>
> Same as patch #17.
OK, will do.
>
>> #ifdef CONFIG_SYSFS
>> static ssize_t enabled_show(struct kobject *kobj,
>> @@ -454,6 +454,40 @@ static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
>> }
>> #endif /* CONFIG_SYSFS */
>> +static int thp_shrinker_init(void)
>
> Better to declare it as __init.
Will do.
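
I.e. something like this (a quick sketch: only the __init annotation is
added, the body stays as in the hunk above; __init should be fine since
this is only called from the __init hugepage_init() path):

static int __init thp_shrinker_init(void)
{
        /* plain shrinker for the huge zero page */
        huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
        if (!huge_zero_page_shrinker)
                return -ENOMEM;

        /* NUMA- and memcg-aware shrinker for the deferred split queues */
        deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
                                                 SHRINKER_MEMCG_AWARE |
                                                 SHRINKER_NONSLAB,
                                                 "thp-deferred_split");
        if (!deferred_split_shrinker) {
                shrinker_free_non_registered(huge_zero_page_shrinker);
                return -ENOMEM;
        }

        huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
        huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
        huge_zero_page_shrinker->seeks = DEFAULT_SEEKS;
        shrinker_register(huge_zero_page_shrinker);

        deferred_split_shrinker->count_objects = deferred_split_count;
        deferred_split_shrinker->scan_objects = deferred_split_scan;
        deferred_split_shrinker->seeks = DEFAULT_SEEKS;
        shrinker_register(deferred_split_shrinker);

        return 0;
}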
>
>> +{
>> + huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
>> + if (!huge_zero_page_shrinker)
>> + return -ENOMEM;
>> +
>> + deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
>> + SHRINKER_MEMCG_AWARE |
>> + SHRINKER_NONSLAB,
>> + "thp-deferred_split");
>> + if (!deferred_split_shrinker) {
>> + shrinker_free_non_registered(huge_zero_page_shrinker);
>> + return -ENOMEM;
>> + }
>> +
>> + huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
>> + huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
>> + huge_zero_page_shrinker->seeks = DEFAULT_SEEKS;
>> + shrinker_register(huge_zero_page_shrinker);
>> +
>> + deferred_split_shrinker->count_objects = deferred_split_count;
>> + deferred_split_shrinker->scan_objects = deferred_split_scan;
>> + deferred_split_shrinker->seeks = DEFAULT_SEEKS;
>> + shrinker_register(deferred_split_shrinker);
>> +
>> + return 0;
>> +}
>> +
>> +static void thp_shrinker_exit(void)
>
> Same here.
Will do.
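
And the same for the teardown helper (sketch; __init should also be safe
here, since it is only reached from the error path of the __init
hugepage_init()):

static void __init thp_shrinker_exit(void)
{
        shrinker_unregister(huge_zero_page_shrinker);
        shrinker_unregister(deferred_split_shrinker);
}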
>
>> +{
>> + shrinker_unregister(huge_zero_page_shrinker);
>> + shrinker_unregister(deferred_split_shrinker);
>> +}
>> +
>> static int __init hugepage_init(void)
>> {
>> int err;
>> @@ -482,12 +516,9 @@ static int __init hugepage_init(void)
>> if (err)
>> goto err_slab;
>> - err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
>> - if (err)
>> - goto err_hzp_shrinker;
>> - err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
>> + err = thp_shrinker_init();
>> if (err)
>> - goto err_split_shrinker;
>> + goto err_shrinker;
>> /*
>> * By default disable transparent hugepages on smaller systems,
>> @@ -505,10 +536,8 @@ static int __init hugepage_init(void)
>> return 0;
>> err_khugepaged:
>> - unregister_shrinker(&deferred_split_shrinker);
>> -err_split_shrinker:
>> - unregister_shrinker(&huge_zero_page_shrinker);
>> -err_hzp_shrinker:
>> + thp_shrinker_exit();
>> +err_shrinker:
>> khugepaged_destroy();
>> err_slab:
>> hugepage_exit_sysfs(hugepage_kobj);
>> @@ -2851,7 +2880,7 @@ void deferred_split_folio(struct folio *folio)
>> #ifdef CONFIG_MEMCG
>> if (memcg)
>> set_shrinker_bit(memcg, folio_nid(folio),
>> - deferred_split_shrinker.id);
>> + deferred_split_shrinker->id);
>> #endif
>> }
>> spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
>> @@ -2925,14 +2954,6 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
>> return split;
>> }
>> -static struct shrinker deferred_split_shrinker = {
>> - .count_objects = deferred_split_count,
>> - .scan_objects = deferred_split_scan,
>> - .seeks = DEFAULT_SEEKS,
>> - .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
>> - SHRINKER_NONSLAB,
>> -};
>> -
>> #ifdef CONFIG_DEBUG_FS
>> static void split_huge_pages_all(void)
>> {
>