Date: Mon, 13 May 2024 14:17:41 +0800
From: Chengming Zhou <chengming.zhou@...ux.dev>
To: xu xin <xu.xin.sc@...il.com>
Cc: aarcange@...hat.com, akpm@...ux-foundation.org, david@...hat.com,
 hughd@...gle.com, linux-kernel@...r.kernel.org, linux-mm@...ck.org,
 shr@...kernel.io, xu.xin16@....com.cn, zhouchengming@...edance.com,
 si.hao@....com.cn
Subject: Re: [PATCH v2 2/2] mm/ksm: fix ksm_zero_pages accounting

On 2024/5/13 14:00, xu xin wrote:
>> We normally ksm_zero_pages++ in ksmd when page is merged with zero page,
>> but ksm_zero_pages-- is done from page tables side, which can't protected
>> by the ksmd mutex.
> 
>   "cant protected" -> "can't be protected".

Right, will fix.

> 
>   But it's better to say "where there is no access protection for
>   ksm_zero_pages", because the ksmd mutex protects the ksm_run flag, not
>   the KSM counters.

Ah, I thought the introduced ksm_zero_pages counters were protected by the ksmd
mutex like all the other KSM counters, no? But the difference is that
ksm_zero_pages can also be changed by page table side operations, which can't
take the ksmd mutex; that is why we need to use atomic variables.
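To make the race concrete (a minimal illustrative sketch only, not the kernel
code; the function names below are made up):

	/* A plain counter updated from two contexts with no common lock. */
	static long zero_pages;

	/* ksmd context, runs under the ksmd mutex. */
	void ksmd_merge_one(void)
	{
		zero_pages++;		/* load, add, store: not atomic */
	}

	/* page table side (e.g. unmap), cannot take the ksmd mutex. */
	void pte_unmap_one(void)
	{
		zero_pages--;		/* can interleave with the ++ above,
					 * losing an update, so a reader may
					 * transiently see a bogus value    */
	}

With atomic_long_t and atomic_long_inc()/atomic_long_dec(), each update is a
single atomic RMW, so readers only ever see a consistent value.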

Thanks.

> 
> 
>   Anyway, the following code looks OK to me.
>>
>> So in rare cases we can read an exceptional value of ksm_zero_pages, such
>> as -1, which is very confusing to users.
>>
>> Fix it by changing to use atomic_long_t; the same applies to
>> mm->ksm_zero_pages.
>>
>> Fixes: e2942062e01d ("ksm: count all zero pages placed by KSM")
>> Fixes: 6080d19f0704 ("ksm: add ksm zero pages for each process")
>> Acked-by: David Hildenbrand <david@...hat.com>
>> Signed-off-by: Chengming Zhou <chengming.zhou@...ux.dev>
>> ---
>>  fs/proc/base.c           |  2 +-
>>  include/linux/ksm.h      | 17 ++++++++++++++---
>>  include/linux/mm_types.h |  2 +-
>>  mm/ksm.c                 | 11 +++++------
>>  4 files changed, 21 insertions(+), 11 deletions(-)
>>
>> diff --git a/fs/proc/base.c b/fs/proc/base.c
>> index 18550c071d71..72a1acd03675 100644
>> --- a/fs/proc/base.c
>> +++ b/fs/proc/base.c
>> @@ -3214,7 +3214,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
>>  	mm = get_task_mm(task);
>>  	if (mm) {
>>  		seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
>> -		seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
>> +		seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
>>  		seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
>>  		seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
>>  		mmput(mm);
>> diff --git a/include/linux/ksm.h b/include/linux/ksm.h
>> index 52c63a9c5a9c..11690dacd986 100644
>> --- a/include/linux/ksm.h
>> +++ b/include/linux/ksm.h
>> @@ -33,16 +33,27 @@ void __ksm_exit(struct mm_struct *mm);
>>   */
>>  #define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
>>  
>> -extern unsigned long ksm_zero_pages;
>> +extern atomic_long_t ksm_zero_pages;
>> +
>> +static inline void ksm_map_zero_page(struct mm_struct *mm)
>> +{
>> +	atomic_long_inc(&ksm_zero_pages);
>> +	atomic_long_inc(&mm->ksm_zero_pages);
>> +}
>>  
>>  static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
>>  {
>>  	if (is_ksm_zero_pte(pte)) {
>> -		ksm_zero_pages--;
>> -		mm->ksm_zero_pages--;
>> +		atomic_long_dec(&ksm_zero_pages);
>> +		atomic_long_dec(&mm->ksm_zero_pages);
>>  	}
>>  }
>>  
>> +static inline long mm_ksm_zero_pages(struct mm_struct *mm)
>> +{
>> +	return atomic_long_read(&mm->ksm_zero_pages);
>> +}
>> +
>>  static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
>>  {
>>  	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
>> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
>> index 24323c7d0bd4..af3a0256fa93 100644
>> --- a/include/linux/mm_types.h
>> +++ b/include/linux/mm_types.h
>> @@ -985,7 +985,7 @@ struct mm_struct {
>>  		 * Represent how many empty pages are merged with kernel zero
>>  		 * pages when enabling KSM use_zero_pages.
>>  		 */
>> -		unsigned long ksm_zero_pages;
>> +		atomic_long_t ksm_zero_pages;
>>  #endif /* CONFIG_KSM */
>>  #ifdef CONFIG_LRU_GEN_WALKS_MMU
>>  		struct {
>> diff --git a/mm/ksm.c b/mm/ksm.c
>> index 0f9c491552ff..6f461411d070 100644
>> --- a/mm/ksm.c
>> +++ b/mm/ksm.c
>> @@ -296,7 +296,7 @@ static bool ksm_use_zero_pages __read_mostly;
>>  static bool ksm_smart_scan = true;
>>  
>>  /* The number of zero pages which is placed by KSM */
>> -unsigned long ksm_zero_pages;
>> +atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
>>  
>>  /* The number of pages that have been skipped due to "smart scanning" */
>>  static unsigned long ksm_pages_skipped;
>> @@ -1429,8 +1429,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
>>  		 * the dirty bit in zero page's PTE is set.
>>  		 */
>>  		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
>> -		ksm_zero_pages++;
>> -		mm->ksm_zero_pages++;
>> +		ksm_map_zero_page(mm);
>>  		/*
>>  		 * We're replacing an anonymous page with a zero page, which is
>>  		 * not anonymous. We need to do proper accounting otherwise we
>> @@ -3373,7 +3372,7 @@ static void wait_while_offlining(void)
>>  #ifdef CONFIG_PROC_FS
>>  long ksm_process_profit(struct mm_struct *mm)
>>  {
>> -	return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
>> +	return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
>>  		mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
>>  }
>>  #endif /* CONFIG_PROC_FS */
>> @@ -3662,7 +3661,7 @@ KSM_ATTR_RO(pages_skipped);
>>  static ssize_t ksm_zero_pages_show(struct kobject *kobj,
>>  				struct kobj_attribute *attr, char *buf)
>>  {
>> -	return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
>> +	return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
>>  }
>>  KSM_ATTR_RO(ksm_zero_pages);
>>  
>> @@ -3671,7 +3670,7 @@ static ssize_t general_profit_show(struct kobject *kobj,
>>  {
>>  	long general_profit;
>>  
>> -	general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
>> +	general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
>>  				ksm_rmap_items * sizeof(struct ksm_rmap_item);
>>  
>>  	return sysfs_emit(buf, "%ld\n", general_profit);
>>
>> -- 
>> 2.45.0
>>
