Message-ID: <401a535a-0393-fc7c-66b0-832c061283c7@redhat.com>
Date:   Tue, 23 May 2023 11:47:55 +0200
From:   David Hildenbrand <david@...hat.com>
To:     Yang Yang <yang.yang29@....com.cn>, akpm@...ux-foundation.org
Cc:     imbrenda@...ux.ibm.com, jiang.xuexin@....com.cn,
        linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        ran.xiaokai@....com.cn, xu.xin.sc@...il.com, xu.xin16@....com.cn
Subject: Re: [PATCH v8 2/6] ksm: count all zero pages placed by KSM

On 22.05.23 12:52, Yang Yang wrote:
> From: xu xin <xu.xin16@....com.cn>
> 
> Since pages_sharing and pages_shared don't include the number of zero
> pages merged by KSM, we cannot know how many of the pages placed by KSM
> are zero pages when use_zero_pages is enabled, so KSM is not transparent
> about all of the pages it actually merges. In the early days of
> use_zero_pages, zero pages could not be unshared by means such as
> MADV_UNMERGEABLE, so it was hard to count how many times one of those
> zero pages was later unmerged.
> 
> But now KSM-placed zero pages can be unshared accurately, so we can
> easily count both how many times a page full of zeroes was merged with
> the zero page and how many times one of those pages was later unmerged.
> This helps estimate the memory demand if each and every shared page
> were unshared.
> 
> So we add ksm_zero_pages under /sys/kernel/mm/ksm/ to show the total
> number of zero pages placed by KSM.
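> 
> For example, with use_zero_pages enabled, the new counter can be read
> directly from sysfs (value hypothetical):
> 
>   # cat /sys/kernel/mm/ksm/ksm_zero_pages
>   2048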
> 
> v7->v8:
> Handle the case where khugepaged replaces a shared zeropage with a THP.
> 
> Signed-off-by: xu xin <xu.xin16@....com.cn>
> Suggested-by: David Hildenbrand <david@...hat.com>
> Cc: Claudio Imbrenda <imbrenda@...ux.ibm.com>
> Cc: Xuexin Jiang <jiang.xuexin@....com.cn>
> Reviewed-by: Xiaokai Ran <ran.xiaokai@....com.cn>
> Reviewed-by: Yang Yang <yang.yang29@....com.cn>
> ---
>   include/linux/ksm.h | 17 +++++++++++++++++
>   mm/khugepaged.c     |  3 +++
>   mm/ksm.c            | 12 ++++++++++++
>   mm/memory.c         |  7 ++++++-
>   4 files changed, 38 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/ksm.h b/include/linux/ksm.h
> index 7989200cdbb7..1adcae0205e3 100644
> --- a/include/linux/ksm.h
> +++ b/include/linux/ksm.h
> @@ -29,6 +29,16 @@ void __ksm_exit(struct mm_struct *mm);
>   /* use pte_mkdirty to track a KSM-placed zero page */
>   #define set_pte_ksm_zero(pte)	pte_mkdirty(pte_mkspecial(pte))
>   #define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
> +extern unsigned long ksm_zero_pages;
> +static inline void inc_ksm_zero_pages(void)
> +{
> +	ksm_zero_pages++;
> +}
> +

No need to export the inc, just inline this.
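
For instance, in replace_page() -- right next to where ksm_zero_pages
is defined in mm/ksm.c -- the increment could simply be open-coded
(untested sketch):

	} else {
		newpte = set_pte_ksm_zero(pfn_pte(page_to_pfn(kpage),
					       vma->vm_page_prot));
		/* ksm_zero_pages is defined in this file, no helper needed. */
		ksm_zero_pages++;

and both inc_ksm_zero_pages() variants can go away.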

> +static inline void dec_ksm_zero_pages(void)
> +{
> +	ksm_zero_pages--;
> +}
>   
>   static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
>   {
> @@ -100,6 +110,13 @@ static inline void ksm_exit(struct mm_struct *mm)
>   
>   #define set_pte_ksm_zero(pte)	pte_mkspecial(pte)
>   #define is_ksm_zero_pte(pte)	0
> +static inline void inc_ksm_zero_pages(void)
> +{
> +}
> +
> +static inline void dec_ksm_zero_pages(void)
> +{
> +}
>   
>   #ifdef CONFIG_MEMORY_FAILURE
>   static inline void collect_procs_ksm(struct page *page,
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 6b9d39d65b73..ba0d077b6951 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -19,6 +19,7 @@
>   #include <linux/page_table_check.h>
>   #include <linux/swapops.h>
>   #include <linux/shmem_fs.h>
> +#include <linux/ksm.h>
>   
>   #include <asm/tlb.h>
>   #include <asm/pgalloc.h>
> @@ -711,6 +712,8 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
>   				spin_lock(ptl);
>   				ptep_clear(vma->vm_mm, address, _pte);
>   				spin_unlock(ptl);
> +				if (is_ksm_zero_pte(pteval))
> +					dec_ksm_zero_pages();
>   			}
>   		} else {
>   			src_page = pte_page(pteval);
> diff --git a/mm/ksm.c b/mm/ksm.c
> index 9962f5962afd..2ca7e8860faa 100644
> --- a/mm/ksm.c
> +++ b/mm/ksm.c
> @@ -278,6 +278,9 @@ static unsigned int zero_checksum __read_mostly;
>   /* Whether to merge empty (zeroed) pages with actual zero pages */
>   static bool ksm_use_zero_pages __read_mostly;
>   
> +/* The number of zero pages placed by KSM */
> +unsigned long ksm_zero_pages;
> +
>   #ifdef CONFIG_NUMA
>   /* Zeroed when merging across nodes is not allowed */
>   static unsigned int ksm_merge_across_nodes = 1;
> @@ -1223,6 +1226,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
>   	} else {
>   		newpte = set_pte_ksm_zero(pfn_pte(page_to_pfn(kpage),
>   					       vma->vm_page_prot));
> +		inc_ksm_zero_pages();
>   		/*
>   		 * We're replacing an anonymous page with a zero page, which is
>   		 * not anonymous. We need to do proper accounting otherwise we
> @@ -3350,6 +3354,13 @@ static ssize_t pages_volatile_show(struct kobject *kobj,
>   }
>   KSM_ATTR_RO(pages_volatile);
>   
> +static ssize_t ksm_zero_pages_show(struct kobject *kobj,
> +				struct kobj_attribute *attr, char *buf)
> +{
> +	return sysfs_emit(buf, "%lu\n", ksm_zero_pages);
> +}
> +KSM_ATTR_RO(ksm_zero_pages);
> +
>   static ssize_t general_profit_show(struct kobject *kobj,
>   				   struct kobj_attribute *attr, char *buf)
>   {
> @@ -3417,6 +3428,7 @@ static struct attribute *ksm_attrs[] = {
>   	&pages_sharing_attr.attr,
>   	&pages_unshared_attr.attr,
>   	&pages_volatile_attr.attr,
> +	&ksm_zero_pages_attr.attr,
>   	&full_scans_attr.attr,
>   #ifdef CONFIG_NUMA
>   	&merge_across_nodes_attr.attr,
> diff --git a/mm/memory.c b/mm/memory.c
> index 8358f3b853f2..058b416adf24 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1415,8 +1415,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
>   			tlb_remove_tlb_entry(tlb, pte, addr);
>   			zap_install_uffd_wp_if_needed(vma, addr, pte, details,
>   						      ptent);
> -			if (unlikely(!page))
> +			if (unlikely(!page)) {
> +				if (is_ksm_zero_pte(ptent))
> +					dec_ksm_zero_pages();
>   				continue;
> +			}
>   
>   			delay_rmap = 0;
>   			if (!PageAnon(page)) {
> @@ -3120,6 +3123,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
>   				inc_mm_counter(mm, MM_ANONPAGES);
>   			}
>   		} else {
> +			if (is_ksm_zero_pte(vmf->orig_pte))
> +				dec_ksm_zero_pages();
>   			inc_mm_counter(mm, MM_ANONPAGES);
>   		}
>   		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));

Can we maybe avoid exporting the dec semantics and rather add a callback 
to KSM? Ideally, we'd even distill that down to a single call, and 
handle the details in ksm.h. Maybe simply:

ksm_notify_unmap_zero_page(vmf->orig_pte);

and then just have in ksm.h

static inline void ksm_notify_unmap_zero_page(pte_t pte)
{
	if (is_ksm_zero_pte(pte))
		ksm_zero_pages--;
}
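
For !CONFIG_KSM, a matching no-op stub keeps the callers compile-clean,
same as the other stubs in ksm.h (untested sketch):

static inline void ksm_notify_unmap_zero_page(pte_t pte)
{
}

The zap_pte_range() hunk would then shrink back to a one-liner:

			if (unlikely(!page)) {
				ksm_notify_unmap_zero_page(ptent);
				continue;
			}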

-- 
Thanks,

David / dhildenb
