Message-ID: <e64d8888-f00c-d360-b35c-54396c6fff2d@huaweicloud.com>
Date:   Mon, 10 Apr 2023 17:03:31 +0800
From:   Wu Bo <wubo@...weicloud.com>
To:     "zhaoyang.huang" <zhaoyang.huang@...soc.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Minchan Kim <minchan@...nel.org>,
        Joonsoo Kim <iamjoonsoo.kim@....com>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org,
        Zhaoyang Huang <huangzhaoyang@...il.com>, ke.wang@...soc.com
Subject: Re: [RFC PATCHv2] mm: introduce defer free for cma



On 2023/4/10 16:46, zhaoyang.huang wrote:
> From: Zhaoyang Huang <zhaoyang.huang@...soc.com>
>
> Contiguous page blocks are expensive for the system to assemble. Introduce a
> defer-free mechanism that buffers some of them, making allocation easier. The
> shrinker ensures the buffered page blocks can be reclaimed under memory pressure.
>
> Signed-off-by: Zhaoyang Huang <zhaoyang.huang@...soc.com>
> ---
> v2: fix build warning and register shrinker
> ---
>   mm/cma.c | 151 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
>   mm/cma.h |  11 +++++
>   2 files changed, 160 insertions(+), 2 deletions(-)
>
> diff --git a/mm/cma.c b/mm/cma.c
> index 4a978e0..6d2fd24 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -39,6 +39,10 @@
>   unsigned cma_area_count;
>   static DEFINE_MUTEX(cma_mutex);
>   
> +static unsigned long cma_defer_free_count(struct shrinker *shrinker,
> +					struct shrink_control *sc);
> +static unsigned long cma_defer_free_scan(struct shrinker *shrinker,
> +					struct shrink_control *sc);
>   phys_addr_t cma_get_base(const struct cma *cma)
>   {
>   	return PFN_PHYS(cma->base_pfn);
> @@ -153,6 +157,20 @@ static int __init cma_init_reserved_areas(void)
>   }
>   core_initcall(cma_init_reserved_areas);
>   
> +static unsigned long cma_free_get(struct cma *cma)
> +{
> +	unsigned long used;
> +	unsigned long val;
> +
> +	spin_lock_irq(&cma->lock);
> +	/* pages counter is smaller than sizeof(int) */
> +	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
> +	val = cma->count - ((u64)used << cma->order_per_bit);
> +	spin_unlock_irq(&cma->lock);
> +
> +	return val;
> +}
> +
>   void __init cma_reserve_pages_on_error(struct cma *cma)
>   {
>   	cma->reserve_pages_on_error = true;
> @@ -212,6 +230,13 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
>   	cma_area_count++;
>   	totalcma_pages += (size / PAGE_SIZE);
>   
> +	cma->batch = cma->count >> 1;
> +	cma->shrinker.count_objects = cma_defer_free_count;
> +	cma->shrinker.scan_objects = cma_defer_free_scan;
> +	cma->shrinker.seeks = DEFAULT_SEEKS;
> +	cma->shrinker.batch = 0;
> +
> +	register_shrinker(&cma->shrinker, "cma-shrinker");
>   	return 0;
>   }
>   
> @@ -411,6 +436,46 @@ static void cma_debug_show_areas(struct cma *cma)
>   static inline void cma_debug_show_areas(struct cma *cma) { }
>   #endif
>   
> +static int cma_defer_area_fetch(struct cma *cma, unsigned long pfn,
> +		unsigned long count)
> +{
> +	struct cma_defer_free_area *area;
> +	unsigned long new_pfn;
> +	int ret = -1;
> +
> +	if (!atomic64_read(&cma->defer_count))
> +		return ret;
> +	if (count <= atomic64_read(&cma->defer_count)) {
> +		spin_lock_irq(&cma->lock);
> +		list_for_each_entry(area, &cma->defer_free, list) {
> +			/*area found for given pfn and count*/
> +			if (pfn >= area->pfn && count <= area->count) {
> +				list_del(&area->list);
> +				/*set bits for allocated pfn*/
> +				bitmap_set(cma->bitmap, pfn - cma->base_pfn, count);
> +				kfree(area);
> +				atomic64_sub(count, &cma->defer_count);
> +				/*release the rest pfn to cma*/
> +				if (!list_empty(&cma->defer_free) && (pfn == area->pfn)) {
Isn't there a use-after-free here? 'area' is freed with kfree() above, but
area->pfn is still dereferenced in this condition.
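
If so, one way to avoid it could be to cache area->pfn before the kfree() and
only free the entry after its last use. Untested sketch (the local variable
area_pfn is mine):

	list_for_each_entry(area, &cma->defer_free, list) {
		/* area found for the given pfn and count */
		if (pfn >= area->pfn && count <= area->count) {
			unsigned long area_pfn = area->pfn;

			list_del(&area->list);
			/* set bits for the allocated pfn */
			bitmap_set(cma->bitmap, pfn - cma->base_pfn, count);
			atomic64_sub(count, &cma->defer_count);
			/* 'area' is not dereferenced after this point */
			kfree(area);
			/* release the rest of the pfns to cma */
			if (!list_empty(&cma->defer_free) && pfn == area_pfn) {
				...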
