Message-ID: <87plvho872.fsf@yhuang6-desk2.ccr.corp.intel.com>
Date: Tue, 26 Mar 2024 13:47:45 +0800
From: "Huang, Ying" <ying.huang@...el.com>
To: Johannes Weiner <hannes@...xchg.org>
Cc: linux-mm@...ck.org,  david@...hat.com,  hughd@...gle.com,
  osandov@...com,  linux-fsdevel@...r.kernel.org,
  linux-kernel@...r.kernel.org
Subject: Re: [RFC PATCH] mm: swapfile: fix SSD detection with swapfile on btrfs

Hi, Johannes,

Johannes Weiner <hannes@...xchg.org> writes:

> +static struct swap_cluster_info *setup_clusters(struct swap_info_struct *p,
> +						unsigned char *swap_map)
> +{
> +	unsigned long nr_clusters = DIV_ROUND_UP(p->max, SWAPFILE_CLUSTER);
> +	unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
> +	struct swap_cluster_info *cluster_info;
> +	unsigned long i, j, k, idx;
> +	int cpu, err = -ENOMEM;
> +
> +	cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL);
>  	if (!cluster_info)
> -		return nr_extents;
> +		goto err;
> +
> +	for (i = 0; i < nr_clusters; i++)
> +		spin_lock_init(&cluster_info[i].lock);
>  
> +	p->cluster_next_cpu = alloc_percpu(unsigned int);
> +	if (!p->cluster_next_cpu)
> +		goto err_free;
> +
> +	/* Random start position to help with wear leveling */
> +	for_each_possible_cpu(cpu)
> +		per_cpu(*p->cluster_next_cpu, cpu) =
> +			get_random_u32_inclusive(1, p->highest_bit);
> +
> +	p->percpu_cluster = alloc_percpu(struct percpu_cluster);
> +	if (!p->percpu_cluster)
> +		goto err_free;
> +
> +	for_each_possible_cpu(cpu) {
> +		struct percpu_cluster *cluster;
> +
> +		cluster = per_cpu_ptr(p->percpu_cluster, cpu);
> +		cluster_set_null(&cluster->index);
> +	}
> +
> +	/*
> +	 * Mark unusable pages as unavailable. The clusters aren't
> +	 * marked free yet, so no list operations are involved yet.
> +	 */
> +	for (i = 0; i < round_up(p->max, SWAPFILE_CLUSTER); i++)
> +		if (i >= p->max || swap_map[i] == SWAP_MAP_BAD)
> +			inc_cluster_info_page(p, cluster_info, i);

If p->max is large, it seems better to use a loop like the one below:

	for (i = 0; i < swap_header->info.nr_badpages; i++) {
		unsigned int page_nr = swap_header->info.badpages[i];

		if (page_nr < p->max)
			inc_cluster_info_page(p, cluster_info, page_nr);
	}

In most cases, swap_header->info.nr_badpages should be much smaller
than p->max, so iterating the bad-page list directly avoids scanning
every slot.
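
To keep the behavior of the original round_up() loop, two more cases
would need marking besides the bad pages: slot 0 (the header page,
which setup_swap_map_and_extents() marks SWAP_MAP_BAD in swap_map)
and the slots past p->max in the partially covered last cluster. An
untested sketch, assuming swap_header is passed down to
setup_clusters():

	/* Slot 0 holds the swap header and is always unusable. */
	inc_cluster_info_page(p, cluster_info, 0);

	/* Slots from p->max to the end of the last cluster. */
	for (i = p->max; i < round_up(p->max, SWAPFILE_CLUSTER); i++)
		inc_cluster_info_page(p, cluster_info, i);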

> +
> +	cluster_list_init(&p->free_clusters);
> +	cluster_list_init(&p->discard_clusters);
>  
>  	/*
>  	 * Reduce false cache line sharing between cluster_info and
> @@ -2994,7 +3019,13 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p,
>  					      idx);
>  		}
>  	}
> -	return nr_extents;
> +
> +	return cluster_info;
> +
> +err_free:
> +	kvfree(cluster_info);
> +err:
> +	return ERR_PTR(err);
>  }
>  

[snip]

--
Best Regards,
Huang, Ying
