Date: Mon, 29 Jan 2024 20:05:24 +0800
From: Yunsheng Lin <linyunsheng@...wei.com>
To: Lorenzo Bianconi <lorenzo@...nel.org>, <netdev@...r.kernel.org>
CC: <lorenzo.bianconi@...hat.com>, <davem@...emloft.net>, <kuba@...nel.org>,
	<edumazet@...gle.com>, <pabeni@...hat.com>, <bpf@...r.kernel.org>,
	<toke@...hat.com>, <willemdebruijn.kernel@...il.com>, <jasowang@...hat.com>,
	<sdf@...gle.com>, <hawk@...nel.org>, <ilias.apalodimas@...aro.org>
Subject: Re: [PATCH v6 net-next 1/5] net: add generic per-cpu page_pool
 allocator

On 2024/1/28 22:20, Lorenzo Bianconi wrote:

>  #ifdef CONFIG_LOCKDEP
>  /*
>   * register_netdevice() inits txq->_xmit_lock and sets lockdep class
> @@ -11686,6 +11690,27 @@ static void __init net_dev_struct_check(void)
>   *
>   */
>  
> +#define SD_PAGE_POOL_RING_SIZE	256

I might have missed it if there was a reason for choosing 256 here. Do we
need a different value for different page sizes? With a 64K page size, it
means we might need to reserve 16MB of memory for each CPU.
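For example, one way to keep the per-CPU footprint constant across page
sizes would be to size the ring in bytes rather than entries (a sketch
only; the *_RING_BYTES name is made up here, not from the patch):

	#define SD_PAGE_POOL_RING_BYTES	(1UL << 20)	/* ~1MB per CPU */
	#define SD_PAGE_POOL_RING_SIZE	(SD_PAGE_POOL_RING_BYTES / PAGE_SIZE)

With 4K pages that keeps the current 256 entries; with 64K pages it drops
to 16 entries instead of pinning 16MB per CPU.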

> +static int net_page_pool_alloc(int cpuid)
> +{
> +#if IS_ENABLED(CONFIG_PAGE_POOL)
> +	struct page_pool_params page_pool_params = {
> +		.pool_size = SD_PAGE_POOL_RING_SIZE,
> +		.nid = NUMA_NO_NODE,
> +	};
> +	struct page_pool *pp_ptr;
> +
> +	pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
> +	if (IS_ERR(pp_ptr)) {
> +		pp_ptr = NULL;

Unnecessary NULL setting? pp_ptr is a local variable, so assigning NULL
right before the return has no effect.
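That is, the error path could simply be:

	pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
	if (IS_ERR(pp_ptr))
		return -ENOMEM;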

> +		return -ENOMEM;
> +	}
> +
> +	per_cpu(page_pool, cpuid) = pp_ptr;
> +#endif
> +	return 0;
> +}
> +
>  /*
>   *       This is called single threaded during boot, so no need
>   *       to take the rtnl semaphore.
> @@ -11738,6 +11763,9 @@ static int __init net_dev_init(void)
>  		init_gro_hash(&sd->backlog);
>  		sd->backlog.poll = process_backlog;
>  		sd->backlog.weight = weight_p;
> +
> +		if (net_page_pool_alloc(i))
> +			goto out;
>  	}
>  
>  	dev_boot_phase = 0;
> @@ -11765,6 +11793,18 @@ static int __init net_dev_init(void)
>  	WARN_ON(rc < 0);
>  	rc = 0;
>  out:
> +	if (rc < 0) {
> +		for_each_possible_cpu(i) {
> +			struct page_pool *pp_ptr = this_cpu_read(page_pool);

this_cpu_read() -> per_cpu_ptr()? this_cpu_read() always reads the
current CPU's copy, but the loop iterates over all possible CPUs.
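Something like the below, matching the per_cpu() accessor the cleanup
already uses for the write a few lines further down:

	struct page_pool *pp_ptr = per_cpu(page_pool, i);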

> +
> +			if (!pp_ptr)
> +				continue;
> +
> +			page_pool_destroy(pp_ptr);
> +			per_cpu(page_pool, i) = NULL;
> +		}
> +	}
> +
>  	return rc;
>  }

