Message-ID: <495AB5DD.8040204@oracle.com>
Date:	Tue, 30 Dec 2008 15:59:25 -0800
From:	Randy Dunlap <randy.dunlap@...cle.com>
To:	Peter W Morreale <pmorreale@...ell.com>
CC:	linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/2] Add /proc controls for pdflush threads

Peter W Morreale wrote:
> From: \"Peter W. Morreale\" <pmorreale@...ell.com>
> 
> This patch adds /proc entries to give the admin the ability to
> control the minimum and maximum number of pdflush threads.  This allows
> finer control of pdflush on both large and small machines.
> 
> The patch adds '/proc/sys/vm/nr_pdflush_threads_min' and
> '/proc/sys/vm/nr_pdflush_threads_max' with r/w permissions.

These need to be documented in (ugh) either
Documentation/filesystems/proc.txt or
Documentation/sysctl/vm.txt.  Hard to say which one, but I'd
recommend the latter (vm.txt) since they are sysctls.
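
Something along these lines, perhaps (the wording is only a sketch):

nr_pdflush_threads_min
----------------------

The minimum number of pdflush threads; the pool is never shrunk
below this value.  Defaults to the compiled-in MIN_PDFLUSH_THREADS.

nr_pdflush_threads_max
----------------------

The maximum number of pdflush threads; the pool is never grown
beyond this value.  Defaults to the compiled-in MAX_PDFLUSH_THREADS.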


It looks like there is already a lot of duplication between those
two files.  That's not good, IMO.

> ---
> 
> Signed-off-by: Peter W Morreale <pmorreale@...ell.com>
> 
>  include/linux/sysctl.h    |    2 ++
>  include/linux/writeback.h |    2 ++
>  kernel/sysctl.c           |   16 ++++++++++++++++
>  mm/pdflush.c              |   19 ++++++++++++++-----
>  4 files changed, 34 insertions(+), 5 deletions(-)
> 
> diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
> index d0437f3..9921e62 100644
> --- a/include/linux/sysctl.h
> +++ b/include/linux/sysctl.h
> @@ -205,6 +205,8 @@ enum
>  	VM_PANIC_ON_OOM=33,	/* panic at out-of-memory */
>  	VM_VDSO_ENABLED=34,	/* map VDSO into new processes? */
>  	VM_MIN_SLAB=35,		 /* Percent pages ignored by zone reclaim */
> +	VM_NR_PDFLUSH_THREADS_MAX=36, /* nr_pdflush_threads_max */
> +	VM_NR_PDFLUSH_THREADS_MIN=37, /* nr_pdflush_threads_min */
>  };
>  
>  
> diff --git a/include/linux/writeback.h b/include/linux/writeback.h
> index 12b15c5..ee566a0 100644
> --- a/include/linux/writeback.h
> +++ b/include/linux/writeback.h
> @@ -150,6 +150,8 @@ void writeback_set_ratelimit(void);
>  /* pdflush.c */
>  extern int nr_pdflush_threads;	/* Global so it can be exported to sysctl
>  				   read-only. */
> +extern int nr_pdflush_threads_max; /* Global so it can be exported to sysctl */
> +extern int nr_pdflush_threads_min; /* Global so it can be exported to sysctl */
>  
>  
>  #endif		/* WRITEBACK_H */
> diff --git a/kernel/sysctl.c b/kernel/sysctl.c
> index 50ec088..6dae777 100644
> --- a/kernel/sysctl.c
> +++ b/kernel/sysctl.c
> @@ -948,6 +948,22 @@ static struct ctl_table vm_table[] = {
>  		.proc_handler	= &proc_dointvec,
>  	},
>  	{
> +		.ctl_name	= VM_NR_PDFLUSH_THREADS_MIN,
> +		.procname	= "nr_pdflush_threads_min",
> +		.data		= &nr_pdflush_threads_min,
> +		.maxlen		= sizeof nr_pdflush_threads_min,
> +		.mode		= 0644,
> +		.proc_handler	= &proc_dointvec,
> +	},
> +	{
> +		.ctl_name	= VM_NR_PDFLUSH_THREADS_MAX,
> +		.procname	= "nr_pdflush_threads_max",
> +		.data		= &nr_pdflush_threads_max,
> +		.maxlen		= sizeof nr_pdflush_threads_max,
> +		.mode		= 0644,
> +		.proc_handler	= &proc_dointvec,
> +	},
> +	{
>  		.ctl_name	= VM_SWAPPINESS,
>  		.procname	= "swappiness",
>  		.data		= &vm_swappiness,
> diff --git a/mm/pdflush.c b/mm/pdflush.c
> index 481680f..9a6f835 100644
> --- a/mm/pdflush.c
> +++ b/mm/pdflush.c
> @@ -58,6 +58,14 @@ static DEFINE_SPINLOCK(pdflush_lock);
>  int nr_pdflush_threads = 0;
>  
>  /*
> + * The max/min number of pdflush threads, R/W by sysctl at
> + * /proc/sys/vm/nr_pdflush_threads_{max,min}
> + */
> +int nr_pdflush_threads_max = MAX_PDFLUSH_THREADS;
> +int nr_pdflush_threads_min = MIN_PDFLUSH_THREADS;
> +
> +
> +/*
>   * The time at which the pdflush thread pool last went empty
>   */
>  static unsigned long last_empty_jifs;
> @@ -68,7 +76,7 @@ static unsigned long last_empty_jifs;
>   * Thread pool management algorithm:
>   * 
>   * - The minimum and maximum number of pdflush instances are bound
> - *   by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
> + *   by nr_pdflush_threads_min and nr_pdflush_threads_max.
>   * 
>   * - If there have been no idle pdflush instances for 1 second, create
>   *   a new one.
> @@ -133,7 +141,8 @@ static int __pdflush(struct pdflush_work *my_work)
>  		 */
>  		if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
>  			if (list_empty(&pdflush_list)) {
> -				if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
> +				if (nr_pdflush_threads <
> +						nr_pdflush_threads_max) {
>  					nr_pdflush_threads++;
>  					spin_unlock_irq(&pdflush_lock);
>  					start_one_pdflush_thread();
> @@ -150,7 +159,7 @@ static int __pdflush(struct pdflush_work *my_work)
>  		 */
>  		if (list_empty(&pdflush_list))
>  			continue;
> -		if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
> +		if (nr_pdflush_threads <= nr_pdflush_threads_min)
>  			continue;
>  		pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
>  		if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
> @@ -246,9 +255,9 @@ static int __init pdflush_init(void)
>  	 * Pre-set nr_pdflush_threads...  If we fail to create, 
>  	 * the count will be decremented.
>  	 */
> -	nr_pdflush_threads = MIN_PDFLUSH_THREADS;
> +	nr_pdflush_threads = nr_pdflush_threads_min;
>  
> -	for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
> +	for (i = 0; i < nr_pdflush_threads_min; i++)
>  		start_one_pdflush_thread();
>  	return 0;
>  }
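
One more comment: with plain proc_dointvec there is nothing to stop
an admin from writing zero, a negative value, or a min larger than
the max.  proc_dointvec_minmax with .extra1/.extra2 would clamp
writes in the kernel.  An untested sketch ("one" is just a local
bound I made up, and pointing .extra2 at the other knob keeps the
pair consistent with each other):

	static int one = 1;

	{
		.ctl_name	= VM_NR_PDFLUSH_THREADS_MIN,
		.procname	= "nr_pdflush_threads_min",
		.data		= &nr_pdflush_threads_min,
		.maxlen		= sizeof nr_pdflush_threads_min,
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &one,
		.extra2		= &nr_pdflush_threads_max,
	},

The _max entry would take &nr_pdflush_threads_min as its .extra1 in
the same way.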


-- 
~Randy
