Date:	Tue, 13 Oct 2009 16:27:01 +0200
From:	Peter Zijlstra <a.p.zijlstra@...llo.nl>
To:	bharata@...ux.vnet.ibm.com
Cc:	linux-kernel@...r.kernel.org,
	Dhaval Giani <dhaval@...ux.vnet.ibm.com>,
	Balbir Singh <balbir@...ux.vnet.ibm.com>,
	Vaidyanathan Srinivasan <svaidy@...ux.vnet.ibm.com>,
	Gautham R Shenoy <ego@...ibm.com>,
	Srivatsa Vaddagiri <vatsa@...ibm.com>,
	Ingo Molnar <mingo@...e.hu>,
	Pavel Emelyanov <xemul@...nvz.org>,
	Herbert Poetzl <herbert@...hfloor.at>,
	Avi Kivity <avi@...hat.com>,
	Chris Friesen <cfriesen@...tel.com>,
	Paul Menage <menage@...gle.com>,
	Mike Waychison <mikew@...gle.com>
Subject: Re: [RFC v2 PATCH 3/8] sched: Bandwidth initialization for fair
 task groups

On Wed, 2009-09-30 at 18:22 +0530, Bharata B Rao wrote:

> diff --git a/kernel/sched.c b/kernel/sched.c
> index c283d0f..0147f6f 100644
> --- a/kernel/sched.c
> +++ b/kernel/sched.c
> @@ -262,6 +262,15 @@ static DEFINE_MUTEX(sched_domains_mutex);
>  
>  #include <linux/cgroup.h>
>  
> +#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS)
> +struct cfs_bandwidth {
> +	spinlock_t		cfs_runtime_lock;
> +	ktime_t			cfs_period;
> +	u64			cfs_runtime;
> +	struct hrtimer		cfs_period_timer;
> +};
> +#endif

too much cfs_ here; every field already lives inside struct cfs_bandwidth,
so the prefix is redundant.
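
Something like this reads better (an untested sketch of the rename, not
part of the patch):

	struct cfs_bandwidth {
		spinlock_t		runtime_lock;
		ktime_t			period;
		u64			runtime;
		struct hrtimer		period_timer;
	};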

>  struct cfs_rq;
>  
>  static LIST_HEAD(task_groups);
> @@ -282,6 +291,11 @@ struct task_group {
>  	/* runqueue "owned" by this group on each cpu */
>  	struct cfs_rq **cfs_rq;
>  	unsigned long shares;
> +#ifdef CONFIG_CFS_HARD_LIMITS
> +	struct cfs_bandwidth cfs_bandwidth;
> +	/* If set, throttle when the group exceeds its bandwidth */
> +	int hard_limit_enabled;
> +#endif

What's wrong with doing something like cfs_bandwidth.cfs_runtime ==
RUNTIME_INF?
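
That is, infer the state instead of carrying a separate flag; something
like (untested sketch, helper name made up):

	static inline int cfs_bandwidth_enabled(struct task_group *tg)
	{
		return tg->cfs_bandwidth.cfs_runtime != RUNTIME_INF;
	}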

>  #endif
>  
>  #ifdef CONFIG_RT_GROUP_SCHED
> @@ -477,6 +491,16 @@ struct cfs_rq {
>  	unsigned long rq_weight;
>  #endif
>  #endif
> +#ifdef CONFIG_CFS_HARD_LIMITS
> +	/* set when the group is throttled on this cpu */
> +	int cfs_throttled;
> +
> +	/* runtime currently consumed by the group on this rq */
> +	u64 cfs_time;
> +
> +	/* runtime available to the group on this rq */
> +	u64 cfs_runtime;
> +#endif

too much cfs_ again.

>  	/*
>  	 * Number of tasks in this hierarchy.
>  	 */
> @@ -665,6 +689,11 @@ struct rq {
>  	/* BKL stats */
>  	unsigned int bkl_count;
>  #endif
> +	/*
> +	 * Protects the cfs runtime related fields of all cfs_rqs under
> +	 * this rq
> +	 */
> +	spinlock_t runtime_lock;
>  };
>  
>  static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);


> +static inline void rq_runtime_lock(struct rq *rq)
> +{
> +	spin_lock(&rq->runtime_lock);
> +}
> +
> +static inline void rq_runtime_unlock(struct rq *rq)
> +{
> +	spin_unlock(&rq->runtime_lock);
> +}

needless obfuscation; these one-line wrappers just hide a plain spin_lock.
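
Open-coding it at the call sites says the same thing more directly:

	spin_lock(&rq->runtime_lock);
	/* update the per-rq cfs runtime fields */
	spin_unlock(&rq->runtime_lock);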

> CONFIG_RT_GROUP_SCHED
> @@ -10317,6 +10617,23 @@ static struct cftype cpu_files[] = {
>  		.read_u64 = cpu_shares_read_u64,
>  		.write_u64 = cpu_shares_write_u64,
>  	},
> +#ifdef CONFIG_CFS_HARD_LIMITS
> +	{
> +		.name = "cfs_runtime_us",
> +		.read_s64 = cpu_cfs_runtime_read_s64,
> +		.write_s64 = cpu_cfs_runtime_write_s64,
> +	},
> +	{
> +		.name = "cfs_period_us",
> +		.read_u64 = cpu_cfs_period_read_u64,
> +		.write_u64 = cpu_cfs_period_write_u64,
> +	},
> +	{
> +		.name = "cfs_hard_limit",
> +		.read_u64 = cpu_cfs_hard_limit_read_u64,
> +		.write_u64 = cpu_cfs_hard_limit_write_u64,
> +	},
> +#endif /* CONFIG_CFS_HARD_LIMITS */
>  #endif
>  #ifdef CONFIG_RT_GROUP_SCHED
>  	{

I guess that cfs_hard_limit file is superfluous as well; writing -1
(RUNTIME_INF) to cfs_runtime_us could serve as the off switch, the way
rt_runtime_us already works.
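
That would leave just the two files (sketch reusing the patch's handlers,
assuming the write path maps -1 to RUNTIME_INF):

	{
		.name = "cfs_runtime_us",
		.read_s64 = cpu_cfs_runtime_read_s64,
		.write_s64 = cpu_cfs_runtime_write_s64,
	},
	{
		.name = "cfs_period_us",
		.read_u64 = cpu_cfs_period_read_u64,
		.write_u64 = cpu_cfs_period_write_u64,
	},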
