Date:   Sun, 28 Apr 2019 16:32:06 +0900
From:   k-onishi <princeontrojanhorse@...il.com>
To:     mingo@...hat.com
Cc:     peterz@...radead.org, linux-kernel@...r.kernel.org,
        princeontrojanhorse@...il.com
Subject: [PATCH] feat(CFS Bandwidth): add an interface for CFS Bandwidth

This patch adds an interface for CFS bandwidth control that is more
intuitive and requires fewer read/write system calls.

I think that most people don't really care about the exact period and
quota of CFS bandwidth; in most cases they just want to say something
like "allow this process to use 50% of a single core".

But I know that we still need to set the period and quota directly in some cases.
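
For illustration, here is a rough userspace sketch of the difference; the
cgroup v1 mount point /sys/fs/cgroup/cpu and the "demo" group are only
examples and not part of this patch:

/*
 * Illustrative sketch: cap a group at 50% of one core.  Assumes a cgroup v1
 * cpu controller mounted at /sys/fs/cgroup/cpu and an existing group "demo".
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) != (ssize_t)strlen(val)) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Current interface: two writes, and the caller has to pick a period. */
	write_str("/sys/fs/cgroup/cpu/demo/cpu.cfs_period_us", "100000");
	write_str("/sys/fs/cgroup/cpu/demo/cpu.cfs_quota_us", "50000");

	/* Proposed interface: one write, expressed directly as a percentage. */
	write_str("/sys/fs/cgroup/cpu/demo/cpu.cfs_percent", "50");

	return 0;
}

With cpu.cfs_quota_us and cpu.cfs_period_us the caller picks a period and
does the percentage arithmetic; with cpu.cfs_percent a single write of "50"
maps to a quota of 50% of a fixed 1-second period.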

Please consider merging this if you like it, thanks.

Signed-off-by: k-onishi <princeontrojanhorse@...il.com>
---
 kernel/sched/core.c | 46 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4778c48a7fda..27338c727d7c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6700,6 +6700,35 @@ long tg_get_cfs_period(struct task_group *tg)
 	return cfs_period_us;
 }
 
+int tg_set_cfs_percent(struct task_group *tg, long cfs_percent)
+{
+	u64 quota, period;
+
+	if (cfs_percent < 0)
+		return -EINVAL;
+
+	period = 1 * NSEC_PER_SEC;
+	quota = (u64)cfs_percent * 10 * USEC_PER_SEC; /* cfs_percent% of 1s, in ns */
+	return tg_set_cfs_bandwidth(tg, period, quota);
+}
+
+long tg_get_cfs_percent(struct task_group *tg)
+{
+	u64 quota, period;
+
+	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
+		return -1;
+
+	quota = tg->cfs_bandwidth.quota;
+	period = tg->cfs_bandwidth.period;
+
+	do_div(quota, 10);		/* quota_ns / 10 */
+	do_div(period, NSEC_PER_USEC);	/* period in us */
+	do_div(quota, period);		/* (quota_ns / 10) / period_us == percent */
+
+	return quota;
+}
+
 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
 				  struct cftype *cft)
 {
@@ -6724,6 +6753,18 @@ static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
 	return tg_set_cfs_period(css_tg(css), cfs_period_us);
 }
 
+static s64 cpu_cfs_percent_read_s64(struct cgroup_subsys_state *css,
+				   struct cftype *cft)
+{
+	return tg_get_cfs_percent(css_tg(css));
+}
+
+static int cpu_cfs_percent_write_u64(struct cgroup_subsys_state *css,
+				   struct cftype *cftype, u64 cfs_percent)
+{
+	return tg_set_cfs_percent(css_tg(css), cfs_percent);
+}
+
 struct cfs_schedulable_data {
 	struct task_group *tg;
 	u64 period, quota;
@@ -6876,6 +6917,11 @@ static struct cftype cpu_legacy_files[] = {
 		.read_u64 = cpu_cfs_period_read_u64,
 		.write_u64 = cpu_cfs_period_write_u64,
 	},
+	{
+		.name = "cfs_percent",
+		.read_s64 = cpu_cfs_percent_read_s64,
+		.write_u64 = cpu_cfs_percent_write_u64,
+	},
 	{
 		.name = "stat",
 		.seq_show = cpu_cfs_stat_show,
-- 
2.17.1
