[<prev] [next>] [day] [month] [year] [list]
Message-Id: <20260115070454.118065-1-zenghongling@kylinos.cn>
Date: Thu, 15 Jan 2026 15:04:54 +0800
From: zenghongling <zenghongling@...inos.cn>
To: mingo@...hat.com,
juri.lelli@...hat.com,
dietmar.eggemann@....com,
rostedt@...dmis.org,
bsegall@...gle.com,
mgorman@...e.de,
vschneid@...hat.com
Cc: linux-kernel@...r.kernel.org,
zhongling0719@....com,
zenghongling <zenghongling@...inos.cn>
Subject: [PATCH] sched: optimize cfs_bandwidth structure
Using pahole, we can see that there are some padding holes
in the current cfs_bandwidth structure. Adjusting the
layout of cfs_bandwidth can reduce these holes,
resulting in the size of the structure decreasing
from 240 bytes to 232 bytes.
Before:
struct cfs_bandwidth {
raw_spinlock_t lock; /* 0 4 */
/* XXX 4 bytes hole, try to pack */
ktime_t period; /* 8 8 */
u64 quota; /* 16 8 */
u64 runtime; /* 24 8 */
u64 burst; /* 32 8 */
u64 runtime_snap; /* 40 8 */
s64 hierarchical_quota; /* 48 8 */
u8 idle; /* 56 1 */
u8 period_active; /* 57 1 */
u8 slack_started; /* 58 1 */
/* XXX 5 bytes hole, try to pack */
/* --- cacheline 1 boundary (64 bytes) --- */
struct hrtimer period_timer; /* 64 64 */
/* --- cacheline 2 boundary (128 bytes) --- */
struct hrtimer slack_timer; /* 128 64 */
/* --- cacheline 3 boundary (192 bytes) --- */
struct list_head throttled_cfs_rq; /* 192 16 */
int nr_periods; /* 208 4 */
int nr_throttled; /* 212 4 */
int nr_burst; /* 216 4 */
/* XXX 4 bytes hole, try to pack */
u64 throttled_time; /* 224 8 */
u64 burst_time; /* 232 8 */
/* size: 240, cachelines: 4, members: 18 */
/* sum members: 227, holes: 3, sum holes: 13 */
/* paddings: 2, sum paddings: 8 */
/* forced alignments: 2, forced holes: 1, sum forced holes: 5 */
/* last cacheline: 48 bytes */
}
After:
struct cfs_bandwidth {
raw_spinlock_t lock; /* 0 4 */
u8 idle; /* 4 1 */
u8 period_active; /* 5 1 */
u8 slack_started; /* 6 1 */
/* XXX 1 byte hole, try to pack */
ktime_t period; /* 8 8 */
u64 quota; /* 16 8 */
u64 runtime; /* 24 8 */
u64 burst; /* 32 8 */
u64 runtime_snap; /* 40 8 */
s64 hierarchical_quota; /* 48 8 */
struct hrtimer period_timer; /* 56 64 */
/* --- cacheline 1 boundary (64 bytes) was 56 bytes ago --- */
struct hrtimer slack_timer; /* 120 64 */
/* --- cacheline 2 boundary (128 bytes) was 56 bytes ago --- */
struct list_head throttled_cfs_rq; /* 184 16 */
/* --- cacheline 3 boundary (192 bytes) was 8 bytes ago --- */
int nr_periods; /* 200 4 */
int nr_throttled; /* 204 4 */
int nr_burst; /* 208 4 */
/* XXX 4 bytes hole, try to pack */
u64 throttled_time; /* 216 8 */
u64 burst_time; /* 224 8 */
/* size: 232, cachelines: 4, members: 18 */
/* sum members: 227, holes: 2, sum holes: 5 */
/* paddings: 2, sum paddings: 8 */
/* forced alignments: 2 */
/* last cacheline: 40 bytes */
}
Signed-off-by: zenghongling <zenghongling@...inos.cn>
---
kernel/sched/sched.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index adfb6e3409d7..55ebd3d868e6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -444,6 +444,9 @@ static inline u64 default_bw_period_us(void)
struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
raw_spinlock_t lock;
+ u8 idle;
+ u8 period_active;
+ u8 slack_started;
ktime_t period;
u64 quota;
u64 runtime;
@@ -451,9 +454,6 @@ struct cfs_bandwidth {
u64 runtime_snap;
s64 hierarchical_quota;
- u8 idle;
- u8 period_active;
- u8 slack_started;
struct hrtimer period_timer;
struct hrtimer slack_timer;
struct list_head throttled_cfs_rq;
--
2.25.1
Powered by blists - more mailing lists