Message-ID: <20250502133725.1210587-3-neelx@suse.com>
Date: Fri, 2 May 2025 15:37:23 +0200
From: Daniel Vacek <neelx@...e.com>
To: David Sterba <dsterba@...e.com>,
Chris Mason <clm@...com>,
Josef Bacik <josef@...icpanda.com>
Cc: Daniel Vacek <neelx@...e.com>,
linux-btrfs@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 2/2] btrfs: rearrange the extent buffer structure members

Fill in the hole left after the removal of the `len` member. This makes no
difference with the default config, but on -rt kernels it cuts the structure
size down by 8 bytes due to the different lock sizes and alignment. It also
completely eliminates the other hole that was there before.

Signed-off-by: Daniel Vacek <neelx@...e.com>
---
This patch is new in v2.
@Dave> What assembly would you like to see?
>@@ -10148,30 +10148,27 @@
> struct extent_buffer {
> u64 start; /* 0 8 */
> u32 folio_size; /* 8 4 */
>+ u8 folio_shift; /* 12 1 */
>+ s8 log_index; /* 13 1 */
>
>- /* XXX 4 bytes hole, try to pack */
>+ /* XXX 2 bytes hole, try to pack */
>
> long unsigned int bflags; /* 16 8 */
> struct btrfs_fs_info * fs_info; /* 24 8 */
> void * addr; /* 32 8 */
> spinlock_t refs_lock; /* 40 32 */
> /* --- cacheline 1 boundary (64 bytes) was 8 bytes ago --- */
> atomic_t refs; /* 72 4 */
> int read_mirror; /* 76 4 */
>- s8 log_index; /* 80 1 */
>- u8 folio_shift; /* 81 1 */
>+ struct callback_head callback_head __attribute__((__aligned__(8))); /* 80 16 */
>+ struct rw_semaphore lock; /* 96 40 */
>+ /* --- cacheline 2 boundary (128 bytes) was 8 bytes ago --- */
>+ struct folio * folios[16]; /* 136 128 */
>
>- /* XXX 6 bytes hole, try to pack */
>-
>- struct callback_head callback_head __attribute__((__aligned__(8))); /* 88 16 */
>- struct rw_semaphore lock; /* 104 40 */
>- /* --- cacheline 2 boundary (128 bytes) was 16 bytes ago --- */
>- struct folio * folios[16]; /* 144 128 */
>-
>- /* size: 272, cachelines: 5, members: 13 */
>- /* sum members: 262, holes: 2, sum holes: 10 */
>- /* forced alignments: 1, forced holes: 1, sum forced holes: 6 */
>- /* last cacheline: 16 bytes */
>+ /* size: 264, cachelines: 5, members: 13 */
>+ /* sum members: 262, holes: 1, sum holes: 2 */
>+ /* forced alignments: 1 */
>+ /* last cacheline: 8 bytes */
> } __attribute__((__aligned__(8)));
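FWIW, the dump above is plain pahole output, so anyone wanting to double-check
the layout on their own config should be able to regenerate it with something
along these lines (assuming a build with debug info; the exact offsets of
refs_lock and lock of course depend on CONFIG_PREEMPT_RT and the lock
debugging options):

  $ pahole -C extent_buffer fs/btrfs/extent_io.o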
Here refs_lock and refs end up split across two cachelines. But since the
slab objects are not cacheline-aligned anyway, this split is unavoidable on
-rt. On non-rt configs the two members always travel together, as they fit
into a single aligned 8-byte slot, so this is not an issue there.
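For anyone who wants to see the effect without building an -rt kernel, here is
a stand-alone toy model of the same mechanism (userspace code with stand-in
member types, not the kernel definitions): two 1-byte members stranded in
front of an 8-byte aligned member cost a 6-byte hole, and moving them into the
existing 4-byte hole after folio_size gets those 8 bytes back.

/* Toy model only; the member types are stand-ins for the kernel ones. */
#include <stdio.h>
#include <stdint.h>

struct eb_old {
	uint64_t start;
	uint32_t folio_size;
	/* XXX 4 bytes hole */
	unsigned long bflags;
	int8_t log_index;
	uint8_t folio_shift;
	/* XXX 6 bytes hole before the 8-byte aligned member below */
	uint64_t callback_head_stub[2];	/* stands in for struct callback_head */
};

struct eb_new {
	uint64_t start;
	uint32_t folio_size;
	uint8_t folio_shift;
	int8_t log_index;
	/* XXX 2 bytes hole */
	unsigned long bflags;
	uint64_t callback_head_stub[2];
};

int main(void)
{
	/* Prints 48 and 40 on x86_64, i.e. the same 8 bytes saved. */
	printf("old: %zu bytes\n", sizeof(struct eb_old));
	printf("new: %zu bytes\n", sizeof(struct eb_new));
	return 0;
}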
---
fs/btrfs/extent_io.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 7a8451c11630a..5162d2da767ad 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -88,6 +88,9 @@ void __cold extent_buffer_free_cachep(void);
struct extent_buffer {
u64 start;
u32 folio_size;
+ u8 folio_shift;
+ /* >= 0 if eb belongs to a log tree, -1 otherwise */
+ s8 log_index;
unsigned long bflags;
struct btrfs_fs_info *fs_info;

@@ -100,9 +103,6 @@ struct extent_buffer {
spinlock_t refs_lock;
atomic_t refs;
int read_mirror;
- /* >= 0 if eb belongs to a log tree, -1 otherwise */
- s8 log_index;
- u8 folio_shift;
struct rcu_head rcu_head;

struct rw_semaphore lock;
--
2.47.2