Message-Id: <e02f861b-706c-4f6d-bded-002601da954a@app.fastmail.com>
Date: Tue, 30 Sep 2025 08:35:55 +0200
From: "Arnd Bergmann" <arnd@...db.de>
To: "Finn Thain" <fthain@...ux-m68k.org>
Cc: "Geert Uytterhoeven" <geert@...ux-m68k.org>,
"Peter Zijlstra" <peterz@...radead.org>, "Will Deacon" <will@...nel.org>,
"Andrew Morton" <akpm@...ux-foundation.org>,
"Boqun Feng" <boqun.feng@...il.com>, "Jonathan Corbet" <corbet@....net>,
"Mark Rutland" <mark.rutland@....com>, linux-kernel@...r.kernel.org,
Linux-Arch <linux-arch@...r.kernel.org>, linux-m68k@...r.kernel.org,
"Lance Yang" <lance.yang@...ux.dev>
Subject: Re: [RFC v2 2/3] atomic: Specify alignment for atomic_t and atomic64_t
On Tue, Sep 30, 2025, at 04:18, Finn Thain wrote:
> On Tue, 23 Sep 2025, I wrote:
>>
>> ... there's still some kmem cache or other allocator somewhere that has
>> produced some misaligned path and dentry structures. So we get
>> misaligned atomics somewhere in the VFS and TTY layers. I was unable to
>> find those allocations.
>>
>
> It turned out that the problem wasn't dynamic allocations but a local
> variable in the core locking code (kernel/locking/rwsem.c): a misaligned
> long used with an atomic operation (cmpxchg). To get natural alignment
> for 64-bit quantities, I had to align other local variables as well,
> such as the one in ktime_get_real_ts64_mg() that's used with
> atomic64_try_cmpxchg(). The atomic_t branch in my github repo has the
> patches I wrote for that.
It looks like the variable you get the warning for is not even the
atomic64_t but the 'old' argument to atomic64_try_cmpxchg(), at least
in some of the cases you found, if not all of them.

I don't see why there would be a requirement to have that aligned at
all, even if we do require the atomic64_t itself to be naturally
aligned, and I would expect the same warning to hit on x86-32 and the
other architectures with only 4-byte alignment of u64 variables on the
stack and in .data.
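
To illustrate what I mean (my own sketch, not code from your series):
even with an aligned() attribute on atomic64_t as in this RFC, the
'old' value is an ordinary s64 local that only gets the ABI alignment,
so something like this remains perfectly legal:

/*
 * Sketch only: 'old' is a plain s64 local, which the ABI only aligns
 * to 4 bytes on x86-32 (and 2 on m68k), so &old can trip a check that
 * insists on natural 8-byte alignment even though the atomic64_t
 * itself is fine.
 */
static bool example_increment(atomic64_t *v)
{
	s64 old = atomic64_read(v);

	return atomic64_try_cmpxchg(v, &old, old + 1);
}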
> To silence the misalignment WARN from CONFIG_DEBUG_ATOMIC for 64-bit
> atomic operations with my small m68k .config, it was also necessary to
> increase ARCH_SLAB_MINALIGN to 8. However, I'm not advocating an
> ARCH_SLAB_MINALIGN increase, as that wastes memory.
Have you tried to quantify the memory waste here? I assume that most
slab allocations are already 8-byte aligned, at least kmalloc() with
size > 4, while custom caches are usually created for larger
structures, where an average of two extra bytes per allocation may not
be that bad.
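
Just to spell out the arithmetic behind that estimate (a sketch of my
own, not something I have measured): the per-object cost is only the
additional rounding of the object size up to the next multiple of 8,
relative to whatever the cache's current alignment is:

/*
 * Back-of-the-envelope sketch: extra per-object padding from raising
 * ARCH_SLAB_MINALIGN to 8, given the current alignment 'curalign'
 * (4 on most 32-bit ABIs, possibly smaller on m68k).
 */
static inline unsigned int minalign8_extra_waste(unsigned int objsize,
						 unsigned int curalign)
{
	return ALIGN(objsize, 8) - ALIGN(objsize, curalign);
}

/* e.g. objsize 44, curalign 4: 4 extra bytes; objsize 48: none */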
> diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
> index 402a999a0d6b..cd569a87c0a8 100644
> --- a/include/linux/instrumented.h
> +++ b/include/linux/instrumented.h
> @@ -68,7 +68,7 @@ static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
> {
> 	kasan_check_read(v, size);
> 	kcsan_check_atomic_read(v, size);
> -	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ATOMIC) && ((unsigned long)v & (size - 1)));
> +	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ATOMIC) && ((unsigned long)v & (size - 1) & 3));
> }
What is the alignment of stack variables on m68k? E.g. if you
have a function with two local variables, would that still
be able to trigger the check?
extern void g(u16 *p);

void f(atomic64_t *a)
{
	u16 pad;	/* may leave 'old' on a 2- or 4-byte boundary */
	s64 old = 0;

	g(&pad);
	atomic64_try_cmpxchg(a, &old, 0);
}
Since there is nothing telling the compiler that the 'old' argument to
atomic*_try_cmpxchg() needs to be naturally aligned, maybe that check
should be changed to only test for the ABI-guaranteed alignment? I
think that relaxation would still be needed on x86-32.
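
Concretely, with the alignment passed in as the size argument (as in
the diff below), the misalignment test for the s64 'old' case would
effectively reduce to something like this sketch:

	/* only enforce the ABI-guaranteed alignment of a plain s64 */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ATOMIC) &&
		     ((unsigned long)old & (alignof(s64) - 1)));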
Arnd
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index 9409a6ddf3e0..e57763a889bd 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -1276,7 +1276,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic_try_cmpxchg(v, old, new);
}
@@ -1298,7 +1298,7 @@ static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic_try_cmpxchg_acquire(v, old, new);
}
@@ -1321,7 +1321,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic_try_cmpxchg_release(v, old, new);
}
@@ -1343,7 +1343,7 @@ static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic_try_cmpxchg_relaxed(v, old, new);
}
@@ -2854,7 +2854,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic64_try_cmpxchg(v, old, new);
}
@@ -2876,7 +2876,7 @@ static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic64_try_cmpxchg_acquire(v, old, new);
}
@@ -2899,7 +2899,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic64_try_cmpxchg_release(v, old, new);
}
@@ -2921,7 +2921,7 @@ static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic64_try_cmpxchg_relaxed(v, old, new);
}
@@ -4432,7 +4432,7 @@ atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic_long_try_cmpxchg(v, old, new);
}
@@ -4454,7 +4454,7 @@ static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic_long_try_cmpxchg_acquire(v, old, new);
}
@@ -4477,7 +4477,7 @@ atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic_long_try_cmpxchg_release(v, old, new);
}
@@ -4499,7 +4499,7 @@ static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_atomic_read_write(old, alignof(*old));
return raw_atomic_long_try_cmpxchg_relaxed(v, old, new);
}