Message-ID: <20201110115358.GE2594@hirez.programming.kicks-ass.net>
Date: Tue, 10 Nov 2020 12:53:58 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: "Ahmed S. Darwish" <a.darwish@...utronix.de>
Cc: John Hubbard <jhubbard@...dia.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Jason Gunthorpe <jgg@...dia.com>, Peter Xu <peterx@...hat.com>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
Andrea Arcangeli <aarcange@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>,
Christoph Hellwig <hch@....de>,
Hugh Dickins <hughd@...gle.com>, Jan Kara <jack@...e.cz>,
Jann Horn <jannh@...gle.com>,
Kirill Shutemov <kirill@...temov.name>,
Kirill Tkhai <ktkhai@...tuozzo.com>,
Leon Romanovsky <leonro@...dia.com>,
Linux-MM <linux-mm@...ck.org>, Michal Hocko <mhocko@...e.com>,
Oleg Nesterov <oleg@...hat.com>,
Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Sebastian Siewior <bigeasy@...utronix.de>
Subject: Re: [PATCH v2 2/2] mm: prevent gup_fast from racing with COW during
fork
On Wed, Nov 04, 2020 at 04:17:11AM +0100, Ahmed S. Darwish wrote:
> On Tue, Nov 03, 2020 at 06:01:30PM -0800, John Hubbard wrote:
> > On 11/3/20 5:32 PM, Ahmed S. Darwish wrote:
> ...
> > > #define __read_seqcount_retry(s, start) \
> > > - __read_seqcount_t_retry(__seqcount_ptr(s), start)
> > > + __do___read_seqcount_retry(__seqcount_ptr(s), start)
> >
> ...
> > A nit: while various numbers of leading underscores are sometimes used, it's a lot
> > less common to use, say, 3 consecutive underscores (as above) *within* the name. And
> > I don't think you need it for uniqueness, at least from a quick look around here.
> >
> ...
> > But again, either way, I think "do" is helping a *lot* here (as is getting rid
> > of the _t_ idea).
>
> The three underscores are needed because there's a do_ version for
> read_seqcount_retry(), and another for __read_seqcount_retry().
>
> Similarly for {__,}read_seqcount_begin(). You want to be very careful
> with this, and never mistakenly mix the two, because it affects some VFS
> hot paths.
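>
> For context, the two retry flavours differ only in the read barrier,
> which is why silently mixing them up would weaken the ordering on
> those paths. Roughly (a sketch of the current helpers, not the
> verbatim seqlock.h source):
>
>   /* No smp_rmb(): callers such as the dcache lookup fast path
>    * provide their own ordering (e.g. via dependent loads). */
>   static inline int __read_seqcount_t_retry(const seqcount_t *s,
>                                             unsigned start)
>   {
>       return unlikely(READ_ONCE(s->sequence) != start);
>   }
>
>   static inline int read_seqcount_t_retry(const seqcount_t *s,
>                                           unsigned start)
>   {
>       smp_rmb();  /* order the read section before the recheck */
>       return __read_seqcount_t_retry(s, start);
>   }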
>
> Nonetheless, as you mentioned in the later (dropped) part of your
> message, I think do_ is better than __do_, so the final result will be:
>
> do___read_seqcount_retry()
> do_read_seqcount_retry()
> do_raw_write_seqcount_begin()
> do_raw_write_seqcount_end()
> do_write_seqcount_begin()
> ...
>
> and so on.
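>
> E.g., the retry pair from the hunk above would then become (sketch):
>
>   #define __read_seqcount_retry(s, start) \
>       do___read_seqcount_retry(__seqcount_ptr(s), start)
>
>   #define read_seqcount_retry(s, start) \
>       do_read_seqcount_retry(__seqcount_ptr(s), start)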
>
> I'll wait for some further feedback on the two patches (possibly from
> Linus or PeterZ), then send a mini patch series.
I'm Ok with that. The change I have issues with is:
-#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
+#define __seqcount_associated_lock_exists_and_is_preemptible(s) __seqprop(s, preemptible)
That's just _realllllllly_ long.
Would something like the below make it easier to follow?
seqprop_preemptible() is 'obviously' related to __seqprop_preemptible()
without having to follow through the _Generic token pasting maze.
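For anyone who hasn't chased it through: __seqprop() is a _Generic
dispatch plus token pasting. A standalone sketch of the pattern
(made-up two-flavour version, not the exact seqlock.h text):

	#include <stdio.h>

	/* Made-up stand-ins for two seqcount flavours. */
	typedef struct { unsigned sequence; } seqcount_t;
	typedef struct { seqcount_t seqcount; /* struct mutex *lock; */ } seqcount_mutex_t;

	/* Per-flavour property helpers; ## pastes the flavour and the
	 * property into the helper name, like __seqprop_case() does. */
	static inline int __seqprop_preemptible(const seqcount_t *s)
	{ (void)s; return 0; }
	static inline int __seqprop_mutex_preemptible(const seqcount_mutex_t *s)
	{ (void)s; return 1; /* mutex writers can be preempted */ }

	/* _Generic picks the helper from the static type of *(s); the
	 * (void *) cast keeps the unselected branches type-correct. */
	#define __seqprop(s, prop) _Generic(*(s),			\
		seqcount_t:	  __seqprop_##prop((void *)(s)),	\
		seqcount_mutex_t: __seqprop_mutex_##prop((void *)(s)))

	#define seqprop_preemptible(s)	__seqprop(s, preemptible)

	int main(void)
	{
		seqcount_t sc = { 0 };
		seqcount_mutex_t scm = { { 0 } };

		printf("%d %d\n", seqprop_preemptible(&sc),	/* 0 */
				  seqprop_preemptible(&scm));	/* 1 */
		return 0;
	}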
---
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 8d8552474c64..576594add921 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -307,10 +307,10 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
__seqprop_case((s), mutex, prop), \
__seqprop_case((s), ww_mutex, prop))
-#define __seqcount_ptr(s) __seqprop(s, ptr)
-#define __seqcount_sequence(s) __seqprop(s, sequence)
-#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
-#define __seqcount_assert_lock_held(s) __seqprop(s, assert)
+#define seqprop_ptr(s) __seqprop(s, ptr)
+#define seqprop_sequence(s) __seqprop(s, sequence)
+#define seqprop_preemptible(s) __seqprop(s, preemptible)
+#define seqprop_assert(s) __seqprop(s, assert)
/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
@@ -330,7 +330,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
({ \
unsigned __seq; \
\
- while ((__seq = __seqcount_sequence(s)) & 1) \
+ while ((__seq = seqprop_sequence(s)) & 1) \
cpu_relax(); \
\
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
@@ -359,7 +359,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
*/
#define read_seqcount_begin(s) \
({ \
- seqcount_lockdep_reader_access(__seqcount_ptr(s)); \
+ seqcount_lockdep_reader_access(seqprop_ptr(s)); \
raw_read_seqcount_begin(s); \
})
@@ -376,7 +376,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
*/
#define raw_read_seqcount(s) \
({ \
- unsigned __seq = __seqcount_sequence(s); \
+ unsigned __seq = seqprop_sequence(s); \
\
smp_rmb(); \
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
@@ -425,7 +425,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
* Return: true if a read section retry is required, else false
*/
#define __read_seqcount_retry(s, start) \
- __read_seqcount_t_retry(__seqcount_ptr(s), start)
+ __read_seqcount_t_retry(seqprop_ptr(s), start)
static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
@@ -445,7 +445,7 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
* Return: true if a read section retry is required, else false
*/
#define read_seqcount_retry(s, start) \
- read_seqcount_t_retry(__seqcount_ptr(s), start)
+ read_seqcount_t_retry(seqprop_ptr(s), start)
static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
@@ -459,10 +459,10 @@ static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
*/
#define raw_write_seqcount_begin(s) \
do { \
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_disable(); \
\
- raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
+ raw_write_seqcount_t_begin(seqprop_ptr(s)); \
} while (0)
static inline void raw_write_seqcount_t_begin(seqcount_t *s)
@@ -478,9 +478,9 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s)
*/
#define raw_write_seqcount_end(s) \
do { \
- raw_write_seqcount_t_end(__seqcount_ptr(s)); \
+ raw_write_seqcount_t_end(seqprop_ptr(s)); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_enable(); \
} while (0)
@@ -501,12 +501,12 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s)
*/
#define write_seqcount_begin_nested(s, subclass) \
do { \
- __seqcount_assert_lock_held(s); \
+ seqprop_assert(s); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_disable(); \
\
- write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
+ write_seqcount_t_begin_nested(seqprop_ptr(s), subclass); \
} while (0)
static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
@@ -528,12 +528,12 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
*/
#define write_seqcount_begin(s) \
do { \
- __seqcount_assert_lock_held(s); \
+ seqprop_assert(s); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_disable(); \
\
- write_seqcount_t_begin(__seqcount_ptr(s)); \
+ write_seqcount_t_begin(seqprop_ptr(s)); \
} while (0)
static inline void write_seqcount_t_begin(seqcount_t *s)
@@ -549,9 +549,9 @@ static inline void write_seqcount_t_begin(seqcount_t *s)
*/
#define write_seqcount_end(s) \
do { \
- write_seqcount_t_end(__seqcount_ptr(s)); \
+ write_seqcount_t_end(seqprop_ptr(s)); \
\
- if (__seqcount_lock_preemptible(s)) \
+ if (seqprop_preemptible(s)) \
preempt_enable(); \
} while (0)
@@ -603,7 +603,7 @@ static inline void write_seqcount_t_end(seqcount_t *s)
* }
*/
#define raw_write_seqcount_barrier(s) \
- raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+ raw_write_seqcount_t_barrier(seqprop_ptr(s))
static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
{
@@ -623,7 +623,7 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
* will complete successfully and see data older than this.
*/
#define write_seqcount_invalidate(s) \
- write_seqcount_t_invalidate(__seqcount_ptr(s))
+ write_seqcount_t_invalidate(seqprop_ptr(s))
static inline void write_seqcount_t_invalidate(seqcount_t *s)
{
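
FWIW, at a call site the rename then reads like this (a sketch against
a seqcount_mutex_t, assuming the stock SEQCNT_MUTEX_ZERO initializer):

	static DEFINE_MUTEX(data_lock);
	static seqcount_mutex_t data_seq = SEQCNT_MUTEX_ZERO(data_seq, &data_lock);

	static void update_data(void)
	{
		mutex_lock(&data_lock);

		/* Expands via seqprop_assert() (lockdep: data_lock held)
		 * and seqprop_preemptible() (true for a mutex, so
		 * preemption is disabled across the write section). */
		write_seqcount_begin(&data_seq);
		/* ... publish the updates ... */
		write_seqcount_end(&data_seq);

		mutex_unlock(&data_lock);
	}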