Message-Id: <20200810095428.2602276-1-a.darwish@linutronix.de>
Date: Mon, 10 Aug 2020 11:54:28 +0200
From: "Ahmed S. Darwish" <a.darwish@...utronix.de>
To: gregkh@...uxfoundation.org
Cc: a.darwish@...utronix.de, bigeasy@...utronix.de,
linux-kernel@...r.kernel.org, linux@...ck-us.net, mingo@...hat.com,
paulmck@...nel.org, peterz@...radead.org, rostedt@...dmis.org,
tglx@...utronix.de, will@...nel.org
Subject: [PATCH] Revert "seqlock: lockdep assert non-preemptibility on seqcount_t write"

This reverts commit 859247d39fb008ea812e8f0c398a58a20c12899e.

The current implementation of lockdep_assert_preemption_disabled() uses
per-CPU variables, which was done to untangle the existing
seqlock.h<=>sched.h 'current->' task_struct circular dependency.
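
For reference, the assertion is built on the new per-CPU lockdep state
roughly as follows (an approximation for illustration only, not the
verbatim lockdep.h definition):

  #define lockdep_assert_preemption_disabled()               \
  do {                                                       \
          WARN_ON_ONCE(debug_locks &&                        \
                       (preempt_count() == 0 &&              \
                        this_cpu_read(hardirqs_enabled)));   \
  } while (0)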

Using per-CPU variables did not fully untangle the dependency for
various non-x86 architectures though, resulting in multiple broken
builds. For the affected architectures, raw_smp_processor_id() led
back to 'current->', leaving the original seqlock.h<=>sched.h
dependency in full effect.
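
On those architectures the expansion chain looks roughly like this
(illustrative only; the exact definitions vary per architecture):

  this_cpu_read(hardirqs_enabled)
    -> generic per-CPU accessors, per_cpu_offset(raw_smp_processor_id())
    -> raw_smp_processor_id()
    -> current_thread_info()->cpu
    -> current                      /* needs sched.h's task_struct   */
    -> sched.h                      /* which itself includes seqlock.h */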

For now, revert adding lockdep_assert_preemption_disabled() to
seqlock.h.

Reported-by: Guenter Roeck <linux@...ck-us.net>
Link: https://lkml.kernel.org/r/20200808232122.GA176509@roeck-us.net
Link: https://lkml.kernel.org/r/20200810085954.GA1591892@kroah.com
References: Commit a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables")
Signed-off-by: Ahmed S. Darwish <a.darwish@...utronix.de>
---

Notes:
    My apologies for the mess on this one :-(
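
    For context, below is the write-side contract that the reverted
    assertion tried to enforce, as a minimal sketch (foo_seq, foo_data,
    foo_update() and foo_read() are made-up names):

        #include <linux/seqlock.h>

        static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
        static int foo_data;

        static void foo_update(int val)
        {
                /*
                 * Plain seqcount_t writers must not be preempted; the
                 * caller has to guarantee that, e.g. via preempt_disable().
                 */
                preempt_disable();
                write_seqcount_begin(&foo_seq);
                foo_data = val;
                write_seqcount_end(&foo_seq);
                preempt_enable();
        }

        static int foo_read(void)
        {
                unsigned int seq;
                int val;

                do {
                        seq = read_seqcount_begin(&foo_seq);
                        val = foo_data;
                } while (read_seqcount_retry(&foo_seq, seq));

                return val;
        }

    seqlock_t writers like write_seqlock() get non-preemptibility from
    the embedded spinlock, which is why the reverted commit gave them
    the internal __write_seqcount_begin() variant without the assert.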

 include/linux/seqlock.h | 29 ++++++-----------------------
 1 file changed, 6 insertions(+), 23 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 54bc20496392..e885702d8b82 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -266,12 +266,6 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
 	kcsan_nestable_atomic_end();
 }
 
-static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass)
-{
-	raw_write_seqcount_begin(s);
-	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
-}
-
 /**
  * write_seqcount_begin_nested() - start a seqcount_t write section with
  *                                 custom lockdep nesting level
@@ -282,19 +276,8 @@ static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass)
  */
 static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
-	lockdep_assert_preemption_disabled();
-	__write_seqcount_begin_nested(s, subclass);
-}
-
-/*
- * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks.
- *
- * Use for internal seqlock.h code where it's known that preemption is
- * already disabled. For example, seqlock_t write side functions.
- */
-static inline void __write_seqcount_begin(seqcount_t *s)
-{
-	__write_seqcount_begin_nested(s, 0);
+	raw_write_seqcount_begin(s);
+	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
 }
 
 /**
@@ -592,7 +575,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_begin(&sl->seqcount);
 }
 
 /**
@@ -618,7 +601,7 @@ static inline void write_sequnlock(seqlock_t *sl)
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_begin(&sl->seqcount);
 }
 
 /**
@@ -645,7 +628,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_begin(&sl->seqcount);
 }
 
 /**
@@ -666,7 +649,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_begin(&sl->seqcount);
 	return flags;
 }
 
--
2.28.0