Message-ID: <20241002135449.xUZX0zAj@linutronix.de>
Date: Wed, 2 Oct 2024 15:54:49 +0200
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: Thomas Gleixner <tglx@...utronix.de>, linux-rt-devel@...ts.linux.dev
Cc: LKML <linux-kernel@...r.kernel.org>, linux-rt-users@...r.kernel.org,
Steven Rostedt <rostedt@...dmis.org>
Subject: [ANNOUNCE] v6.12-rc1-rt2
Dear RT folks!
I'm pleased to announce the v6.12-rc1-rt2 patch set.
Changes since v6.12-rc1-rt1:
- Drop the "don't push tasks for RT balancing if there are no tasks
that can be pushed" patch introduced in v6.5-rc4-rt2. The situation
improved since v6.7.
- Drop the "Preempt the timer softirq if it is PI boosted" patch also
introduced in v6.5-rc4-rt2. In covered only timer_list timers (not
hrtimer). It would require more of this duct tape in more places.
The longterm plan is to avoid having the BH lock.
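For reference, here is a condensed standalone sketch (plain C with
simplified stand-in types, not the kernel code itself) of the check the
first dropped patch had added to rto_next_cpu(); the actual revert is
the kernel/sched/rt.c hunk in the delta below:

/* Sketch of the dropped "skip CPUs without pushable tasks" check. */
#include <stdbool.h>
#include <stdio.h>

struct rq_stub {
	int nr_pushable;	/* stand-in for rq->rt.pushable_tasks */
};

static bool has_pushable_tasks(const struct rq_stub *rq)
{
	return rq->nr_pushable > 0;
}

/*
 * Walk the candidate CPUs; the dropped patch skipped CPUs that had
 * nothing to push instead of sending them the RT push IPI.
 */
static int rto_next_cpu_sketch(const struct rq_stub *rqs, int nr_cpu_ids)
{
	for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
		if (!has_pushable_tasks(&rqs[cpu]))
			continue;	/* the check being reverted */
		return cpu;
	}
	return -1;
}

int main(void)
{
	struct rq_stub rqs[4] = { { 0 }, { 0 }, { 2 }, { 0 } };

	printf("next CPU to push from: %d\n", rto_next_cpu_sketch(rqs, 4));
	return 0;
}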
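And a similarly condensed sketch of the control flow removed by dropping
the second patch: timer_sync_wait_running() no longer drops the locks
and opens a preemption point merely because the timer softirq thread
itself got PI boosted (again plain C with stand-in names; the real code
is in the kernel/time/timer.c and kernel/softirq.c hunks below):

#include <stdbool.h>
#include <stdio.h>

struct timer_base_stub {
	int timer_waiters;	/* stand-in for base->timer_waiters */
};

/*
 * The real task_is_pi_boosted() also required an RT priority; boosted
 * means running at an inherited priority above the task's own.
 */
static bool task_is_pi_boosted_stub(int prio, int normal_prio)
{
	return prio != normal_prio;
}

static void softirq_preempt_stub(void)
{
	/* In the dropped patch: enable BH, let the boosted waiter run,
	 * then disable BH again. */
	printf("preemption point for the PI-boosted timer softirq\n");
}

static void timer_sync_wait_running_sketch(struct timer_base_stub *base,
					   int prio, int normal_prio)
{
	bool need_preempt = task_is_pi_boosted_stub(prio, normal_prio);

	if (need_preempt || base->timer_waiters) {
		/* drop base->lock and base->expiry_lock here */
		if (need_preempt)
			softirq_preempt_stub();
		/* reacquire base->expiry_lock and base->lock here */
	}
}

int main(void)
{
	struct timer_base_stub base = { .timer_waiters = 0 };

	/* boosted from prio 20 to 10: the extra preemption point fired */
	timer_sync_wait_running_sketch(&base, 10, 20);
	return 0;
}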
Known issues
None.
The delta patch against v6.12-rc1-rt1 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/6.12/incr/patch-6.12-rc1-rt1-rt2.patch.xz
You can get this release via the git tree at:
https://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v6.12-rc1-rt2
The RT patch against v6.12-rc1 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/6.12/older/patch-6.12-rc1-rt2.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/6.12/older/patches-6.12-rc1-rt2.tar.xz
Sebastian
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 448bbef474564..fc53e0ad56d90 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -35,10 +35,8 @@ static inline void local_bh_enable(void)
#ifdef CONFIG_PREEMPT_RT
extern bool local_bh_blocked(void);
-extern void softirq_preempt(void);
#else
static inline bool local_bh_blocked(void) { return false; }
-static inline void softirq_preempt(void) { }
#endif
#endif /* _LINUX_BH_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04dbfb7cda334..2016534bbc533 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1838,7 +1838,6 @@ static inline int dl_task_check_affinity(struct task_struct *p, const struct cpu
}
#endif
-extern bool task_is_pi_boosted(const struct task_struct *p);
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d7af5c21c94a8..91d250a0e039b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7533,21 +7533,6 @@ static inline void preempt_dynamic_init(void) { }
#endif /* CONFIG_PREEMPT_DYNAMIC */
-/*
- * task_is_pi_boosted - Check if task has been PI boosted.
- * @p: Task to check.
- *
- * Return true if task is subject to priority inheritance.
- */
-bool task_is_pi_boosted(const struct task_struct *p)
-{
- int prio = p->prio;
-
- if (!rt_prio(prio))
- return false;
- return prio != p->normal_prio;
-}
-
int io_schedule_prepare(void)
{
int old_iowait = current->in_iowait;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a825cdc1f02b3..172c588de5427 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2176,11 +2176,8 @@ static int rto_next_cpu(struct root_domain *rd)
rd->rto_cpu = cpu;
- if (cpu < nr_cpu_ids) {
- if (!has_pushable_tasks(cpu_rq(cpu)))
- continue;
+ if (cpu < nr_cpu_ids)
return cpu;
- }
rd->rto_cpu = -1;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c251f1bf75c5e..0052bd4d9ec1d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -248,19 +248,6 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
}
EXPORT_SYMBOL(__local_bh_enable_ip);
-void softirq_preempt(void)
-{
- if (WARN_ON_ONCE(!preemptible()))
- return;
-
- if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
- return;
-
- __local_bh_enable(SOFTIRQ_OFFSET, true);
- /* preemption point */
- __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
-}
-
/*
* Invoked from ksoftirqd_run() outside of the interrupt disabled section
* to acquire the per CPU local lock for reentrancy protection.
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index aef6ca700c991..79f0dc73ac436 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1564,16 +1564,9 @@ static void timer_sync_wait_running(struct timer_base *base)
__releases(&base->lock) __releases(&base->expiry_lock)
__acquires(&base->expiry_lock) __acquires(&base->lock)
{
- bool need_preempt;
-
- need_preempt = task_is_pi_boosted(current);
- if (need_preempt || atomic_read(&base->timer_waiters)) {
+ if (atomic_read(&base->timer_waiters)) {
raw_spin_unlock_irq(&base->lock);
spin_unlock(&base->expiry_lock);
-
- if (need_preempt)
- softirq_preempt();
-
spin_lock(&base->expiry_lock);
raw_spin_lock_irq(&base->lock);
}
diff --git a/localversion-rt b/localversion-rt
index 6f206be67cd28..c3054d08a1129 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt1
+-rt2