Message-ID: <20170130201543.jsg3bpbjhxswysns@linutronix.de>
Date: Mon, 30 Jan 2017 21:15:43 +0100
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: Thomas Gleixner <tglx@...utronix.de>
Cc: LKML <linux-kernel@...r.kernel.org>,
linux-rt-users <linux-rt-users@...r.kernel.org>,
Steven Rostedt <rostedt@...dmis.org>
Subject: [ANNOUNCE] v4.9.6-rt4
Dear RT folks!
I'm pleased to announce the v4.9.6-rt4 patch set.
Changes since v4.9.6-rt3:
- Since the timer(s)-softirq split we could delay the wakeup of the
  timer softirq. Patch by Mike Galbraith.
- The CPUSET code may be called with interrupts disabled and
  therefore requires raw locks (a sketch of the pattern follows this
  list). Patch by Mike Galbraith.
- The workaround for the radix-tree preload code was incomplete and
  could fail for code relying on it; it now uses a local lock (see
  the second sketch after this list). Reported by Mike Galbraith.
- Qualcomm's pinctrl driver now uses raw locks. Reported by Brian
  Wrenn, patched by Julia Cartwright.
- On X86, setting / changing page attributes could lead to an
  expensive (in terms of latency) but efficient (in terms of runtime)
  wbinvd() cache flush. In order not to hurt latency, this expensive
  flush has been disabled. Patch by John Ogness.
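
Two locking patterns recur in the patches below; here are minimal
sketches of each, with made-up names, for readers unfamiliar with
them. First, the raw-spinlock conversion applied to the pinctrl and
cpuset code: on PREEMPT_RT, spinlock_t becomes a sleeping lock, so it
must not be taken in contexts that run with interrupts disabled,
while raw_spinlock_t remains a true spinning lock:

  #include <linux/spinlock.h>

  struct example_dev {
          raw_spinlock_t lock;            /* was: spinlock_t lock; */
          unsigned int shadow;
  };

  static void example_dev_init(struct example_dev *d)
  {
          raw_spin_lock_init(&d->lock);   /* was: spin_lock_init() */
  }

  /* Safe even if the caller already runs with interrupts disabled. */
  static void example_dev_update(struct example_dev *d, unsigned int val)
  {
          unsigned long flags;

          raw_spin_lock_irqsave(&d->lock, flags);
          d->shadow = val;
          raw_spin_unlock_irqrestore(&d->lock, flags);
  }

Second, the local lock used for the radix-tree preload fix:
<linux/locallock.h> (RT trees only) provides a named per-CPU lock
that replaces preempt_disable() around per-CPU data, so the section
stays preemptible on RT while remaining CPU-local:

  #include <linux/locallock.h>
  #include <linux/percpu.h>

  static DEFINE_PER_CPU(int, example_counter);
  static DEFINE_LOCAL_IRQ_LOCK(example_lock);

  static void example_bump(void)
  {
          local_lock(example_lock);       /* was: preempt_disable() */
          this_cpu_inc(example_counter);
          local_unlock(example_lock);     /* was: preempt_enable() */
  }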
Known issues
- CPU hotplug got a little better but can deadlock.
The delta patch against v4.9.6-rt3 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/incr/patch-4.9.6-rt3-rt4.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.9.6-rt4
The RT patch against v4.9.6 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patch-4.9.6-rt4.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.6-rt4.tar.xz
Sebastian
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -214,7 +214,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
int in_flags, struct page **pages)
{
unsigned int i, level;
+#ifdef CONFIG_PREEMPT
+ /*
+ * Avoid wbinvd() because it causes latencies on all CPUs,
+ * regardless of any CPU isolation that may be in effect.
+ */
+ unsigned long do_wbinvd = 0;
+#else
unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
+#endif
BUG_ON(irqs_disabled());
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -61,7 +61,7 @@ struct msm_pinctrl {
struct notifier_block restart_nb;
int irq;
- spinlock_t lock;
+ raw_spinlock_t lock;
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
@@ -153,14 +153,14 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
if (WARN_ON(i == g->nfuncs))
return -EINVAL;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
val &= ~mask;
val |= i << g->mux_bit;
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -323,14 +323,14 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
break;
case PIN_CONFIG_OUTPUT:
/* set output value */
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->io_reg);
if (arg)
val |= BIT(g->out_bit);
else
val &= ~BIT(g->out_bit);
writel(val, pctrl->regs + g->io_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
/* enable output */
arg = 1;
@@ -351,12 +351,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
return -EINVAL;
}
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
val &= ~(mask << bit);
val |= arg << bit;
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
return 0;
@@ -384,13 +384,13 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
g = &pctrl->soc->groups[offset];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
val &= ~BIT(g->oe_bit);
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -404,7 +404,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
g = &pctrl->soc->groups[offset];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->io_reg);
if (value)
@@ -417,7 +417,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
val |= BIT(g->oe_bit);
writel(val, pctrl->regs + g->ctl_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -443,7 +443,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
g = &pctrl->soc->groups[offset];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->io_reg);
if (value)
@@ -452,7 +452,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
val &= ~BIT(g->out_bit);
writel(val, pctrl->regs + g->io_reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
#ifdef CONFIG_DEBUG_FS
@@ -571,7 +571,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_cfg_reg);
val &= ~BIT(g->intr_enable_bit);
@@ -579,7 +579,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
clear_bit(d->hwirq, pctrl->enabled_irqs);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void msm_gpio_irq_unmask(struct irq_data *d)
@@ -592,7 +592,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_status_reg);
val &= ~BIT(g->intr_status_bit);
@@ -604,7 +604,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
set_bit(d->hwirq, pctrl->enabled_irqs);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void msm_gpio_irq_ack(struct irq_data *d)
@@ -617,7 +617,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_status_reg);
if (g->intr_ack_high)
@@ -629,7 +629,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -642,7 +642,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
g = &pctrl->soc->groups[d->hwirq];
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
/*
* For hw without possibility of detecting both edges
@@ -716,7 +716,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq);
@@ -732,11 +732,11 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
unsigned long flags;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
irq_set_irq_wake(pctrl->irq, on);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -882,7 +882,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
pctrl->soc = soc_data;
pctrl->chip = msm_gpio_template;
- spin_lock_init(&pctrl->lock);
+ raw_spin_lock_init(&pctrl->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -289,19 +289,11 @@ unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
void ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items);
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline int radix_tree_preload(gfp_t gm) { return 0; }
-static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; }
-static inline int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
-{
- return 0;
-};
-
-#else
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
-#endif
+void radix_tree_preload_end(void);
+
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
@@ -324,11 +316,6 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
-static inline void radix_tree_preload_end(void)
-{
- preempt_enable_nort();
-}
-
/**
* struct radix_tree_iter - radix tree iterator state
*
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -284,7 +284,7 @@ static struct cpuset top_cpuset = {
*/
static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_SPINLOCK(callback_lock);
+static DEFINE_RAW_SPINLOCK(callback_lock);
static struct workqueue_struct *cpuset_migrate_mm_wq;
@@ -907,9 +907,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
continue;
rcu_read_unlock();
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cp->effective_cpus, new_cpus);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -974,9 +974,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
return retval;
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
/* use trialcs->cpus_allowed as a temp variable */
update_cpumasks_hier(cs, trialcs->cpus_allowed);
@@ -1176,9 +1176,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
continue;
rcu_read_unlock();
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cp->effective_mems = *new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1246,9 +1246,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
goto done;
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cs->mems_allowed = trialcs->mems_allowed;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1339,9 +1339,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cs->flags = trialcs->flags;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
@@ -1756,7 +1756,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
switch (type) {
case FILE_CPULIST:
@@ -1775,7 +1775,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
ret = -EINVAL;
}
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
return ret;
}
@@ -1989,12 +1989,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpuset_inc();
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
}
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
@@ -2021,12 +2021,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
}
rcu_read_unlock();
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cs->mems_allowed = parent->mems_allowed;
cs->effective_mems = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
out_unlock:
mutex_unlock(&cpuset_mutex);
return 0;
@@ -2065,7 +2065,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2076,7 +2076,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
mutex_unlock(&cpuset_mutex);
}
@@ -2177,12 +2177,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
{
bool is_empty;
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, new_cpus);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->mems_allowed = *new_mems;
cs->effective_mems = *new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -2219,10 +2219,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->effective_mems = *new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
if (cpus_updated)
update_tasks_cpumask(cs);
@@ -2308,21 +2308,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
if (!on_dfl)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
/* we don't mess with cpumasks of tasks in top_cpuset */
}
/* synchronize mems_allowed to N_MEMORY */
if (mems_updated) {
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
if (!on_dfl)
top_cpuset.mems_allowed = new_mems;
top_cpuset.effective_mems = new_mems;
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
update_tasks_nodemask(&top_cpuset);
}
@@ -2420,11 +2420,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
unsigned long flags;
- spin_lock_irqsave(&callback_lock, flags);
+ raw_spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_cpus(task_cs(tsk), pmask);
rcu_read_unlock();
- spin_unlock_irqrestore(&callback_lock, flags);
+ raw_spin_unlock_irqrestore(&callback_lock, flags);
}
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
@@ -2472,11 +2472,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
nodemask_t mask;
unsigned long flags;
- spin_lock_irqsave(&callback_lock, flags);
+ raw_spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_mems(task_cs(tsk), &mask);
rcu_read_unlock();
- spin_unlock_irqrestore(&callback_lock, flags);
+ raw_spin_unlock_irqrestore(&callback_lock, flags);
return mask;
}
@@ -2568,14 +2568,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
- spin_lock_irqsave(&callback_lock, flags);
+ raw_spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
cs = nearest_hardwall_ancestor(task_cs(current));
allowed = node_isset(node, cs->mems_allowed);
rcu_read_unlock();
- spin_unlock_irqrestore(&callback_lock, flags);
+ raw_spin_unlock_irqrestore(&callback_lock, flags);
return allowed;
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -206,6 +206,7 @@ static void handle_softirq(unsigned int vec_nr)
}
}
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* If ksoftirqd is scheduled, we do not want to process pending softirqs
* right now. Let ksoftirqd handle this at its own rate, to get fairness.
@@ -217,7 +218,6 @@ static bool ksoftirqd_running(void)
return tsk && (tsk->state == TASK_RUNNING);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
static inline int ksoftirqd_softirq_pending(void)
{
return local_softirq_pending();
@@ -794,13 +794,10 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
-#ifdef CONFIG_PREEMPT_RT_FULL
- unsigned long flags;
-#endif
-
+#ifndef CONFIG_PREEMPT_RT_FULL
if (ksoftirqd_running())
return;
-#ifndef CONFIG_PREEMPT_RT_FULL
+
if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
@@ -821,6 +818,7 @@ static inline void invoke_softirq(void)
wakeup_softirqd();
}
#else /* PREEMPT_RT_FULL */
+ unsigned long flags;
local_irq_save(flags);
if (__this_cpu_read(ksoftirqd) &&
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -36,7 +36,7 @@
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/preempt.h> /* in_interrupt() */
-
+#include <linux/locallock.h>
/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
@@ -68,6 +68,7 @@ struct radix_tree_preload {
struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);
static inline void *node_to_entry(void *ptr)
{
@@ -290,14 +291,14 @@ radix_tree_node_alloc(struct radix_tree_root *root)
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
- rtp = &get_cpu_var(radix_tree_preloads);
+ rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes;
rtp->nodes = ret->private_data;
ret->private_data = NULL;
rtp->nr--;
}
- put_cpu_var(radix_tree_preloads);
+ put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
/*
* Update the allocation stack trace as this is more useful
* for debugging.
@@ -337,7 +338,6 @@ radix_tree_node_free(struct radix_tree_node *node)
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
-#ifndef CONFIG_PREEMPT_RT_FULL
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
@@ -359,14 +359,14 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr)
*/
gfp_mask &= ~__GFP_ACCOUNT;
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < nr) {
- preempt_enable();
+ local_unlock(radix_tree_preloads_lock);
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) {
node->private_data = rtp->nodes;
@@ -408,7 +408,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
if (gfpflags_allow_blocking(gfp_mask))
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
/* Preloading doesn't help anything with this gfp mask, skip it */
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
@@ -424,7 +424,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
/* Preloading doesn't help anything with this gfp mask, skip it */
if (!gfpflags_allow_blocking(gfp_mask)) {
- preempt_disable();
+ local_lock(radix_tree_preloads_lock);
return 0;
}
@@ -457,7 +457,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
return __radix_tree_preload(gfp_mask, nr_nodes);
}
-#endif
+
+void radix_tree_preload_end(void)
+{
+ local_unlock(radix_tree_preloads_lock);
+}
+EXPORT_SYMBOL(radix_tree_preload_end);
/*
* The maximum index which can be stored in a radix tree
diff --git a/localversion-rt b/localversion-rt
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt3
+-rt4