Message-ID: <163188348452.25758.1078497735772312412.tip-bot2@tip-bot2>
Date: Fri, 17 Sep 2021 12:58:04 -0000
From: "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: "Peter Zijlstra (Intel)" <peterz@...radead.org>,
Juergen Gross <jgross@...e.com>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: objtool/core] x86/xen: Make irq_enable() noinstr

The following commit has been merged into the objtool/core branch of tip:

Commit-ID: d7bfc7d57cbe13382fd3eb739667fd0e2f74122b
Gitweb: https://git.kernel.org/tip/d7bfc7d57cbe13382fd3eb739667fd0e2f74122b
Author: Peter Zijlstra <peterz@...radead.org>
AuthorDate: Thu, 24 Jun 2021 11:41:19 +02:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Fri, 17 Sep 2021 13:17:12 +02:00

x86/xen: Make irq_enable() noinstr

vmlinux.o: warning: objtool: pv_ops[32]: native_irq_enable
vmlinux.o: warning: objtool: pv_ops[32]: __raw_callee_save_xen_irq_enable
vmlinux.o: warning: objtool: pv_ops[32]: xen_irq_enable_direct
vmlinux.o: warning: objtool: lock_is_held_type()+0xfe: call to pv_ops[32]() leaves .noinstr.text section

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Reviewed-by: Juergen Gross <jgross@...e.com>
Link: https://lore.kernel.org/r/20210624095148.872254932@infradead.org
---
arch/x86/kernel/paravirt.c |  7 ++++-
arch/x86/xen/irq.c         |  4 +--
arch/x86/xen/xen-asm.S     | 56 ++++++++++++++++++-------------------
3 files changed, 36 insertions(+), 31 deletions(-)

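For context, the warnings say that lock_is_held_type(), which lives in .noinstr.text, reaches irq_enable() through the pv_ops indirect call table, and that possible targets of that slot (native_irq_enable() and the Xen variants) sit outside .noinstr.text, so the call can leave the section. The following rough, self-contained userspace sketch models that shape; it is not kernel code, all demo_* names are invented, and only the section-placement half of noinstr is modeled:

#include <stdio.h>

/* Minimal stand-in for the pv_ops irq slot (pv_ops.irq.irq_enable). */
struct demo_irq_ops {
        void (*irq_enable)(void);
};

/* A target living in ordinary .text, as native_irq_enable() and the Xen
 * variants do before this patch. */
static void demo_text_irq_enable(void)
{
        puts("irq_enable target in .text");
}

static struct demo_irq_ops demo_ops = {
        .irq_enable = demo_text_irq_enable,
};

/* Rough analogue of lock_is_held_type(): a caller pinned into
 * .noinstr.text whose indirect call has to be assumed to reach any of
 * the registered targets. */
__attribute__((noinline, section(".noinstr.text")))
static void demo_noinstr_caller(void)
{
        demo_ops.irq_enable();
}

int main(void)
{
        demo_noinstr_caller();
        return 0;
}

Disassembling only the .noinstr.text section of the resulting binary (objdump -d -j .noinstr.text) should show demo_noinstr_caller() inside the section while its indirect-call target is not, which is the pattern objtool rejects in vmlinux.o.
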
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index cdaf862..75f0d24 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -238,6 +238,11 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
native_set_debugreg(regno, val);
}
+
+static noinstr void pv_native_irq_enable(void)
+{
+ native_irq_enable();
+}
#endif

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
@@ -302,7 +307,7 @@ struct paravirt_patch_template pv_ops = {
/* Irq ops. */
.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
.irq.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
- .irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
+ .irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
.irq.safe_halt = native_safe_halt,
.irq.halt = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */
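
The paravirt.c hunk above is the native-side fix: instead of wiring pv_ops.irq.irq_enable to native_irq_enable() directly, the slot now points at pv_native_irq_enable(), a noinstr wrapper, so the indirect call resolves to code inside .noinstr.text. A minimal userspace sketch of the same idea, again with invented demo_* names and with only the section placement of noinstr modeled (the real attribute also disables compiler instrumentation):

static volatile int demo_irqs_on;

/* Analogue of pv_native_irq_enable(): noinline plus explicit section
 * placement stands in for the noinstr annotation. */
__attribute__((noinline, section(".noinstr.text")))
static void demo_pv_native_irq_enable(void)
{
        demo_irqs_on = 1;       /* stand-in for native_irq_enable(), i.e. STI */
}

/* The slot (think pv_ops.irq.irq_enable) now resolves to .noinstr.text
 * code, so an indirect call from noinstr code never leaves the section. */
static void (*demo_irq_enable)(void) = demo_pv_native_irq_enable;

int main(void)
{
        demo_irq_enable();
        return !demo_irqs_on;   /* exit status 0 once the wrapper has run */
}
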
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 9c71f43..7fb4cf2 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -53,7 +53,7 @@ asmlinkage __visible void xen_irq_disable(void)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

-asmlinkage __visible void xen_irq_enable(void)
+asmlinkage __visible noinstr void xen_irq_enable(void)
{
struct vcpu_info *vcpu;

@@ -76,7 +76,7 @@ asmlinkage __visible void xen_irq_enable(void)

preempt_enable();
}
-PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
+__PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable, ".noinstr.text");

static void xen_safe_halt(void)
{
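
The Xen-side xen_irq_enable() above and the xen_irq_enable_direct assembly moved into .noinstr.text in the next diff both implement "enable events" as: clear the per-vCPU event mask, then test whether events became pending while masked and, if so, call into the hypervisor (check_events) to get them delivered. A compact userspace model of that ordering with a stubbed hypercall follows; the struct and field names only loosely mirror Xen's vcpu_info and are purely illustrative:

#include <stdio.h>

/* Loosely modeled on the per-vCPU info block the asm pokes at via
 * XEN_vcpu_info_mask / XEN_vcpu_info_pending. */
struct demo_vcpu_info {
        unsigned char evtchn_upcall_pending;
        unsigned char evtchn_upcall_mask;
};

static struct demo_vcpu_info demo_vcpu = {
        .evtchn_upcall_pending = 1,     /* an event arrived while masked */
        .evtchn_upcall_mask    = 1,     /* "interrupts" currently off */
};

/* Stand-in for check_events, i.e. the hypercall that makes the
 * hypervisor deliver whatever is pending. */
static void demo_check_events(void)
{
        puts("hypercall: deliver pending events");
        demo_vcpu.evtchn_upcall_pending = 0;
}

static void demo_irq_enable(void)
{
        /* Unmask first ... */
        demo_vcpu.evtchn_upcall_mask = 0;

        /* ... then test for events that arrived while masked; testing
         * before unmasking could miss an event that slips in between
         * the test and the unmask. */
        if (demo_vcpu.evtchn_upcall_pending)
                demo_check_events();
}

int main(void)
{
        demo_irq_enable();
        return 0;
}
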
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 0883e39..2225195 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -22,33 +22,6 @@
#include <linux/linkage.h>

/*
- * Enable events. This clears the event mask and tests the pending
- * event status with one and operation. If there are pending events,
- * then enter the hypervisor to get them handled.
- */
-SYM_FUNC_START(xen_irq_enable_direct)
- FRAME_BEGIN
- /* Unmask events */
- movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-
- /*
- * Preempt here doesn't matter because that will deal with any
- * pending interrupts. The pending check may end up being run
- * on the wrong CPU, but that doesn't hurt.
- */
-
- /* Test for pending */
- testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
- jz 1f
-
- call check_events
-1:
- FRAME_END
- ret
-SYM_FUNC_END(xen_irq_enable_direct)
-
-
-/*
* Disabling events is simply a matter of making the event mask
* non-zero.
*/
@@ -57,6 +30,8 @@ SYM_FUNC_START(xen_irq_disable_direct)
ret
SYM_FUNC_END(xen_irq_disable_direct)

+.pushsection .noinstr.text, "ax"
+
/*
* Force an event check by making a hypercall, but preserve regs
* before making the call.
@@ -86,7 +61,32 @@ SYM_FUNC_START(check_events)
ret
SYM_FUNC_END(check_events)

-.pushsection .noinstr.text, "ax"
+/*
+ * Enable events. This clears the event mask and tests the pending
+ * event status with one and operation. If there are pending events,
+ * then enter the hypervisor to get them handled.
+ */
+SYM_FUNC_START(xen_irq_enable_direct)
+ FRAME_BEGIN
+ /* Unmask events */
+ movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+
+ /*
+ * Preempt here doesn't matter because that will deal with any
+ * pending interrupts. The pending check may end up being run
+ * on the wrong CPU, but that doesn't hurt.
+ */
+
+ /* Test for pending */
+ testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+ jz 1f
+
+ call check_events
+1:
+ FRAME_END
+ ret
+SYM_FUNC_END(xen_irq_enable_direct)
+
/*
* (xen_)save_fl is used to get the current interrupt enable status.
* Callers expect the status to be in X86_EFLAGS_IF, and other bits