Message-Id: <20191104212143.637154910@linuxfoundation.org>
Date: Mon, 4 Nov 2019 22:44:55 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, afzal mohammed <afzal.mohd.ma@...il.com>,
Vladimir Murzin <vladimir.murzin@....com>,
Russell King <rmk+kernel@...linux.org.uk>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 4.19 102/149] ARM: 8914/1: NOMMU: Fix exc_ret for XIP
From: Vladimir Murzin <vladimir.murzin@....com>
[ Upstream commit 4c0742f65b4ee466546fd24b71b56516cacd4613 ]
It was reported that commit 72cd4064fcca ("NOMMU: Toggle only bits in
EXC_RETURN we are really care of") breaks the NOMMU+XIP combination.
This happens because the saved EXC_RETURN value gets overwritten when
the data section is relocated.
The fix is to propagate EXC_RETURN via a register and let the
relocation code commit that value to memory.
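To make the failure mode concrete, here is a minimal C sketch (not
kernel code; data_rom, data_ram, EXC_RET_SLOT and the setup functions
are hypothetical stand-ins) of why a value stored into .data before
the XIP data copy is lost, and why carrying it in a register until
after the copy, as this patch does, preserves it:

  #include <string.h>

  /* Initial .data image in "ROM" and its destination in "RAM". */
  static unsigned long data_rom[16];
  static unsigned long data_ram[16];
  #define EXC_RET_SLOT (data_ram[0])  /* stands in for exc_ret */

  static void relocate_data(void)
  {
          /* XIP startup copies the initial data image into RAM .data,
           * overwriting anything written there beforehand. */
          memcpy(data_ram, data_rom, sizeof(data_ram));
  }

  /* Old order: the value is stored first, then clobbered by the copy. */
  static void setup_broken(unsigned long exc_ret)
  {
          EXC_RET_SLOT = exc_ret;     /* like the old "str lr, [r0]" */
          relocate_data();            /* the copy wipes the slot     */
  }

  /* Patched order: the value travels in a register (modelled here as a
   * function argument) and is committed only after the copy, as
   * __mmap_switched now does via the __mmap_switched_data table. */
  static void setup_fixed(unsigned long exc_ret)
  {
          relocate_data();
          EXC_RET_SLOT = exc_ret;     /* survives the relocation     */
  }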
Fixes: 72cd4064fcca ("ARM: 8830/1: NOMMU: Toggle only bits in EXC_RETURN we are really care of")
Reported-by: afzal mohammed <afzal.mohd.ma@...il.com>
Tested-by: afzal mohammed <afzal.mohd.ma@...il.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@....com>
Signed-off-by: Russell King <rmk+kernel@...linux.org.uk>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
arch/arm/kernel/head-common.S | 5 +++--
arch/arm/kernel/head-nommu.S | 2 ++
arch/arm/mm/proc-v7m.S | 5 ++---
3 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 997b02302c314..9328f2010bc19 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -72,7 +72,7 @@ ENDPROC(__vet_atags)
* The following fragment of code is executed with the MMU on in MMU mode,
* and uses absolute addresses; this is not position independent.
*
- * r0 = cp#15 control register
+ * r0 = cp#15 control register (exc_ret for M-class)
* r1 = machine ID
* r2 = atags/dtb pointer
* r9 = processor ID
@@ -141,7 +141,8 @@ __mmap_switched_data:
#ifdef CONFIG_CPU_CP15
.long cr_alignment @ r3
#else
- .long 0 @ r3
+M_CLASS(.long exc_ret) @ r3
+AR_CLASS(.long 0) @ r3
#endif
.size __mmap_switched_data, . - __mmap_switched_data
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index cab89479d15ef..326a97aa3ea0c 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -205,6 +205,8 @@ M_CLASS(streq r3, [r12, #PMSAv8_MAIR1])
bic r0, r0, #V7M_SCB_CCR_IC
#endif
str r0, [r12, V7M_SCB_CCR]
+ /* Pass exc_ret to __mmap_switched */
+ mov r0, r10
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
ret lr
ENDPROC(__after_proc_init)
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 92e84181933ad..59d82864c134b 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -139,9 +139,8 @@ __v7m_setup_cont:
cpsie i
svc #0
1: cpsid i
- ldr r0, =exc_ret
- orr lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
- str lr, [r0]
+ /* Calculate exc_ret */
+ orr r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK
ldmia sp, {r0-r3, r12}
str r5, [r12, #11 * 4] @ restore the original SVC vector entry
mov lr, r6 @ restore LR
--
2.20.1