path: root/src/arch/arm64/transition.c
author    Julius Werner <jwerner@chromium.org>    2019-12-03 22:47:01 -0800
committer Patrick Georgi <pgeorgi@google.com>     2019-12-05 17:57:58 +0000
commit    bb345abbfc999f70e3f0f9739f13e1f45d5a0fe9 (patch)
tree      f8b652805a320d188ea995bc88bca8837118cfe9 /src/arch/arm64/transition.c
parent    31a5ff5e36ea499f87a8947875a067c843a45532 (diff)
download  coreboot-bb345abbfc999f70e3f0f9739f13e1f45d5a0fe9.tar.xz
arm64: Correctly unmask asynchronous SError interrupts
Arm CPUs have always had an odd feature that allows you to mask not only true interrupts, but also "external aborts" (memory bus errors from outside the CPU). CPUs usually have all of these masked after reset, which we quickly learned was a bad idea back when bringing up the first arm32 systems in coreboot. Masking external aborts means that if any of your firmware code does an illegal memory access, you will only see it once the kernel comes up and unmasks the abort (not when it happens). Therefore, we always unmask everything in early bootblock assembly code.

When arm64 came around, it had very similar masking bits and we did the same there, thinking the issue resolved. Unfortunately Arm, in their ceaseless struggle for more complexity, decided that having a single bit to control this masking behavior is no longer enough: on AArch64, in addition to the PSTATE.DAIF bits that are analogous to arm32's CPSR, there are additional bits in SCR_EL3 that can override the PSTATE setting for some but not all cases (makes perfect sense, I know...). When aborts are unmasked in PSTATE, but SCR.EA is not set, then synchronous external aborts will cause an exception while asynchronous external aborts will not.

It turns out we never initialize SCR in coreboot, and on RK3399 it comes up with all zeroes (even the reserved-1 bits, which is super weird). If you get an asynchronous external abort in coreboot, it will silently hide in the CPU until BL31 enables SCR.EA (before it has its own console handlers registered) and silently hangs.

This patch resolves the issue by also initializing SCR to a known good state early in the bootblock. It also cleans up some bit definitions and slightly reworks the DAIF unmasking... it doesn't actually make that much sense to unmask anything before our console and exception handlers are up. The new code will mask everything until the exception handler is installed and then unmask it, so that if there was a super early external abort we could still see it. (Of course there are still dozens of other processor exceptions that could happen which we have no way to mask.)

Change-Id: I5266481a7aaf0b72aca8988accb671d92739af6f
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/37463
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Hung-Te Lin <hungte@chromium.org>
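For illustration, the ordering described above can be sketched roughly as follows. This is a hedged, hypothetical C sketch, not the code added by this change; the SCR bit values, the vector-table step, and the function name are assumptions based on the Armv8-A architecture rather than the actual coreboot bootblock path:

#include <stdint.h>

/* Hypothetical illustration only -- not the code added by this patch. */
static void early_el3_abort_setup(void)
{
	/*
	 * Put SCR_EL3 into a known state first. On RK3399 it resets to all
	 * zeroes (even the reserved-1 bits), so nothing can be assumed about
	 * it. The value below is illustrative, not the exact mask used here.
	 */
	uint64_t scr = (3UL << 4) |	/* RES1 bits [5:4] */
		       (1UL << 3);	/* SCR.EA: take external aborts at EL3 */
	__asm__ volatile("msr scr_el3, %0" : : "r"(scr));

	/* Keep PSTATE.{D,A,I,F} masked while no exception handlers exist. */
	__asm__ volatile("msr DAIFSet, #0xf");

	/* ... install the exception vector table (write VBAR_EL3) ... */

	/*
	 * Unmask only once the handlers are in place, so an SError that was
	 * pending from earlier firmware code is reported through our own
	 * handler instead of hiding until BL31 sets SCR.EA.
	 */
	__asm__ volatile("msr DAIFClr, #0xf");
}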
Diffstat (limited to 'src/arch/arm64/transition.c')
-rw-r--r--    src/arch/arm64/transition.c    32
1 file changed, 6 insertions(+), 26 deletions(-)
diff --git a/src/arch/arm64/transition.c b/src/arch/arm64/transition.c
index 3e8d7f0762..ac59d19acf 100644
--- a/src/arch/arm64/transition.c
+++ b/src/arch/arm64/transition.c
@@ -17,14 +17,6 @@
#include <arch/transition.h>
#include <assert.h>
-/* Litte-endian, No XN-forced, Instr cache disabled,
- * Stack alignment disabled, Data and unified cache
- * disabled, Alignment check disabled, MMU disabled
- */
-#define SCTLR_MASK (SCTLR_MMU_DISABLE | SCTLR_ACE_DISABLE | \
- SCTLR_CACHE_DISABLE | SCTLR_SAE_DISABLE | SCTLR_RES1 | \
- SCTLR_ICE_DISABLE | SCTLR_WXN_DISABLE | SCTLR_LITTLE_END)
-
void __weak exc_dispatch(struct exc_state *exc_state, uint64_t id)
{
/* Default weak implementation does nothing. */
@@ -54,7 +46,6 @@ void transition_to_el2(void *entry, void *arg, uint64_t spsr)
struct exc_state exc_state;
struct elx_state *elx = &exc_state.elx;
struct regs *regs = &exc_state.regs;
- uint32_t sctlr;
regs->x[X0_INDEX] = (uint64_t)arg;
elx->elr = (uint64_t)entry;
@@ -70,19 +61,10 @@ void transition_to_el2(void *entry, void *arg, uint64_t spsr)
*/
assert(get_el_from_spsr(spsr) == EL2 && !(spsr & SPSR_ERET_32));
- /* Initialize SCR with defaults for running without secure monitor. */
- raw_write_scr_el3(SCR_TWE_DISABLE | /* don't trap WFE */
- SCR_TWI_DISABLE | /* don't trap WFI */
- SCR_ST_ENABLE | /* allow secure timer access */
- SCR_LOWER_AARCH64 | /* lower level is AArch64 */
- SCR_SIF_DISABLE | /* disable secure ins. fetch */
- SCR_HVC_ENABLE | /* allow HVC instruction */
- SCR_SMD_ENABLE | /* disable SMC instruction */
- SCR_RES1 | /* reserved-1 bits */
- SCR_EA_DISABLE | /* disable ext. abort trap */
- SCR_FIQ_DISABLE | /* disable FIQ trap to EL3 */
- SCR_IRQ_DISABLE | /* disable IRQ trap to EL3 */
- SCR_NS_ENABLE); /* lower level is non-secure */
+ /* Initialize SCR with defaults for running without secure monitor
+ (disable all traps, enable all instructions, run NS at AArch64). */
+ raw_write_scr_el3(SCR_FIEN | SCR_API | SCR_APK | SCR_ST | SCR_RW |
+ SCR_HCE | SCR_SMD | SCR_RES1 | SCR_NS);
/* Initialize CPTR to not trap anything to EL3. */
raw_write_cptr_el3(CPTR_EL3_TCPAC_DISABLE | CPTR_EL3_TTA_DISABLE |
@@ -92,10 +74,8 @@ void transition_to_el2(void *entry, void *arg, uint64_t spsr)
raw_write_elr_el3(elx->elr);
raw_write_spsr_el3(elx->spsr);
- /* SCTLR: Initialize EL with selected properties */
- sctlr = raw_read_sctlr_el2();
- sctlr &= SCTLR_MASK;
- raw_write_sctlr_el2(sctlr);
+ /* SCTLR: Initialize EL with everything disabled */
+ raw_write_sctlr_el2(SCTLR_RES1);
/* SP_ELx: Initialize stack pointer */
raw_write_sp_el2(elx->sp_elx);
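For reference, the compact SCR_EL3 value written above expands roughly as follows, assuming the standard Armv8-A bit assignments. The authoritative SCR_* macro definitions live in coreboot's arm64 headers, which this diff does not touch, so the positions and comments below are illustrative only:

/* Illustrative SCR_EL3 bit positions (Armv8-A); not coreboot's header. */
#define SCR_NS		(1UL << 0)	/* lower ELs are non-secure */
#define SCR_RES1	(3UL << 4)	/* reserved bits, write as 1 */
#define SCR_SMD		(1UL << 7)	/* SMC disabled at lower ELs */
#define SCR_HCE		(1UL << 8)	/* HVC instruction enabled */
#define SCR_RW		(1UL << 10)	/* next lower EL is AArch64 */
#define SCR_ST		(1UL << 11)	/* secure EL1 may access timers */
#define SCR_APK		(1UL << 16)	/* don't trap pointer-auth key regs */
#define SCR_API		(1UL << 17)	/* don't trap pointer-auth instrs */
#define SCR_FIEN	(1UL << 21)	/* don't trap fault-injection regs */

These roughly correspond to the per-bit comments removed above; the trap-control bits (TWE, TWI, SIF, EA, FIQ, IRQ) disable their traps when left at zero, so they simply no longer appear in the new value.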