author     Julius Werner <jwerner@chromium.org>      2019-12-03 22:47:01 -0800
committer  Patrick Georgi <pgeorgi@google.com>       2019-12-05 17:57:58 +0000
commit     bb345abbfc999f70e3f0f9739f13e1f45d5a0fe9 (patch)
tree       f8b652805a320d188ea995bc88bca8837118cfe9 /src
parent     31a5ff5e36ea499f87a8947875a067c843a45532 (diff)
arm64: Correctly unmask asynchronous SError interrupts
Arm CPUs have always had an odd feature that allows you to mask not only
true interrupts, but also "external aborts" (memory bus errors from
outside the CPU). CPUs usually have all of these masked after reset,
which we quickly learned was a bad idea back when bringing up the first
arm32 systems in coreboot. Masking external aborts means that if any of
your firmware code does an illegal memory access, you will only see it
once the kernel comes up and unmasks the abort (not when it happens).
Therefore, we always unmask everything in early bootblock assembly code.
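(As an aside, the AArch64 version of that early unmask is a single
instruction; a minimal sketch of the pre-patch bootblock behavior, for
illustration only, looks like this:

	/* Unmask Debug, SError (abort), IRQ and FIQ as early as possible
	 * so a bad firmware memory access traps immediately instead of
	 * lingering until the kernel unmasks aborts. */
	msr	DAIFClr, #0xf
	isb
)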
When arm64 came around, it had very similar masking bits and we did the
same there, thinking the issue resolved. Unfortunately Arm, in their
ceaseless struggle for more complexity, decided that having a single bit
to control this masking behavior is no longer enough: on AArch64, in
addition to the PSTATE.DAIF bits that are analogous to arm32's CPSR,
there are additional bits in SCR_EL3 that can override the PSTATE
setting for some but not all cases (makes perfect sense, I know...).
When aborts are unmasked in PSTATE, but SCR.EA is not set, then
synchronous external aborts will cause an exception while asynchronous
external aborts will not. It turns out we never initialize SCR in
coreboot and on RK3399 it comes up with all zeroes (even the reserved-1
bits, which is super weird). If you get an asynchronous external abort
in coreboot, it will silently hide in the CPU until BL31 enables SCR.EA;
since that happens before BL31 has registered its own console handlers,
the system just hangs with no output.
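To make the interplay concrete, here is an illustrative sketch (not part
of this patch) of what it takes for an asynchronous external abort to
actually be delivered while running at EL3: PSTATE.A must be clear and
SCR_EL3.EA must be set, otherwise the SError just stays pending:

	mrs	x0, scr_el3
	orr	x0, x0, #(1 << 3)	/* SCR_EL3.EA: take EA/SError at EL3 */
	msr	scr_el3, x0
	isb
	msr	DAIFClr, #0x4		/* clear PSTATE.A: unmask SError */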
This patch resolves the issue by also initializing SCR to a known good
state early in the bootblock. It also cleans up some bit definitions and
slightly reworks the DAIF unmasking... it doesn't actually make that
much sense to unmask anything before our console and exception handlers
are up. The new code will mask everything until the exception handler is
installed and then unmask it, so that if there was a super early
external abort we could still see it. (Of course there are still dozens
of other processor exceptions that could happen which we have no way to
mask.)
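Condensed from the diff below, the resulting order of operations is
roughly:

	/* arm64_init_cpu (early bootblock): keep everything masked and
	 * put SCR_EL3 into a known state. */
	msr	DAIFSet, #0xf
	mov	x22, #SCR_RES1 | SCR_IRQ | SCR_FIQ | SCR_EA
	msr	scr_el3, x22

	/* exception_init_asm, once vbar_el3 points at exc_vectors: */
	msr	DAIFClr, #0xf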
Change-Id: I5266481a7aaf0b72aca8988accb671d92739af6f
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/37463
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Hung-Te Lin <hungte@chromium.org>
Diffstat (limited to 'src')
-rw-r--r--  src/arch/arm64/armv8/cpu.S                      | 30
-rw-r--r--  src/arch/arm64/include/armv8/arch/cache.h       | 22
-rw-r--r--  src/arch/arm64/include/armv8/arch/lib_helpers.h | 99
-rw-r--r--  src/arch/arm64/transition.c                     | 32
-rw-r--r--  src/arch/arm64/transition_asm.S                 |  5
5 files changed, 59 insertions(+), 129 deletions(-)
diff --git a/src/arch/arm64/armv8/cpu.S b/src/arch/arm64/armv8/cpu.S
index 2bc4defde8..5f06c7e677 100644
--- a/src/arch/arm64/armv8/cpu.S
+++ b/src/arch/arm64/armv8/cpu.S
@@ -99,15 +99,14 @@ ENDPROC(mmu_disable)
 
 /*
  * Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
- * known state regarding caches/SCTLR/PSTATE. Completely invalidates
+ * known state regarding caches/SCTLR/SCR/PSTATE. Completely invalidates
  * icache/dcache, disables MMU and dcache (if active), and enables unaligned
- * accesses, icache and branch prediction (if inactive). Seeds the stack and
- * initializes SP_EL0. Clobbers R22 and R23.
+ * accesses, icache. Seeds stack and initializes SP_EL0. Clobbers R22 and R23.
  */
 ENTRY(arm64_init_cpu)
-	/* Initialize PSTATE (unmask all exceptions, select SP_EL0). */
+	/* Initialize PSTATE (mask all exceptions, select SP_EL0). */
 	msr	SPSel, #0
-	msr	DAIFClr, #0xf
+	msr	DAIFSet, #0xf
 
 	/* TODO: This is where we'd put non-boot CPUs into WFI if needed. */
 
@@ -116,24 +115,25 @@ ENTRY(arm64_init_cpu)
 	/* TODO: Assert that we always start running at EL3 */
 	mrs	x22, sctlr_el3
-	/* Activate ICache (12) already for speed during cache flush below. */
-	orr	x22, x22, #(1 << 12)
+	/* Activate ICache already for speed during cache flush below. */
+	orr	x22, x22, #SCTLR_I
 	msr	sctlr_el3, x22
 	isb
 
 	/* Invalidate dcache */
 	bl	dcache_invalidate_all
 
-	/* Deactivate MMU (0), Alignment Check (1) and DCache (2) */
-	and	x22, x22, # ~(1 << 0) & ~(1 << 1) & ~(1 << 2)
-	/* Activate Stack Alignment (3) because why not */
-	orr	x22, x22, #(1 << 3)
-	/* Set to little-endian (25) */
-	and	x22, x22, # ~(1 << 25)
-	/* Deactivate write-xor-execute enforcement (19) */
-	and	x22, x22, # ~(1 << 19)
+	/* Reinitialize SCTLR from scratch to known-good state.
+	   This may disable MMU or DCache. */
+	ldr	w22, =(SCTLR_RES1 | SCTLR_I | SCTLR_SA)
 	msr	sctlr_el3, x22
 
+	/* Initialize SCR to unmask all interrupts (so that if we get a spurious
+	   IRQ/SError we'll see it when it happens, not hang in BL31). This will
+	   only have an effect after we DAIFClr in exception_init(). */
+	mov	x22, #SCR_RES1 | SCR_IRQ | SCR_FIQ | SCR_EA
+	msr	scr_el3, x22
+
 	/* Invalidate icache and TLB for good measure */
 	ic	iallu
 	tlbi	alle3
diff --git a/src/arch/arm64/include/armv8/arch/cache.h b/src/arch/arm64/include/armv8/arch/cache.h
index 3de2e80877..1168992cc4 100644
--- a/src/arch/arm64/include/armv8/arch/cache.h
+++ b/src/arch/arm64/include/armv8/arch/cache.h
@@ -32,33 +32,13 @@
 #ifndef ARM_ARM64_CACHE_H
 #define ARM_ARM64_CACHE_H
 
-/* SCTLR_ELx common bits */
-#define SCTLR_M		(1 << 0)	/* MMU enable */
-#define SCTLR_A		(1 << 1)	/* Alignment check enable */
-#define SCTLR_C		(1 << 2)	/* Data/unified cache enable */
-#define SCTLR_SA	(1 << 3)	/* Stack alignment check enable */
-#define SCTLR_I		(1 << 12)	/* Instruction cache enable */
-#define SCTLR_WXN	(1 << 19)	/* Write permission implies XN */
-#define SCTLR_EE	(1 << 25)	/* Exception endianness */
-
-/* SCTLR_EL1 bits */
-#define SCTLR_EL1_CP15B	(1 << 5)	/* CP15 barrier enable */
-#define SCTLR_EL1_ITD	(1 << 7)	/* IT disable */
-#define SCTLR_EL1_SED	(1 << 8)	/* SETEND disable */
-#define SCTLR_EL1_UMA	(1 << 9)	/* User mask access */
-#define SCTLR_EL1_DZE	(1 << 14)	/* DC ZVA instruction at EL0 */
-#define SCTLR_EL1_UCT	(1 << 15)	/* CTR_EL0 register EL0 access */
-#define SCTLR_EL1_NTWI	(1 << 16)	/* Not trap WFI */
-#define SCTLR_EL1_NTWE	(1 << 18)	/* Not trap WFE */
-#define SCTLR_EL1_E0E	(1 << 24)	/* Exception endianness at EL0 */
-#define SCTLR_EL1_UCI	(1 << 26)	/* EL0 access to cache instructions */
+#include <arch/lib_helpers.h>
 
 #ifndef __ASSEMBLER__
 
 #include <stddef.h>
 #include <stdint.h>
 #include <arch/barrier.h>
-#include <arch/lib_helpers.h>
 
 /* dcache clean by virtual address to PoC */
 void dcache_clean_by_mva(void const *addr, size_t len);
diff --git a/src/arch/arm64/include/armv8/arch/lib_helpers.h b/src/arch/arm64/include/armv8/arch/lib_helpers.h
index 0afbf82a23..9d5b508453 100644
--- a/src/arch/arm64/include/armv8/arch/lib_helpers.h
+++ b/src/arch/arm64/include/armv8/arch/lib_helpers.h
@@ -38,79 +38,46 @@
 #define SPSR_DEBUG		(1 << 9)
 #define SPSR_EXCEPTION_MASK	(SPSR_FIQ | SPSR_IRQ | SPSR_SERROR | SPSR_DEBUG)
 
-#define SCR_NS_SHIFT		0
-#define SCR_NS_MASK		(1 << SCR_NS_SHIFT)
-#define SCR_NS_ENABLE		(1 << SCR_NS_SHIFT)
-#define SCR_NS_DISABLE		(0 << SCR_NS_SHIFT)
-#define SCR_NS			SCR_NS_ENABLE
-#define SCR_RES1		(0x3 << 4)
-#define SCR_IRQ_SHIFT		2
-#define SCR_IRQ_MASK		(1 << SCR_IRQ_SHIFT)
-#define SCR_IRQ_ENABLE		(1 << SCR_IRQ_SHIFT)
-#define SCR_IRQ_DISABLE		(0 << SCR_IRQ_SHIFT)
-#define SCR_FIQ_SHIFT		2
-#define SCR_FIQ_MASK		(1 << SCR_FIQ_SHIFT)
-#define SCR_FIQ_ENABLE		(1 << SCR_FIQ_SHIFT)
-#define SCR_FIQ_DISABLE		(0 << SCR_FIQ_SHIFT)
-#define SCR_EA_SHIFT		3
-#define SCR_EA_MASK		(1 << SCR_EA_SHIFT)
-#define SCR_EA_ENABLE		(1 << SCR_EA_SHIFT)
-#define SCR_EA_DISABLE		(0 << SCR_EA_SHIFT)
-#define SCR_SMD_SHIFT		7
-#define SCR_SMD_MASK		(1 << SCR_SMD_SHIFT)
-#define SCR_SMD_DISABLE		(1 << SCR_SMD_SHIFT)
-#define SCR_SMD_ENABLE		(0 << SCR_SMD_SHIFT)
-#define SCR_HVC_SHIFT		8
-#define SCR_HVC_MASK		(1 << SCR_HVC_SHIFT)
-#define SCR_HVC_DISABLE		(0 << SCR_HVC_SHIFT)
-#define SCR_HVC_ENABLE		(1 << SCR_HVC_SHIFT)
-#define SCR_SIF_SHIFT		9
-#define SCR_SIF_MASK		(1 << SCR_SIF_SHIFT)
-#define SCR_SIF_ENABLE		(1 << SCR_SIF_SHIFT)
-#define SCR_SIF_DISABLE		(0 << SCR_SIF_SHIFT)
-#define SCR_RW_SHIFT		10
-#define SCR_RW_MASK		(1 << SCR_RW_SHIFT)
-#define SCR_LOWER_AARCH64	(1 << SCR_RW_SHIFT)
-#define SCR_LOWER_AARCH32	(0 << SCR_RW_SHIFT)
-#define SCR_ST_SHIFT		11
-#define SCR_ST_MASK		(1 << SCR_ST_SHIFT)
-#define SCR_ST_ENABLE		(1 << SCR_ST_SHIFT)
-#define SCR_ST_DISABLE		(0 << SCR_ST_SHIFT)
-#define SCR_TWI_SHIFT		12
-#define SCR_TWI_MASK		(1 << SCR_TWI_SHIFT)
-#define SCR_TWI_ENABLE		(1 << SCR_TWI_SHIFT)
-#define SCR_TWI_DISABLE		(0 << SCR_TWI_SHIFT)
-#define SCR_TWE_SHIFT		13
-#define SCR_TWE_MASK		(1 << SCR_TWE_SHIFT)
-#define SCR_TWE_ENABLE		(1 << SCR_TWE_SHIFT)
-#define SCR_TWE_DISABLE		(0 << SCR_TWE_SHIFT)
+#define SCR_NS		(1 << 0)	/* EL0/1 are non-secure */
+#define SCR_IRQ		(1 << 1)	/* Take IRQs in EL3 */
+#define SCR_FIQ		(1 << 2)	/* Take FIQs in EL3 */
+#define SCR_EA		(1 << 3)	/* Take EA/SError in EL3 */
+#define SCR_SMD		(1 << 7)	/* Disable SMC instruction */
+#define SCR_HCE		(1 << 8)	/* Enable HVC instruction */
+#define SCR_SIF		(1 << 9)	/* Forbid insns from NS memory */
+#define SCR_RW		(1 << 10)	/* Lower ELs are AArch64 */
+#define SCR_ST		(1 << 11)	/* Don't trap secure CNTPS */
+#define SCR_TWI		(1 << 12)	/* Trap WFI to EL3 */
+#define SCR_TWE		(1 << 13)	/* Trap WFE to EL3 */
+#define SCR_TLOR	(1 << 14)	/* Trap LOR accesses to EL3 */
+#define SCR_TERR	(1 << 15)	/* Trap ERR accesses to EL3 */
+#define SCR_APK		(1 << 16)	/* Don't trap ptrauth keys */
+#define SCR_API		(1 << 17)	/* Don't trap ptrauth insn */
+#define SCR_EEL2	(1 << 18)	/* Enable secure EL2 */
+#define SCR_EASE	(1 << 19)	/* Sync EAs use SError vector */
+#define SCR_NMEA	(1 << 20)	/* Disallow EL3 SError masking */
+#define SCR_FIEN	(1 << 21)	/* Don't trap EXRPFG */
+#define SCR_RES1	(3 << 4)
 
 #define HCR_RW_SHIFT		31
 #define HCR_LOWER_AARCH64	(1 << HCR_RW_SHIFT)
 #define HCR_LOWER_AARCH32	(0 << HCR_RW_SHIFT)
 
-#define SCTLR_MMU_ENABLE	1
-#define SCTLR_MMU_DISABLE	0
-#define SCTLR_ACE_SHIFT		1
-#define SCTLR_ACE_ENABLE	(1 << SCTLR_ACE_SHIFT)
-#define SCTLR_ACE_DISABLE	(0 << SCTLR_ACE_SHIFT)
-#define SCTLR_CACHE_SHIFT	2
-#define SCTLR_CACHE_ENABLE	(1 << SCTLR_CACHE_SHIFT)
-#define SCTLR_CACHE_DISABLE	(0 << SCTLR_CACHE_SHIFT)
-#define SCTLR_SAE_SHIFT		3
-#define SCTLR_SAE_ENABLE	(1 << SCTLR_SAE_SHIFT)
-#define SCTLR_SAE_DISABLE	(0 << SCTLR_SAE_SHIFT)
+#define SCTLR_M		(1 << 0)	/* MMU enable */
+#define SCTLR_A		(1 << 1)	/* Alignment check enable */
+#define SCTLR_C		(1 << 2)	/* Data/unified cache enable */
+#define SCTLR_SA	(1 << 3)	/* Stack alignment check enable */
+#define SCTLR_NAA	(1 << 6)	/* non-aligned access STA/LDR */
+#define SCTLR_I		(1 << 12)	/* Instruction cache enable */
+#define SCTLR_ENDB	(1 << 13)	/* Pointer auth (data B) */
+#define SCTLR_WXN	(1 << 19)	/* Write permission implies XN */
+#define SCTLR_IESB	(1 << 21)	/* Implicit error sync event */
+#define SCTLR_EE	(1 << 25)	/* Exception endianness (BE) */
+#define SCTLR_ENDA	(1 << 27)	/* Pointer auth (data A) */
+#define SCTLR_ENIB	(1 << 30)	/* Pointer auth (insn B) */
+#define SCTLR_ENIA	(1 << 31)	/* Pointer auth (insn A) */
 #define SCTLR_RES1	((0x3 << 4) | (0x1 << 11) | (0x1 << 16) | \
 			 (0x1 << 18) | (0x3 << 22) | (0x3 << 28))
-#define SCTLR_ICE_SHIFT		12
-#define SCTLR_ICE_ENABLE	(1 << SCTLR_ICE_SHIFT)
-#define SCTLR_ICE_DISABLE	(0 << SCTLR_ICE_SHIFT)
-#define SCTLR_WXN_SHIFT		19
-#define SCTLR_WXN_ENABLE	(1 << SCTLR_WXN_SHIFT)
-#define SCTLR_WXN_DISABLE	(0 << SCTLR_WXN_SHIFT)
-#define SCTLR_ENDIAN_SHIFT	25
-#define SCTLR_LITTLE_END	(0 << SCTLR_ENDIAN_SHIFT)
-#define SCTLR_BIG_END		(1 << SCTLR_ENDIAN_SHIFT)
 
 #define CPTR_EL3_TCPAC_SHIFT	(31)
 #define CPTR_EL3_TTA_SHIFT	(20)
diff --git a/src/arch/arm64/transition.c b/src/arch/arm64/transition.c
index 3e8d7f0762..ac59d19acf 100644
--- a/src/arch/arm64/transition.c
+++ b/src/arch/arm64/transition.c
@@ -17,14 +17,6 @@
 #include <arch/transition.h>
 #include <assert.h>
 
-/* Litte-endian, No XN-forced, Instr cache disabled,
- * Stack alignment disabled, Data and unified cache
- * disabled, Alignment check disabled, MMU disabled
- */
-#define SCTLR_MASK  (SCTLR_MMU_DISABLE | SCTLR_ACE_DISABLE | \
-		     SCTLR_CACHE_DISABLE | SCTLR_SAE_DISABLE | SCTLR_RES1 | \
-		     SCTLR_ICE_DISABLE | SCTLR_WXN_DISABLE | SCTLR_LITTLE_END)
-
 void __weak exc_dispatch(struct exc_state *exc_state, uint64_t id)
 {
 	/* Default weak implementation does nothing. */
@@ -54,7 +46,6 @@ void transition_to_el2(void *entry, void *arg, uint64_t spsr)
 	struct exc_state exc_state;
 	struct elx_state *elx = &exc_state.elx;
 	struct regs *regs = &exc_state.regs;
-	uint32_t sctlr;
 
 	regs->x[X0_INDEX] = (uint64_t)arg;
 	elx->elr = (uint64_t)entry;
@@ -70,19 +61,10 @@ void transition_to_el2(void *entry, void *arg, uint64_t spsr)
 	 */
 	assert(get_el_from_spsr(spsr) == EL2 && !(spsr & SPSR_ERET_32));
 
-	/* Initialize SCR with defaults for running without secure monitor. */
-	raw_write_scr_el3(SCR_TWE_DISABLE |	/* don't trap WFE */
-			  SCR_TWI_DISABLE |	/* don't trap WFI */
-			  SCR_ST_ENABLE |	/* allow secure timer access */
-			  SCR_LOWER_AARCH64 |	/* lower level is AArch64 */
-			  SCR_SIF_DISABLE |	/* disable secure ins. fetch */
-			  SCR_HVC_ENABLE |	/* allow HVC instruction */
-			  SCR_SMD_ENABLE |	/* disable SMC instruction */
-			  SCR_RES1 |		/* reserved-1 bits */
-			  SCR_EA_DISABLE |	/* disable ext. abort trap */
-			  SCR_FIQ_DISABLE |	/* disable FIQ trap to EL3 */
-			  SCR_IRQ_DISABLE |	/* disable IRQ trap to EL3 */
-			  SCR_NS_ENABLE);	/* lower level is non-secure */
+	/* Initialize SCR with defaults for running without secure monitor
+	   (disable all traps, enable all instructions, run NS at AArch64). */
+	raw_write_scr_el3(SCR_FIEN | SCR_API | SCR_APK | SCR_ST | SCR_RW |
+			  SCR_HCE | SCR_SMD | SCR_RES1 | SCR_NS);
 
 	/* Initialize CPTR to not trap anything to EL3. */
 	raw_write_cptr_el3(CPTR_EL3_TCPAC_DISABLE | CPTR_EL3_TTA_DISABLE |
@@ -92,10 +74,8 @@ void transition_to_el2(void *entry, void *arg, uint64_t spsr)
 	raw_write_elr_el3(elx->elr);
 	raw_write_spsr_el3(elx->spsr);
 
-	/* SCTLR: Initialize EL with selected properties */
-	sctlr = raw_read_sctlr_el2();
-	sctlr &= SCTLR_MASK;
-	raw_write_sctlr_el2(sctlr);
+	/* SCTLR: Initialize EL with everything disabled */
+	raw_write_sctlr_el2(SCTLR_RES1);
 
 	/* SP_ELx: Initialize stack pointer */
 	raw_write_sp_el2(elx->sp_elx);
diff --git a/src/arch/arm64/transition_asm.S b/src/arch/arm64/transition_asm.S
index 718832b421..bdb412f36d 100644
--- a/src/arch/arm64/transition_asm.S
+++ b/src/arch/arm64/transition_asm.S
@@ -154,7 +154,7 @@ ENDPROC(exc_exit)
 
 /*
  * exception_init_asm: Initialize VBAR and point SP_EL3 to exception stack.
- * x0 = end of exception stack
+ * Also unmask aborts now that we can report them. x0 = end of exception stack
  */
 ENTRY(exception_init_asm)
 	msr	SPSel, #SPSR_USE_H
@@ -163,6 +163,9 @@ ENTRY(exception_init_asm)
 	adr	x0, exc_vectors
 	msr	vbar_el3, x0
+
+	msr	DAIFClr, #0xf
+
 	dsb	sy
 	isb
 	ret