 payloads/libpayload/arch/arm64/cpu.S                 | 15 +++++++++++++++
 payloads/libpayload/arch/arm64/head.S                |  4 ++++
 payloads/libpayload/arch/arm64/mmu.c                 | 24 ------------------------
 payloads/libpayload/include/arm64/arch/cache.h       | 32 --------------------------------
 payloads/libpayload/include/arm64/arch/lib_helpers.h | 20 ++++++++++++++++++++
 5 files changed, 39 insertions(+), 56 deletions(-)
diff --git a/payloads/libpayload/arch/arm64/cpu.S b/payloads/libpayload/arch/arm64/cpu.S
index d80f73c112..70a1044b02 100644
--- a/payloads/libpayload/arch/arm64/cpu.S
+++ b/payloads/libpayload/arch/arm64/cpu.S
@@ -29,6 +29,7 @@
  */
 
 #include <arch/asm.h>
+#include <arch/lib_helpers.h>
 
 .macro dcache_apply_all crm
 	dsb	sy
@@ -96,3 +97,17 @@ ENDPROC(dcache_clean_all)
 ENTRY(dcache_clean_invalidate_all)
 	dcache_apply_all crm=cisw
 ENDPROC(dcache_clean_invalidate_all)
+
+/* This must be implemented in assembly to ensure there are no accesses to
+   memory (e.g. the stack) in between disabling and flushing the cache. */
+ENTRY(mmu_disable)
+	str	x30, [sp, #-0x8]
+	mrs	x0, sctlr_el2
+	mov	x1, #~(SCTLR_C | SCTLR_M)
+	and	x0, x0, x1
+	msr	sctlr_el2, x0
+	isb
+	bl	dcache_clean_invalidate_all
+	ldr	x30, [sp, #-0x8]
+	ret
+ENDPROC(mmu_disable)
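
The sequence above is deliberately kept out of C: between the msr that clears
SCTLR_C/SCTLR_M and the final flush, any compiler-generated stack access would
hit memory that is no longer cached while dirty lines for those addresses may
still sit in the cache. A rough C rendering, shown only to make the ordering
explicit (an illustrative sketch, unusable in practice for exactly that
reason; the function name is hypothetical):

    /* Illustrative only: the compiler is free to spill to the stack
     * between the SCTLR write and the flush, which is the hazard the
     * assembly version exists to avoid. The accessors are the ones
     * libpayload's lib_helpers.h already provides. */
    static void mmu_disable_c_sketch(void)
    {
        raw_write_sctlr_el2(raw_read_sctlr_el2() & ~(SCTLR_C | SCTLR_M));
        isb();                           /* make the disable take effect */
        dcache_clean_invalidate_all();   /* then push dirty lines out */
    }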
diff --git a/payloads/libpayload/arch/arm64/head.S b/payloads/libpayload/arch/arm64/head.S
index 8bac70fee5..c44169b82a 100644
--- a/payloads/libpayload/arch/arm64/head.S
+++ b/payloads/libpayload/arch/arm64/head.S
@@ -28,11 +28,15 @@
  */
 
 #include <arch/asm.h>
+#include <arch/lib_helpers.h>
 
 /*
  * Our entry point
  */
 ENTRY(_entry)
+	/* Initialize SCTLR to intended state (icache and stack-alignment on) */
+	ldr	w1, =(SCTLR_RES1 | SCTLR_I | SCTLR_SA)
+	msr	sctlr_el2, x1
 
 	/* Save off the location of the coreboot tables */
 	ldr	x1, 1f
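
Given the new definitions added to lib_helpers.h below, the value loaded here
is the RES1 bits plus the icache and stack-alignment enables. A quick
compile-time sanity check, assuming a C11 toolchain (the static_assert and its
placement are hypothetical, not part of the patch):

    #include <assert.h>
    #include <arch/lib_helpers.h>

    /* 0x30C51838 = RES1 bits (4, 5, 11, 16, 18, 22, 23, 28, 29)
     *              | SCTLR_I (bit 12) | SCTLR_SA (bit 3) */
    static_assert((SCTLR_RES1 | SCTLR_I | SCTLR_SA) == 0x30C51838,
                  "initial SCTLR_EL2 value used by _entry");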
diff --git a/payloads/libpayload/arch/arm64/mmu.c b/payloads/libpayload/arch/arm64/mmu.c
index d1dd5b0147..3a5e04db6c 100644
--- a/payloads/libpayload/arch/arm64/mmu.c
+++ b/payloads/libpayload/arch/arm64/mmu.c
@@ -304,30 +304,6 @@ static uint32_t is_mmu_enabled(void)
 }
 
 /*
- * Func: mmu_disable
- * Desc: Invalidate caches and disable mmu
- */
-void mmu_disable(void)
-{
-	uint32_t sctlr;
-
-	sctlr = raw_read_sctlr_el2();
-	sctlr &= ~(SCTLR_C | SCTLR_M | SCTLR_I);
-
-	tlbiall_el2();
-	dcache_clean_invalidate_all();
-
-	dsb();
-	isb();
-
-	raw_write_sctlr_el2(sctlr);
-
-	dcache_clean_invalidate_all();
-	dsb();
-	isb();
-}
-
-/*
  * Func: mmu_enable
  * Desc: Initialize MAIR, TCR, TTBR and enable MMU by setting appropriate bits
  *	 in SCTLR
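
For orientation, the enable path that this comment describes typically has the
shape sketched below. This is a minimal sketch, not the patch's code: it
assumes accessors generated by MAKE_REGISTER_ACCESSORS for mair_el2, tcr_el2
and ttbr0_el2, and the names MAIR_ATTRIBUTES, TCR_CONFIG and xlat_addr are
placeholders:

    static void mmu_enable_sketch(void)
    {
        raw_write_mair_el2(MAIR_ATTRIBUTES);       /* memory attribute encodings */
        raw_write_tcr_el2(TCR_CONFIG);             /* VA size, granule, shareability */
        raw_write_ttbr0_el2((uintptr_t)xlat_addr); /* translation table base */
        tlbiall_el2();                             /* drop stale TLB entries */
        dsb();
        isb();
        raw_write_sctlr_el2(raw_read_sctlr_el2() | SCTLR_C | SCTLR_M | SCTLR_I);
        isb();                                     /* MMU on from here */
    }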
diff --git a/payloads/libpayload/include/arm64/arch/cache.h b/payloads/libpayload/include/arm64/arch/cache.h
index de68cee3f1..ace0e0ecd6 100644
--- a/payloads/libpayload/include/arm64/arch/cache.h
+++ b/payloads/libpayload/include/arm64/arch/cache.h
@@ -35,38 +35,6 @@
 #include <stddef.h>
 #include <stdint.h>
 
-/* SCTLR bits */
-#define SCTLR_M		(1 << 0)	/* MMU enable */
-#define SCTLR_A		(1 << 1)	/* Alignment check enable */
-#define SCTLR_C		(1 << 2)	/* Data/unified cache enable */
-/* Bits 4:3 are reserved */
-#define SCTLR_CP15BEN	(1 << 5)	/* CP15 barrier enable */
-/* Bit 6 is reserved */
-#define SCTLR_B		(1 << 7)	/* Endianness */
-/* Bits 9:8 */
-#define SCTLR_SW	(1 << 10)	/* SWP and SWPB enable */
-#define SCTLR_Z		(1 << 11)	/* Branch prediction enable */
-#define SCTLR_I		(1 << 12)	/* Instruction cache enable */
-#define SCTLR_V		(1 << 13)	/* Low/high exception vectors */
-#define SCTLR_RR	(1 << 14)	/* Round Robin select */
-/* Bits 16:15 are reserved */
-#define SCTLR_HA	(1 << 17)	/* Hardware Access flag enable */
-/* Bit 18 is reserved */
-/* Bits 20:19 reserved virtualization not supported */
-#define SCTLR_WXN	(1 << 19)	/* Write permission implies XN */
-#define SCTLR_UWXN	(1 << 20)	/* Unprivileged write permission
-					   implies PL1 XN */
-#define SCTLR_FI	(1 << 21)	/* Fast interrupt config enable */
-#define SCTLR_U		(1 << 22)	/* Unaligned access behavior */
-#define SCTLR_VE	(1 << 24)	/* Interrupt vectors enable */
-#define SCTLR_EE	(1 << 25)	/* Exception endianness */
-/* Bit 26 is reserved */
-#define SCTLR_NMFI	(1 << 27)	/* Non-maskable FIQ support */
-#define SCTLR_TRE	(1 << 28)	/* TEX remap enable */
-#define SCTLR_AFE	(1 << 29)	/* Access flag enable */
-#define SCTLR_TE	(1 << 30)	/* Thumb exception enable */
-/* Bit 31 is reserved */
-
 /*
  * Cache maintenance API
  */
diff --git a/payloads/libpayload/include/arm64/arch/lib_helpers.h b/payloads/libpayload/include/arm64/arch/lib_helpers.h
index 7617f97426..b2e3a069e0 100644
--- a/payloads/libpayload/include/arm64/arch/lib_helpers.h
+++ b/payloads/libpayload/include/arm64/arch/lib_helpers.h
@@ -30,11 +30,29 @@
 #ifndef __ARCH_LIB_HELPERS_H__
 #define __ARCH_LIB_HELPERS_H__
 
+#define SCTLR_M		(1 << 0)	/* MMU enable */
+#define SCTLR_A		(1 << 1)	/* Alignment check enable */
+#define SCTLR_C		(1 << 2)	/* Data/unified cache enable */
+#define SCTLR_SA	(1 << 3)	/* Stack alignment check enable */
+#define SCTLR_NAA	(1 << 6)	/* non-aligned access STR/LDR */
+#define SCTLR_I		(1 << 12)	/* Instruction cache enable */
+#define SCTLR_ENDB	(1 << 13)	/* Pointer auth (data B) */
+#define SCTLR_WXN	(1 << 19)	/* Write permission implies XN */
+#define SCTLR_IESB	(1 << 21)	/* Implicit error sync event */
+#define SCTLR_EE	(1 << 25)	/* Exception endianness (BE) */
+#define SCTLR_ENDA	(1 << 27)	/* Pointer auth (data A) */
+#define SCTLR_ENIB	(1 << 30)	/* Pointer auth (insn B) */
+#define SCTLR_ENIA	(1 << 31)	/* Pointer auth (insn A) */
+#define SCTLR_RES1	((0x3 << 4) | (0x1 << 11) | (0x1 << 16) | \
+			 (0x1 << 18) | (0x3 << 22) | (0x3 << 28))
+
 #define DAIF_DBG_BIT	(1 << 3)
 #define DAIF_ABT_BIT	(1 << 2)
 #define DAIF_IRQ_BIT	(1 << 1)
 #define DAIF_FIQ_BIT	(1 << 0)
 
+#ifndef __ASSEMBLER__
+
 #include <stdint.h>
 
 #define MAKE_REGISTER_ACCESSORS(reg) \
@@ -273,4 +291,6 @@ static inline void tlbivaa_el1(uint64_t va)
 #define dsb() dsb_opt(sy)
 #define isb() isb_opt()
 
+#endif /* __ASSEMBLER__ */
+
 #endif /* __ARCH_LIB_HELPERS_H__ */
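
The new __ASSEMBLER__ guard is what lets cpu.S and head.S include this header
for the SCTLR_* constants while C code keeps the full accessor API. Usage from
C is unchanged; a minimal example (the wrapper function is hypothetical):

    #include <arch/lib_helpers.h>

    /* Reads SCTLR_EL2 through the accessor that
     * MAKE_REGISTER_ACCESSORS generates for C code. */
    uint64_t current_sctlr(void)
    {
        return raw_read_sctlr_el2();
    }

From assembly, the same #include now exposes only the preprocessor constants,
which is exactly what the mov x1, #~(SCTLR_C | SCTLR_M) in cpu.S needs.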