author     Julius Werner <jwerner@chromium.org>   2018-06-25 18:06:59 -0700
committer  Julius Werner <jwerner@chromium.org>   2018-06-26 23:59:18 +0000
commit     e0058e89ab94ef7ef366c596d3e03ad693694d2a
tree       9d30123effa4f26419fcf2a12ab909e5d0ebf881 /src/arch
parent     a98b5bf89b39fede95c34bf81e9decc1e6b6d38f
download   coreboot-e0058e89ab94ef7ef366c596d3e03ad693694d2a.tar.xz
arm64: Reimplement mmu_disable() in assembly
Disabling the MMU with proper cache behavior is a bit tricky on ARM64: you
can flush the cache first and then disable the MMU (like we have been doing),
but then you run the risk of new cache lines being allocated in the tiny
window between the two operations. That may or may not become a problem when
those lines get flushed at a later point: on some platforms, certain memory
regions "go away" at certain points, in a way that makes the CPU very unhappy
if it ever issues a write cycle to them again afterwards.
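For reference, the old order is exactly the mmu_disable() implementation that
the diff below removes from mmu.c; the comments marking the window are
editorial annotations, not part of the commit:

	dcache_clean_invalidate_all();	/* flush + invalidate while cache is on */
	/* window: MMU and D-cache are still enabled here, so speculative or
	   compiler-generated accesses may allocate fresh cache lines that
	   only get written back much later */
	uint32_t sctlr = raw_read_sctlr_el3();
	sctlr &= ~(SCTLR_C | SCTLR_M);
	raw_write_sctlr_el3(sctlr);	/* now disable MMU and D-cache */
	isb();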
The obvious alternative is to first disable the MMU and then flush the cache,
ensuring that every memory access after the flush already has the
non-cacheable attribute. But we can't just flip the order around in our
existing C code: accesses in that tiny window in between would then go
straight to memory, so loads may yield the wrong result and stores may get
overwritten again by the later cache flush.
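A naive reversal in C would be a sketch like the following (hypothetical
code, not part of the commit), and it fails for exactly the reason described
above:

	/* NOT safe: nothing stops the compiler from touching the stack here */
	uint32_t sctlr = raw_read_sctlr_el3();
	sctlr &= ~(SCTLR_C | SCTLR_M);
	raw_write_sctlr_el3(sctlr);	/* MMU and D-cache now off */
	/* window: any compiler-generated stack access now bypasses the cache;
	   loads can miss dirty data still sitting in cache lines, and stores
	   land in memory only to be overwritten when the flush below writes
	   those stale lines back */
	dcache_clean_invalidate_all();
	isb();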
In the end, this shouldn't really be a problem, because both operations can
be done purely from registers, without any explicit memory accesses in
between. We just have to reimplement the function in assembly to make sure
the compiler doesn't insert any stack accesses at the wrong points.
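The resulting implementation is the cpu.S hunk in the diff below; annotated
step by step here (the comments are editorial, not part of the commit):

	ENTRY(mmu_disable)
		str	x30, [sp, #-0x8]	/* save return address; cache still on */
		mrs	x0, sctlr_el3		/* read current SCTLR_EL3 */
		mov	x1, #~(SCTLR_C | SCTLR_M)
		and	x0, x0, x1		/* clear MMU and D-cache enable bits */
		msr	sctlr_el3, x0		/* MMU and D-cache off */
		isb				/* from here on: registers only */
		bl	dcache_clean_invalidate_all	/* set/way flush, no loads/stores */
		ldr	x30, [sp, #-0x8]	/* reload x30; the flush wrote it back */
		ret
	ENDPROC(mmu_disable)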
Change-Id: Ic552960c91400dadae6f130b2521a696eeb4c0b1
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://review.coreboot.org/27238
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Diffstat (limited to 'src/arch')
-rw-r--r--  src/arch/arm64/armv8/cpu.S                 | 15
-rw-r--r--  src/arch/arm64/armv8/mmu.c                 | 14
-rw-r--r--  src/arch/arm64/include/armv8/arch/cache.h  | 11
3 files changed, 22 insertions, 18 deletions
diff --git a/src/arch/arm64/armv8/cpu.S b/src/arch/arm64/armv8/cpu.S
index 1bb8c838ad..935f1fe7bd 100644
--- a/src/arch/arm64/armv8/cpu.S
+++ b/src/arch/arm64/armv8/cpu.S
@@ -15,6 +15,7 @@
  */
 
 #include <arch/asm.h>
+#include <arch/cache.h>
 
 .macro dcache_apply_all crm
 	dsb	sy
@@ -83,6 +84,20 @@ ENTRY(dcache_clean_invalidate_all)
 	dcache_apply_all crm=cisw
 ENDPROC(dcache_clean_invalidate_all)
 
+/* This must be implemented in assembly to ensure there are no accesses to
+   memory (e.g. the stack) in between disabling and flushing the cache. */
+ENTRY(mmu_disable)
+	str	x30, [sp, #-0x8]
+	mrs	x0, sctlr_el3
+	mov	x1, #~(SCTLR_C | SCTLR_M)
+	and	x0, x0, x1
+	msr	sctlr_el3, x0
+	isb
+	bl	dcache_clean_invalidate_all
+	ldr	x30, [sp, #-0x8]
+	ret
+ENDPROC(mmu_disable)
+
 /*
  * Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
  * known state regarding caches/SCTLR/PSTATE. Completely invalidates
diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c
index 48f77ace83..606a9b30ad 100644
--- a/src/arch/arm64/armv8/mmu.c
+++ b/src/arch/arm64/armv8/mmu.c
@@ -321,17 +321,3 @@ void mmu_enable(void)
 	raw_write_sctlr_el3(sctlr);
 	isb();
 }
-
-/*
- * CAUTION: This implementation assumes that coreboot never uses non-identity
- * page tables for pages containing executed code. If you ever want to violate
- * this assumption, have fun figuring out the associated problems on your own.
- */
-void mmu_disable(void)
-{
-	dcache_clean_invalidate_all();
-	uint32_t sctlr = raw_read_sctlr_el3();
-	sctlr &= ~(SCTLR_C | SCTLR_M);
-	raw_write_sctlr_el3(sctlr);
-	isb();
-}
diff --git a/src/arch/arm64/include/armv8/arch/cache.h b/src/arch/arm64/include/armv8/arch/cache.h
index 3647290866..b31c3b0153 100644
--- a/src/arch/arm64/include/armv8/arch/cache.h
+++ b/src/arch/arm64/include/armv8/arch/cache.h
@@ -32,10 +32,6 @@
 #ifndef ARM_ARM64_CACHE_H
 #define ARM_ARM64_CACHE_H
 
-#include <stddef.h>
-#include <stdint.h>
-#include <arch/barrier.h>
-
 /* SCTLR_ELx common bits */
 #define SCTLR_M		(1 << 0)	/* MMU enable */
 #define SCTLR_A		(1 << 1)	/* Alignment check enable */
@@ -57,6 +53,11 @@
 #define SCTLR_EL1_E0E	(1 << 24)	/* Exception endianness at EL0 */
 #define SCTLR_EL1_UCI	(1 << 26)	/* EL0 access to cache instructions */
 
+#ifndef __ASSEMBLER__
+
+#include <stddef.h>
+#include <stdint.h>
+#include <arch/barrier.h>
 
 /* dcache clean by virtual address to PoC */
 void dcache_clean_by_mva(void const *addr, size_t len);
@@ -92,4 +93,6 @@ static inline void icache_invalidate_all(void)
 	: : : "memory");
 }
 
+#endif /* __ASSEMBLER__ */
+
 #endif /* ARM_ARM64_CACHE_H */