author		Furquan Shaikh <furquan@google.com>	2014-11-21 15:42:40 -0800
committer	Patrick Georgi <pgeorgi@google.com>	2015-04-10 20:47:44 +0200
commit		9482498003d500db6aced4c94bf4ab3485cab18e (patch)
tree		e2c2ec3cc22f6893d772da62782b3795245c101a /src/arch/arm64/stage_entry.S
parent		b17f580d2955822d0095e1fb914d98eeabf2a708 (diff)
download	coreboot-9482498003d500db6aced4c94bf4ab3485cab18e.tar.xz
arm64: Add support for save/restore registers for CPU startup.
startup.c provides a function that allows the CPU in any stage to save register data which can be used by a secondary CPU (for normal boot) or by any CPU (for resume). stage_entry.S defines the space for saving arm64_startup_data. This can be filled by:

1) The primary CPU before bringing up secondary CPUs, so that the secondaries can use the saved register values to initialize MMU-related and other required registers appropriately.

2) The CPU suspend path, to ensure that on resume the values which were saved are restored appropriately.

stage_entry.S provides a common path for both normal and resume boot to initialize the saved registers. For the resume path, it is important to set the secondary entry point for startup, since x26 needs to be 1 to enable the MMU and caches. This also ensures that we do not run into the spurious memory/cache errors that caused the CPU to fail during normal and resume boot. Thus, we can get rid of the stack-cache invalidation for secondary CPUs.

BUG=chrome-os-partner:33962
BRANCH=None
TEST=Compiles and boots both CPU0 and CPU1 on ryu without mmu_enable and the stack-cache invalidation for CPU1.

Change-Id: Ia4ca0e7d35c0738dbbaa926cce4268143c6f9de3
Signed-off-by: Patrick Georgi <pgeorgi@chromium.org>
Original-Commit-Id: 9f5e78469313ddd144ad7cf5abc3e07cb712183a
Original-Signed-off-by: Furquan Shaikh <furquan@google.com>
Original-Change-Id: I527a95779cf3fed37392b6605b096f54f8286d64
Original-Reviewed-on: https://chromium-review.googlesource.com/231561
Original-Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Original-Tested-by: Furquan Shaikh <furquan@chromium.org>
Original-Commit-Queue: Furquan Shaikh <furquan@chromium.org>
Reviewed-on: http://review.coreboot.org/9540
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
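The startup.c side of this change is not included in the diff below. As a minimal sketch of the save path described above, assuming coreboot's raw_read_* and get_current_el() helpers from <arch/lib_helpers.h> (the function name arm64_startup_save_regs and the save_element helper are illustrative, not taken from this patch):

#include <stdint.h>
#include <arch/lib_helpers.h>
#include <arch/startup.h>

/* Save area reserved in .bss by stage_entry.S. */
extern uint8_t _arm64_startup_data[];

/* Store one 64-bit register value per slot, at the same
 * index * PER_ELEMENT_SIZE_BYTES offset that the get_element_addr
 * macro in stage_entry.S reads back. */
static void save_element(int index, uint64_t value)
{
	uint64_t *slot = (uint64_t *)
		&_arm64_startup_data[index * PER_ELEMENT_SIZE_BYTES];
	*slot = value;
}

/* Hypothetical save routine: called by the primary CPU before bringing
 * up secondaries, and by the suspend path before going down. */
void arm64_startup_save_regs(void)
{
	save_element(MAIR_INDEX, raw_read_mair_current());
	save_element(TCR_INDEX, raw_read_tcr_current());
	save_element(TTBR0_INDEX, raw_read_ttbr0_current());
	/* SCR exists only at EL3; the asm restores it via write_el3. */
	if (get_current_el() == EL3)
		save_element(SCR_INDEX, raw_read_scr_el3());
	save_element(VBAR_INDEX, raw_read_vbar_current());
}

Note that a waking CPU executes startup_restore with its data cache still disabled, so the saving CPU would also have to clean _arm64_startup_data to the point of coherency after writing it; otherwise the restore path could read stale values.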
Diffstat (limited to 'src/arch/arm64/stage_entry.S')
-rw-r--r--	src/arch/arm64/stage_entry.S	78
1 file changed, 71 insertions(+), 7 deletions(-)
diff --git a/src/arch/arm64/stage_entry.S b/src/arch/arm64/stage_entry.S
index 5a5ddabb51..fa2064e708 100644
--- a/src/arch/arm64/stage_entry.S
+++ b/src/arch/arm64/stage_entry.S
@@ -29,6 +29,7 @@
 #include <arch/asm.h>
 #define __ASSEMBLY__
 #include <arch/lib_helpers.h>
+#include <arch/startup.h>
 
 #define STACK_SZ CONFIG_STACK_SIZE
 #define EXCEPTION_STACK_SZ CONFIG_STACK_SIZE
@@ -38,6 +39,12 @@
  * according to MAX_CPUS. Additionally provide exception stacks for each CPU.
  */
 .section .bss, "aw", @nobits
+
+.global _arm64_startup_data
+.balign 8
+_arm64_startup_data:
+.space NUM_ELEMENTS*PER_ELEMENT_SIZE_BYTES
+
 .global _stack
 .global _estack
 .balign STACK_SZ
@@ -136,23 +143,80 @@ ENTRY(__rmodule_entry)
 	b	arm64_c_environment
 ENDPROC(__rmodule_entry)
 
-ENTRY(_arm64_cpu_startup)
+/*
+ * Set up SCTLR so that:
+ * little-endian mode is selected, XN is not enforced, the MMU and caches
+ * are disabled, and alignment and stack-alignment checks are disabled.
+ */
+.macro setup_sctlr
 	read_current x0, sctlr
 	bic	x0, x0, #(1 << 25)	/* Little Endian */
 	bic	x0, x0, #(1 << 19)	/* XN not enforced */
 	bic	x0, x0, #(1 << 12)	/* Disable Instruction Cache */
-	bic	x0, x0, #0xf	/* Clear SA, C, A, and M */
+	bic	x0, x0, #0xf	/* Clear SA, C, A and M */
+	write_current sctlr, x0, x1
+.endm
+
+/*
+ * This macro assumes that x2 holds the base address of the saved data
+ * region and returns the value read in x0. x1 is used as a temporary
+ * register.
+ */
+.macro get_element_addr index
+	add	x1, x2, #(\index * PER_ELEMENT_SIZE_BYTES)
+	ldr	x0, [x1]
+.endm
+
+/*
+ * Uses the following registers:
+ * x0 = value read from the saved data region
+ * x1 = temporary register
+ * x2 = base address of the saved data region
+ */
+.macro startup_restore
+	adr	x2, _arm64_startup_data
+
+	get_element_addr MAIR_INDEX
+	write_current mair, x0, x1
+
+	get_element_addr TCR_INDEX
+	write_current tcr, x0, x1
+
+	get_element_addr TTBR0_INDEX
+	write_current ttbr0, x0, x1
+
+	get_element_addr SCR_INDEX
+	write_el3 scr, x0, x1
+
+	get_element_addr VBAR_INDEX
+	write_current vbar, x0, x1
+
+	dsb	sy
+	isb
+
+	tlbiall_current x1
+	read_current x0, sctlr
+	orr	x0, x0, #(1 << 12)	/* Enable Instruction Cache */
+	orr	x0, x0, #(1 << 2)	/* Enable Data/Unified Cache */
+	orr	x0, x0, #(1 << 0)	/* Enable MMU */
 	write_current sctlr, x0, x1
+
+	dsb	sy
 	isb
-	b	arm64_c_environment
-ENDPROC(_arm64_cpu_startup)
+.endm
 
 CPU_RESET_ENTRY(arm64_cpu_startup)
 	split_bsp_path
-	b	_arm64_cpu_startup
+	setup_sctlr
+	b	arm64_c_environment
 ENDPROC(arm64_cpu_startup)
 
-ENTRY(stage_entry)
+CPU_RESET_ENTRY(arm64_cpu_startup_resume)
+	split_bsp_path
+	setup_sctlr
+	startup_restore
+	b	arm64_c_environment
+ENDPROC(arm64_cpu_startup_resume)
+
+ENTRY(stage_entry)
+	b	arm64_cpu_startup
 ENDPROC(stage_entry)
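For reference, the constants that stage_entry.S consumes from <arch/startup.h> can be inferred from the macros above: each saved register occupies one 64-bit slot, addressed as base + index * PER_ELEMENT_SIZE_BYTES. A sketch of that layout follows; only the names appear in this diff, so the numeric values are an assumption:

/* <arch/startup.h> layout as assumed by stage_entry.S (sketch;
 * the index values below are inferred, not shown in the patch). */
#define PER_ELEMENT_SIZE_BYTES	8	/* one 64-bit slot per register */

#define MAIR_INDEX	0
#define TCR_INDEX	1
#define TTBR0_INDEX	2
#define SCR_INDEX	3
#define VBAR_INDEX	4
/* Must be kept in sync with the highest index above. */
#define NUM_ELEMENTS	5

With this layout, ".space NUM_ELEMENTS*PER_ELEMENT_SIZE_BYTES" reserves 40 bytes for _arm64_startup_data, and, for example, "get_element_addr SCR_INDEX" loads the 64-bit value at _arm64_startup_data + 24 into x0.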