path: root/src/arch/arm64
author     Julius Werner <jwerner@chromium.org>  2015-10-16 13:10:02 -0700
committer  Julius Werner <jwerner@chromium.org>  2015-11-17 21:31:20 +0100
commit     7dcf9d51e5ffadfcf8b5fceddcddb4e1d0a7db37 (patch)
tree       2860976349922ae1ba54c9a668949c55598469ba /src/arch/arm64
parent     d3634c108d63d07ce004a66e3abb05e8da57d65b (diff)
download   coreboot-7dcf9d51e5ffadfcf8b5fceddcddb4e1d0a7db37.tar.xz
arm64: tegra132: tegra210: Remove old arm64/stage_entry.S
This patch removes the old arm64/stage_entry.S code that was too specific
to the Tegra SoC boot flow, and replaces it with code that hides the
peculiarities of switching to a different CPU/arch in ramstage in the
Tegra SoC directories.

BRANCH=None
BUG=None
TEST=Built Ryu and Smaug. !!!UNTESTED!!!

Change-Id: Ib3a0448b30ac9c7132581464573efd5e86e03698
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: http://review.coreboot.org/12078
Tested-by: build bot (Jenkins)
Reviewed-by: Paul Menzel <paulepanter@users.sourceforge.net>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Diffstat (limited to 'src/arch/arm64')
-rw-r--r--  src/arch/arm64/Kconfig                             1
-rw-r--r--  src/arch/arm64/Makefile.inc                       13
-rw-r--r--  src/arch/arm64/armv8/bootblock.S                  19
-rw-r--r--  src/arch/arm64/armv8/cpu.S                        15
-rw-r--r--  src/arch/arm64/armv8/exception.c                   1
-rw-r--r--  src/arch/arm64/boot.c                              2
-rw-r--r--  src/arch/arm64/c_entry.c                         128
-rw-r--r--  src/arch/arm64/cpu-internal.h                     25
-rw-r--r--  src/arch/arm64/cpu-stubs.c                        21
-rw-r--r--  src/arch/arm64/cpu/Kconfig                        19
-rw-r--r--  src/arch/arm64/cpu/Makefile.inc                   20
-rw-r--r--  src/arch/arm64/cpu/cortex_a57.S                   25
-rw-r--r--  src/arch/arm64/include/arch/memlayout.h            5
-rw-r--r--  src/arch/arm64/include/arch/stages.h               5
-rw-r--r--  src/arch/arm64/include/armv8/arch/cpu.h           12
-rw-r--r--  src/arch/arm64/include/armv8/arch/lib_helpers.h    8
-rw-r--r--  src/arch/arm64/transition.c                       41
17 files changed, 48 insertions(+), 312 deletions(-)
diff --git a/src/arch/arm64/Kconfig b/src/arch/arm64/Kconfig
index 9b06589f82..21bbc9b939 100644
--- a/src/arch/arm64/Kconfig
+++ b/src/arch/arm64/Kconfig
@@ -20,7 +20,6 @@ config ARCH_RAMSTAGE_ARM64
default n
source src/arch/arm64/armv8/Kconfig
-source src/arch/arm64/cpu/Kconfig
config ARM64_USE_ARM_TRUSTED_FIRMWARE
bool
diff --git a/src/arch/arm64/Makefile.inc b/src/arch/arm64/Makefile.inc
index 21fda04bda..1cbc9a407d 100644
--- a/src/arch/arm64/Makefile.inc
+++ b/src/arch/arm64/Makefile.inc
@@ -23,7 +23,7 @@
# Take care of subdirectories
################################################################################
-subdirs-y += armv8/ cpu/
+subdirs-y += armv8/
################################################################################
# ARM specific options
@@ -44,8 +44,6 @@ bootblock-y += id.S
$(obj)/arch/arm64/id.bootblock.o: $(obj)/build.h
bootblock-y += boot.c
-bootblock-y += c_entry.c
-bootblock-y += cpu-stubs.c
bootblock-y += eabi_compat.c
bootblock-y += transition.c transition_asm.S
@@ -87,8 +85,6 @@ endif # CONFIG_ARCH_VERSTAGE_ARM64
ifeq ($(CONFIG_ARCH_ROMSTAGE_ARM64),y)
romstage-y += boot.c
-romstage-y += c_entry.c
-romstage-y += cpu-stubs.c
romstage-y += div0.c
romstage-y += eabi_compat.c
romstage-y += memset.S
@@ -113,7 +109,6 @@ endif # CONFIG_ARCH_ROMSTAGE_ARM64
ifeq ($(CONFIG_ARCH_RAMSTAGE_ARM64),y)
-ramstage-y += c_entry.c
ramstage-y += div0.c
ramstage-y += eabi_compat.c
ramstage-y += boot.c
@@ -121,15 +116,9 @@ ramstage-y += tables.c
ramstage-y += memset.S
ramstage-y += memcpy.S
ramstage-y += memmove.S
-ramstage-y += cpu-stubs.c
ramstage-$(CONFIG_ARM64_USE_ARM_TRUSTED_FIRMWARE) += arm_tf.c
ramstage-y += transition.c transition_asm.S
-# TODO: Replace this with a simpler ramstage entry point in soc/nvidia/tegra*
-ifeq ($(CONFIG_SOC_NVIDIA_TEGRA132)$(CONFIG_SOC_NVIDIA_TEGRA210),y)
-ramstage-y += stage_entry.S
-endif
-
rmodules_arm64-y += memset.S
rmodules_arm64-y += memcpy.S
rmodules_arm64-y += memmove.S
diff --git a/src/arch/arm64/armv8/bootblock.S b/src/arch/arm64/armv8/bootblock.S
index 4a9fea9af6..e5758bca0d 100644
--- a/src/arch/arm64/armv8/bootblock.S
+++ b/src/arch/arm64/armv8/bootblock.S
@@ -16,21 +16,14 @@
#include <arch/asm.h>
+/* NOTE: When making changes to general ARM64 initialization, keep in mind that
+ * there are other CPU entry points, using BOOTBLOCK_CUSTOM or entering the CPU
+ * in a later stage (like Tegra). Changes should generally be put into
+ * arm64_cpu_init so they can be shared between those instances. */
+
ENTRY(_start)
- /* Initialize PSTATE, SCTLR and caches to clean state. */
+ /* Initialize PSTATE, SCTLR and caches to clean state, set up stack. */
bl arm64_init_cpu
- /* Initialize stack with sentinel value to later check overflow. */
- ldr x0, =_stack
- ldr x1, =_estack
- ldr x2, =0xdeadbeefdeadbeef
-stack_init_loop:
- stp x2, x2, [x0], #16
- cmp x0, x1
- bne stack_init_loop
-
- /* Leave a line of beef dead for easier visibility in stack dumps. */
- sub sp, x0, #16
-
bl main
ENDPROC(_start)
diff --git a/src/arch/arm64/armv8/cpu.S b/src/arch/arm64/armv8/cpu.S
index 4713ca59f9..711c338685 100644
--- a/src/arch/arm64/armv8/cpu.S
+++ b/src/arch/arm64/armv8/cpu.S
@@ -21,7 +21,8 @@
* Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
* known state regarding caches/SCTLR/PSTATE. Completely cleans and invalidates
* icache/dcache, disables MMU and dcache (if active), and enables unaligned
- * accesses, icache and branch prediction (if inactive). Clobbers R22 and R23.
+ * accesses, icache and branch prediction (if inactive). Seeds the stack and
+ * initializes SP_EL0. Clobbers R22 and R23.
*/
ENTRY(arm64_init_cpu)
/* Initialize PSTATE (unmask all exceptions, select SP_EL0). */
@@ -60,5 +61,17 @@ ENTRY(arm64_init_cpu)
dsb sy
isb
+ /* Initialize stack with sentinel value to later check overflow. */
+ ldr x2, =0xdeadbeefdeadbeef
+ ldr x0, =_stack
+ ldr x1, =_estack
+1:
+ stp x2, x2, [x0], #16
+ cmp x0, x1
+ bne 1b
+
+ /* Leave a line of beef dead for easier visibility in stack dumps. */
+ sub sp, x0, #16
+
ret x23
ENDPROC(arm64_init_cpu)
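
The sentinel seeded above can later be scanned from C to estimate stack usage and catch overflow. The sketch below is illustrative only: it assumes _stack/_estack are visible as linker-provided symbols (matching the names loaded in the assembly) and is not the checker coreboot actually ships.

/* Illustrative sketch: scan the 0xdeadbeefdeadbeef seed laid down by
 * arm64_init_cpu above to see how much of the stack was never touched.
 * The _stack/_estack declarations are assumed here. */
#include <stddef.h>
#include <stdint.h>
#include <console/console.h>

extern uint64_t _stack[], _estack[];	/* assumed linker-provided bounds */

static void check_stack_sentinel(void)
{
	const uint64_t seed = 0xdeadbeefdeadbeefULL;
	uint64_t *slot = _stack;	/* stack grows down, so scan from the bottom up */
	size_t untouched = 0;

	while (slot < _estack && *slot == seed) {
		untouched += sizeof(*slot);
		slot++;
	}

	if (untouched == 0)
		printk(BIOS_ERR, "Stack overflow: sentinel completely clobbered!\n");
	else
		printk(BIOS_DEBUG, "Stack had %zu untouched bytes left\n", untouched);
}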
diff --git a/src/arch/arm64/armv8/exception.c b/src/arch/arm64/armv8/exception.c
index 35e3f7fac6..b872a55a8a 100644
--- a/src/arch/arm64/armv8/exception.c
+++ b/src/arch/arm64/armv8/exception.c
@@ -166,7 +166,6 @@ void exc_dispatch(struct exc_state *state, uint64_t idx)
exc_exit(&state->regs);
}
-
static int test_exception_handler(struct exc_state *state, uint64_t vector_id)
{
/* Update instruction pointer to next instrution. */
diff --git a/src/arch/arm64/boot.c b/src/arch/arm64/boot.c
index 1012d97a0a..c996d503f5 100644
--- a/src/arch/arm64/boot.c
+++ b/src/arch/arm64/boot.c
@@ -72,10 +72,8 @@ void arch_prog_run(struct prog *prog)
doit(prog_entry_arg(prog));
}
-#if !IS_ENABLED(CONFIG_SOC_NVIDIA_TEGRA132)
/* Generic stage entry point. Can be overridden by board/SoC if needed. */
__attribute__((weak)) void stage_entry(void)
{
main();
}
-#endif
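
With the Tegra132 guard gone, stage_entry() above is an unconditional weak symbol, so an SoC that needs special entry handling simply defines its own. A minimal, hypothetical override is sketched below; it is not the actual Tegra code, and soc_switch_to_main_cpu() is a made-up helper.

/* Hypothetical SoC-side override of the weak stage_entry() above, e.g. for
 * an SoC that must hand off to a different core before running this stage. */
#include <arch/stages.h>

void main(void);	/* normally declared by a coreboot header */

static void soc_switch_to_main_cpu(void)
{
	/* SoC-specific hand-off to the CPU/arch that should run this stage. */
}

void stage_entry(void)
{
	soc_switch_to_main_cpu();
	main();
}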
diff --git a/src/arch/arm64/c_entry.c b/src/arch/arm64/c_entry.c
deleted file mode 100644
index f26ed926b1..0000000000
--- a/src/arch/arm64/c_entry.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright 2014 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <arch/cache.h>
-#include <arch/cpu.h>
-#include <arch/lib_helpers.h>
-#include <arch/mmu.h>
-#include <arch/stages.h>
-#include <gic.h>
-
-#include "cpu-internal.h"
-
-void __attribute__((weak)) arm64_soc_init(void)
-{
- /* Default weak implementation does nothing. */
-}
-
-static void seed_stack(void)
-{
- char *stack_begin;
- uint64_t *slot;
- int i;
- int size;
-
- stack_begin = cpu_get_stack();
- stack_begin -= CONFIG_STACK_SIZE;
- slot = (void *)stack_begin;
-
- /* Pad out 256 bytes for current usage. */
- size = CONFIG_STACK_SIZE - 256;
- size /= sizeof(*slot);
- for (i = 0; i < size; i++)
- *slot++ = 0xdeadbeefdeadbeefULL;
-}
-
-/* Set up default SCR values. */
-static void el3_init(void)
-{
- uint32_t scr;
-
- if (get_current_el() != EL3)
- return;
-
- scr = raw_read_scr_el3();
- /* Default to non-secure EL1 and EL0. */
- scr &= ~(SCR_NS_MASK);
- scr |= SCR_NS_ENABLE;
- /* Disable IRQ, FIQ, and external abort interrupt routing. */
- scr &= ~(SCR_IRQ_MASK | SCR_FIQ_MASK | SCR_EA_MASK);
- scr |= SCR_IRQ_DISABLE | SCR_FIQ_DISABLE | SCR_EA_DISABLE;
- /* Enable HVC */
- scr &= ~(SCR_HVC_MASK);
- scr |= SCR_HVC_ENABLE;
- /* Disable SMC */
- scr &= ~(SCR_SMC_MASK);
- scr |= SCR_SMC_DISABLE;
- /* Disable secure instruction fetches. */
- scr &= ~(SCR_SIF_MASK);
- scr |= SCR_SIF_DISABLE;
- /* All lower exception levels 64-bit by default. */
- scr &= ~(SCR_RW_MASK);
- scr |= SCR_LOWER_AARCH64;
- /* Disable secure EL1 access to secure timer. */
- scr &= ~(SCR_ST_MASK);
- scr |= SCR_ST_DISABLE;
- /* Don't trap on WFE or WFI instructions. */
- scr &= ~(SCR_TWI_MASK | SCR_TWE_MASK);
- scr |= SCR_TWI_DISABLE | SCR_TWE_DISABLE;
- raw_write_scr_el3(scr);
- isb();
-}
-
-void __attribute__((weak)) arm64_arch_timer_init(void)
-{
- /* Default weak implementation does nothing. */
-}
-
-static void arm64_init(void)
-{
- seed_stack();
-
- /* Set up default SCR values. */
- el3_init();
-
- /* Initialize the GIC. */
- gic_init();
-
- /*
- * Disable coprocessor traps to EL3:
- * TCPAC [20] = 0, disable traps for EL2 accesses to CPTR_EL2 or HCPTR
- * and EL2/EL1 access to CPACR_EL1.
- * TTA [20] = 0, disable traps for trace register access from any EL.
- * TFP [10] = 0, disable traps for floating-point instructions from any
- * EL.
- */
- raw_write_cptr_el3(CPTR_EL3_TCPAC_DISABLE | CPTR_EL3_TTA_DISABLE |
- CPTR_EL3_TFP_DISABLE);
-
- /*
- * Allow FPU accesses:
- * FPEN [21:20] = 3, disable traps for floating-point instructions from
- * EL0/EL1.
- * TTA [28] = 0, disable traps for trace register access from EL0/EL1.
- */
- raw_write_cpacr_el1(CPACR_TRAP_FP_DISABLE | CPACR_TTA_DISABLE);
-
- /* Arch Timer init: setup cntfrq per CPU */
- arm64_arch_timer_init();
-
- arm64_soc_init();
-
- main();
-}
-
-/* This variable holds entry point for CPU starting up. */
-void (*c_entry)(void) = &arm64_init;
diff --git a/src/arch/arm64/cpu-internal.h b/src/arch/arm64/cpu-internal.h
deleted file mode 100644
index 75d2d4deb6..0000000000
--- a/src/arch/arm64/cpu-internal.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright 2014 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef ARCH_CPU_INTERNAL_H
-#define ARCH_CPU_INTERNAL_H
-
-/* Return the top of the stack for the cpu. */
-void *cpu_get_stack(void);
-
-/* Return the top of the exception stack for the cpu. */
-void *cpu_get_exception_stack(void);
-
-#endif /* ARCH_CPU_INTERNAL_H */
diff --git a/src/arch/arm64/cpu-stubs.c b/src/arch/arm64/cpu-stubs.c
deleted file mode 100644
index 1a61872986..0000000000
--- a/src/arch/arm64/cpu-stubs.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright 2015 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-void arm64_cpu_early_setup(void);
-
-void __attribute__((weak)) arm64_cpu_early_setup(void)
-{
- /* Default empty implementation */
-}
diff --git a/src/arch/arm64/cpu/Kconfig b/src/arch/arm64/cpu/Kconfig
deleted file mode 100644
index 21526e6409..0000000000
--- a/src/arch/arm64/cpu/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-##
-## This file is part of the coreboot project.
-##
-## Copyright (C) 2015 Google Inc
-##
-## This program is free software; you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation; version 2 of the License.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-
-config ARCH_ARM64_CPU_CORTEX_A57
- bool
- default n
- depends on ARCH_ARM64
diff --git a/src/arch/arm64/cpu/Makefile.inc b/src/arch/arm64/cpu/Makefile.inc
deleted file mode 100644
index 4b249c7218..0000000000
--- a/src/arch/arm64/cpu/Makefile.inc
+++ /dev/null
@@ -1,20 +0,0 @@
-################################################################################
-##
-## This file is part of the coreboot project.
-##
-## Copyright 2015 Google Inc.
-##
-## This program is free software; you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation; version 2 of the License.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-################################################################################
-
-ifeq ($(CONFIG_ARCH_RAMSTAGE_ARM64),y)
-ramstage-$(CONFIG_ARCH_ARM64_CPU_CORTEX_A57) += cortex_a57.S
-endif
diff --git a/src/arch/arm64/cpu/cortex_a57.S b/src/arch/arm64/cpu/cortex_a57.S
deleted file mode 100644
index 80707caadf..0000000000
--- a/src/arch/arm64/cpu/cortex_a57.S
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright 2015 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <arch/asm.h>
-#include <cpu/cortex_a57.h>
-
-ENTRY(arm64_cpu_early_setup)
- mrs x0, CPUECTLR_EL1
- orr x0, x0, #(1 << SMPEN_SHIFT)
- msr CPUECTLR_EL1, x0
- isb
- ret
-ENDPROC(arm64_cpu_early_setup)
diff --git a/src/arch/arm64/include/arch/memlayout.h b/src/arch/arm64/include/arch/memlayout.h
index 6db67a9dee..0bd0835aa8 100644
--- a/src/arch/arm64/include/arch/memlayout.h
+++ b/src/arch/arm64/include/arch/memlayout.h
@@ -23,13 +23,8 @@
_ = ASSERT(size % 4K == 0, "TTB size must be divisible by 4K!");
/* ARM64 stacks need 16-byte alignment. */
-#if !(IS_ENABLED(CONFIG_SOC_NVIDIA_TEGRA132) || \
- IS_ENABLED(CONFIG_SOC_NVIDIA_TEGRA210))
#define STACK(addr, size) \
REGION(stack, addr, size, 16) \
_ = ASSERT(size >= 2K, "stack should be >= 2K, see toolchain.inc");
-#else /* Hack around old Tegra stage_entry.S implementation. TODO: remove */
-#define STACK(addr, size) REGION(preram_stack, addr, size, 16)
-#endif
#endif /* __ARCH_MEMLAYOUT_H */
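
With the preram_stack special case removed, every ARM64 memlayout.ld can use the same STACK() macro. A hypothetical fragment is shown below; the surrounding macros follow the usual memlayout.h conventions, and all addresses and sizes are made up for illustration.

#include <memlayout.h>
#include <arch/header.ld>

SECTIONS
{
	SRAM_START(0x40000000)
	BOOTBLOCK(0x40000000, 32K)
	STACK(0x40008000, 8K)	/* must be >= 2K per the assert above */
	SRAM_END(0x40040000)
}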
diff --git a/src/arch/arm64/include/arch/stages.h b/src/arch/arm64/include/arch/stages.h
index 0d6606f389..6dbf389001 100644
--- a/src/arch/arm64/include/arch/stages.h
+++ b/src/arch/arm64/include/arch/stages.h
@@ -24,9 +24,4 @@ void stage_entry(void);
void stage_exit(void *);
void jmp_to_elf_entry(void *entry, unsigned long buffer, unsigned long size);
-/* This function is called upon initial entry of each stage. It is called prior
- * to main(). That means all of the common infrastructure will most likely not
- * be available to be used (such as console). */
-void arm64_soc_init(void);
-
#endif
diff --git a/src/arch/arm64/include/armv8/arch/cpu.h b/src/arch/arm64/include/armv8/arch/cpu.h
index fdc34e5086..6e096cc8e6 100644
--- a/src/arch/arm64/include/armv8/arch/cpu.h
+++ b/src/arch/arm64/include/armv8/arch/cpu.h
@@ -20,18 +20,6 @@
static inline unsigned int smp_processor_id(void) { return 0; }
-/*
- * The arm64_cpu_startup() initializes CPU's exception stack and regular
- * stack as well initializing the C environment for the processor. Finally it
- * calls into c_entry.
- */
-void arm64_cpu_startup(void);
-
-/*
- * The arm64_arch_timer_init() initializes the CPU's cntfrq register of
- * ARM arch timer.
- */
-void arm64_arch_timer_init(void);
#if !defined(__PRE_RAM__)
struct cpu_driver { };
diff --git a/src/arch/arm64/include/armv8/arch/lib_helpers.h b/src/arch/arm64/include/armv8/arch/lib_helpers.h
index 6dd33b352f..51a754a8af 100644
--- a/src/arch/arm64/include/armv8/arch/lib_helpers.h
+++ b/src/arch/arm64/include/armv8/arch/lib_helpers.h
@@ -58,10 +58,10 @@
#define SCR_EA_MASK (1 << SCR_EA_SHIFT)
#define SCR_EA_ENABLE (1 << SCR_EA_SHIFT)
#define SCR_EA_DISABLE (0 << SCR_EA_SHIFT)
-#define SCR_SMC_SHIFT 7
-#define SCR_SMC_MASK (1 << SCR_SMC_SHIFT)
-#define SCR_SMC_DISABLE (1 << SCR_SMC_SHIFT)
-#define SCR_SMC_ENABLE (0 << SCR_SMC_SHIFT)
+#define SCR_SMD_SHIFT 7
+#define SCR_SMD_MASK (1 << SCR_SMD_SHIFT)
+#define SCR_SMD_DISABLE (1 << SCR_SMD_SHIFT)
+#define SCR_SMD_ENABLE (0 << SCR_SMD_SHIFT)
#define SCR_HVC_SHIFT 8
#define SCR_HVC_MASK (1 << SCR_HVC_SHIFT)
#define SCR_HVC_DISABLE (0 << SCR_HVC_SHIFT)
diff --git a/src/arch/arm64/transition.c b/src/arch/arm64/transition.c
index 3900421e8c..9edc011420 100644
--- a/src/arch/arm64/transition.c
+++ b/src/arch/arm64/transition.c
@@ -13,8 +13,10 @@
* GNU General Public License for more details.
*/
+#include <arch/cache.h>
#include <arch/lib_helpers.h>
#include <arch/transition.h>
+#include <assert.h>
#include <console/console.h>
/* Litte-endian, No XN-forced, Instr cache disabled,
@@ -66,8 +68,6 @@ void transition_with_entry(void *entry, void *arg, struct exc_state *exc_state)
void transition(struct exc_state *exc_state)
{
- uint32_t scr_mask;
- uint64_t hcr_mask;
uint64_t sctlr;
uint32_t current_el = get_current_el();
@@ -89,23 +89,27 @@ void transition(struct exc_state *exc_state)
if (elx->spsr & SPSR_ERET_32)
die("ARM64 Error: Do not support eret to Aarch32\n");
- else {
- scr_mask = SCR_LOWER_AARCH64;
- hcr_mask = HCR_LOWER_AARCH64;
- }
- /* SCR: Write to SCR if current EL is EL3 */
- if (current_el == EL3) {
- uint32_t scr = raw_read_scr_el3();
- scr |= scr_mask;
- raw_write_scr_el3(scr);
- }
- /* HCR: Write to HCR if current EL is EL2 */
- else if (current_el == EL2) {
- uint64_t hcr = raw_read_hcr_el2();
- hcr |= hcr_mask;
- raw_write_hcr_el2(hcr);
- }
+ /* Most parts of coreboot currently don't support EL2 anyway. */
+ assert(current_el == EL3);
+
+ /* Initialize SCR with defaults for running without secure monitor. */
+ raw_write_scr_el3(SCR_TWE_DISABLE | /* don't trap WFE */
+ SCR_TWI_DISABLE | /* don't trap WFI */
+ SCR_ST_ENABLE | /* allow secure timer access */
+ SCR_LOWER_AARCH64 | /* lower level is AArch64 */
+ SCR_SIF_DISABLE | /* disable secure ins. fetch */
+ SCR_HVC_ENABLE | /* allow HVC instruction */
+ SCR_SMD_ENABLE | /* disable SMC instruction */
+ SCR_RES1 | /* reserved-1 bits */
+ SCR_EA_DISABLE | /* disable ext. abort trap */
+ SCR_FIQ_DISABLE | /* disable FIQ trap to EL3 */
+ SCR_IRQ_DISABLE | /* disable IRQ trap to EL3 */
+ SCR_NS_ENABLE); /* lower level is non-secure */
+
+ /* Initialize CPTR to not trap anything to EL3. */
+ raw_write_cptr_el3(CPTR_EL3_TCPAC_DISABLE | CPTR_EL3_TTA_DISABLE |
+ CPTR_EL3_TFP_DISABLE);
/* ELR/SPSR: Write entry point and processor state of program */
raw_write_elr_current(elx->elr);
@@ -118,6 +122,7 @@ void transition(struct exc_state *exc_state)
/* SP_ELx: Initialize stack pointer */
raw_write_sp_elx(elx->sp_elx, elx_el);
+ isb();
/* Eret to the entry point */
trans_switch(regs);
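
For context, the rewritten transition() is normally reached through transition_with_entry(). A caller dropping from EL3 to a lower EL might look roughly like the sketch below; exc_state.elx.spsr, get_eret_el(), SPSR_USE_L and EL2 are assumed to match coreboot's transition/lib_helpers headers, so treat this as a sketch rather than a verbatim caller from this tree.

/* Hedged sketch: dropping from EL3 to EL2 through the transition API above. */
#include <string.h>
#include <arch/lib_helpers.h>
#include <arch/transition.h>

static void run_at_el2(void *entry, void *arg)
{
	struct exc_state exc_state;

	memset(&exc_state, 0, sizeof(exc_state));
	/* Build the target SPSR: run at EL2; SPSR_USE_L/SPSR_USE_H choose
	 * which stack pointer the lower EL starts on. */
	exc_state.elx.spsr = get_eret_el(EL2, SPSR_USE_L);

	/* transition_with_entry() is expected to set ELR to entry, pass arg
	 * in x0, apply the SCR/CPTR defaults set up in transition() above,
	 * and then eret to the lower EL. */
	transition_with_entry(entry, arg, &exc_state);
}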