Diffstat (limited to 'src/arch/arm64')
 -rw-r--r--  src/arch/arm64/Makefile.inc                |  2
 -rw-r--r--  src/arch/arm64/armv8/secmon/secmon_init.c  | 40
 -rw-r--r--  src/arch/arm64/c_entry.c                   | 17
 -rw-r--r--  src/arch/arm64/cpu_ramstage.c              | 14
 -rw-r--r--  src/arch/arm64/include/arch/startup.h      | 46
 -rw-r--r--  src/arch/arm64/include/armv8/arch/cpu.h    | 11
 -rw-r--r--  src/arch/arm64/stage_entry.S               | 78
 -rw-r--r--  src/arch/arm64/startup.c                   | 54
 8 files changed, 201 insertions(+), 61 deletions(-)
diff --git a/src/arch/arm64/Makefile.inc b/src/arch/arm64/Makefile.inc
index bb3cab57b9..f81e76ac3d 100644
--- a/src/arch/arm64/Makefile.inc
+++ b/src/arch/arm64/Makefile.inc
@@ -138,6 +138,7 @@ ifeq ($(CONFIG_ARCH_RAMSTAGE_ARM64),y)
ramstage-y += c_entry.c
ramstage-y += stages.c
+ramstage-y += startup.c
ramstage-y += div0.c
ramstage-y += cpu.c
ramstage-y += cpu_ramstage.c
@@ -157,6 +158,7 @@ rmodules_arm64-y += ../../lib/memmove.c
rmodules_arm64-y += eabi_compat.c
secmon-$(CONFIG_ARCH_USE_SECURE_MONITOR) += stage_entry.S
+secmon-$(CONFIG_ARCH_USE_SECURE_MONITOR) += startup.c
secmon-$(CONFIG_ARCH_USE_SECURE_MONITOR) += ../../lib/malloc.c
secmon-$(CONFIG_ARCH_USE_SECURE_MONITOR) += ../../lib/memset.c
secmon-$(CONFIG_ARCH_USE_SECURE_MONITOR) += ../../lib/memcmp.c
diff --git a/src/arch/arm64/armv8/secmon/secmon_init.c b/src/arch/arm64/armv8/secmon/secmon_init.c
index 63bd4907d9..7d7f8bb811 100644
--- a/src/arch/arm64/armv8/secmon/secmon_init.c
+++ b/src/arch/arm64/armv8/secmon/secmon_init.c
@@ -27,20 +27,11 @@
#include <arch/psci.h>
#include <arch/secmon.h>
#include <arch/smc.h>
+#include <arch/startup.h>
#include <console/console.h>
#include <stddef.h>
#include "secmon.h"
-/* Common CPU state for all CPUs running in secmon. */
-struct cpu_resume_data {
- uint64_t mair;
- uint64_t tcr;
- uint64_t ttbr0;
- uint64_t scr;
-};
-
-static struct cpu_resume_data resume_data;
-
static void secmon_init(struct secmon_params *params, int bsp);
static void secmon_init_bsp(void *arg)
@@ -61,27 +52,6 @@ void (*c_entry[2])(void *) = { &secmon_init_bsp, &secmon_init_nonbsp };
static void cpu_resume(void *unused)
{
- uint32_t sctlr;
-
- /* Re-enable exception vector. */
- exception_hwinit();
-
- tlbiall_el3();
- raw_write_mair_el3(resume_data.mair);
- raw_write_tcr_el3(resume_data.tcr);
- raw_write_ttbr0_el3(resume_data.ttbr0);
- dsb();
- isb();
-
- /* Enable MMU */
- sctlr = raw_read_sctlr_el3();
- sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
- raw_write_sctlr_el3(sctlr);
- isb();
-
- raw_write_scr_el3(resume_data.scr);
- isb();
-
psci_cpu_entry();
}
@@ -92,11 +62,7 @@ static void cpu_resume_init(void)
dcache_clean_by_mva(&c_entry, sizeof(c_entry));
/* Back up state. */
- resume_data.mair = raw_read_mair_el3();
- resume_data.tcr = raw_read_tcr_el3();
- resume_data.ttbr0 = raw_read_ttbr0_el3();
- resume_data.scr = raw_read_scr_el3();
- dcache_clean_by_mva(&resume_data, sizeof(resume_data));
+ startup_save_cpu_data();
}
static void start_up_cpu(void *arg)
@@ -150,7 +116,7 @@ static void secmon_init(struct secmon_params *params, int bsp)
wait_for_all_cpus(params->online_cpus);
smc_init();
- psci_init((uintptr_t)arm64_cpu_startup);
+ psci_init((uintptr_t)arm64_cpu_startup_resume);
/* Initialize the resume path. */
cpu_resume_init();
diff --git a/src/arch/arm64/c_entry.c b/src/arch/arm64/c_entry.c
index de8ebcdf0f..bf7a3358ce 100644
--- a/src/arch/arm64/c_entry.c
+++ b/src/arch/arm64/c_entry.c
@@ -19,9 +19,9 @@
#include <arch/cache.h>
#include <arch/cpu.h>
-#include <arch/exception.h>
#include <arch/mmu.h>
#include <arch/stages.h>
+#include <arch/startup.h>
#include "cpu-internal.h"
void __attribute__((weak)) arm64_soc_init(void)
@@ -55,22 +55,15 @@ static void arm64_init(void)
main();
}
-static void secondary_cpu_start(void)
-{
- mmu_enable();
- exception_hwinit();
-
- /* This will never return. */
- arch_secondary_cpu_init();
-}
-
/*
* This variable holds entry point for CPUs starting up. The first
* element is the BSP path, and the second is the non-BSP path.
*/
-void (*c_entry[2])(void) = { &arm64_init, &secondary_cpu_start };
+void (*c_entry[2])(void) = { &arm64_init, &arch_secondary_cpu_init };
void *prepare_secondary_cpu_startup(void)
{
- return secondary_entry_point(&arm64_cpu_startup);
+ startup_save_cpu_data();
+
+ return secondary_entry_point(&arm64_cpu_startup_resume);
}
diff --git a/src/arch/arm64/cpu_ramstage.c b/src/arch/arm64/cpu_ramstage.c
index ec1ac0f0e4..e6d0e8cee2 100644
--- a/src/arch/arm64/cpu_ramstage.c
+++ b/src/arch/arm64/cpu_ramstage.c
@@ -109,8 +109,6 @@ static void init_this_cpu(void *arg)
printk(BIOS_DEBUG, "CPU%x: MPIDR: %llx\n", ci->id, ci->mpidr);
- el3_init();
-
/* Initialize the GIC. */
gic_init();
@@ -151,9 +149,6 @@ static void init_cpu_info(struct bus *bus)
ci->cpu = cur;
ci->id = cur->path.cpu.id;
}
-
- /* Mark current cpu online. */
- cpu_mark_online(cpu_info());
}
static void invalidate_cpu_stack_top(unsigned int id)
@@ -184,6 +179,15 @@ void arch_initialize_cpus(device_t cluster, struct cpu_control_ops *cntrl_ops)
if (bus == NULL)
return;
+ /*
+ * el3_init must be performed prior to prepare_secondary_cpu_startup.
+ * This is important since el3_init initializes the SCR value on the BSP
+ * CPU, and prepare_secondary_cpu_startup then reads that initialized SCR
+ * value and saves it for use by the non-BSP CPUs.
+ */
+ el3_init();
+ /* Mark current cpu online. */
+ cpu_mark_online(cpu_info());
entry = prepare_secondary_cpu_startup();
/* Initialize the cpu_info structures. */
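
The ordering comment above boils down to a short dependency chain. A minimal
sketch, assuming el3_init(), cpu_info(), cpu_mark_online() and
prepare_secondary_cpu_startup() are visible through <arch/cpu.h> and
"cpu-internal.h" as in this file; the wrapper name is hypothetical:

#include <arch/cpu.h>
#include "cpu-internal.h"

static void *bring_up_order_sketch(void)
{
	/* Program SCR (and the rest of the EL3 state) on the BSP first ... */
	el3_init();

	/* ... account for the BSP ... */
	cpu_mark_online(cpu_info());

	/* ... then snapshot the now-initialized registers, including SCR,
	   so non-BSP CPUs can restore them in arm64_cpu_startup_resume. */
	return prepare_secondary_cpu_startup();
}
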
diff --git a/src/arch/arm64/include/arch/startup.h b/src/arch/arm64/include/arch/startup.h
new file mode 100644
index 0000000000..bb3a863754
--- /dev/null
+++ b/src/arch/arm64/include/arch/startup.h
@@ -0,0 +1,46 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2014 Google Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#ifndef __ARCH_ARM64_INCLUDE_ARCH_STARTUP_H__
+#define __ARCH_ARM64_INCLUDE_ARCH_STARTUP_H__
+
+/* Every element occupies 8 bytes (64-bit entries) */
+#define PER_ELEMENT_SIZE_BYTES 8
+#define MAIR_INDEX 0
+#define TCR_INDEX 1
+#define TTBR0_INDEX 2
+#define SCR_INDEX 3
+#define VBAR_INDEX 4
+/* IMPORTANT!!! If any new element is added, please update NUM_ELEMENTS. */
+#define NUM_ELEMENTS 5
+
+#ifndef __ASSEMBLY__
+
+/*
+ * startup_save_cpu_data is used to save register values that need to be set up
+ * when a CPU starts booting. This is used by secondary CPUs as well as by the
+ * resume path to directly set up the MMU and other related registers.
+ */
+void startup_save_cpu_data(void);
+
+#endif
+
+#endif /* __ARCH_ARM64_INCLUDE_ARCH_STARTUP_H__ */
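
The header is shared between C and assembly, so the index macros double as a
layout contract: element i lives at byte offset i * PER_ELEMENT_SIZE_BYTES in
the _arm64_startup_data region reserved in stage_entry.S. A minimal sketch of
a reader; read_element() is a hypothetical mirror of save_element() in
startup.c, not part of this patch:

#include <stddef.h>
#include <stdint.h>
#include <arch/startup.h>

extern uint8_t _arm64_startup_data[];

static inline uint64_t read_element(size_t index)
{
	/* Same address the get_element_addr assembly macro computes:
	   base + index * PER_ELEMENT_SIZE_BYTES. */
	return ((uint64_t *)_arm64_startup_data)[index];
}

/* e.g. read_element(TCR_INDEX) returns the TCR value the BSP saved. */
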
diff --git a/src/arch/arm64/include/armv8/arch/cpu.h b/src/arch/arm64/include/armv8/arch/cpu.h
index d968a06fe4..34220d9792 100644
--- a/src/arch/arm64/include/armv8/arch/cpu.h
+++ b/src/arch/arm64/include/armv8/arch/cpu.h
@@ -170,4 +170,15 @@ static inline void *secondary_entry_point(void *e)
*/
void arm64_cpu_startup(void);
+/*
+ * arm64_cpu_startup_resume() initializes a CPU's exception stack and regular
+ * stack, as well as the C environment for the processor. It calls into the
+ * array of function pointers at symbol c_entry depending on BSP state. Note
+ * that arm64_cpu_startup_resume contains a secondary entry point which can be
+ * obtained via secondary_entry_point(). Additionally, it restores the saved
+ * register data and enables the MMU, caches and exceptions before jumping to
+ * the C environment for both BSP and non-BSP CPUs.
+ */
+void arm64_cpu_startup_resume(void);
+
#endif /* __ARCH_CPU_H__ */
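
Both consumers of the new entry point appear elsewhere in this change (see
secmon_init.c and c_entry.c above); the sketch below only makes the
BSP/non-BSP split explicit and is not new API. The wrapper name is
hypothetical:

#include <stdint.h>
#include <arch/cpu.h>
#include <arch/psci.h>

static void *register_resume_entry_sketch(void)
{
	/* A resuming (BSP-path) CPU enters at the function itself, as
	   secmon_init.c does when registering the PSCI entry point. */
	psci_init((uintptr_t)arm64_cpu_startup_resume);

	/* Secondary CPUs enter at the offset non-BSP path, as c_entry.c
	   does in prepare_secondary_cpu_startup(). */
	return secondary_entry_point(&arm64_cpu_startup_resume);
}
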
diff --git a/src/arch/arm64/stage_entry.S b/src/arch/arm64/stage_entry.S
index 5a5ddabb51..fa2064e708 100644
--- a/src/arch/arm64/stage_entry.S
+++ b/src/arch/arm64/stage_entry.S
@@ -29,6 +29,7 @@
#include <arch/asm.h>
#define __ASSEMBLY__
#include <arch/lib_helpers.h>
+#include <arch/startup.h>
#define STACK_SZ CONFIG_STACK_SIZE
#define EXCEPTION_STACK_SZ CONFIG_STACK_SIZE
@@ -38,6 +39,12 @@
* according to MAX_CPUS. Additionally provide exception stacks for each CPU.
*/
.section .bss, "aw", @nobits
+
+.global _arm64_startup_data
+.balign 8
+_arm64_startup_data:
+.space NUM_ELEMENTS*PER_ELEMENT_SIZE_BYTES
+
.global _stack
.global _estack
.balign STACK_SZ
@@ -136,23 +143,80 @@ ENTRY(__rmodule_entry)
b arm64_c_environment
ENDPROC(__rmodule_entry)
-ENTRY(_arm64_cpu_startup)
+/*
+ * Set up SCTLR so that:
+ * little-endian mode is selected, XN is not enforced, and the MMU and caches
+ * are disabled. Alignment and stack alignment checks are also disabled.
+ */
+.macro setup_sctlr
read_current x0, sctlr
bic x0, x0, #(1 << 25) /* Little Endian */
bic x0, x0, #(1 << 19) /* XN not enforced */
bic x0, x0, #(1 << 12) /* Disable Instruction Cache */
- bic x0, x0, #0xf /* Clear SA, C, A, and M */
+ bic x0, x0, #0xf /* Clear SA, C, A and M */
+ write_current sctlr, x0, x1
+.endm
+
+/*
+ * This macro assumes x2 holds the base address and returns the value read
+ * in x0. x1 is used as a temporary register.
+ */
+.macro get_element_addr index
+ add x1, x2, #(\index * PER_ELEMENT_SIZE_BYTES)
+ ldr x0, [x1]
+.endm
+
+/*
+ * Uses the following registers:
+ * x0 = value read from the saved data region
+ * x1 = temporary register
+ * x2 = base address of the saved data region
+ */
+.macro startup_restore
+ adr x2, _arm64_startup_data
+
+ get_element_addr MAIR_INDEX
+ write_current mair, x0, x1
+
+ get_element_addr TCR_INDEX
+ write_current tcr, x0, x1
+
+ get_element_addr TTBR0_INDEX
+ write_current ttbr0, x0, x1
+
+ get_element_addr SCR_INDEX
+ write_el3 scr, x0, x1
+
+ get_element_addr VBAR_INDEX
+ write_current vbar, x0, x1
+
+ dsb sy
+ isb
+
+ tlbiall_current x1
+ read_current x0, sctlr
+ orr x0, x0, #(1 << 12) /* Enable Instruction Cache */
+ orr x0, x0, #(1 << 2) /* Enable Data/Unified Cache */
+ orr x0, x0, #(1 << 0) /* Enable MMU */
write_current sctlr, x0, x1
+
+ dsb sy
isb
- b arm64_c_environment
-ENDPROC(_arm64_cpu_startup)
+.endm
CPU_RESET_ENTRY(arm64_cpu_startup)
split_bsp_path
- b _arm64_cpu_startup
+ setup_sctlr
+ b arm64_c_environment
ENDPROC(arm64_cpu_startup)
-ENTRY(stage_entry)
+CPU_RESET_ENTRY(arm64_cpu_startup_resume)
split_bsp_path
- b _arm64_cpu_startup
+ setup_sctlr
+ startup_restore
+ b arm64_c_environment
+ENDPROC(arm64_cpu_startup_resume)
+
+ENTRY(stage_entry)
+ b arm64_cpu_startup
ENDPROC(stage_entry)
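
For reference, startup_restore is the assembly counterpart of the register
restore that used to live in secmon_init.c's cpu_resume(). Below is a rough C
rendering of the EL3 case, using the accessors the removed code used
(raw_write_vbar_el3() is assumed to exist alongside the other lib_helpers
accessors); the real code must run in assembly, before any stack or C
environment exists:

#include <stdint.h>
#include <arch/cache.h>
#include <arch/lib_helpers.h>
#include <arch/startup.h>

extern uint8_t _arm64_startup_data[];

static void startup_restore_el3_sketch(void)
{
	uint64_t *data = (uint64_t *)_arm64_startup_data;
	uint32_t sctlr;

	/* Restore the saved translation and exception state. */
	raw_write_mair_el3(data[MAIR_INDEX]);
	raw_write_tcr_el3(data[TCR_INDEX]);
	raw_write_ttbr0_el3(data[TTBR0_INDEX]);
	raw_write_scr_el3(data[SCR_INDEX]);
	raw_write_vbar_el3(data[VBAR_INDEX]);
	dsb();
	isb();

	tlbiall_el3();

	/* Enable the MMU plus data and instruction caches. */
	sctlr = raw_read_sctlr_el3();
	sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
	raw_write_sctlr_el3(sctlr);
	dsb();
	isb();
}
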
diff --git a/src/arch/arm64/startup.c b/src/arch/arm64/startup.c
new file mode 100644
index 0000000000..d2b76913d2
--- /dev/null
+++ b/src/arch/arm64/startup.c
@@ -0,0 +1,54 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2014 Google Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include <arch/cache.h>
+#include <arch/lib_helpers.h>
+#include <arch/startup.h>
+#include <console/console.h>
+
+/* This space is defined in stage_entry.S. */
+extern u8 _arm64_startup_data[];
+
+static inline void save_element(size_t index, uint64_t val)
+{
+ uint64_t *ptr = (uint64_t *)_arm64_startup_data;
+
+ ptr[index] = val;
+}
+
+/*
+ * startup_save_cpu_data is used to save register values that need to be set up
+ * when a CPU starts booting. This is used by secondary CPUs as well as by the
+ * resume path to directly set up the MMU and other related registers.
+ */
+void startup_save_cpu_data(void)
+{
+ save_element(MAIR_INDEX, raw_read_mair_current());
+ save_element(TCR_INDEX, raw_read_tcr_current());
+ save_element(TTBR0_INDEX, raw_read_ttbr0_current());
+ save_element(VBAR_INDEX, raw_read_vbar_current());
+
+ if (get_current_el() == EL3)
+ save_element(SCR_INDEX, raw_read_scr_el3());
+
+ dcache_clean_by_mva(_arm64_startup_data,
+ NUM_ELEMENTS * PER_ELEMENT_SIZE_BYTES);
+}
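
The save runs with the MMU and caches on, while the restore path reads the
region with both off (hence the dcache_clean_by_mva above), so mismatches are
painful to debug. A hypothetical helper, not part of the patch, that dumps the
saved elements for cross-checking against the restore in stage_entry.S:

#include <stdint.h>
#include <console/console.h>
#include <arch/startup.h>

extern uint8_t _arm64_startup_data[];

static void startup_dump_cpu_data(void)
{
	uint64_t *ptr = (uint64_t *)_arm64_startup_data;

	printk(BIOS_DEBUG,
	       "startup data: mair=%llx tcr=%llx ttbr0=%llx scr=%llx vbar=%llx\n",
	       ptr[MAIR_INDEX], ptr[TCR_INDEX], ptr[TTBR0_INDEX],
	       ptr[SCR_INDEX], ptr[VBAR_INDEX]);
}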