Diffstat (limited to 'src/arch/arm/armv7')
-rw-r--r--   src/arch/arm/armv7/bootblock.S   23
-rw-r--r--   src/arch/arm/armv7/cpu.S          4
2 files changed, 25 insertions, 2 deletions
diff --git a/src/arch/arm/armv7/bootblock.S b/src/arch/arm/armv7/bootblock.S
index b468e15c98..57d72d0109 100644
--- a/src/arch/arm/armv7/bootblock.S
+++ b/src/arch/arm/armv7/bootblock.S
@@ -32,6 +32,16 @@
#include <arch/asm.h>
.arm
+ /*
+ * Just in case the maskrom or the vendor's basic firmware passes a
+ * parameter when calling the bootblock, store it here for handling by C
+ * code.
+ */
+ .section .bss, "aw" @nobits
+ .global maskrom_param
+maskrom_param:
+ .word 0
+
ENTRY(_start)
/*
* Set the cpu to System mode with IRQ and FIQ disabled. Prefetch/Data
@@ -45,6 +55,10 @@ ENDPROC(_start)
.thumb
ENTRY(_thumb_start)
+
+ /* Preserve the maskrom-passed value, if any */
+ mov r10, r0
+
bl arm_init_caches
/*
@@ -71,9 +85,14 @@ init_stack_loop:
cmp r0, r1
bne init_stack_loop
-/* Set stackpointer in internal RAM to call bootblock main() */
call_bootblock:
- ldr sp, =_estack /* Set up stack pointer */
+
+ /* Store the parameter passed in by the maskrom/vendor firmware. */
+ ldr r0, =maskrom_param
+ str r10, [r0]
+
+ /* Set stackpointer in internal RAM to call bootblock main() */
+ ldr sp, =_estack
ldr r0,=0x00000000
/*
* The current design of cpu_info places the struct at the top of the
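
The word reserved in .bss by the first hunk only becomes useful once C code can reach it. A minimal sketch of a C-side consumer, not part of this change (the accessor name and the uint32_t type are assumptions; only the maskrom_param symbol comes from the patch):

#include <stdint.h>

/* Word reserved in .bss by bootblock.S; holds whatever value the maskrom
 * or vendor firmware left in r0 when it entered the bootblock. */
extern uint32_t maskrom_param;

/* Hypothetical accessor: hand the stashed value to whatever C code needs it. */
static inline uint32_t get_maskrom_param(void)
{
	return maskrom_param;
}
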
diff --git a/src/arch/arm/armv7/cpu.S b/src/arch/arm/armv7/cpu.S
index b7f245b85d..6c00f62582 100644
--- a/src/arch/arm/armv7/cpu.S
+++ b/src/arch/arm/armv7/cpu.S
@@ -43,6 +43,8 @@
* through the magic of double subtraction. It's quite ingenious, really.
* Takes care to only use r0-r3 and ip so it's perfectly ABI-compatible without
* needing to write to memory.
+ *
+ * THIS FUNCTION MUST PRESERVE THE VALUE OF r10
*/
.macro dcache_apply_all crm
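
For readers unfamiliar with clean/invalidate by set/way, here is a plain C sketch of how the operand fed to DCCSW/DCCISW is built. The cache geometry (32 KiB, 4-way, 64-byte lines at level 1) is assumed purely for illustration; real code derives it from CCSIDR after selecting the level in CSSELR, and this sketch does not reproduce the double-subtraction trick the macro itself uses. __builtin_clz is the GCC/Clang builtin.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned level = 0;	/* cache level index, 0 == L1 */
	const unsigned ways = 4;	/* associativity (assumed) */
	const unsigned sets = 128;	/* 32 KiB / (4 ways * 64-byte lines) */
	const unsigned line_shift = 6;	/* log2(64-byte line) */
	/* Ways occupy the top bits of the operand: shift = 32 - log2(ways). */
	const unsigned way_shift = __builtin_clz(ways - 1);

	for (unsigned way = 0; way < ways; way++)
		for (unsigned set = 0; set < sets; set++) {
			uint32_t op = (way << way_shift) |
				      (set << line_shift) |
				      (level << 1);
			/* The assembly hands this value to the maintenance op,
			 * e.g. DCCISW: mcr p15, 0, <op>, c7, c14, 2 */
			if (set == 0)	/* print one sample operand per way */
				printf("way %u, set 0 -> 0x%08x\n", way, (unsigned)op);
		}
	return 0;
}
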
@@ -109,6 +111,8 @@
* known state regarding caches/SCTLR. Completely cleans and invalidates
* icache/dcache, disables MMU and dcache (if active), and enables unaligned
* accesses, icache and branch prediction (if inactive). Clobbers r4 and r5.
+ *
+ * THIS FUNCTION MUST PRESERVE THE VALUE OF r10
*/
ENTRY(arm_init_caches)
/* r4: SCTLR, return address: r5 (stay valid for the whole function) */
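
To make the comment above concrete, a rough C rendering of the SCTLR state arm_init_caches is described as establishing: MMU, dcache and strict alignment checking off, icache and branch prediction on. The bit positions are architectural ARMv7 SCTLR bits; the helper name is made up, and the actual register access still happens via mrc/mcr p15, 0, <r>, c1, c0, 0 in assembly.

#include <stdint.h>

/* ARMv7 SCTLR bit positions (architectural). */
#define SCTLR_M		(1u << 0)	/* MMU enable */
#define SCTLR_A		(1u << 1)	/* strict alignment checking */
#define SCTLR_C		(1u << 2)	/* data/unified cache enable */
#define SCTLR_Z		(1u << 11)	/* branch prediction enable */
#define SCTLR_I		(1u << 12)	/* instruction cache enable */

/* Hypothetical helper: compute the SCTLR value described in the comment
 * above from the current one. */
static inline uint32_t sctlr_for_early_bootblock(uint32_t sctlr)
{
	sctlr &= ~(SCTLR_M | SCTLR_C | SCTLR_A);	/* MMU, dcache, alignment checks off */
	sctlr |= SCTLR_I | SCTLR_Z;			/* icache, branch prediction on */
	return sctlr;
}
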