path: root/src/arch/arm64/armv8/cpu.S
author:    Furquan Shaikh <furquan@google.com>    2014-04-28 16:39:40 -0700
committer: Isaac Christensen <isaac.christensen@se-eng.com>    2014-09-23 18:10:32 +0200
commit:    2af76f4bdc81df699bad55f65335ff518381d7dd (patch)
tree:      5b022587d2179837c73ecb7001bf86726a823373 /src/arch/arm64/armv8/cpu.S
parent:    804702602c017f9aebb66f409f8ed9a5d9200a4e (diff)
download:  coreboot-2af76f4bdc81df699bad55f65335ff518381d7dd.tar.xz
coreboot arm64: Add support for arm64 into coreboot framework
Add support for enabling different coreboot stages (bootblock, romstage and
ramstage) to have arm64 architecture. Most of the files have been copied
over from arm/ or arm64-generic work.

Signed-off-by: Furquan Shaikh <furquan@google.com>
Reviewed-on: https://chromium-review.googlesource.com/197397
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Commit-Queue: Furquan Shaikh <furquan@chromium.org>
Tested-by: Furquan Shaikh <furquan@chromium.org>
(cherry picked from commit 033ba96516805502673ac7404bc97e6ce4e2a934)

This patch is essentially a squash of aarch64 changes made by these patches:

d955885 coreboot: Rename coreboot_ram stage to ramstage
a492761 cbmem console: Locate the preram console with a symbol instead of a section
96e7f0e aarch64: Enable early icache and migrate SCTLR from EL3
3f854dc aarch64: Pass coreboot table in jmp_to_elf_entry
ab3ecaf aarch64/foundation-armv8: Set up RAM area and enter ramstage
25fd2e9 aarch64: Remove CAR definitions from early_variables.h
65bf77d aarch64/foundation-armv8: Enable DYNAMIC_CBMEM
9484873 aarch64: Change default exception level to EL2
7a152c3 aarch64: Fix formatting of exception registers dump
6946464 aarch64: Implement basic exception handling
c732a9d aarch64/foundation-armv8: Basic bootblock implementation
3bc412c aarch64: Comment out some parts of code to allow build
ab5be71 Add initial aarch64 support

The ramstage support is the only portion that has been tested on actual
hardware. Bootblock and romstage support may require modifications to run
on hardware.

Change-Id: Icd59bec55c963a471a50e30972a8092e4c9d2fb2
Signed-off-by: Isaac Christensen <isaac.christensen@se-eng.com>
Reviewed-on: http://review.coreboot.org/6915
Tested-by: build bot (Jenkins)
Reviewed-by: Edward O'Callaghan <eocallaghan@alterapraxis.com>
Reviewed-by: Furquan Shaikh <furquan@google.com>
Diffstat (limited to 'src/arch/arm64/armv8/cpu.S')
-rw-r--r--    src/arch/arm64/armv8/cpu.S    131
1 file changed, 131 insertions(+), 0 deletions(-)
diff --git a/src/arch/arm64/armv8/cpu.S b/src/arch/arm64/armv8/cpu.S
new file mode 100644
index 0000000000..1eecc2b0d7
--- /dev/null
+++ b/src/arch/arm64/armv8/cpu.S
@@ -0,0 +1,131 @@
+/*
+ * Based on arch/arm/include/asm/cacheflush.h
+ *
+ * Copyright (C) 1999-2002 Russell King.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <arch/asm.h>
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ *
+ * Corrupted registers: x0-x7, x9-x11
+ * From: Linux arch/arm64/mm/cache.S
+ */
+ENTRY(flush_dcache_all)
+ dsb sy // ensure ordering with previous memory accesses
+ mrs x0, clidr_el1 // read clidr
+ and x3, x0, #0x7000000 // extract loc from clidr
+ lsr x3, x3, #23 // shift loc down to bit 1 (loc * 2, matching the level counter in x10)
+ cbz x3, finished // if loc is 0, then no need to clean
+ mov x10, #0 // start clean at cache level 0
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask of the bits for current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lt skip // skip if no cache, or just i-cache
+ mrs x9, daif // save DAIF flags
+ msr daifset, #2 // mask IRQs: make CSSELR and CCSIDR access atomic
+ msr csselr_el1, x10 // select current cache level in csselr
+ isb // isb to sync the new csselr & ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ msr daif, x9 // restore DAIF flags
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ mov x4, #0x3ff
+ and x4, x4, x1, lsr #3 // find maximum way number (ways - 1)
+ clz w5, w4 // find bit position of way size increment (32-bit clz, so the way index lands in bits [31:32-A])
+ mov x7, #0x7fff
+ and x7, x7, x1, lsr #13 // extract maximum set index (sets - 1)
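+ // Each dc cisw operand below is composed as
+ //   x11 = (way << x5) | (set << x2) | x10
+ // where x10 = level << 1, x5 left-aligns the way index and
+ // x2 = log2(line size in bytes) positions the set index.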
+loop2:
+ mov x9, x4 // create working copy of max way size
+loop3:
+ lsl x6, x9, x5
+ orr x11, x10, x6 // factor way and cache number into x11
+ lsl x6, x7, x2
+ orr x11, x11, x6 // factor index number into x11
+ dc cisw, x11 // clean & invalidate by set/way
+ subs x9, x9, #1 // decrement the way
+ b.ge loop3
+ subs x7, x7, #1 // decrement the index
+ b.ge loop2
+skip:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.gt loop1
+finished:
+ mov x10, #0 // switch back to cache level 0
+ msr csselr_el1, x10 // select current cache level in csselr
+ dsb sy
+ isb
+ ret
+ENDPROC(flush_dcache_all)
+
+/*
+ * Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
+ * known state regarding caches/SCTLR. Completely cleans and invalidates
+ * icache/dcache, disables MMU and dcache (if active), and enables unaligned
+ * accesses, icache and branch prediction (if inactive). Clobbers x4 and x5.
+ */
+ENTRY(arm_init_caches)
+ /* x4 (w4): SCTLR value, x8: return address (both stay valid for the whole function) */
+ mov x8, x30
+ /* XXX: Assume that we always start running at EL3 */
+ mrs x4, sctlr_el3
+
+ /* FIXME: How to enable branch prediction on ARMv8? */
+
+ /* Flush and invalidate dcache */
+ bl flush_dcache_all
+
+ /* Deactivate MMU (0), Alignment Check (1) and DCache (2) */
+ and x4, x4, # ~(1 << 0) & ~(1 << 1) & ~(1 << 2)
+ /* Activate ICache (12) already for speed */
+ orr x4, x4, #(1 << 12)
+ msr sctlr_el3, x4
+
+ /* Invalidate icache and TLB for good measure */
+ ic iallu
+ tlbi alle3
+ dsb sy
+ isb
+
+ ret x8
+ENDPROC(arm_init_caches)
+
+/* Based on u-boot transition.S */
+ENTRY(switch_el3_to_el2)
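+ /*
+  * 0x5b1 = NS (bit 0) | RES1 (bits 5:4) | SMD (bit 7) |
+  *         HCE (bit 8, enable HVC) | RW (bit 10, lower ELs are AArch64)
+  */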
+ mov x0, #0x5b1 /* Non-secure EL0/EL1 | HVC | 64bit EL2 */
+ msr scr_el3, x0
+ msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */
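+ /*
+  * 0x33ff sets the CPTR_EL2 RES1 bits ([13:12] and [9:0]) and leaves
+  * TFP (bit 10) clear, so FP/SIMD accesses are not trapped to EL2.
+  */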
+ mov x0, #0x33ff
+ msr cptr_el2, x0 /* Disable coprocessor traps to EL2 */
+
+ /* Return to the EL2_SP2 mode from EL3 */
+ mov x0, sp
+ msr sp_el2, x0 /* Migrate SP */
+ mrs x0, vbar_el3
+ msr vbar_el2, x0 /* Migrate VBAR */
+ mrs x0, sctlr_el3
+ msr sctlr_el2, x0 /* Migrate SCTLR */
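+ /* 0x3c9: M[3:0] = 0b1001 (EL2h, i.e. EL2 using SP_EL2) with D, A, I, F masked */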
+ mov x0, #0x3c9
+ msr spsr_el3, x0 /* EL2_SP2 | D | A | I | F */
+ msr elr_el3, x30
+ eret
+ENDPROC(switch_el3_to_el2)
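
For reference, the set/way walk in flush_dcache_all above corresponds to the
C sketch below. This is an illustrative restatement only, not part of the
commit: flush_dcache_all_sketch and its read_clidr/read_ccsidr/write_csselr/
dc_cisw helpers are hypothetical names wrapping the mrs/msr/dc instructions,
and the DAIF save/restore around the CSSELR/CCSIDR access is omitted.

	#include <stdint.h>

	/* Hypothetical wrappers for the system-register instructions used above. */
	static inline uint64_t read_clidr(void)
	{
		uint64_t v;
		__asm__ volatile("mrs %0, clidr_el1" : "=r"(v));
		return v;
	}

	static inline uint64_t read_ccsidr(void)
	{
		uint64_t v;
		__asm__ volatile("mrs %0, ccsidr_el1" : "=r"(v));
		return v;
	}

	static inline void write_csselr(uint64_t v)
	{
		/* isb syncs the new csselr before ccsidr is read, as in the asm */
		__asm__ volatile("msr csselr_el1, %0\n\tisb" :: "r"(v));
	}

	static inline void dc_cisw(uint64_t v)
	{
		__asm__ volatile("dc cisw, %0" :: "r"(v));
	}

	static void flush_dcache_all_sketch(void)
	{
		uint64_t clidr = read_clidr();
		unsigned int loc = (clidr >> 24) & 0x7;	/* level of coherence */

		for (unsigned int level = 0; level < loc; level++) {
			unsigned int ctype = (clidr >> (level * 3)) & 0x7;
			if (ctype < 2)		/* no cache, or icache only */
				continue;

			write_csselr(level << 1);	/* select dcache at this level */
			uint64_t ccsidr = read_ccsidr();

			unsigned int line_shift = (ccsidr & 0x7) + 4;	/* log2(line bytes) */
			unsigned int max_way = (ccsidr >> 3) & 0x3ff;	/* ways - 1 */
			unsigned int max_set = (ccsidr >> 13) & 0x7fff;	/* sets - 1 */
			/* left-align the way index within bits [31:32-A], like clz w5, w4 */
			unsigned int way_shift = max_way ? __builtin_clz(max_way) : 32;

			for (int set = max_set; set >= 0; set--)
				for (int way = max_way; way >= 0; way--)
					dc_cisw(((uint64_t)way << way_shift) |
						((uint64_t)set << line_shift) |
						(level << 1));
		}
		write_csselr(0);	/* the asm then issues dsb sy; isb */
	}

The loop nesting mirrors the assembly: loop1 walks levels out to the level
of coherence, loop2 walks sets, and loop3 walks ways within each set.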