Diffstat (limited to 'src/arch/arm/armv7')
-rw-r--r--  src/arch/arm/armv7/Kconfig               11
-rw-r--r--  src/arch/arm/armv7/Makefile.inc          71
-rw-r--r--  src/arch/arm/armv7/bootblock.S          113
-rw-r--r--  src/arch/arm/armv7/bootblock_simple.c    63
-rw-r--r--  src/arch/arm/armv7/cache.c              328
-rw-r--r--  src/arch/arm/armv7/exception.c          180
-rw-r--r--  src/arch/arm/armv7/exception_asm.S      116
-rw-r--r--  src/arch/arm/armv7/mmu.c                144
-rw-r--r--  src/arch/arm/armv7/thread.c             127
9 files changed, 1153 insertions(+), 0 deletions(-)
diff --git a/src/arch/arm/armv7/Kconfig b/src/arch/arm/armv7/Kconfig
new file mode 100644
index 0000000000..f8e0205c40
--- /dev/null
+++ b/src/arch/arm/armv7/Kconfig
@@ -0,0 +1,11 @@
+config ARCH_BOOTBLOCK_ARMV7
+ def_bool n
+ select ARCH_BOOTBLOCK_ARM
+
+config ARCH_ROMSTAGE_ARMV7
+ def_bool n
+ select ARCH_ROMSTAGE_ARM
+
+config ARCH_RAMSTAGE_ARMV7
+ def_bool n
+ select ARCH_RAMSTAGE_ARM
diff --git a/src/arch/arm/armv7/Makefile.inc b/src/arch/arm/armv7/Makefile.inc
new file mode 100644
index 0000000000..bcd7d9e46d
--- /dev/null
+++ b/src/arch/arm/armv7/Makefile.inc
@@ -0,0 +1,71 @@
+################################################################################
+##
+## This file is part of the coreboot project.
+##
+## Copyright (C) 2013 The ChromiumOS Authors
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; version 2 of the License.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+##
+###############################################################################
+
+armv7_flags = -march=armv7-a -mthumb -mthumb-interwork \
+ -I$(src)/arch/arm/include/armv7/
+
+###############################################################################
+# bootblock
+###############################################################################
+
+ifeq ($(CONFIG_ARCH_BOOTBLOCK_ARMV7),y)
+
+ifneq ($(CONFIG_ARM_BOOTBLOCK_CUSTOM),y)
+bootblock-y += bootblock.S
+bootblock-$(CONFIG_BOOTBLOCK_SIMPLE) += bootblock_simple.c
+endif
+
+bootblock-y += cache.c
+bootblock-y += mmu.c
+
+CFLAGS_bootblock += $(armv7_flags)
+CPPFLAGS_bootblock += $(armv7_flags)
+
+endif # CONFIG_ARCH_BOOTBLOCK_ARMV7
+
+###############################################################################
+# romstage
+###############################################################################
+
+ifeq ($(CONFIG_ARCH_ROMSTAGE_ARMV7),y)
+
+romstage-y += cache.c
+
+CFLAGS_romstage += $(armv7_flags)
+CPPFLAGS_romstage += $(armv7_flags)
+
+endif # CONFIG_ARCH_ROMSTAGE_ARMV7
+
+###############################################################################
+# ramstage
+###############################################################################
+
+ifeq ($(CONFIG_ARCH_RAMSTAGE_ARMV7),y)
+
+ramstage-y += cache.c
+ramstage-y += exception.c
+ramstage-y += exception_asm.S
+ramstage-y += mmu.c
+
+CFLAGS_ramstage += $(armv7_flags)
+CPPFLAGS_ramstage += $(armv7_flags)
+
+endif # CONFIG_ARCH_RAMSTAGE_ARMV7
diff --git a/src/arch/arm/armv7/bootblock.S b/src/arch/arm/armv7/bootblock.S
new file mode 100644
index 0000000000..b28a787839
--- /dev/null
+++ b/src/arch/arm/armv7/bootblock.S
@@ -0,0 +1,113 @@
+/*
+ * Early initialization code for ARMv7 architecture.
+ *
+ * This file is based off of the OMAP3530/ARM Cortex start.S file from Das
+ * U-Boot, which itself got the file from armboot.
+ *
+ * Copyright (c) 2004 Texas Instruments <r-woodruff2@ti.com>
+ * Copyright (c) 2001 Marius Gröger <mag@sysgo.de>
+ * Copyright (c) 2002 Alex Züpke <azu@sysgo.de>
+ * Copyright (c) 2002 Gary Jennejohn <garyj@denx.de>
+ * Copyright (c) 2003 Richard Woodruff <r-woodruff2@ti.com>
+ * Copyright (c) 2003 Kshitij <kshitij@ti.com>
+ * Copyright (c) 2006-2008 Syed Mohammed Khasim <x0khasim@ti.com>
+ * Copyright (c) 2013 The Chromium OS Authors
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+.section ".start", "a", %progbits
+.globl _start
+_start: b reset
+ .balignl 16,0xdeadbeef
+
+_cbfs_master_header:
+ /* The CBFS master header is inserted by cbfstool at the first
+ * aligned offset after the above anchor string is found.
+ * Hence, we leave some space for it.
+ */
+ .skip 128 @ Assumes 64-byte alignment
+
+reset:
+ /*
+ * Set the cpu to System mode with IRQ and FIQ disabled. Prefetch/Data
+ * aborts may happen early and crash before the abort handlers are
+ * installed, but at least the problem will show up near the code that
+ * causes it.
+ */
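+	/*
+	 * 0xdf sets I (bit 7) and F (bit 6) to mask IRQs and FIQs, clears
+	 * T (bit 5) for ARM state, and sets the mode bits [4:0] to 0b11111
+	 * (System mode).
+	 */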
+ msr cpsr_cxf, #0xdf
+
+ /*
+	 * From the Cortex-A Series Programmer's Guide:
+	 * Only CPU 0 performs initialization; other CPUs go into WFI.
+	 * To do this, first work out which CPU this is. This code
+	 * typically runs before any other initialization step.
+ */
+ mrc p15, 0, r1, c0, c0, 5 @ Read Multiprocessor Affinity Register
+ and r1, r1, #0x3 @ Extract CPU ID bits
+ cmp r1, #0
+ bne wait_for_interrupt @ If this is not core0, wait
+
+ /*
+ * Initialize the stack to a known value. This is used to check for
+ * stack overflow later in the boot process.
+ */
+ ldr r0, .Stack
+ ldr r1, .Stack_size
+ sub r0, r0, r1
+ ldr r1, .Stack
+ ldr r2, =0xdeadbeef
+init_stack_loop:
+ str r2, [r0]
+ add r0, #4
+ cmp r0, r1
+ bne init_stack_loop
+
+/* Set the stack pointer in internal RAM and call the bootblock's main() */
+call_bootblock:
+ ldr sp, .Stack /* Set up stack pointer */
+ ldr r0,=0x00000000
+ /*
+ * The current design of cpu_info places the
+ * struct at the top of the stack. The number of
+ * words pushed must be at least as large as that
+ * struct.
+ */
+ push {r0-r2}
+ bic sp, sp, #7 /* 8-byte alignment for ABI compliance */
+ /*
+ * Use "bl" instead of "b" even though we do not intend to return.
+ * "bl" gets compiled to "blx" if we're transitioning from ARM to
+ * Thumb. However, "b" will not and GCC may attempt to create a
+ * wrapper which is currently broken.
+ */
+ bl main
+
+wait_for_interrupt:
+ wfi
+ mov pc, lr @ back to my caller
+
+/* The stack top is kept in a literal pool because it is a 32-bit
+ * constant that may be too far away to be loaded as a PC-relative
+ * offset.
+ */
+.align 2
+.Stack:
+ .word CONFIG_STACK_TOP
+.align 2
+/* Compute the stack size the same way coreboot_ram.ld does: top minus bottom */
+.Stack_size:
+ .word CONFIG_STACK_TOP - CONFIG_STACK_BOTTOM
diff --git a/src/arch/arm/armv7/bootblock_simple.c b/src/arch/arm/armv7/bootblock_simple.c
new file mode 100644
index 0000000000..f447034029
--- /dev/null
+++ b/src/arch/arm/armv7/bootblock_simple.c
@@ -0,0 +1,63 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2010 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include <arch/cache.h>
+#include <arch/hlt.h>
+#include <arch/stages.h>
+#include <bootblock_common.h>
+#include <cbfs.h>
+#include <console/console.h>
+#include <smp/node.h>
+
+void main(void)
+{
+ const char *stage_name = "fallback/romstage";
+ void *entry;
+ uint32_t sctlr;
+
+ /* Globally disable MMU, caches, and branch prediction (these should
+ * be disabled by default on reset) */
+ sctlr = read_sctlr();
+ sctlr &= ~(SCTLR_M | SCTLR_C | SCTLR_Z | SCTLR_I);
+ write_sctlr(sctlr);
+
+ arm_invalidate_caches();
+
+ /*
+ * Re-enable icache and branch prediction. MMU and dcache will be
+ * set up later.
+ */
+ sctlr = read_sctlr();
+ sctlr |= SCTLR_Z | SCTLR_I;
+ write_sctlr(sctlr);
+
+ bootblock_cpu_init();
+ bootblock_mainboard_init();
+
+#if CONFIG_BOOTBLOCK_CONSOLE
+ console_init();
+#endif
+
+ entry = cbfs_load_stage(CBFS_DEFAULT_MEDIA, stage_name);
+
+ if (entry) stage_exit(entry);
+ hlt();
+}
diff --git a/src/arch/arm/armv7/cache.c b/src/arch/arm/armv7/cache.c
new file mode 100644
index 0000000000..acd1f9aefa
--- /dev/null
+++ b/src/arch/arm/armv7/cache.c
@@ -0,0 +1,328 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * cache.c: Cache maintenance routines for ARMv7-A and ARMv7-R
+ *
+ * Reference: ARM Architecture Reference Manual, ARMv7-A and ARMv7-R edition
+ */
+
+#include <stdint.h>
+
+#include <arch/cache.h>
+
+#define bitmask(high, low) ((1UL << (high)) + \
+ ((1UL << (high)) - 1) - ((1UL << (low)) - 1))
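+/* e.g. bitmask(12, 3) evaluates to 0x1ff8, i.e. bits 12 down to 3 set */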
+
+/* Basic log2() implementation. Note: log2(0) is 0 for our purposes. */
+/* FIXME: src/include/lib.h is difficult to work with due to romcc */
+static unsigned long log2(unsigned long u)
+{
+ int i = 0;
+
+ while (u >>= 1)
+ i++;
+
+ return i;
+}
+
+void tlb_invalidate_all(void)
+{
+ /*
+	 * FIXME: The ARMv7 Architecture Reference Manual claims that the
+	 * distinction between instruction and data TLBs is deprecated in
+	 * ARMv7; however, this does not seem to be the case as of Cortex-A15.
+ */
+ tlbiall();
+ dtlbiall();
+ itlbiall();
+ isb();
+ dsb();
+}
+
+void icache_invalidate_all(void)
+{
+ /*
+ * icache can be entirely invalidated with one operation.
+ * Note: If branch predictors are architecturally-visible, ICIALLU
+ * also performs a BPIALL operation (B2-1283 in arch manual)
+ */
+ iciallu();
+ isb();
+}
+
+enum dcache_op {
+ OP_DCCSW,
+ OP_DCCISW,
+ OP_DCISW,
+ OP_DCCIMVAC,
+ OP_DCCMVAC,
+ OP_DCIMVAC,
+};
+
+/*
+ * Do a dcache operation on entire cache by set/way. This is done for
+ * portability because mapping of memory address to cache location is
+ * implementation defined (See note on "Requirements for operations by
+ * set/way" in arch ref. manual).
+ */
+static void dcache_op_set_way(enum dcache_op op)
+{
+ uint32_t ccsidr;
+ unsigned int associativity, num_sets, linesize_bytes;
+ unsigned int set, way;
+ unsigned int level;
+
+ level = (read_csselr() >> 1) & 0x7;
+
+ /*
+ * dcache must be invalidated by set/way for portability since virtual
+ * memory mapping is system-defined. The number of sets and
+ * associativity is given by CCSIDR. We'll use DCISW to invalidate the
+ * dcache.
+ */
+ ccsidr = read_ccsidr();
+
+ /* FIXME: rounding up required here? */
+ num_sets = ((ccsidr & bitmask(27, 13)) >> 13) + 1;
+ associativity = ((ccsidr & bitmask(12, 3)) >> 3) + 1;
+ /* FIXME: do we need to use CTR.DminLine here? */
+ linesize_bytes = (1 << ((ccsidr & 0x7) + 2)) * 4;
+
+ dsb();
+
+ /*
+ * Set/way operations require an interesting bit packing. See section
+ * B4-35 in the ARMv7 Architecture Reference Manual:
+ *
+ * A: Log2(associativity)
+ * B: L+S
+ * L: Log2(linesize)
+ * S: Log2(num_sets)
+ *
+ * The bits are packed as follows:
+ * 31 31-A B B-1 L L-1 4 3 1 0
+ * |---|-------------|--------|-------|-----|-|
+ * |Way| zeros | Set | zeros |level|0|
+ * |---|-------------|--------|-------|-----|-|
+ */
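+	/*
+	 * Worked example (illustrative): a 4-way cache with 256 sets and
+	 * 64-byte lines gives A=2, L=6 and S=8, so the way index occupies
+	 * bits [31:30], the set index bits [13:6], and the level bits [3:1].
+	 */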
+ for (way = 0; way < associativity; way++) {
+ for (set = 0; set < num_sets; set++) {
+ uint32_t val = 0;
+ val |= way << (32 - log2(associativity));
+ val |= set << log2(linesize_bytes);
+ val |= level << 1;
+ switch(op) {
+ case OP_DCCISW:
+ dccisw(val);
+ break;
+ case OP_DCISW:
+ dcisw(val);
+ break;
+ case OP_DCCSW:
+ dccsw(val);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ isb();
+}
+
+static void dcache_foreach(enum dcache_op op)
+{
+ uint32_t clidr;
+ int level;
+
+ clidr = read_clidr();
+ for (level = 0; level < 7; level++) {
+ unsigned int ctype = (clidr >> (level * 3)) & 0x7;
+ uint32_t csselr;
+
+ switch(ctype) {
+ case 0x2:
+ case 0x3:
+ case 0x4:
+ csselr = level << 1;
+ write_csselr(csselr);
+ dcache_op_set_way(op);
+ break;
+ default:
+ /* no cache, icache only, or reserved */
+ break;
+ }
+ }
+}
+
+void dcache_clean_all(void)
+{
+ dcache_foreach(OP_DCCSW);
+}
+
+void dcache_clean_invalidate_all(void)
+{
+ dcache_foreach(OP_DCCISW);
+}
+
+void dcache_invalidate_all(void)
+{
+ dcache_foreach(OP_DCISW);
+}
+
+static unsigned int line_bytes(void)
+{
+ uint32_t ccsidr;
+ unsigned int size;
+
+ ccsidr = read_ccsidr();
+ /* [2:0] - Indicates (Log2(number of words in cache line)) - 2 */
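+	/* e.g. a field value of 1 means 2^(1+2) = 8 words, i.e. 32 bytes */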
+ size = 1 << ((ccsidr & 0x7) + 2); /* words per line */
+ size *= sizeof(unsigned int); /* bytes per line */
+
+ return size;
+}
+
+/*
+ * Do a dcache operation by modified virtual address. This is useful for
+ * maintaining coherency in drivers which do DMA transfers and only need to
+ * perform cache maintenance on a particular memory range rather than the
+ * entire cache.
+ */
+static void dcache_op_mva(void const *addr, size_t len, enum dcache_op op)
+{
+ unsigned long line, linesize;
+
+ linesize = line_bytes();
+ line = (uint32_t)addr & ~(linesize - 1);
+
+ dsb();
+ while ((void *)line < addr + len) {
+ switch(op) {
+ case OP_DCCIMVAC:
+ dccimvac(line);
+ break;
+ case OP_DCCMVAC:
+ dccmvac(line);
+ break;
+ case OP_DCIMVAC:
+ dcimvac(line);
+ break;
+ default:
+ break;
+ }
+ line += linesize;
+ }
+ isb();
+}
+
+void dcache_clean_by_mva(void const *addr, size_t len)
+{
+ dcache_op_mva(addr, len, OP_DCCMVAC);
+}
+
+void dcache_clean_invalidate_by_mva(void const *addr, size_t len)
+{
+ dcache_op_mva(addr, len, OP_DCCIMVAC);
+}
+
+void dcache_invalidate_by_mva(void const *addr, size_t len)
+{
+ dcache_op_mva(addr, len, OP_DCIMVAC);
+}
+
+void dcache_mmu_disable(void)
+{
+ uint32_t sctlr;
+
+ dcache_clean_invalidate_all();
+ sctlr = read_sctlr();
+ sctlr &= ~(SCTLR_C | SCTLR_M);
+ write_sctlr(sctlr);
+}
+
+
+void dcache_mmu_enable(void)
+{
+ uint32_t sctlr;
+
+ sctlr = read_sctlr();
+ dcache_clean_invalidate_all();
+ sctlr |= SCTLR_C | SCTLR_M;
+ write_sctlr(sctlr);
+}
+
+void arm_invalidate_caches(void)
+{
+ uint32_t clidr;
+ int level;
+
+ /* Invalidate branch predictor */
+ bpiall();
+
+	/* Iterate through each cache identified in CLIDR and invalidate it */
+ clidr = read_clidr();
+ for (level = 0; level < 7; level++) {
+ unsigned int ctype = (clidr >> (level * 3)) & 0x7;
+ uint32_t csselr;
+
+ switch(ctype) {
+ case 0x0:
+ /* no cache */
+ break;
+ case 0x1:
+ /* icache only */
+ csselr = (level << 1) | 1;
+ write_csselr(csselr);
+ icache_invalidate_all();
+ break;
+ case 0x2:
+ case 0x4:
+ /* dcache only or unified cache */
+ csselr = level << 1;
+ write_csselr(csselr);
+ dcache_invalidate_all();
+ break;
+ case 0x3:
+ /* separate icache and dcache */
+ csselr = (level << 1) | 1;
+ write_csselr(csselr);
+ icache_invalidate_all();
+
+ csselr = level << 1;
+ write_csselr(csselr);
+ dcache_invalidate_all();
+ break;
+ default:
+ /* reserved */
+ break;
+ }
+ }
+
+ /* Invalidate TLB */
+ tlb_invalidate_all();
+}
diff --git a/src/arch/arm/armv7/exception.c b/src/arch/arm/armv7/exception.c
new file mode 100644
index 0000000000..3b32e8bbb1
--- /dev/null
+++ b/src/arch/arm/armv7/exception.c
@@ -0,0 +1,180 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <types.h>
+#include <arch/exception.h>
+#include <console/console.h>
+
+void exception_test(void);
+
+static int test_abort;
+
+void exception_undefined_instruction(uint32_t *);
+void exception_software_interrupt(uint32_t *);
+void exception_prefetch_abort(uint32_t *);
+void exception_data_abort(uint32_t *);
+void exception_not_used(uint32_t *);
+void exception_irq(uint32_t *);
+void exception_fiq(uint32_t *);
+
+static void dump_stack(uintptr_t addr, size_t bytes)
+{
+ int i, j;
+ const int line = 8;
+ uint32_t *ptr = (uint32_t *)(addr & ~(line * sizeof(*ptr) - 1));
+
+ printk(BIOS_ERR, "Dumping stack:\n");
+ for (i = bytes / sizeof(*ptr); i >= 0; i -= line) {
+ printk(BIOS_ERR, "%p: ", ptr + i);
+ for (j = i; j < i + line; j++)
+ printk(BIOS_ERR, "%08x ", *(ptr + j));
+ printk(BIOS_ERR, "\n");
+ }
+}
+
+static void print_regs(uint32_t *regs)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ if (i == 15)
+ printk(BIOS_ERR, "PC");
+ else if (i == 14)
+ printk(BIOS_ERR, "LR");
+ else if (i == 13)
+ printk(BIOS_ERR, "SP");
+ else if (i == 12)
+ printk(BIOS_ERR, "IP");
+ else
+ printk(BIOS_ERR, "R%d", i);
+ printk(BIOS_ERR, " = 0x%08x\n", regs[i]);
+ }
+}
+
+void exception_undefined_instruction(uint32_t *regs)
+{
+ printk(BIOS_ERR, "exception _undefined_instruction\n");
+ print_regs(regs);
+ dump_stack(regs[13], 512);
+ die("exception");
+}
+
+void exception_software_interrupt(uint32_t *regs)
+{
+ printk(BIOS_ERR, "exception _software_interrupt\n");
+ print_regs(regs);
+ dump_stack(regs[13], 512);
+ die("exception");
+}
+
+void exception_prefetch_abort(uint32_t *regs)
+{
+ printk(BIOS_ERR, "exception _prefetch_abort\n");
+ print_regs(regs);
+ dump_stack(regs[13], 512);
+ die("exception");
+}
+
+void exception_data_abort(uint32_t *regs)
+{
+ if (test_abort) {
+ regs[15] = regs[0];
+ return;
+ } else {
+ printk(BIOS_ERR, "exception _data_abort\n");
+ print_regs(regs);
+ dump_stack(regs[13], 512);
+ }
+ die("exception");
+}
+
+void exception_not_used(uint32_t *regs)
+{
+ printk(BIOS_ERR, "exception _not_used\n");
+ print_regs(regs);
+ dump_stack(regs[13], 512);
+ die("exception");
+}
+
+void exception_irq(uint32_t *regs)
+{
+ printk(BIOS_ERR, "exception _irq\n");
+ print_regs(regs);
+ dump_stack(regs[13], 512);
+ die("exception");
+}
+
+void exception_fiq(uint32_t *regs)
+{
+ printk(BIOS_ERR, "exception _fiq\n");
+ print_regs(regs);
+ dump_stack(regs[13], 512);
+ die("exception");
+}
+
+static inline uint32_t get_sctlr(void)
+{
+ uint32_t val;
+ asm("mrc p15, 0, %0, c1, c0, 0" : "=r" (val));
+ return val;
+}
+
+static inline void set_sctlr(uint32_t val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 0" :: "r" (val));
+ asm volatile("" ::: "memory");
+}
+
+void exception_init(void)
+{
+ static const uint32_t sctlr_te = (0x1 << 30);
+ static const uint32_t sctlr_v = (0x1 << 13);
+ static const uint32_t sctlr_a = (0x1 << 1);
+
+ uint32_t sctlr = get_sctlr();
+ /* Handle exceptions in ARM mode. */
+ sctlr &= ~sctlr_te;
+ /* Set V=0 in SCTLR so VBAR points to the exception vector table. */
+ sctlr &= ~sctlr_v;
+ /* Enforce alignment temporarily. */
+ set_sctlr(sctlr | sctlr_a);
+
+ extern uint32_t exception_table[];
+ set_vbar((uintptr_t)exception_table);
+
+ test_abort = 1;
+ printk(BIOS_ERR, "Testing exceptions\n");
+ exception_test();
+ test_abort = 0;
+ printk(BIOS_ERR, "Testing exceptions: DONE\n");
+
+ /* Restore original alignment settings. */
+ set_sctlr(sctlr);
+}
diff --git a/src/arch/arm/armv7/exception_asm.S b/src/arch/arm/armv7/exception_asm.S
new file mode 100644
index 0000000000..163fdbd52a
--- /dev/null
+++ b/src/arch/arm/armv7/exception_asm.S
@@ -0,0 +1,116 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+exception_stack:
+ .align 5
+ .skip 0x2000, 0xa5
+exception_stack_end:
+ .word exception_stack_end
+
+exception_handler:
+ .word 0
+
+
+ .align 6
+ .arm
+ .global exception_table
+exception_table:
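+	/*
+	 * One branch per entry in the standard ARM vector order: reset,
+	 * undefined instruction, software interrupt, prefetch abort,
+	 * data abort, reserved, IRQ, FIQ.
+	 */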
+ b 1f
+ b 2f
+ b 3f
+ b 4f
+ b 5f
+ b 6f
+ b 7f
+ b 8f
+
+1:
+ ldr sp, _not_used
+ b exception_common
+2:
+ ldr sp, _undefined_instruction
+ b exception_common
+3:
+ ldr sp, _software_interrupt
+ b exception_common
+4:
+ ldr sp, _prefetch_abort
+ b exception_common
+5:
+ ldr sp, _data_abort
+ b exception_common
+6:
+ ldr sp, _not_used
+ b exception_common
+7:
+ ldr sp, _irq
+ b exception_common
+8:
+ ldr sp, _fiq
+ b exception_common
+
+exception_common:
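+	/*
+	 * On entry, sp holds the handler address loaded by the stub above.
+	 * Build a 16-word frame matching regs[0..15] in the C handlers:
+	 * r0-r12 at the lowest addresses, then the banked user-mode sp
+	 * and lr, then the exception-mode lr (the faulting PC) on top.
+	 */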
+ str sp, exception_handler
+ ldr sp, exception_stack_end
+ push { lr }
+ stmfd sp, { sp, lr }^
+ sub sp, sp, $8
+ push { r0 - r12 }
+ mov r0, sp
+ mov lr, pc
+ ldr pc, exception_handler
+ pop { r0 - r12 }
+ add sp, sp, $8
+ ldmfd sp!, { pc }^
+
+
+_undefined_instruction: .word exception_undefined_instruction
+_software_interrupt: .word exception_software_interrupt
+_prefetch_abort: .word exception_prefetch_abort
+_data_abort: .word exception_data_abort
+_not_used: .word exception_not_used
+_irq: .word exception_irq
+_fiq: .word exception_fiq
+
+ .thumb
+ .global set_vbar
+ .thumb_func
+set_vbar:
+ mcr p15, 0, r0, c12, c0, 0
+ bx lr
+
+ .global exception_test
+ .thumb_func
+exception_test:
+ mov r1, $1
+ mov r0, pc
+ add r0, $3
+ ldr r1, [r1]
+ bx lr
+
diff --git a/src/arch/arm/armv7/mmu.c b/src/arch/arm/armv7/mmu.c
new file mode 100644
index 0000000000..cc915a6a33
--- /dev/null
+++ b/src/arch/arm/armv7/mmu.c
@@ -0,0 +1,144 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <config.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+#include <cbmem.h>
+#include <console/console.h>
+
+#include <arch/cache.h>
+#include <arch/io.h>
+
+static void *const ttb_buff = (void *)CONFIG_TTB_BUFFER;
+
+void mmu_disable_range(unsigned long start_mb, unsigned long size_mb)
+{
+ unsigned int i;
+ uint32_t *ttb_entry = ttb_buff;
+ printk(BIOS_DEBUG, "Disabling: 0x%08lx:0x%08lx\n",
+ start_mb*MiB, start_mb*MiB + size_mb*MiB - 1);
+
+ for (i = start_mb; i < start_mb + size_mb; i++)
+ writel(0, &ttb_entry[i]);
+
+ for (i = start_mb; i < start_mb + size_mb; i++) {
+ dccmvac((uintptr_t)&ttb_entry[i]);
+ tlbimvaa(i*MiB);
+ }
+}
+
+void mmu_config_range(unsigned long start_mb, unsigned long size_mb,
+ enum dcache_policy policy)
+{
+ unsigned int i;
+ uint32_t attr;
+ uint32_t *ttb_entry = ttb_buff;
+ const char *str = NULL;
+
+ /*
+ * Section entry bits:
+ * 31:20 - section base address
+ * 18 - 0 to indicate normal section (versus supersection)
+ * 17 - nG, 0 to indicate page is global
+ * 16 - S, 0 for non-shareable (?)
+ * 15 - APX, 0 for full access
+ * 14:12 - TEX, 0b000 for outer and inner write-back
+ * 11:10 - AP, 0b11 for full access
+ * 9 - P, ? (FIXME: not described or possibly obsolete?)
+ * 8: 5 - Domain
+ * 4 - XN, 1 to set execute-never (and also avoid prefetches)
+ * 3 - C, 1 for cacheable
+ * 2 - B, 1 for bufferable
+ * 1: 0 - 0b10 to indicate section entry
+ */
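+	/*
+	 * For example, DCACHE_WRITEBACK below evaluates to attr = 0xc0e
+	 * (AP = 0b11, C = 1, B = 1, section entry), so the entry covering
+	 * megabyte i is written as (i << 20) | 0xc0e.
+	 */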
+
+ switch(policy) {
+ case DCACHE_OFF:
+ /* XN set to avoid prefetches to uncached/unbuffered regions */
+ attr = (0x3 << 10) | (1 << 4) | 0x2;
+ str = "off";
+ break;
+ case DCACHE_WRITEBACK:
+ attr = (0x3 << 10) | (1 << 3) | (1 << 2) | 0x2;
+ str = "writeback";
+ break;
+ case DCACHE_WRITETHROUGH:
+ attr = (0x3 << 10) | (1 << 3) | 0x2;
+ str = "writethrough";
+ break;
+ default:
+ printk(BIOS_ERR, "unknown dcache policy: %02x\n", policy);
+ return;
+ }
+
+ printk(BIOS_DEBUG, "Setting dcache policy: 0x%08lx:0x%08lx [%s]\n",
+ start_mb << 20, ((start_mb + size_mb) << 20) - 1, str);
+
+ /* Write out page table entries. */
+ for (i = start_mb; i < start_mb + size_mb; i++)
+ writel((i << 20) | attr, &ttb_entry[i]);
+
+ /* Flush the page table entries, and old translations from the TLB. */
+ for (i = start_mb; i < start_mb + size_mb; i++) {
+ dccmvac((uintptr_t)&ttb_entry[i]);
+ tlbimvaa(i*MiB);
+ }
+}
+
+void mmu_init(void)
+{
+ /*
+ * For coreboot's purposes, we will create a simple L1 page table
+ * in RAM with 1MB section translation entries over the 4GB address
+ * space.
+ * (ref: section 10.2 and example 15-4 in Cortex-A series
+ * programmer's guide)
+ */
+ printk(BIOS_DEBUG, "Translation table is @ %p\n", ttb_buff);
+
+ /*
+ * Disable TTBR1 by setting TTBCR.N to 0b000, which means the TTBR0
+ * table size is 16KB and has indices VA[31:20].
+ *
+ * ref: Arch Ref. Manual for ARMv7-A, B3.5.4,
+ */
+ write_ttbcr(read_ttbcr() & ~0x3);
+
+ /*
+ * Translation table base 0 address is in bits 31:14-N, where N is given
+ * by bits 2:0 in TTBCR (which we set to 0). All lower bits in this
+ * register should be zero for coreboot.
+ */
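+	/* This assumes CONFIG_TTB_BUFFER is 16KB-aligned, as required when
+	 * TTBCR.N is 0. */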
+ write_ttbr0((uintptr_t)ttb_buff);
+
+ /* disable domain-level checking of permissions */
+ write_dacr(~0);
+}
diff --git a/src/arch/arm/armv7/thread.c b/src/arch/arm/armv7/thread.c
new file mode 100644
index 0000000000..d0c23ff92d
--- /dev/null
+++ b/src/arch/arm/armv7/thread.c
@@ -0,0 +1,127 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <console/console.h>
+#include <thread.h>
+
+/* The stack frame looks like the following. */
+struct pushed_regs {
+ u32 r4;
+ u32 r5;
+ u32 r6;
+ u32 r7;
+ u32 r8;
+ u32 r9;
+ u32 r10;
+ u32 r11;
+ u32 lr;
+};
+
+static inline uintptr_t push_stack(uintptr_t cur_stack, uintptr_t value)
+{
+ uintptr_t *addr;
+
+ cur_stack -= sizeof(value);
+ addr = (uintptr_t *)cur_stack;
+ *addr = value;
+ return cur_stack;
+}
+
+void arch_prepare_thread(struct thread *t,
+ void asmlinkage (*thread_entry)(void *), void *arg)
+{
+ uintptr_t stack = t->stack_current;
+ int i;
+ uintptr_t poison = 0xdeadbeef;
+
+ /* Push the LR. thread_entry()
+ * is assumed to never return.
+ */
+ stack = push_stack(stack, (uintptr_t)thread_entry);
+ /* Make room for the registers.
+ * Poison the initial stack. This is good hygiene and finds bugs.
+ * Poisoning the stack with different values helps when you're
+ * hunting for (e.g.) misaligned stacks or other such
+ * weirdness. The -1 is because we already pushed lr.
+ */
+ for(i = 0; i < sizeof(struct pushed_regs)/sizeof(u32)-1; i++)
+ stack = push_stack(stack, poison++);
+
+ t->stack_current = stack;
+}
+
+/* We could write this as a .S and the first time around that's how we
+ * did it. But there's always the question of matching our ARM
+ * directives in the .S with how gcc is doing things. It seems best
+ * to follow the pattern of the rest of the ARM port and just use
+ * inline assembly and let gcc get all the ELF magic right.
+ */
+void __attribute__((naked))
+switch_to_thread(uintptr_t new_stack, uintptr_t *saved_stack)
+{
+	/* Definitions for those of us not totally familiar with ARM:
+ * R15 -- PC, R14 -- LR, R13 -- SP
+ * R0-R3 need not be saved, nor R12.
+ * on entry, the only saved state is in LR -- the old PC.
+ * The args are in R0,R1.
+ * R0 is the new stack
+ * R1 is a pointer to the old stack save location
+ * Push R4-R11 and LR
+ * then switch stacks
+	 * then pop R4-R11 and LR
+ * then mov PC,LR
+ *
+ * stack layout
+ * +------------+
+ * | LR | <-- sp + 0x20
+ * +------------+
+ * | R11 | <-- sp + 0x1c
+ * +------------+
+ * | R10 | <-- sp + 0x18
+ * +------------+
+ * | R9 | <-- sp + 0x14
+ * +------------+
+ * | R8 | <-- sp + 0x10
+ * +------------+
+ * | R7 | <-- sp + 0x0c
+ * +------------+
+ * | R6 | <-- sp + 0x08
+ * +------------+
+ * | R5 | <-- sp + 0x04
+ * +------------+
+ * | R4 | <-- sp + 0x00
+ * +------------+
+ */
+ asm volatile (
+ /* save context. */
+ "push {r4-r11,lr}\n\t"
+ /* Save the current stack */
+ "str sp,[r1]\n\t"
+ /* switch to the new stack */
+ "mov sp,r0\n\t"
+ /* restore the registers */
+ "pop {r4-r11,lr}\n\t"
+ /* resume other thread. */
+ "mov pc,lr\n\t"
+ );
+}
+
+void *arch_get_thread_stackbase(void)
+{
+ return (void *)CONFIG_STACK_BOTTOM;
+}