author    Furquan Shaikh <furquan@google.com>    2014-02-19 11:35:30 -0800
committer Marc Jones <marc.jones@se-eng.com>     2015-01-09 07:04:28 +0100
commit    8c8c377584742755ca7a2f490e77d0cd8da36bee (patch)
tree      7a61b2fb2a8a4e2a00350ab567034c2cde9eab3d /payloads/libpayload/arch
parent    24452743472a2480b88465de22db8adc7a9b544c (diff)
download  coreboot-8c8c377584742755ca7a2f490e77d0cd8da36bee.tar.xz
libpayload: Add support for arm64 in libpayload
Basic support for arm64 is enabled in libpayload. Features added:

1) mem* operations in assembly.
2) Basic exception handling and support for testing exceptions.
3) Caching support.

Tested with arm64-generic board compilation.

BUG=None
BRANCH=None
TEST=Compilation successful

Original-Change-Id: I4e86301f9c6383abc078e2b70071fb84bd6e4741
Original-Signed-off-by: Furquan Shaikh <furquan@google.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/187067
Original-Tested-by: Furquan Shaikh <furquan@chromium.org>
Original-Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Original-Commit-Queue: Furquan Shaikh <furquan@chromium.org>
(cherry picked from commit a70d13f3d225535843ab352290eab2e1ec7a9b4b)
Signed-off-by: Marc Jones <marc.jones@se-eng.com>
Change-Id: Ie3affe6a2bdd4fed3058de739d4c6aa573e5b251
Reviewed-on: http://review.coreboot.org/8063
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
Reviewed-by: Furquan Shaikh <furquan@google.com>
Diffstat (limited to 'payloads/libpayload/arch')
-rw-r--r--  payloads/libpayload/arch/Config.in                 |    1
-rw-r--r--  payloads/libpayload/arch/arm64/Config.in           |   36
-rw-r--r--  payloads/libpayload/arch/arm64/Makefile.inc        |   43
-rw-r--r--  payloads/libpayload/arch/arm64/cache.c             |  149
-rw-r--r--  payloads/libpayload/arch/arm64/coreboot.c          |  307
-rw-r--r--  payloads/libpayload/arch/arm64/cpu.S               |   98
-rw-r--r--  payloads/libpayload/arch/arm64/dummy_media.c       |   42
-rw-r--r--  payloads/libpayload/arch/arm64/exception.c         |   96
-rw-r--r--  payloads/libpayload/arch/arm64/exception_asm.S     |  146
-rw-r--r--  payloads/libpayload/arch/arm64/head.S              |   66
-rw-r--r--  payloads/libpayload/arch/arm64/libpayload.ldscript |   93
-rw-r--r--  payloads/libpayload/arch/arm64/main.c              |   82
-rw-r--r--  payloads/libpayload/arch/arm64/memcpy.S            |  189
-rw-r--r--  payloads/libpayload/arch/arm64/memmove.S           |  324
-rw-r--r--  payloads/libpayload/arch/arm64/memset.S             |  242
-rw-r--r--  payloads/libpayload/arch/arm64/sysinfo.c           |   64
-rw-r--r--  payloads/libpayload/arch/arm64/timer.c             |   54
-rw-r--r--  payloads/libpayload/arch/arm64/util.S              |   35
-rw-r--r--  payloads/libpayload/arch/arm64/virtual.c           |   38
19 files changed, 2105 insertions, 0 deletions
diff --git a/payloads/libpayload/arch/Config.in b/payloads/libpayload/arch/Config.in
index 541f64f2c1..3d3d0b5cf2 100644
--- a/payloads/libpayload/arch/Config.in
+++ b/payloads/libpayload/arch/Config.in
@@ -29,3 +29,4 @@
source "arch/arm/Config.in"
source "arch/x86/Config.in"
+source "arch/arm64/Config.in"
diff --git a/payloads/libpayload/arch/arm64/Config.in b/payloads/libpayload/arch/arm64/Config.in
new file mode 100644
index 0000000000..0f2596c452
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/Config.in
@@ -0,0 +1,36 @@
+##
+## This file is part of the libpayload project.
+##
+## Copyright (c) 2012 Google Inc.
+##
+## Redistribution and use in source and binary forms, with or without
+## modification, are permitted provided that the following conditions
+## are met:
+## 1. Redistributions of source code must retain the above copyright
+## notice, this list of conditions and the following disclaimer.
+## 2. Redistributions in binary form must reproduce the above copyright
+## notice, this list of conditions and the following disclaimer in the
+## documentation and/or other materials provided with the distribution.
+## 3. The name of the author may not be used to endorse or promote products
+## derived from this software without specific prior written permission.
+##
+## THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+## ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+## FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+## OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+## LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+## OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+## SUCH DAMAGE.
+##
+
+if ARCH_ARM64
+
+config ARCH_SPECIFIC_OPTIONS # dummy
+ def_bool y
+ select LITTLE_ENDIAN
+
+endif
diff --git a/payloads/libpayload/arch/arm64/Makefile.inc b/payloads/libpayload/arch/arm64/Makefile.inc
new file mode 100644
index 0000000000..682e58bd8a
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/Makefile.inc
@@ -0,0 +1,43 @@
+##
+## This file is part of the libpayload project.
+##
+## Copyright (C) 2008 Advanced Micro Devices, Inc.
+##
+## Redistribution and use in source and binary forms, with or without
+## modification, are permitted provided that the following conditions
+## are met:
+## 1. Redistributions of source code must retain the above copyright
+## notice, this list of conditions and the following disclaimer.
+## 2. Redistributions in binary form must reproduce the above copyright
+## notice, this list of conditions and the following disclaimer in the
+## documentation and/or other materials provided with the distribution.
+## 3. The name of the author may not be used to endorse or promote products
+## derived from this software without specific prior written permission.
+##
+## THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+## ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+## FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+## OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+## LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+## OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+## SUCH DAMAGE.
+##
+
+CFLAGS += -march=armv8-a
+arm64_asm_flags =
+
+head.o-y += head.S
+libc-y += main.c sysinfo.c
+libc-y += timer.c coreboot.c util.S
+libc-y += virtual.c
+libc-y += memcpy.S memset.S memmove.S
+libc-y += exception_asm.S exception.c
+libc-y += cache.c cpu.S
+
+# Add other classes here when you put assembly files into them!
+ head.o-S-ccopts += $(arm64_asm_flags)
+ libc-S-ccopts += $(arm64_asm_flags)
diff --git a/payloads/libpayload/arch/arm64/cache.c b/payloads/libpayload/arch/arm64/cache.c
new file mode 100644
index 0000000000..f6faafa31d
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/cache.c
@@ -0,0 +1,149 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * cache.c: Cache maintenance routines for ARMv8 (AArch64)
+ *
+ * Reference: ARM Architecture Reference Manual, ARMv8-A edition
+ */
+
+#include <stdint.h>
+
+#include <arch/cache.h>
+
+void tlb_invalidate_all(void)
+{
+ /* Invalidate all TLB entries at EL3, instruction and data alike. */
+ tlbiall_el3();
+ dsb();
+ isb();
+}
+
+enum dcache_op {
+ OP_DCCSW,
+ OP_DCCISW,
+ OP_DCISW,
+ OP_DCCIVAC,
+ OP_DCCVAC,
+ OP_DCIVAC,
+};
+
+unsigned int dcache_line_bytes(void)
+{
+ uint32_t ccsidr;
+ static unsigned int line_bytes = 0;
+
+ if (line_bytes)
+ return line_bytes;
+
+ ccsidr = read_ccsidr();
+ /* [2:0] - Indicates (Log2(number of words in cache line)) - 2 */
+ line_bytes = 1 << ((ccsidr & 0x7) + 2); /* words per line */
+ line_bytes *= sizeof(unsigned int); /* bytes per word */
+
+ return line_bytes;
+}
+
+/*
+ * Do a dcache operation by virtual address. This is useful for
+ * maintaining coherency in drivers which do DMA transfers and only need to
+ * perform cache maintenance on a particular memory range rather than the
+ * entire cache.
+ */
+static void dcache_op_va(void const *addr, size_t len, enum dcache_op op)
+{
+ unsigned long line, linesize;
+
+ linesize = dcache_line_bytes();
+ line = (uintptr_t)addr & ~(linesize - 1);
+
+ dsb();
+ while (line < (uintptr_t)addr + len) {
+ switch(op) {
+ case OP_DCCIVAC:
+ dccivac(line);
+ break;
+ case OP_DCCVAC:
+ dccvac(line);
+ break;
+ case OP_DCIVAC:
+ dcivac(line);
+ break;
+ default:
+ break;
+ }
+ line += linesize;
+ }
+ isb();
+}
+
+void dcache_clean_by_va(void const *addr, size_t len)
+{
+ dcache_op_va(addr, len, OP_DCCVAC);
+}
+
+void dcache_clean_invalidate_by_va(void const *addr, size_t len)
+{
+ dcache_op_va(addr, len, OP_DCCIVAC);
+}
+
+void dcache_invalidate_by_va(void const *addr, size_t len)
+{
+ dcache_op_va(addr, len, OP_DCIVAC);
+}
+
+/*
+ * CAUTION: This implementation assumes that coreboot never uses non-identity
+ * page tables for pages containing executed code. If you ever want to violate
+ * this assumption, have fun figuring out the associated problems on your own.
+ */
+void dcache_mmu_disable(void)
+{
+ uint32_t sctlr;
+
+ dcache_clean_invalidate_all();
+ sctlr = read_sctlr_el3();
+ sctlr &= ~(SCTLR_C | SCTLR_M);
+ write_sctlr_el3(sctlr);
+}
+
+void dcache_mmu_enable(void)
+{
+ uint32_t sctlr;
+
+ sctlr = read_sctlr_el3();
+ sctlr |= SCTLR_C | SCTLR_M;
+ write_sctlr_el3(sctlr);
+}
+
+void cache_sync_instructions(void)
+{
+ dcache_clean_all(); /* includes trailing DSB (in assembly) */
+ iciallu(); /* includes BPIALLU (architecturally) */
+ dsb();
+ isb();
+}
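
Usage sketch, not part of this change: a driver handing a buffer to a DMA
engine would bracket the transfer with the by-VA helpers above. dma_start()
and dma_wait() are hypothetical device calls; only the cache calls come
from this file.

    static uint8_t dma_buf[512];

    static void dma_read_example(void)
    {
        /* Push any dirty CPU writes to memory before the device reads. */
        dcache_clean_by_va(dma_buf, sizeof(dma_buf));
        dma_start(dma_buf, sizeof(dma_buf));    /* device fills dma_buf */
        dma_wait();
        /* Drop stale lines so the CPU sees what the device wrote. */
        dcache_invalidate_by_va(dma_buf, sizeof(dma_buf));
    }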
diff --git a/payloads/libpayload/arch/arm64/coreboot.c b/payloads/libpayload/arch/arm64/coreboot.c
new file mode 100644
index 0000000000..6c671c8628
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/coreboot.c
@@ -0,0 +1,307 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009 coresystems GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <libpayload-config.h>
+#include <libpayload.h>
+#include <coreboot_tables.h>
+
+/* This pointer gets set in head.S and is passed in from coreboot. */
+void *cb_header_ptr;
+
+/*
+ * Most of this parsing code is generic; only a little depends on the
+ * architecture. Right now, since arm64 is the only user of this file, we
+ * avoid building infrastructure we don't need. If another architecture
+ * ever needs it, pull the generic parsing code out and move it elsewhere.
+ */
+
+/* === Parsing code === */
+/* This is the generic parsing code. */
+
+static void cb_parse_memory(void *ptr, struct sysinfo_t *info)
+{
+ struct cb_memory *mem = ptr;
+ int count = MEM_RANGE_COUNT(mem);
+ int i;
+
+ if (count > SYSINFO_MAX_MEM_RANGES)
+ count = SYSINFO_MAX_MEM_RANGES;
+
+ info->n_memranges = 0;
+
+ for (i = 0; i < count; i++) {
+ struct cb_memory_range *range = MEM_RANGE_PTR(mem, i);
+
+#ifdef CONFIG_LP_MEMMAP_RAM_ONLY
+ if (range->type != CB_MEM_RAM)
+ continue;
+#endif
+
+ info->memrange[info->n_memranges].base =
+ cb_unpack64(range->start);
+
+ info->memrange[info->n_memranges].size =
+ cb_unpack64(range->size);
+
+ info->memrange[info->n_memranges].type = range->type;
+
+ info->n_memranges++;
+ }
+}
+
+static void cb_parse_serial(void *ptr, struct sysinfo_t *info)
+{
+ info->serial = ((struct cb_serial *)ptr);
+}
+
+#ifdef CONFIG_LP_CHROMEOS
+static void cb_parse_vboot_handoff(unsigned char *ptr, struct sysinfo_t *info)
+{
+ struct lb_range *vbho = (struct lb_range *)ptr;
+
+ info->vboot_handoff = (void *)(uintptr_t)vbho->range_start;
+ info->vboot_handoff_size = vbho->range_size;
+}
+
+static void cb_parse_vbnv(unsigned char *ptr, struct sysinfo_t *info)
+{
+ struct lb_range *vbnv = (struct lb_range *)ptr;
+
+ info->vbnv_start = vbnv->range_start;
+ info->vbnv_size = vbnv->range_size;
+}
+
+static void cb_parse_gpios(unsigned char *ptr, struct sysinfo_t *info)
+{
+ int i;
+ struct cb_gpios *gpios = (struct cb_gpios *)ptr;
+
+ info->num_gpios = (gpios->count < SYSINFO_MAX_GPIOS) ?
+ (gpios->count) : SYSINFO_MAX_GPIOS;
+
+ for (i = 0; i < info->num_gpios; i++)
+ info->gpios[i] = gpios->gpios[i];
+}
+
+static void cb_parse_vdat(unsigned char *ptr, struct sysinfo_t *info)
+{
+ struct lb_range *vdat = (struct lb_range *)ptr;
+
+ info->vdat_addr = phys_to_virt(vdat->range_start);
+ info->vdat_size = vdat->range_size;
+}
+#endif
+
+static void cb_parse_dma(unsigned char *ptr)
+{
+ struct lb_range *dma = (struct lb_range *)ptr;
+ init_dma_memory(phys_to_virt(dma->range_start), dma->range_size);
+}
+
+static void cb_parse_tstamp(unsigned char *ptr, struct sysinfo_t *info)
+{
+ struct cb_cbmem_tab *const cbmem = (struct cb_cbmem_tab *)ptr;
+ info->tstamp_table = phys_to_virt(cbmem->cbmem_tab);
+}
+
+static void cb_parse_cbmem_cons(unsigned char *ptr, struct sysinfo_t *info)
+{
+ struct cb_cbmem_tab *const cbmem = (struct cb_cbmem_tab *)ptr;
+ info->cbmem_cons = phys_to_virt(cbmem->cbmem_tab);
+}
+
+static void cb_parse_mrc_cache(unsigned char *ptr, struct sysinfo_t *info)
+{
+ struct cb_cbmem_tab *const cbmem = (struct cb_cbmem_tab *)ptr;
+ info->mrc_cache = phys_to_virt(cbmem->cbmem_tab);
+}
+
+#ifdef CONFIG_LP_NVRAM
+static void cb_parse_optiontable(void *ptr, struct sysinfo_t *info)
+{
+ /* ptr points to a coreboot table entry and is already virtual */
+ info->option_table = ptr;
+}
+
+static void cb_parse_checksum(void *ptr, struct sysinfo_t *info)
+{
+ struct cb_cmos_checksum *cmos_cksum = ptr;
+ info->cmos_range_start = cmos_cksum->range_start;
+ info->cmos_range_end = cmos_cksum->range_end;
+ info->cmos_checksum_location = cmos_cksum->location;
+}
+#endif
+
+#ifdef CONFIG_LP_COREBOOT_VIDEO_CONSOLE
+static void cb_parse_framebuffer(void *ptr, struct sysinfo_t *info)
+{
+ /* ptr points to a coreboot table entry and is already virtual */
+ info->framebuffer = ptr;
+}
+#endif
+
+static void cb_parse_string(unsigned char *ptr, char **info)
+{
+ *info = (char *)((struct cb_string *)ptr)->string;
+}
+
+static int cb_parse_header(void *addr, struct sysinfo_t *info)
+{
+ struct cb_header *header = addr;
+ unsigned char *ptr = addr;
+ void *forward;
+ int i;
+
+ /* No signature found. */
+ if (strncmp((const char *)header->signature, "LBIO", 4))
+ return -1;
+
+ if (!header->table_bytes)
+ return 0;
+
+ /* Make sure the checksums match. */
+ if (ipchksum((u16 *) header, sizeof(*header)) != 0)
+ return -1;
+
+ if (ipchksum((u16 *) (ptr + sizeof(*header)),
+ header->table_bytes) != header->table_checksum)
+ return -1;
+
+ info->header = header;
+
+ /* Now, walk the tables. */
+ ptr += header->header_bytes;
+
+ for (i = 0; i < header->table_entries; i++) {
+ struct cb_record *rec = (struct cb_record *)ptr;
+
+ /* We only care about a few tags here (maybe more later). */
+ switch (rec->tag) {
+ case CB_TAG_FORWARD:
+ forward = phys_to_virt((void *)(unsigned long)((struct cb_forward *)rec)->forward);
+ return cb_parse_header(forward, info);
+ case CB_TAG_MEMORY:
+ cb_parse_memory(ptr, info);
+ break;
+ case CB_TAG_SERIAL:
+ cb_parse_serial(ptr, info);
+ break;
+ case CB_TAG_VERSION:
+ cb_parse_string(ptr, &info->cb_version);
+ break;
+ case CB_TAG_EXTRA_VERSION:
+ cb_parse_string(ptr, &info->extra_version);
+ break;
+ case CB_TAG_BUILD:
+ cb_parse_string(ptr, &info->build);
+ break;
+ case CB_TAG_COMPILE_TIME:
+ cb_parse_string(ptr, &info->compile_time);
+ break;
+ case CB_TAG_COMPILE_BY:
+ cb_parse_string(ptr, &info->compile_by);
+ break;
+ case CB_TAG_COMPILE_HOST:
+ cb_parse_string(ptr, &info->compile_host);
+ break;
+ case CB_TAG_COMPILE_DOMAIN:
+ cb_parse_string(ptr, &info->compile_domain);
+ break;
+ case CB_TAG_COMPILER:
+ cb_parse_string(ptr, &info->compiler);
+ break;
+ case CB_TAG_LINKER:
+ cb_parse_string(ptr, &info->linker);
+ break;
+ case CB_TAG_ASSEMBLER:
+ cb_parse_string(ptr, &info->assembler);
+ break;
+#ifdef CONFIG_LP_NVRAM
+ case CB_TAG_CMOS_OPTION_TABLE:
+ cb_parse_optiontable(ptr, info);
+ break;
+ case CB_TAG_OPTION_CHECKSUM:
+ cb_parse_checksum(ptr, info);
+ break;
+#endif
+#ifdef CONFIG_LP_COREBOOT_VIDEO_CONSOLE
+ // FIXME: we should warn on serial if coreboot set up a
+ // framebuffer but the payload does not know about it.
+ case CB_TAG_FRAMEBUFFER:
+ cb_parse_framebuffer(ptr, info);
+ break;
+#endif
+ case CB_TAG_MAINBOARD:
+ info->mainboard = (struct cb_mainboard *)ptr;
+ break;
+#ifdef CONFIG_LP_CHROMEOS
+ case CB_TAG_GPIO:
+ cb_parse_gpios(ptr, info);
+ break;
+ case CB_TAG_VDAT:
+ cb_parse_vdat(ptr, info);
+ break;
+ case CB_TAG_VBNV:
+ cb_parse_vbnv(ptr, info);
+ break;
+ case CB_TAG_VBOOT_HANDOFF:
+ cb_parse_vboot_handoff(ptr, info);
+ break;
+#endif
+ case CB_TAG_DMA:
+ cb_parse_dma(ptr);
+ break;
+ case CB_TAG_TIMESTAMPS:
+ cb_parse_tstamp(ptr, info);
+ break;
+ case CB_TAG_CBMEM_CONSOLE:
+ cb_parse_cbmem_cons(ptr, info);
+ break;
+ case CB_TAG_MRC_CACHE:
+ cb_parse_mrc_cache(ptr, info);
+ break;
+ }
+
+ ptr += rec->size;
+ }
+
+ return 1;
+}
+
+/* == Architecture specific == */
+/* FIXME put in actual address range */
+
+int get_coreboot_info(struct sysinfo_t *info)
+{
+ int ret = cb_parse_header(cb_header_ptr, info);
+
+ return (ret == 1) ? 0 : -1;
+}
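
Consumer-side sketch, not part of this change: once lib_get_sysinfo() has
run get_coreboot_info(), a payload can walk the ranges cb_parse_memory()
filled in, via libpayload's global lib_sysinfo (field names taken from the
parsing code above):

    static void print_memranges(void)
    {
        int i;

        for (i = 0; i < lib_sysinfo.n_memranges; i++)
            printf("range %d: base 0x%016llx size 0x%016llx type %d\n", i,
                   (unsigned long long)lib_sysinfo.memrange[i].base,
                   (unsigned long long)lib_sysinfo.memrange[i].size,
                   (int)lib_sysinfo.memrange[i].type);
    }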
diff --git a/payloads/libpayload/arch/arm64/cpu.S b/payloads/libpayload/arch/arm64/cpu.S
new file mode 100644
index 0000000000..d80f73c112
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/cpu.S
@@ -0,0 +1,98 @@
+/*
+ * Optimized assembly for low-level CPU operations on ARM64 processors.
+ *
+ * Copyright (c) 2010 Per Odlund <per.odlund@armagedon.se>
+ * Copyright (c) 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <arch/asm.h>
+
+.macro dcache_apply_all crm
+ dsb sy
+ mrs x0, clidr_el1 // read CLIDR
+ and w3, w0, #0x07000000 // extract the LoC field (bits [26:24])
+ lsr w3, w3, #23 // w3 = 2 * LoC (level of coherency)
+ cbz w3, 5f // no cache levels to walk; done
+
+ mov w10, #0 // w10 = 2 * cache level
+ mov w8, #1 // w8 = constant 0b1
+
+1: //next_level
+ add w2, w10, w10, lsr #1 // calculate 3 * cache level
+ lsr w1, w0, w2 // extract 3-bit cache type for this level
+ and w1, w1, #0x7 // w1 = cache type
+ cmp w1, #2 // is it data or i&d?
+ b.lt 4f //skip
+ msr csselr_el1, x10 // select current cache level
+ isb // sync change of csselr
+ mrs x1, ccsidr_el1 // w1 = read ccsidr
+ and w2, w1, #7 // w2 = log2(linelen_bytes) - 4
+ add w2, w2, #4 // w2 = log2(linelen_bytes)
+ ubfx w4, w1, #3, #10 // w4 = associativity - 1 (also
+ // max way number)
+ clz w5, w4 // w5 = 32 - log2(ways)
+ // (bit position of way in DC)
+ lsl w9, w4, w5 // w9 = max way number
+ // (aligned for DC)
+ lsl w16, w8, w5 // w16 = amount to decrement (way
+ // number per iteration)
+2: //next_way
+ ubfx w7, w1, #13, #15 // w7 = max set #, right aligned
+ lsl w7, w7, w2 // w7 = max set #, DC aligned
+ lsl w17, w8, w2 // w17 = amount to decrement (set
+ // number per iteration)
+
+3: //next_set
+ orr w11, w10, w9 // w11 = combine way # & cache #
+ orr w11, w11, w7 // ... and set #
+ dc \crm, x11 // clean and/or invalidate line
+ subs w7, w7, w17 // decrement set number
+ b.ge 3b //next_set
+ subs x9, x9, x16 // decrement way number
+ b.ge 2b //next_way
+
+4: //skip
+ add w10, w10, #2 // increment 2 * cache level
+ cmp w3, w10 // Went beyond LoC?
+ b.gt 1b //next_level
+
+5: //done
+ dsb sy
+ isb
+ ret
+.endm
+
+ENTRY(dcache_invalidate_all)
+ dcache_apply_all crm=isw
+ENDPROC(dcache_invalidate_all)
+
+ENTRY(dcache_clean_all)
+ dcache_apply_all crm=csw
+ENDPROC(dcache_clean_all)
+
+ENTRY(dcache_clean_invalidate_all)
+ dcache_apply_all crm=cisw
+ENDPROC(dcache_clean_invalidate_all)
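
The set/way walk in dcache_apply_all is easier to follow in C. This is a
reading aid only: read_clidr(), write_csselr() and dc_op() are hypothetical
stand-ins for the mrs/msr/dc instructions used above (read_ccsidr() does
exist, in cache.c).

    static void dcache_apply_all_sketch(void)
    {
        uint32_t clidr = read_clidr();
        unsigned int loc = (clidr >> 24) & 0x7;    /* Level of Coherency */
        unsigned int level, way, set;

        for (level = 0; level < loc; level++) {
            unsigned int ctype = (clidr >> (level * 3)) & 0x7;
            unsigned int line_shift, ways, sets, way_shift;
            uint32_t ccsidr;

            if (ctype < 2)    /* no data or unified cache at this level */
                continue;
            write_csselr(level << 1);    /* select this level's data cache */
            ccsidr = read_ccsidr();
            line_shift = (ccsidr & 0x7) + 4;        /* log2(line bytes) */
            ways = ((ccsidr >> 3) & 0x3ff) + 1;
            sets = ((ccsidr >> 13) & 0x7fff) + 1;
            /* The CLZ in the assembly; assume ways >= 2 for the builtin. */
            way_shift = __builtin_clz(ways - 1);
            for (way = 0; way < ways; way++)
                for (set = 0; set < sets; set++)
                    dc_op(((uint64_t)way << way_shift) |
                          ((uint64_t)set << line_shift) | (level << 1));
        }
    }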
diff --git a/payloads/libpayload/arch/arm64/dummy_media.c b/payloads/libpayload/arch/arm64/dummy_media.c
new file mode 100644
index 0000000000..7926976422
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/dummy_media.c
@@ -0,0 +1,42 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#define LIBPAYLOAD
+
+#include <cbfs.h>
+#include <string.h>
+
+/* The generic cbfs code relies on the libpayload_init_default_cbfs_media
+ * symbol. Therefore, provide an implementation that just throws an error. */
+
+int libpayload_init_default_cbfs_media(struct cbfs_media *media);
+
+int libpayload_init_default_cbfs_media(struct cbfs_media *media)
+{
+ return -1;
+}
diff --git a/payloads/libpayload/arch/arm64/exception.c b/payloads/libpayload/arch/arm64/exception.c
new file mode 100644
index 0000000000..988bef9ed4
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/exception.c
@@ -0,0 +1,96 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <exception.h>
+#include <libpayload.h>
+#include <stdint.h>
+
+extern unsigned int test_exc;
+
+struct exception_handler_info
+{
+ const char *name;
+ exception_hook hook;
+};
+
+static struct exception_handler_info exceptions[EXC_COUNT] = {
+ [EXC_INV] = { "_invalid_exception" },
+ [EXC_SYNC] = { "_sync" },
+ [EXC_IRQ] = { "_irq" },
+ [EXC_FIQ] = { "_fiq" },
+ [EXC_SERROR] = {"_serror"}
+};
+
+static void print_regs(struct exception_state *state)
+{
+ int i;
+
+ printf("ELR = 0x%016llx ",state->elr);
+ printf("ESR = 0x%08llx ",state->esr);
+ for (i = 0; i < 31; i++) {
+ printf("X%02d = 0x%016llx ", i, state->regs[i]);
+ }
+}
+
+void exception_dispatch(struct exception_state *state, int idx);
+void exception_dispatch(struct exception_state *state, int idx)
+{
+ if (idx >= EXC_COUNT) {
+ printf("Bad exception index %d.\n", idx);
+ } else {
+ struct exception_handler_info *info = &exceptions[idx];
+ if (info->hook) {
+ info->hook(idx, state);
+ return;
+ }
+
+ if (info->name)
+ printf("exception %s\n", info->name);
+ else
+ printf("exception _not_used.\n");
+ }
+ print_regs(state);
+
+ if (test_exc)
+ test_exc = 0;
+ else
+ halt();
+}
+
+void exception_init(void)
+{
+ extern void *exception_table;
+ set_vbar(&exception_table);
+}
+
+void exception_install_hook(int type, exception_hook hook)
+{
+ die_if(type >= EXC_COUNT, "Out of bounds exception index %d.\n", type);
+ exceptions[type].hook = hook;
+}
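
Caller-side sketch, not part of this change. The handler signature below is
an assumption derived from the info->hook(idx, state) call in
exception_dispatch() above; the real exception_hook typedef lives in
exception.h.

    /* Hypothetical hook: report the syndrome instead of halting. */
    static void my_sync_hook(unsigned int idx, struct exception_state *state)
    {
        printf("sync exception %d, ESR = 0x%08llx\n", idx, state->esr);
    }

    static void install_hook_example(void)
    {
        exception_init();
        exception_install_hook(EXC_SYNC, my_sync_hook);
    }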
diff --git a/payloads/libpayload/arch/arm64/exception_asm.S b/payloads/libpayload/arch/arm64/exception_asm.S
new file mode 100644
index 0000000000..c68ba5a7c6
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/exception_asm.S
@@ -0,0 +1,146 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+ .text
+
+ /* Macro for exception entry:
+ * - store x30 before any branch clobbers it
+ * - branch to exception_prologue to save the remaining registers
+ * - move the exception id into x1
+ * - branch to exception_handler
+ */
+.macro eentry id
+ stp x30, xzr, [sp, #-16]!
+ bl exception_prologue
+ mov x1, \id
+ bl exception_handler
+.endm
+
+ /* The exception table has 16 entries of 128 bytes each,
+ * i.e. 16 * 128 = 2048 bytes, so it must be aligned to 2^11;
+ * hence the 11 passed to .align below.
+ */
+
+ .align 11
+ .global exception_table
+exception_table:
+
+ .align 7
+sync_el0:
+ eentry #0
+
+ .align 7
+irq_el0:
+ eentry #0
+
+ .align 7
+fiq_el0:
+ eentry #0
+
+ .align 7
+serror_el0:
+ eentry #0
+
+ .align 7
+sync_elx:
+ eentry #1
+
+ .align 7
+irq_elx:
+ eentry #2
+
+ .align 7
+fiq_elx:
+ eentry #3
+
+ .align 7
+serror_elx:
+ eentry #4
+
+exception_prologue:
+ /* Save all registers x0-x29 */
+ stp x28, x29, [sp, #-16]!
+ stp x26, x27, [sp, #-16]!
+ stp x24, x25, [sp, #-16]!
+ stp x22, x23, [sp, #-16]!
+ stp x20, x21, [sp, #-16]!
+ stp x18, x19, [sp, #-16]!
+ stp x16, x17, [sp, #-16]!
+ stp x14, x15, [sp, #-16]!
+ stp x12, x13, [sp, #-16]!
+ stp x10, x11, [sp, #-16]!
+ stp x8, x9, [sp, #-16]!
+ stp x6, x7, [sp, #-16]!
+ stp x4, x5, [sp, #-16]!
+ stp x2, x3, [sp, #-16]!
+ stp x0, x1, [sp, #-16]!
+
+ /* Save the exception reason on stack */
+ mrs x1, esr_el3
+
+ /* Save the return address on stack */
+ mrs x0, elr_el3
+ stp x0, x1, [sp, #-16]!
+
+ ret
+
+exception_handler:
+ /* Save address of saved registers into x0
+ * This acts as first argument to exception_dispatch
+ */
+ mov x0, sp
+ bl exception_dispatch
+
+ /* Pop return address saved on stack */
+ ldp x0, x1, [sp], #16
+ /* Pop exception reason saved on stack, followed by regs x0-x30 */
+ ldp x0, x1, [sp], #16
+ ldp x2, x3, [sp], #16
+ ldp x4, x5, [sp], #16
+ ldp x6, x7, [sp], #16
+ ldp x8, x9, [sp], #16
+ ldp x10, x11, [sp], #16
+ ldp x12, x13, [sp], #16
+ ldp x14, x15, [sp], #16
+ ldp x16, x17, [sp], #16
+ ldp x18, x19, [sp], #16
+ ldp x20, x21, [sp], #16
+ ldp x22, x23, [sp], #16
+ ldp x24, x25, [sp], #16
+ ldp x26, x27, [sp], #16
+ ldp x28, x29, [sp], #16
+ ldp x30, xzr, [sp], #16
+ eret
+
+ .global set_vbar
+set_vbar:
+ /* Initialize the exception table address in vbar for EL3 */
+ /* FIXME: Do we need to initialize for other levels too? EL1/EL2 */
+ msr vbar_el3, x0
+ ret
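
Reading aid, not part of this change: exception_prologue pushes x30 (with an
xzr pad), then x29..x0 in pairs, and finally ELR/ESR, so the frame handed to
exception_dispatch() as `state` is, lowest address first (this matches the
state->elr / state->esr / state->regs[] accesses in exception.c):

    struct exception_state {
        uint64_t elr;        /* from elr_el3, pushed last */
        uint64_t esr;        /* from esr_el3 */
        uint64_t regs[31];   /* x0 .. x30; the xzr pad sits above regs[30] */
    };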
diff --git a/payloads/libpayload/arch/arm64/head.S b/payloads/libpayload/arch/arm64/head.S
new file mode 100644
index 0000000000..214c5d7253
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/head.S
@@ -0,0 +1,66 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <arch/asm.h>
+
+/*
+ * Our entry point
+ */
+ENTRY(_entry)
+
+ /* Save off the location of the coreboot tables */
+ ldr x1, 1f
+ str x0, [x1]
+
+ /* TODO: disable interrupts */
+
+ /* TODO: Clear BSS */
+
+ /* Setup new stack */
+ ldr x1, 2f
+ mov sp, x1
+
+ /* TODO: Save old stack pointer and link register */
+
+ /* Let's rock. */
+ bl start_main
+
+ /* x0 has the return value - pass it on unmolested */
+
+ /* TODO: restore old stack pointer and link register */
+
+ /* Return to the original context. */
+ ret
+ENDPROC(_entry)
+
+.align 4
+1:
+.quad cb_header_ptr
+2:
+.quad _stack
diff --git a/payloads/libpayload/arch/arm64/libpayload.ldscript b/payloads/libpayload/arch/arm64/libpayload.ldscript
new file mode 100644
index 0000000000..41a2e89faa
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/libpayload.ldscript
@@ -0,0 +1,93 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright (C) 2013 Google, Inc.
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+BASE_ADDRESS = 0x80100000;
+
+OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
+OUTPUT_ARCH(arm64)
+
+ENTRY(_entry)
+
+HEAP_SIZE = 2*64*1024;
+STACK_SIZE = 16384;
+
+SECTIONS
+{
+ . = BASE_ADDRESS;
+
+ . = ALIGN(16);
+ _start = .;
+
+ .text : {
+ *(.text._entry)
+ *(.text)
+ *(.text.*)
+ }
+
+ .rodata : {
+ *(.rodata)
+ *(.rodata.*)
+ }
+
+ .data : {
+ *(.data)
+ *(.data.*)
+ }
+
+ _edata = .;
+
+ .bss : {
+ *(.sbss)
+ *(.sbss.*)
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+
+ /* Stack and heap */
+
+ . = ALIGN(16);
+ _heap = .;
+ . += HEAP_SIZE;
+ . = ALIGN(16);
+ _eheap = .;
+
+ _estack = .;
+ . += STACK_SIZE;
+ . = ALIGN(16);
+ _stack = .;
+ }
+
+ _end = .;
+
+ /DISCARD/ : {
+ *(.comment)
+ *(.note*)
+ }
+}
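
The _heap/_eheap and _estack/_stack symbols carved out above are how the C
side finds its memory. A sketch of the usual idiom for referencing linker
symbols (the size report itself is purely illustrative):

    extern char _heap[], _eheap[], _estack[], _stack[];

    static void report_layout(void)
    {
        printf("heap: %zu bytes, stack: %zu bytes\n",
               (size_t)(_eheap - _heap), (size_t)(_stack - _estack));
    }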
diff --git a/payloads/libpayload/arch/arm64/main.c b/payloads/libpayload/arch/arm64/main.c
new file mode 100644
index 0000000000..ba4fc60473
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/main.c
@@ -0,0 +1,82 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <exception.h>
+#include <libpayload.h>
+
+unsigned int main_argc; /**< The argc value to pass to main() */
+
+/** The argv value to pass to main() */
+char *main_argv[MAX_ARGC_COUNT];
+
+unsigned int test_exc;
+
+int test_exception(void);
+int test_exception(void)
+{
+ int a = 1;
+ int b = 0;
+ test_exc = 1;
+ return a/b;
+}
+
+/**
+ * This is our C entry function - set up the system
+ * and jump into the payload entry point.
+ */
+void start_main(void);
+void start_main(void)
+{
+ extern int main(int argc, char **argv);
+
+ printf("hello libpayload world");
+ /* Gather system information. */
+ lib_get_sysinfo();
+
+
+ exception_init();
+
+ test_exception();
+ /*
+ * Any other system init that has to happen before the
+ * user gets control goes here.
+ */
+
+ /*
+ * Go to the entry point.
+ * In the future we may care about the return value.
+ */
+
+ (void) main(main_argc, (main_argc != 0) ? main_argv : NULL);
+
+ /*
+ * Returning here will go to the _leave function to return
+ * us to the original context.
+ */
+}
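
For completeness: the main() that start_main() calls is supplied by the
payload itself. A minimal, hypothetical payload would be:

    int main(int argc, char **argv)
    {
        printf("payload running on arm64\n");
        return 0;    /* returns into start_main(), then back to _entry */
    }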
diff --git a/payloads/libpayload/arch/arm64/memcpy.S b/payloads/libpayload/arch/arm64/memcpy.S
new file mode 100644
index 0000000000..1c8e55dea7
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/memcpy.S
@@ -0,0 +1,189 @@
+/* Copyright (c) 2012-2013, Linaro Limited
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the Linaro nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE,
+ DATA, OR PROFITS ; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64
+ * Unaligned accesses
+ *
+ */
+
+#define dstin x0
+#define src x1
+#define count x2
+#define tmp1 x3
+#define tmp1w w3
+#define tmp2 x4
+#define tmp2w w4
+#define tmp3 x5
+#define tmp3w w5
+#define dst x6
+
+#define A_l x7
+#define A_h x8
+#define B_l x9
+#define B_h x10
+#define C_l x11
+#define C_h x12
+#define D_l x13
+#define D_h x14
+
+.macro def_fn f p2align=0
+.text
+.p2align \p2align
+.global \f
+.type \f, %function
+\f:
+.endm
+
+def_fn memcpy p2align=6
+
+ mov dst, dstin
+ cmp count, #64
+ b.ge .Lcpy_not_short
+ cmp count, #15
+ b.le .Ltail15tiny
+
+ /* Deal with small copies quickly by dropping straight into the
+ * exit block. */
+.Ltail63:
+ /* Copy up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate. */
+ ands tmp1, count, #0x30
+ b.eq .Ltail15
+ add dst, dst, tmp1
+ add src, src, tmp1
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp A_l, A_h, [src, #-48]
+ stp A_l, A_h, [dst, #-48]
+1:
+ ldp A_l, A_h, [src, #-32]
+ stp A_l, A_h, [dst, #-32]
+2:
+ ldp A_l, A_h, [src, #-16]
+ stp A_l, A_h, [dst, #-16]
+
+.Ltail15:
+ ands count, count, #15
+ b.eq 1f
+ add src, src, count
+ ldp A_l, A_h, [src, #-16]
+ add dst, dst, count
+ stp A_l, A_h, [dst, #-16]
+1:
+ ret
+
+.Ltail15tiny:
+ /* Copy up to 15 bytes of data. Does not assume additional data
+ being copied. */
+ tbz count, #3, 1f
+ ldr tmp1, [src], #8
+ str tmp1, [dst], #8
+1:
+ tbz count, #2, 1f
+ ldr tmp1w, [src], #4
+ str tmp1w, [dst], #4
+1:
+ tbz count, #1, 1f
+ ldrh tmp1w, [src], #2
+ strh tmp1w, [dst], #2
+1:
+ tbz count, #0, 1f
+ ldrb tmp1w, [src]
+ strb tmp1w, [dst]
+1:
+ ret
+
+.Lcpy_not_short:
+ /* We don't much care about the alignment of DST, but we want SRC
+ * to be 128-bit (16 byte) aligned so that we don't cross cache line
+ * boundaries on both loads and stores. */
+ neg tmp2, src
+ ands tmp2, tmp2, #15 /* Bytes to reach alignment. */
+ b.eq 2f
+ sub count, count, tmp2
+ /* Copy more data than needed; it's faster than jumping
+ * around copying sub-Quadword quantities. We know that
+ * it can't overrun. */
+ ldp A_l, A_h, [src]
+ add src, src, tmp2
+ stp A_l, A_h, [dst]
+ add dst, dst, tmp2
+ /* There may be less than 63 bytes to go now. */
+ cmp count, #63
+ b.le .Ltail63
+2:
+ subs count, count, #128
+ b.ge .Lcpy_body_large
+ /* Less than 128 bytes to copy, so handle 64 here and then jump
+ * to the tail. */
+ ldp A_l, A_h, [src]
+ ldp B_l, B_h, [src, #16]
+ ldp C_l, C_h, [src, #32]
+ ldp D_l, D_h, [src, #48]
+ stp A_l, A_h, [dst]
+ stp B_l, B_h, [dst, #16]
+ stp C_l, C_h, [dst, #32]
+ stp D_l, D_h, [dst, #48]
+ tst count, #0x3f
+ add src, src, #64
+ add dst, dst, #64
+ b.ne .Ltail63
+ ret
+
+ /* Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line. */
+ .p2align 6
+.Lcpy_body_large:
+ /* There are at least 128 bytes to copy. */
+ ldp A_l, A_h, [src, #0]
+ sub dst, dst, #16 /* Pre-bias. */
+ ldp B_l, B_h, [src, #16]
+ ldp C_l, C_h, [src, #32]
+ ldp D_l, D_h, [src, #48]! /* src += 64 - Pre-bias. */
+1:
+ stp A_l, A_h, [dst, #16]
+ ldp A_l, A_h, [src, #16]
+ stp B_l, B_h, [dst, #32]
+ ldp B_l, B_h, [src, #32]
+ stp C_l, C_h, [dst, #48]
+ ldp C_l, C_h, [src, #48]
+ stp D_l, D_h, [dst, #64]!
+ ldp D_l, D_h, [src, #64]!
+ subs count, count, #64
+ b.ge 1b
+ stp A_l, A_h, [dst, #16]
+ stp B_l, B_h, [dst, #32]
+ stp C_l, C_h, [dst, #48]
+ stp D_l, D_h, [dst, #64]
+ add src, src, #16
+ add dst, dst, #64 + 16
+ tst count, #0x3f
+ b.ne .Ltail63
+ ret
+ .size memcpy, .-memcpy
diff --git a/payloads/libpayload/arch/arm64/memmove.S b/payloads/libpayload/arch/arm64/memmove.S
new file mode 100644
index 0000000000..d79316e007
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/memmove.S
@@ -0,0 +1,324 @@
+/* Copyright (c) 2013, Linaro Limited
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the Linaro nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE,
+ DATA, OR PROFITS ; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64
+ * Unaligned accesses
+ */
+
+.macro def_fn f p2align=0
+.text
+.p2align \p2align
+.global \f
+.type \f, %function
+\f:
+.endm
+
+/* Parameters and result. */
+#define dstin x0
+#define src x1
+#define count x2
+#define tmp1 x3
+#define tmp1w w3
+#define tmp2 x4
+#define tmp2w w4
+#define tmp3 x5
+#define tmp3w w5
+#define dst x6
+
+#define A_l x7
+#define A_h x8
+#define B_l x9
+#define B_h x10
+#define C_l x11
+#define C_h x12
+#define D_l x13
+#define D_h x14
+
+def_fn memmove, 6
+ cmp dstin, src
+ b.lo .Ldownwards
+ add tmp1, src, count
+ cmp dstin, tmp1
+ b.hs memcpy /* No overlap. */
+
+ /* Upwards move with potential overlap.
+ * Need to move from the tail backwards. SRC and DST point one
+ * byte beyond the remaining data to move. */
+ add dst, dstin, count
+ add src, src, count
+ cmp count, #64
+ b.ge .Lmov_not_short_up
+
+ /* Deal with small moves quickly by dropping straight into the
+ * exit block. */
+.Ltail63up:
+ /* Move up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate. */
+ ands tmp1, count, #0x30
+ b.eq .Ltail15up
+ sub dst, dst, tmp1
+ sub src, src, tmp1
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp A_l, A_h, [src, #32]
+ stp A_l, A_h, [dst, #32]
+1:
+ ldp A_l, A_h, [src, #16]
+ stp A_l, A_h, [dst, #16]
+2:
+ ldp A_l, A_h, [src]
+ stp A_l, A_h, [dst]
+.Ltail15up:
+ /* Move up to 15 bytes of data. Does not assume additional data
+ * being moved. */
+ tbz count, #3, 1f
+ ldr tmp1, [src, #-8]!
+ str tmp1, [dst, #-8]!
+1:
+ tbz count, #2, 1f
+ ldr tmp1w, [src, #-4]!
+ str tmp1w, [dst, #-4]!
+1:
+ tbz count, #1, 1f
+ ldrh tmp1w, [src, #-2]!
+ strh tmp1w, [dst, #-2]!
+1:
+ tbz count, #0, 1f
+ ldrb tmp1w, [src, #-1]
+ strb tmp1w, [dst, #-1]
+1:
+ ret
+
+.Lmov_not_short_up:
+ /* We don't much care about the alignment of DST, but we want SRC
+ * to be 128-bit (16 byte) aligned so that we don't cross cache line
+ * boundaries on both loads and stores. */
+ ands tmp2, src, #15 /* Bytes to reach alignment. */
+ b.eq 2f
+ sub count, count, tmp2
+ /* Move enough data to reach alignment; unlike memcpy, we have to
+ * be aware of the overlap, which means we can't move data twice. */
+ tbz tmp2, #3, 1f
+ ldr tmp1, [src, #-8]!
+ str tmp1, [dst, #-8]!
+1:
+ tbz tmp2, #2, 1f
+ ldr tmp1w, [src, #-4]!
+ str tmp1w, [dst, #-4]!
+1:
+ tbz tmp2, #1, 1f
+ ldrh tmp1w, [src, #-2]!
+ strh tmp1w, [dst, #-2]!
+1:
+ tbz tmp2, #0, 1f
+ ldrb tmp1w, [src, #-1]!
+ strb tmp1w, [dst, #-1]!
+1:
+
+ /* There may be less than 63 bytes to go now. */
+ cmp count, #63
+ b.le .Ltail63up
+2:
+ subs count, count, #128
+ b.ge .Lmov_body_large_up
+ /* Less than 128 bytes to move, so handle 64 here and then jump
+ * to the tail. */
+ ldp A_l, A_h, [src, #-64]!
+ ldp B_l, B_h, [src, #16]
+ ldp C_l, C_h, [src, #32]
+ ldp D_l, D_h, [src, #48]
+ stp A_l, A_h, [dst, #-64]!
+ stp B_l, B_h, [dst, #16]
+ stp C_l, C_h, [dst, #32]
+ stp D_l, D_h, [dst, #48]
+ tst count, #0x3f
+ b.ne .Ltail63up
+ ret
+
+ /* Critical loop. Start at a new Icache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line. */
+ .p2align 6
+.Lmov_body_large_up:
+ /* There are at least 128 bytes to move. */
+ ldp A_l, A_h, [src, #-16]
+ ldp B_l, B_h, [src, #-32]
+ ldp C_l, C_h, [src, #-48]
+ ldp D_l, D_h, [src, #-64]!
+1:
+ stp A_l, A_h, [dst, #-16]
+ ldp A_l, A_h, [src, #-16]
+ stp B_l, B_h, [dst, #-32]
+ ldp B_l, B_h, [src, #-32]
+ stp C_l, C_h, [dst, #-48]
+ ldp C_l, C_h, [src, #-48]
+ stp D_l, D_h, [dst, #-64]!
+ ldp D_l, D_h, [src, #-64]!
+ subs count, count, #64
+ b.ge 1b
+ stp A_l, A_h, [dst, #-16]
+ stp B_l, B_h, [dst, #-32]
+ stp C_l, C_h, [dst, #-48]
+ stp D_l, D_h, [dst, #-64]!
+ tst count, #0x3f
+ b.ne .Ltail63up
+ ret
+
+
+.Ldownwards:
+ /* For a downwards move we can safely use memcpy provided that
+ * DST is more than 16 bytes away from SRC. */
+ sub tmp1, src, #16
+ cmp dstin, tmp1
+ b.ls memcpy /* May overlap, but not critically. */
+
+ mov dst, dstin /* Preserve DSTIN for return value. */
+ cmp count, #64
+ b.ge .Lmov_not_short_down
+
+ /* Deal with small moves quickly by dropping straight into the
+ * exit block. */
+.Ltail63down:
+ /* Move up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate. */
+ ands tmp1, count, #0x30
+ b.eq .Ltail15down
+ add dst, dst, tmp1
+ add src, src, tmp1
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp A_l, A_h, [src, #-48]
+ stp A_l, A_h, [dst, #-48]
+1:
+ ldp A_l, A_h, [src, #-32]
+ stp A_l, A_h, [dst, #-32]
+2:
+ ldp A_l, A_h, [src, #-16]
+ stp A_l, A_h, [dst, #-16]
+.Ltail15down:
+ /* Move up to 15 bytes of data. Does not assume additional data
+ being moved. */
+ tbz count, #3, 1f
+ ldr tmp1, [src], #8
+ str tmp1, [dst], #8
+1:
+ tbz count, #2, 1f
+ ldr tmp1w, [src], #4
+ str tmp1w, [dst], #4
+1:
+ tbz count, #1, 1f
+ ldrh tmp1w, [src], #2
+ strh tmp1w, [dst], #2
+1:
+ tbz count, #0, 1f
+ ldrb tmp1w, [src]
+ strb tmp1w, [dst]
+1:
+ ret
+
+.Lmov_not_short_down:
+ /* We don't much care about the alignment of DST, but we want SRC
+ * to be 128-bit (16 byte) aligned so that we don't cross cache line
+ * boundaries on both loads and stores. */
+ neg tmp2, src
+ ands tmp2, tmp2, #15 /* Bytes to reach alignment. */
+ b.eq 2f
+ sub count, count, tmp2
+ /* Move enough data to reach alignment; unlike memcpy, we have to
+ * be aware of the overlap, which means we can't move data twice. */
+ tbz tmp2, #3, 1f
+ ldr tmp1, [src], #8
+ str tmp1, [dst], #8
+1:
+ tbz tmp2, #2, 1f
+ ldr tmp1w, [src], #4
+ str tmp1w, [dst], #4
+1:
+ tbz tmp2, #1, 1f
+ ldrh tmp1w, [src], #2
+ strh tmp1w, [dst], #2
+1:
+ tbz tmp2, #0, 1f
+ ldrb tmp1w, [src], #1
+ strb tmp1w, [dst], #1
+1:
+
+ /* There may be less than 63 bytes to go now. */
+ cmp count, #63
+ b.le .Ltail63down
+2:
+ subs count, count, #128
+ b.ge .Lmov_body_large_down
+ /* Less than 128 bytes to move, so handle 64 here and then jump
+ * to the tail. */
+ ldp A_l, A_h, [src]
+ ldp B_l, B_h, [src, #16]
+ ldp C_l, C_h, [src, #32]
+ ldp D_l, D_h, [src, #48]
+ stp A_l, A_h, [dst]
+ stp B_l, B_h, [dst, #16]
+ stp C_l, C_h, [dst, #32]
+ stp D_l, D_h, [dst, #48]
+ tst count, #0x3f
+ add src, src, #64
+ add dst, dst, #64
+ b.ne .Ltail63down
+ ret
+
+ /* Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line. */
+ .p2align 6
+.Lmov_body_large_down:
+ /* There are at least 128 bytes to move. */
+ ldp A_l, A_h, [src, #0]
+ sub dst, dst, #16 /* Pre-bias. */
+ ldp B_l, B_h, [src, #16]
+ ldp C_l, C_h, [src, #32]
+ ldp D_l, D_h, [src, #48]! /* src += 64 - Pre-bias. */
+1:
+ stp A_l, A_h, [dst, #16]
+ ldp A_l, A_h, [src, #16]
+ stp B_l, B_h, [dst, #32]
+ ldp B_l, B_h, [src, #32]
+ stp C_l, C_h, [dst, #48]
+ ldp C_l, C_h, [src, #48]
+ stp D_l, D_h, [dst, #64]!
+ ldp D_l, D_h, [src, #64]!
+ subs count, count, #64
+ b.ge 1b
+ stp A_l, A_h, [dst, #16]
+ stp B_l, B_h, [dst, #32]
+ stp C_l, C_h, [dst, #48]
+ stp D_l, D_h, [dst, #64]
+ add src, src, #16
+ add dst, dst, #64 + 16
+ tst count, #0x3f
+ b.ne .Ltail63down
+ ret
+ .size memmove, . - memmove
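
The direction test at the top of memmove reduces to the following C logic,
shown only as a reading aid; the byte-wise tail copy stands in for the
16-bytes-at-a-time backward loop above:

    static void *memmove_sketch(void *dstv, const void *srcv, size_t n)
    {
        uint8_t *dst = dstv;
        const uint8_t *src = srcv;

        /* Forward copy is safe when dst is below src or past the source. */
        if ((uintptr_t)dst < (uintptr_t)src ||
            (uintptr_t)dst >= (uintptr_t)src + n)
            return memcpy(dstv, srcv, n);

        while (n--)    /* overlap: copy from the tail backwards */
            dst[n] = src[n];
        return dstv;
    }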
diff --git a/payloads/libpayload/arch/arm64/memset.S b/payloads/libpayload/arch/arm64/memset.S
new file mode 100644
index 0000000000..ee8f81859c
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/memset.S
@@ -0,0 +1,242 @@
+/* Copyright (c) 2012-2013, Linaro Limited
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the Linaro nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE,
+ DATA, OR PROFITS ; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64
+ * Unaligned accesses
+ *
+ */
+
+/* By default we assume that the DC instruction can be used to zero
+ data blocks more efficiently. In some circumstances this might be
+ unsafe, for example in an asymmetric multiprocessor environment with
+ different DC clear lengths (neither the upper nor lower lengths are
+ safe to use). The feature can be disabled by defining DONT_USE_DC.
+
+ If code may be run in a virtualized environment, then define
+ MAYBE_VIRT. This will cause the code to cache the system register
+ values rather than re-reading them each call. */
+
+#define dstin x0
+#define val w1
+#define count x2
+#define tmp1 x3
+#define tmp1w w3
+#define tmp2 x4
+#define tmp2w w4
+#define zva_len_x x5
+#define zva_len w5
+#define zva_bits_x x6
+
+#define A_l x7
+#define A_lw w7
+#define dst x8
+#define tmp3w w9
+
+
+.macro def_fn f p2align=0
+.text
+.p2align \p2align
+.global \f
+.type \f, %function
+\f:
+.endm
+
+def_fn memset p2align=6
+
+ mov dst, dstin /* Preserve return value. */
+ ands A_lw, val, #255
+#ifndef DONT_USE_DC
+ b.eq .Lzero_mem
+#endif
+ orr A_lw, A_lw, A_lw, lsl #8
+ orr A_lw, A_lw, A_lw, lsl #16
+ orr A_l, A_l, A_l, lsl #32
+.Ltail_maybe_long:
+ cmp count, #64
+ b.ge .Lnot_short
+.Ltail_maybe_tiny:
+ cmp count, #15
+ b.le .Ltail15tiny
+.Ltail63:
+ ands tmp1, count, #0x30
+ b.eq .Ltail15
+ add dst, dst, tmp1
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ stp A_l, A_l, [dst, #-48]
+1:
+ stp A_l, A_l, [dst, #-32]
+2:
+ stp A_l, A_l, [dst, #-16]
+
+.Ltail15:
+ and count, count, #15
+ add dst, dst, count
+ stp A_l, A_l, [dst, #-16] /* Repeat some/all of last store. */
+ ret
+
+.Ltail15tiny:
+ /* Set up to 15 bytes. Does not assume earlier memory
+ being set. */
+ tbz count, #3, 1f
+ str A_l, [dst], #8
+1:
+ tbz count, #2, 1f
+ str A_lw, [dst], #4
+1:
+ tbz count, #1, 1f
+ strh A_lw, [dst], #2
+1:
+ tbz count, #0, 1f
+ strb A_lw, [dst]
+1:
+ ret
+
+ /* Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line, this ensures the entire loop is in one line. */
+ .p2align 6
+.Lnot_short:
+ neg tmp2, dst
+ ands tmp2, tmp2, #15
+ b.eq 2f
+ /* Bring DST to 128-bit (16-byte) alignment. We know that there's
+ * more than that to set, so we simply store 16 bytes and advance by
+ * the amount required to reach alignment. */
+ sub count, count, tmp2
+ stp A_l, A_l, [dst]
+ add dst, dst, tmp2
+ /* There may be less than 63 bytes to go now. */
+ cmp count, #63
+ b.le .Ltail63
+2:
+ sub dst, dst, #16 /* Pre-bias. */
+ sub count, count, #64
+1:
+ stp A_l, A_l, [dst, #16]
+ stp A_l, A_l, [dst, #32]
+ stp A_l, A_l, [dst, #48]
+ stp A_l, A_l, [dst, #64]!
+ subs count, count, #64
+ b.ge 1b
+ tst count, #0x3f
+ add dst, dst, #16
+ b.ne .Ltail63
+ ret
+
+#ifndef DONT_USE_DC
+ /* For zeroing memory, check to see if we can use the ZVA feature to
+ * zero entire 'cache' lines. */
+.Lzero_mem:
+ mov A_l, #0
+ cmp count, #63
+ b.le .Ltail_maybe_tiny
+ neg tmp2, dst
+ ands tmp2, tmp2, #15
+ b.eq 1f
+ sub count, count, tmp2
+ stp A_l, A_l, [dst]
+ add dst, dst, tmp2
+ cmp count, #63
+ b.le .Ltail63
+1:
+ /* For zeroing small amounts of memory, it's not worth setting up
+ * the line-clear code. */
+ cmp count, #128
+ b.lt .Lnot_short
+#ifdef MAYBE_VIRT
+ /* For efficiency when virtualized, we cache the ZVA capability. */
+ adrp tmp2, .Lcache_clear
+ ldr zva_len, [tmp2, #:lo12:.Lcache_clear]
+ tbnz zva_len, #31, .Lnot_short
+ cbnz zva_len, .Lzero_by_line
+ mrs tmp1, dczid_el0
+ tbz tmp1, #4, 1f
+ /* ZVA not available. Remember this for next time. */
+ mov zva_len, #~0
+ str zva_len, [tmp2, #:lo12:.Lcache_clear]
+ b .Lnot_short
+1:
+ mov tmp3w, #4
+ and zva_len, tmp1w, #15 /* Safety: other bits reserved. */
+ lsl zva_len, tmp3w, zva_len
+ str zva_len, [tmp2, #:lo12:.Lcache_clear]
+#else
+ mrs tmp1, dczid_el0
+ tbnz tmp1, #4, .Lnot_short
+ mov tmp3w, #4
+ and zva_len, tmp1w, #15 /* Safety: other bits reserved. */
+ lsl zva_len, tmp3w, zva_len
+#endif
+
+.Lzero_by_line:
+ /* Compute how far we need to go to become suitably aligned. We're
+ * already at quad-word alignment. */
+ cmp count, zva_len_x
+ b.lt .Lnot_short /* Not enough to reach alignment. */
+ sub zva_bits_x, zva_len_x, #1
+ neg tmp2, dst
+ ands tmp2, tmp2, zva_bits_x
+ b.eq 1f /* Already aligned. */
+	/* Not aligned, check that there's enough to zero after alignment. */
+ sub tmp1, count, tmp2
+ cmp tmp1, #64
+ ccmp tmp1, zva_len_x, #8, ge /* NZCV=0b1000 */
+ b.lt .Lnot_short
+ /* We know that there's at least 64 bytes to zero and that it's safe
+ * to overrun by 64 bytes. */
+ mov count, tmp1
+2:
+ stp A_l, A_l, [dst]
+ stp A_l, A_l, [dst, #16]
+ stp A_l, A_l, [dst, #32]
+ subs tmp2, tmp2, #64
+ stp A_l, A_l, [dst, #48]
+ add dst, dst, #64
+ b.ge 2b
+ /* We've overrun a bit, so adjust dst downwards. */
+ add dst, dst, tmp2
+1:
+ sub count, count, zva_len_x
+3:
+ dc zva, dst
+ add dst, dst, zva_len_x
+ subs count, count, zva_len_x
+ b.ge 3b
+ ands count, count, zva_bits_x
+ b.ne .Ltail_maybe_long
+ ret
+ .size memset, .-memset
+#ifdef MAYBE_VIRT
+ .bss
+ .p2align 2
+ .Lcache_clear:
+ .space 4
+#endif
+#endif /* DONT_USE_DC */
+
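[Editor's note] The .Lzero_mem path above gates everything on DCZID_EL0: bits [3:0] give log2 of the zeroing block size in 4-byte words (hence the lsl of 4 by that field), and bit 4 set means DC ZVA is prohibited, in which case the code falls back to stp stores. A hedged C rendering of the same probe, for readers less at home in assembly (function names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the DCZID_EL0 probe the assembly performs. Returns the
     * DC ZVA block size in bytes, or 0 if ZVA is prohibited. */
    static size_t zva_block_size(void)
    {
    	uint64_t dczid;

    	__asm__ __volatile__("mrs %0, dczid_el0" : "=r"(dczid));
    	if (dczid & (1UL << 4))		/* DZP bit: ZVA not allowed */
    		return 0;
    	return (size_t)4 << (dczid & 0xf); /* 4 bytes * 2^BS words */
    }

    /* Zero one suitably aligned block, as the .Lzero_by_line loop does. */
    static void zva_zero_block(void *p)
    {
    	__asm__ __volatile__("dc zva, %0" : : "r"(p) : "memory");
    }
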
diff --git a/payloads/libpayload/arch/arm64/sysinfo.c b/payloads/libpayload/arch/arm64/sysinfo.c
new file mode 100644
index 0000000000..6d204e2dc7
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/sysinfo.c
@@ -0,0 +1,64 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <libpayload-config.h>
+#include <libpayload.h>
+#include <coreboot_tables.h>
+
+/**
+ * This is a global structure used throughout the library. We initialize it
+ * with dummy values, which we expect the coreboot tables to override.
+ */
+struct sysinfo_t lib_sysinfo = {
+ .cpu_khz = 200,
+};
+
+int lib_get_sysinfo(void)
+{
+ int ret;
+
+ /* Get the CPU speed (for delays). */
+ lib_sysinfo.cpu_khz = get_cpu_speed();
+
+	/* Get information from the coreboot tables, if they exist. */
+
+ ret = get_coreboot_info(&lib_sysinfo);
+
+ if (!lib_sysinfo.n_memranges) {
+ /* If we can't get a good memory range, use the default. */
+ lib_sysinfo.n_memranges = 1;
+
+ lib_sysinfo.memrange[0].base = 0;
+ lib_sysinfo.memrange[0].size = 1024 * 1024;
+ lib_sysinfo.memrange[0].type = CB_MEM_RAM;
+ }
+
+ return ret;
+}
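
[Editor's note] For context, a payload normally calls lib_get_sysinfo() once at startup and then consumes the table. A brief, hedged usage sketch (the dump function is illustrative; the memrange field names follow coreboot_tables.h):

    #include <libpayload.h>

    /* Sketch: report the memory map discovered (or defaulted to)
     * by lib_get_sysinfo(). */
    static void dump_memranges(void)
    {
    	int i;

    	lib_get_sysinfo();
    	for (i = 0; i < lib_sysinfo.n_memranges; i++)
    		printf("range %d: base %llx size %llx type %d\n", i,
    		       (unsigned long long)lib_sysinfo.memrange[i].base,
    		       (unsigned long long)lib_sysinfo.memrange[i].size,
    		       (int)lib_sysinfo.memrange[i].type);
    }
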
diff --git a/payloads/libpayload/arch/arm64/timer.c b/payloads/libpayload/arch/arm64/timer.c
new file mode 100644
index 0000000000..4587f31ae7
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/timer.c
@@ -0,0 +1,54 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/**
+ * @file arm64/timer.c
+ * ARM64 specific timer routines
+ */
+
+#include <libpayload.h>
+
+/**
+ * @ingroup arch
+ * Global variable containing the speed of the processor in kHz.
+ */
+u32 cpu_khz;
+
+/**
+ * Calculate the speed of the processor for use in delays.
+ *
+ * @return The CPU speed in kHz.
+ */
+unsigned int get_cpu_speed(void)
+{
+	/* FIXME: query the real frequency; assume 1 GHz for now. */
+ cpu_khz = 1000000U;
+
+ return cpu_khz;
+}
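
[Editor's note] Until the FIXME is resolved, delays calibrated against cpu_khz will be wrong on any part not clocked at 1 GHz. One plausible refinement, shown only as a sketch, is to read the architected timer frequency from CNTFRQ_EL0; note this is the generic timer's rate, not the core clock, so it is a substitute only where delays are timer-driven:

    #include <stdint.h>

    /* Sketch: derive a kHz figure from the ARMv8 generic timer's
     * CNTFRQ_EL0 register instead of hardcoding 1 GHz. */
    static unsigned int timer_freq_khz(void)
    {
    	uint64_t freq;

    	__asm__ __volatile__("mrs %0, cntfrq_el0" : "=r"(freq));
    	return (unsigned int)(freq / 1000);
    }
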
diff --git a/payloads/libpayload/arch/arm64/util.S b/payloads/libpayload/arch/arm64/util.S
new file mode 100644
index 0000000000..7c14a39a4f
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/util.S
@@ -0,0 +1,35 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <arch/asm.h>
+
+/* This function halts the system by spinning in place forever. */
+ENTRY(halt)
+ b halt
+ENDPROC(halt)
diff --git a/payloads/libpayload/arch/arm64/virtual.c b/payloads/libpayload/arch/arm64/virtual.c
new file mode 100644
index 0000000000..59768dbd66
--- /dev/null
+++ b/payloads/libpayload/arch/arm64/virtual.c
@@ -0,0 +1,38 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright (C) 2008 coresystems GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+unsigned long virtual_offset = 0;
+
+
+int getpagesize(void)
+{
+ return 4096;
+}
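
[Editor's note] Since virtual_offset is fixed at zero, libpayload on arm64 runs identity-mapped and any virtual/physical translation collapses to a cast plus that offset. A hedged sketch of such helpers under this assumption (the names are illustrative; libpayload's own macros may differ):

    /* Sketch: with virtual_offset == 0 the translation is an identity
     * mapping, so these helpers are effectively casts. */
    extern unsigned long virtual_offset;

    static unsigned long my_virt_to_phys(const void *virt)
    {
    	return (unsigned long)virt + virtual_offset;
    }

    static void *my_phys_to_virt(unsigned long phys)
    {
    	return (void *)(phys - virtual_offset);
    }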