summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/arch/x86/Kconfig8
-rw-r--r--src/arch/x86/bootblock_crt0.S6
-rw-r--r--src/cpu/qemu-x86/cache_as_ram_bootblock.S11
-rw-r--r--src/cpu/x86/64bit/entry64.inc62
4 files changed, 86 insertions, 1 deletions
diff --git a/src/arch/x86/Kconfig b/src/arch/x86/Kconfig
index 171b408da2..37b7d2daaa 100644
--- a/src/arch/x86/Kconfig
+++ b/src/arch/x86/Kconfig
@@ -66,6 +66,14 @@ config ARCH_RAMSTAGE_X86_64
bool
default n
+config ARCH_X86_64_PGTBL_LOC
+ hex "x86_64 page table location in CBFS"
+ depends on ARCH_BOOTBLOCK_X86_64
+ default 0xfffea000
+ help
+	  The location at which to place the page tables. This must be
+	  known at compile time and must not overlap other files in CBFS.
+
config USE_MARCH_586
def_bool n
help
diff --git a/src/arch/x86/bootblock_crt0.S b/src/arch/x86/bootblock_crt0.S
index 3cb57e058c..9fcb5c4e4a 100644
--- a/src/arch/x86/bootblock_crt0.S
+++ b/src/arch/x86/bootblock_crt0.S
@@ -31,6 +31,12 @@
#include <cpu/x86/16bit/reset16.inc>
#include <cpu/x86/32bit/entry32.inc>
+ /* BIST result in eax */
+ mov %eax, %ebx
+ /* entry64.inc preserves ebx. */
+#include <cpu/x86/64bit/entry64.inc>
+ mov %ebx, %eax
+
#if CONFIG(BOOTBLOCK_DEBUG_SPINLOOP)
/* Wait for a JTAG debugger to break in and set EBX non-zero */
diff --git a/src/cpu/qemu-x86/cache_as_ram_bootblock.S b/src/cpu/qemu-x86/cache_as_ram_bootblock.S
index 6ec2e4dc2c..f5678a1807 100644
--- a/src/cpu/qemu-x86/cache_as_ram_bootblock.S
+++ b/src/cpu/qemu-x86/cache_as_ram_bootblock.S
@@ -38,9 +38,17 @@ cache_as_ram:
/* Align the stack and keep aligned for call to bootblock_c_entry() */
and $0xfffffff0, %esp
- sub $4, %esp
/* Restore the BIST result and timestamps. */
+#if defined(__x86_64__)
+	movd	%mm2, %rdi
+	shlq	$32, %rdi
+	movd	%mm1, %rsi
+	or	%rsi, %rdi
+	movd	%mm0, %rsi
+#else
+ sub $4, %esp
+
movd %mm0, %ebx
movd %mm1, %eax
movd %mm2, %edx
@@ -48,6 +56,7 @@ cache_as_ram:
pushl %ebx
pushl %edx
pushl %eax
+#endif
before_c_entry:
post_code(0x29)
diff --git a/src/cpu/x86/64bit/entry64.inc b/src/cpu/x86/64bit/entry64.inc
new file mode 100644
index 0000000000..f726fab506
--- /dev/null
+++ b/src/cpu/x86/64bit/entry64.inc
@@ -0,0 +1,62 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (c) 2019 Patrick Rudolph <siro@das-labor.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * For starting coreboot in long mode.
+ *
+ * For reference see "AMD64 Architecture Programmer's Manual Volume 2",
+ * Document 24593-Rev. 3.31-July 2019 Chapter 5.3
+ *
+ * Clobbers: eax, ecx, edx
+ */
+
+#if defined(__x86_64__)
+ .code32
+#if (CONFIG_ARCH_X86_64_PGTBL_LOC & 0xfff) > 0
+#error pagetables must be 4KiB aligned!
+#endif
+
+#include <cpu/x86/msr.h>
+#include <arch/rom_segs.h>
+
+setup_longmode:
+ /* Get page table address */
+ movl $(CONFIG_ARCH_X86_64_PGTBL_LOC), %eax
+
+ /* load identity mapped page tables */
+ movl %eax, %cr3
+
+ /* enable PAE */
+ movl %cr4, %eax
+ btsl $5, %eax
+ movl %eax, %cr4
+
+ /* enable long mode */
+ movl $(IA32_EFER), %ecx
+ rdmsr
+ btsl $8, %eax
+ wrmsr
+
+ /* enable paging */
+ movl %cr0, %eax
+ btsl $31, %eax
+ movl %eax, %cr0
+
+ /* use long jump to switch to 64-bit code segment */
+ ljmp $ROM_CODE_SEG64, $__longmode_start
+.code64
+__longmode_start:
+
+#endif