author    Stefan Reinauer <stefan.reinauer@coreboot.org>  2015-07-13 09:39:15 +0200
committer Stefan Reinauer <stefan.reinauer@coreboot.org>  2015-07-13 21:04:56 +0200
commit    6cb3a59fd5e754c3627b79db21c5bcc284bfd721 (patch)
tree      e83db5b11ee4a29d496dcf2798d024b6b8455ab7 /src/arch/x86/boot.c
parent    9693885ad88d21ead7bd9ebc32f3e4901841b18b (diff)
download  coreboot-6cb3a59fd5e754c3627b79db21c5bcc284bfd721.tar.xz
x86: flatten hierarchy
It never made sense to have bootblock_* in init, but pirq_routing.c in
boot, and some ld scripts at the top level while others live in
subdirectories. This patch flattens the directory hierarchy and makes
x86 more similar to the other architectures.

Change-Id: I4056038fe7813e4d3d3042c441e7ab6076a36384
Signed-off-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
Reviewed-on: http://review.coreboot.org/10901
Tested-by: build bot (Jenkins)
Tested-by: Raptor Engineering Automated Test Stand <noreply@raptorengineeringinc.com>
Reviewed-by: Ronald G. Minnich <rminnich@gmail.com>
Diffstat (limited to 'src/arch/x86/boot.c')
-rw-r--r--   src/arch/x86/boot.c   210
1 file changed, 210 insertions, 0 deletions
diff --git a/src/arch/x86/boot.c b/src/arch/x86/boot.c
new file mode 100644
index 0000000000..7eb87fbef6
--- /dev/null
+++ b/src/arch/x86/boot.c
@@ -0,0 +1,210 @@
+#include <console/console.h>
+#include <arch/stages.h>
+#include <program_loading.h>
+#include <ip_checksum.h>
+#include <string.h>
+#include <symbols.h>
+
+/* When the ramstage is relocatable, the ELF loading ensures an ELF image
+ * cannot be loaded over the ramstage code. */
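+/* Otherwise the payload has to be bounced through a buffer (see
+ * jmp_payload() below) so that copying it into place cannot clobber the
+ * code that is doing the copying. */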
+static void jmp_payload_no_bounce_buffer(void *entry)
+{
+ /* Jump to the payload */
+ __asm__ __volatile__(
+ " cld \n\t"
+ /* Now jump to the loaded image */
+ " call *%0\n\t"
+
+ /* The loaded image returned? */
+ " cli \n\t"
+ " cld \n\t"
+
+ ::
+ "r" (entry)
+ );
+}
+
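+/* Without a relocatable ramstage, the payload may be loaded over the
+ * running coreboot image. jmp_payload() handles that case: it saves a
+ * copy of coreboot at buffer + lb_size, jumps into that copy, copies the
+ * bounce buffer (presumably the chunk of the payload that overlaps
+ * coreboot) over the original coreboot, and calls the payload. If the
+ * payload returns, it restores coreboot from the saved copy and jumps
+ * back into it. */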
+static void jmp_payload(void *entry, unsigned long buffer, unsigned long size)
+{
+ unsigned long lb_start, lb_size;
+
+ lb_start = (unsigned long)&_program;
+ lb_size = _program_size;
+
+ printk(BIOS_SPEW, "entry = 0x%08lx\n", (unsigned long)entry);
+ printk(BIOS_SPEW, "lb_start = 0x%08lx\n", lb_start);
+ printk(BIOS_SPEW, "lb_size = 0x%08lx\n", lb_size);
+ printk(BIOS_SPEW, "buffer = 0x%08lx\n", buffer);
+
+ /* Jump to the payload */
+ __asm__ __volatile__(
+ " cld \n\t"
+#ifdef __x86_64__
+ /* switch back to 32-bit mode */
+ " push %4\n\t"
+ " push %3\n\t"
+ " push %2\n\t"
+ " push %1\n\t"
+ " push %0\n\t"
+
+ ".intel_syntax noprefix\n\t"
+ /* use iret to switch to 32-bit code segment */
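+ /* Hand-build the iretq frame: SS:RSP, RFLAGS, CS and RIP. Selector
+ * 0x10 is assumed to be a 32-bit code segment in the GDT; RIP points
+ * just past the iretq so execution falls through in 32-bit mode. */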
+ " xor rax,rax\n\t"
+ " mov ax, ss\n\t"
+ " push rax\n\t"
+ " mov rax, rsp\n\t"
+ " add rax, 8\n\t"
+ " push rax\n\t"
+ " pushfq\n\t"
+ " push 0x10\n\t"
+ " lea rax,[rip+3]\n\t"
+ " push rax\n\t"
+ " iretq\n\t"
+ ".code32\n\t"
+ /* disable paging */
+ " mov eax, cr0\n\t"
+ " btc eax, 31\n\t"
+ " mov cr0, eax\n\t"
+ /* disable long mode */
+ " mov ecx, 0xC0000080\n\t"
+ " rdmsr\n\t"
+ " btc eax, 8\n\t"
+ " wrmsr\n\t"
+
+ " pop eax\n\t"
+ " add esp, 4\n\t"
+ " pop ebx\n\t"
+ " add esp, 4\n\t"
+ " pop ecx\n\t"
+
+ " add esp, 4\n\t"
+ " pop edx\n\t"
+ " add esp, 4\n\t"
+ " pop esi\n\t"
+ " add esp, 4\n\t"
+
+ ".att_syntax prefix\n\t"
+#endif
+
+ /* Save the callee save registers... */
+ " pushl %%esi\n\t"
+ " pushl %%edi\n\t"
+ " pushl %%ebx\n\t"
+ /* Save the parameters I was passed */
+#ifdef __x86_64__
+ " pushl $0\n\t" /* 20 adjust */
+ " pushl %%eax\n\t" /* 16 lb_start */
+ " pushl %%ebx\n\t" /* 12 buffer */
+ " pushl %%ecx\n\t" /* 8 lb_size */
+ " pushl %%edx\n\t" /* 4 entry */
+ " pushl %%esi\n\t" /* 0 elf_boot_notes */
+#else
+ " pushl $0\n\t" /* 20 adjust */
+ " pushl %0\n\t" /* 16 lb_start */
+ " pushl %1\n\t" /* 12 buffer */
+ " pushl %2\n\t" /* 8 lb_size */
+ " pushl %3\n\t" /* 4 entry */
+ " pushl %4\n\t" /* 0 elf_boot_notes */
+#endif
+ /* Compute the adjustment */
+ " xorl %%eax, %%eax\n\t"
+ " subl 16(%%esp), %%eax\n\t"
+ " addl 12(%%esp), %%eax\n\t"
+ " addl 8(%%esp), %%eax\n\t"
+ " movl %%eax, 20(%%esp)\n\t"
+ /* Place a copy of coreboot in its new location */
+ /* Move ``longs''; the coreboot size is 4-byte aligned */
+ " movl 12(%%esp), %%edi\n\t"
+ " addl 8(%%esp), %%edi\n\t"
+ " movl 16(%%esp), %%esi\n\t"
+ " movl 8(%%esp), %%ecx\n\n"
+ " shrl $2, %%ecx\n\t"
+ " rep movsl\n\t"
+
+ /* Adjust the stack pointer to point into the new coreboot image */
+ " addl 20(%%esp), %%esp\n\t"
+ /* Adjust the instruction pointer to point into the new coreboot image */
+ " movl $1f, %%eax\n\t"
+ " addl 20(%%esp), %%eax\n\t"
+ " jmp *%%eax\n\t"
+ "1: \n\t"
+
+ /* Copy the coreboot bounce buffer over coreboot */
+ /* Move ``longs''; the coreboot size is 4-byte aligned */
+ " movl 16(%%esp), %%edi\n\t"
+ " movl 12(%%esp), %%esi\n\t"
+ " movl 8(%%esp), %%ecx\n\t"
+ " shrl $2, %%ecx\n\t"
+ " rep movsl\n\t"
+
+ /* Now jump to the loaded image */
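+ /* %5 and the saved elf_boot_notes slot are both hardwired to 0, so the
+ * payload is entered with %eax = %ebx = 0. */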
+ " movl %5, %%eax\n\t"
+ " movl 0(%%esp), %%ebx\n\t"
+ " call *4(%%esp)\n\t"
+
+ /* The loaded image returned? */
+ " cli \n\t"
+ " cld \n\t"
+
+ /* Copy the saved copy of coreboot where coreboot runs */
+ /* Move ``longs''; the coreboot size is 4-byte aligned */
+ " movl 16(%%esp), %%edi\n\t"
+ " movl 12(%%esp), %%esi\n\t"
+ " addl 8(%%esp), %%esi\n\t"
+ " movl 8(%%esp), %%ecx\n\t"
+ " shrl $2, %%ecx\n\t"
+ " rep movsl\n\t"
+
+ /* Adjust the stack pointer to point into the old coreboot image */
+ " subl 20(%%esp), %%esp\n\t"
+
+ /* Adjust the instruction pointer to point into the old coreboot image */
+ " movl $1f, %%eax\n\t"
+ " subl 20(%%esp), %%eax\n\t"
+ " jmp *%%eax\n\t"
+ "1: \n\t"
+
+ /* Drop the parameters I was passed */
+ " addl $24, %%esp\n\t"
+
+ /* Restore the callee save registers */
+ " popl %%ebx\n\t"
+ " popl %%edi\n\t"
+ " popl %%esi\n\t"
+#ifdef __x86_64__
+ ".code64\n\t"
+#endif
+ ::
+ "ri" (lb_start), "ri" (buffer), "ri" (lb_size),
+ "ri" (entry),
+ "ri"(0), "ri" (0)
+ );
+}
+
+static void try_payload(struct prog *prog)
+{
+ if (prog_type(prog) == ASSET_PAYLOAD) {
+ if (IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE))
+ jmp_payload_no_bounce_buffer(prog_entry(prog));
+ else
+ jmp_payload(prog_entry(prog),
+ (uintptr_t)prog_start(prog),
+ prog_size(prog));
+ }
+}
+
+void arch_prog_run(struct prog *prog)
+{
+ if (ENV_RAMSTAGE)
+ try_payload(prog);
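+ /* For any other program, jump straight to its entry point, which the
+ * "D" constraint places in %edi (or %rdi on x86_64). */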
+ __asm__ volatile (
+#ifdef __x86_64__
+ "jmp *%%rdi\n"
+#else
+ "jmp *%%edi\n"
+#endif
+
+ :: "D"(prog_entry(prog))
+ );
+}