author:    Patrick Rudolph <siro@das-labor.org>  2019-09-29 11:08:33 +0200
committer: Patrick Georgi <pgeorgi@google.com>   2020-07-08 07:28:32 +0000
commit:    03a79520d6f62072ff3de75cc8bbbf0ff4876f62 (patch)
tree:      a5e64772b790c563e25dd872a48f2cbbcdf4192d /src/cpu/x86/64bit
parent:    d5321bf2fb6ddbceea21e8b8e38ae89288fcfb1a (diff)
download:  coreboot-03a79520d6f62072ff3de75cc8bbbf0ff4876f62.tar.xz
cpu/x86/smm: Add support for long mode
Enable long mode in the SMM handler. x86_32 isn't affected by this change.

As the rsm instruction used to leave SMM doesn't restore MSR registers,
drop back to protected mode after running the smi_handler and restore the
IA32_EFER MSR (which enables long mode support) to its previous value.

NOTE: This commit does NOT introduce a new security model. It uses the
same page tables as the remaining firmware does. This can be a security
risk if someone is able to manipulate the page tables stored in ROM at
runtime. USE FOR TESTING ONLY!

Tested on QEMU Q35.

Change-Id: I8bba4af4688c723fc079ae905dac95f57ea956f8
Signed-off-by: Patrick Rudolph <siro@das-labor.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/35681
Reviewed-by: Raul Rangel <rrangel@chromium.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
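For context, a minimal sketch of the flow this change implies, assuming
illustrative names (smm_entry_sketch and saved_efer are hypothetical, not
the actual coreboot symbols): save IA32_EFER on entry, run the handler in
long mode, then restore the MSR after dropping back to protected mode,
since rsm will not do so.

.code32
smm_entry_sketch:			/* hypothetical entry point */
	movl	$(IA32_EFER), %ecx
	rdmsr				/* rsm won't restore MSRs, */
	movl	%eax, saved_efer	/* so save IA32_EFER by hand */
	movl	%edx, saved_efer + 4

	/* ... enable PAE, set EFER.LME, enable paging, far-jump to
	 * 64-bit code and run the smi_handler; drop_longmode (below)
	 * then brings us back to protected mode ... */

	movl	$(IA32_EFER), %ecx
	movl	saved_efer, %eax
	movl	saved_efer + 4, %edx
	wrmsr				/* restore previous IA32_EFER */
	rsm				/* leave SMM */

saved_efer:
	.quad	0			/* illustrative storage only */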
Diffstat (limited to 'src/cpu/x86/64bit')
-rw-r--r--  src/cpu/x86/64bit/exit32.inc  84
1 file changed, 84 insertions, 0 deletions
diff --git a/src/cpu/x86/64bit/exit32.inc b/src/cpu/x86/64bit/exit32.inc
new file mode 100644
index 0000000000..48837d96a9
--- /dev/null
+++ b/src/cpu/x86/64bit/exit32.inc
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * For dropping from long mode to protected mode.
+ *
+ * For reference, see "AMD64 Architecture Programmer's Manual Volume 2",
+ * Document 24593, Rev. 3.31, July 2019, Chapter 5.3.
+ *
+ * Clobbers: rax, rbx, rcx, rdx
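+ *
+ * Sequence: reload %cs with a 32-bit code segment via iretq, clear
+ * CR0.PG, clear EFER.LME, clear CR4.PAE, then clear %cr3 so no stale
+ * page-table pointer remains.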
+ */
+.code64
+
+#include <cpu/x86/msr.h>
+#include <cpu/x86/cr.h>
+#include <arch/rom_segs.h>
+
+drop_longmode:
+ /* Ensure cache is clean: write back and invalidate before the mode switch. */
+ wbinvd
+
+ /* Set 32-bit code segment and ss */
+ mov $ROM_CODE_SEG, %rcx
+ /* SetCodeSelector32 returns via iretq, reloading %cs to drop us to protected mode */
+ call SetCodeSelector32
+
+ /* Back in protected mode; jump over SetCodeSelector32's body */
+.code32
+ jmp __longmode_compatibility
+
+.align 8
+.code64
+SetCodeSelector32:
+ # pop the return address off the stack
+ pop %rbx
+
+ # save rsp because we need to push it after ss
+ mov %rsp, %rdx
+
+ # use iret to jump to a 32-bit offset in a new code segment
+ # iret will pop cs:rip, flags, then ss:rsp
+ mov %ss, %ax # need to push ss, but the push ss instruction
+ push %rax # is not valid in 64-bit mode, so go via %rax
+ push %rdx # the rsp to load
+ pushfq # push rflags
+ push %rcx # cx is code segment selector from caller
+ push %rbx # push the IP for the next instruction
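+ # resulting iretq frame, popped top-down:
+ #   rip    <- %rbx, the caller's return address
+ #   cs     <- %rcx, ROM_CODE_SEG (a 32-bit code segment)
+ #   rflags
+ #   rsp    <- %rdx, the stack pointer saved above
+ #   ss     <- %rax, unchanged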
+
+ # the iretq will behave like ret, with the new cs/ss value loaded
+ iretq
+
+.align 4
+.code32
+__longmode_compatibility:
+ /* Running in 32-bit compatibility mode */
+
+ /* Use flat data segment */
+ movl $ROM_DATA_SEG, %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %ss
+ movl %eax, %fs
+ movl %eax, %gs
+
+ /* Disable paging. */
+ movl %cr0, %eax
+ andl $(~CR0_PG), %eax
+ movl %eax, %cr0
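+ /* Clearing PG in compatibility mode deactivates long mode. */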
+
+ /* Disable long mode. */
+ movl $(IA32_EFER), %ecx
+ rdmsr
+ andl $(~EFER_LME), %eax
+ wrmsr
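+ /* With LME clear, re-enabling paging won't re-enter long mode. */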
+
+ /* Disable PAE. */
+ movl %cr4, %eax
+ andl $(~CR4_PAE), %eax
+ movl %eax, %cr4
+
+ /* Clear page table register */
+ xor %eax, %eax
+ movl %eax, %cr3
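+ /* %cr3 no longer points at the long-mode page tables. */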
+
+__longmode_exit: