author    Patrick Rudolph <patrick.rudolph@9elements.com>    2019-12-01 07:04:04 +0100
committer Patrick Georgi <pgeorgi@google.com>                2020-08-18 08:49:57 +0000
commit    24bb8036c90feb8c2f45f59cce15eec7792a6393 (patch)
tree      9938a69ddb8c1e6bf9f9185a69bae69977d55f0b /src/cpu
parent    5eeead2d73d2daa2361d0272db41e85e1de79a6c (diff)
download  coreboot-24bb8036c90feb8c2f45f59cce15eec7792a6393.tar.xz
cpu/x86/smm/smm_stub: Add x86_64 support
Enable long mode in the SMM handler. x86_32 is not affected by this change.

* Enter long mode
* Add a 64bit entry to the GDT
* Use the x86_64 SysV ABI calling convention for the C code entry
* Change smm_module_params' cpu to size_t, as 'push' stores a native integer
* Drop back to protected mode after the C handler returns

NOTE: This commit does NOT introduce a new security model. It uses the
same page tables as the rest of the firmware. This can be a security
risk if someone is able to manipulate the page tables stored in ROM at
runtime. USE FOR TESTING ONLY!

Tested on a Lenovo T410 with additional x86_64 patches.

Change-Id: I26300492e4be62ddd5d80525022c758a019d63a1
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/37392
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-by: Eugene Myers <cedarhouse1@comcast.net>
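[Editor's note] Why the cpu field has to become size_t: 'push' stores a
native integer word, 4 bytes in protected mode but 8 bytes in long mode,
so every member of the parameter block must be pointer-width for the
pushed stack slots to line up with the C struct in both modes. A sketch
of struct smm_module_params as it looks after this change (field order
mirrors the reverse of the push sequence in the diff below; see
<cpu/x86/smm.h> for the authoritative definition):

    #include <stddef.h>
    #include <stdint.h>

    struct smm_runtime;	/* full definition lives in <cpu/x86/smm.h> */

    /*
     * Parameter block the stub assembles on its stack before calling
     * into C. Each 'push' fills one pointer-width slot, so an
     * 'int cpu' would no longer match the pushed slots on x86_64;
     * size_t keeps the on-stack layout identical to the C struct in
     * both modes.
     */
    struct smm_module_params {
    	void *arg;
    	size_t cpu;
    	const struct smm_runtime *runtime;
    	uintptr_t *canary;
    };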
Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/x86/smm/smm_stub.S | 52
1 file changed, 48 insertions(+), 4 deletions(-)
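[Editor's note] The hunks below save IA32_EFER into two 4-byte stack
slots before entering long mode and restore it just before RSM, because
RSM does not restore MSRs. Expressed with coreboot's C MSR helpers from
<cpu/x86/msr.h>, the same operation would look roughly like this (a
sketch only; the stub must do it in assembly, since no C environment is
set up at that point):

    #include <cpu/x86/msr.h>

    /*
     * Sketch of the EFER save/restore the stub performs in assembly.
     * IA32_EFER is a 64-bit MSR; rdmsr returns it as a lo/hi pair,
     * which is why the stub spills %eax and %edx into the two stack
     * slots at -0x8(%ebp) and -0xc(%ebp).
     */
    static void efer_save_restore_sketch(void)
    {
    	msr_t efer_backup = rdmsr(IA32_EFER);

    	/* ... enter long mode, run the C handler, drop back to
    	   protected mode (entry64.inc / exit32.inc) ... */

    	wrmsr(IA32_EFER, efer_backup);	/* RSM won't do this for us */
    }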
diff --git a/src/cpu/x86/smm/smm_stub.S b/src/cpu/x86/smm/smm_stub.S
index 425724d559..11ea9a71a8 100644
--- a/src/cpu/x86/smm/smm_stub.S
+++ b/src/cpu/x86/smm/smm_stub.S
@@ -10,6 +10,7 @@
*/
#include <cpu/x86/cr.h>
+#include <cpu/x86/msr.h>
.code32
.section ".module_parameters", "aw", @progbits
@@ -148,8 +149,8 @@ smm_trampoline32:
pushl $0x0
mov %esp, %ebp
- /* Allocate locals (fxsave) */
- subl $0x4, %esp
+ /* Allocate locals (fxsave, efer_backup) */
+ subl $0xc, %esp
/* calculate fxsave location */
mov fxsave_area, %edi
@@ -177,22 +178,65 @@ smm_trampoline32:
/* Align stack to 16 bytes. Another 32 bytes are pushed below. */
andl $0xfffffff0, %esp
+#ifdef __x86_64__
+ mov %ecx, %edi
+ /* Backup IA32_EFER. Preserves ebx. */
+ movl $(IA32_EFER), %ecx
+ rdmsr
+ movl %eax, -0x8(%ebp)
+ movl %edx, -0xc(%ebp)
+
+ /* entry64.inc preserves ebx, esi, edi */
+#include <cpu/x86/64bit/entry64.inc>
+ mov %edi, %ecx
+
+#endif
+
/* Call into the c-based SMM relocation function with the platform
* parameters. Equivalent to:
* struct arg = { c_handler_params, cpu_num, smm_runtime, canary };
* c_handler(&arg)
*/
+#ifdef __x86_64__
+ push %rbx /* uintptr_t *canary */
+ push $(smm_runtime)
+ push %rcx /* size_t cpu */
+ push c_handler_arg /* void *arg */
+
+ mov %rsp, %rdi /* *arg */
+
+ movl c_handler, %eax
+ call *%rax
+
+ /*
+ * The only reason to go back to protected mode is that RSM doesn't restore
+ * MSR registers and MSR IA32_EFER was modified by entering long mode.
+ * Drop to protected mode to safely operate on the IA32_EFER MSR.
+ */
+
+ /* Disable long mode. */
+ #include <cpu/x86/64bit/exit32.inc>
+
+ /* Restore IA32_EFER as RSM doesn't restore MSRs. */
+ movl $(IA32_EFER), %ecx
+ rdmsr
+ movl -0x8(%ebp), %eax
+ movl -0xc(%ebp), %edx
+
+ wrmsr
+
+#else
push $0x0 /* Padding */
push $0x0 /* Padding */
push $0x0 /* Padding */
push %ebx /* uintptr_t *canary */
push $(smm_runtime)
- push %ecx /* int cpu */
+ push %ecx /* size_t cpu */
push c_handler_arg /* void *arg */
push %esp /* smm_module_params *arg (allocated on stack). */
mov c_handler, %eax
call *%eax
-
+#endif
/* Retrieve fxsave location. */
mov -4(%ebp), %edi
test %edi, %edi
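[Editor's note] For context on the call itself: in the x86_64 SysV ABI
the first argument travels in %rdi, hence the 'mov %rsp, %rdi' above,
whereas the x86_32 path pushes %esp as an on-stack argument. The C entry
point is the same either way. A hedged sketch of the receiving side,
with smm_handler_start standing in for whatever function c_handler
points at (the real handler lives in src/cpu/x86/smm/smm_module_handler.c):

    #include <cpu/x86/smm.h>

    /*
     * Sketch of the C handler entry: it receives a pointer to the
     * smm_module_params block the stub just built on its own stack.
     * Whether that pointer arrived on the stack (x86_32) or in %rdi
     * (x86_64) is invisible at this level.
     */
    asmlinkage void smm_handler_start(void *arg)
    {
    	struct smm_module_params *p = arg;

    	if (p->cpu >= CONFIG_MAX_CPUS)
    		return;	/* ignore CPUs beyond the configured maximum */

    	/* ... per-CPU SMM save-state handling ... */
    }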