path: root/src/cpu/x86/smm/smm_stub.S
/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * The stub is a generic wrapper for bootstrapping a C-based SMM handler. Its
 * primary purpose is to put the CPU into protected mode with a stack and call
 * into the C handler.
 *
 * The stub_entry_params structure needs to correspond to the C structure
 * found in smm.h.
 */

#include <cpu/x86/cr.h>
#include <cpu/x86/msr.h>

.code32
.section ".module_parameters", "aw", @progbits
stub_entry_params:
stack_size:
.long 0
stack_top:
.long 0
c_handler:
.long 0
c_handler_arg:
.long 0
fxsave_area:
.long 0
fxsave_area_size:
.long 0
/* struct smm_runtime begins here. */
smm_runtime:
smbase:
.long 0
smm_size:
.long 0
save_state_size:
.long 0
num_cpus:
.long 0
gnvs_ptr:
.long 0
/* allows the STM to bring up SMM in 32-bit mode */
start32_offset:
.long smm_trampoline32 - _start
/* apic_to_cpu_num is a table mapping the default APIC id to a CPU number. If
 * the APIC id is found at a given index, that index is the contiguous CPU
 * number. */
apic_to_cpu_num:
.fill CONFIG_MAX_CPUS,1,0xff
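/* Entries are filled with 0xff until APIC ids are assigned. */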
/* end struct smm_runtime */

.data
/* Provide a fallback stack to use when a valid CPU number cannot be found. */
fallback_stack_bottom:
.skip 128
fallback_stack_top:

#define CR0_CLEAR_FLAGS \
	(CR0_CD | CR0_NW | CR0_PG | CR0_AM | CR0_WP | \
	 CR0_NE | CR0_TS | CR0_EM | CR0_MP)

.text
.code16
.global _start
_start:
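	/* Load the GDT providing the flat code/data segments used below. */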
	movl	$(smm_relocate_gdt), %ebx
	lgdtl	(%ebx)

	movl	%cr0, %eax
	andl	$~CR0_CLEAR_FLAGS, %eax
	orl	$CR0_PE, %eax
	movl	%eax, %cr0

	/* Load the 32-bit code segment to complete the switch to protected mode */
	ljmpl	$0x8, $smm_trampoline32

.align 4
smm_relocate_gdt:
	/* The first GDT entry (the null descriptor) doubles as the pseudo-descriptor for lgdt. */
	.word	smm_relocate_gdt_end - smm_relocate_gdt - 1
	.long	smm_relocate_gdt
	.word	0x0000

	/* gdt selector 0x08, flat code segment */
	.word	0xffff, 0x0000
	.byte	0x00, 0x9b, 0xcf, 0x00 /* G=1, limit[19:16]=0xf: 4GiB limit */

	/* gdt selector 0x10, flat data segment */
	.word	0xffff, 0x0000
	.byte	0x00, 0x93, 0xcf, 0x00

	/* gdt selector 0x18, flat code segment (64-bit) */
	.word	0xffff, 0x0000
	.byte	0x00, 0x9b, 0xaf, 0x00

	/* gdt selector 0x20 tss segment */
	.word   0xffff, 0x0000
	.byte   0x00, 0x8b, 0x80, 0x00
smm_relocate_gdt_end:

.align 4
.code32
.global smm_trampoline32
smm_trampoline32:
	/* Use flat data segment */
	movw	$0x10, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss
	movw	%ax, %fs
	movw	%ax, %gs

	/* The CPU number is determined by reading the initial APIC id. Since
	 * the OS can manipulate the APIC id, use the non-changing cpuid result
	 * for the APIC id (ebx[31:24]). A table is used to handle a
	 * discontiguous APIC id space. */
	mov	$1, %eax
	cpuid
	bswap	%ebx	/* Default APIC id in bl. */
	mov	$(apic_to_cpu_num), %eax
	xor	%ecx, %ecx

1:
	cmp	(%eax, %ecx, 1), %bl
	je	1f
	inc	%ecx
	cmp	$CONFIG_MAX_CPUS, %ecx
	jne	1b
	/* This is bad. No stack can be selected because a CPU number could
	 * not be assigned. Use the fallback stack and check this condition in
	 * the C handler. */
	movl	$(fallback_stack_top), %esp
	/* Clear fxsave location as there will be no saving/restoring. */
	xor	%edi, %edi
	jmp	2f
1:
	movl	stack_size, %eax
	mul	%ecx /* %eax(stack_size) * %ecx(cpu) = %eax(offset) */
	movl	stack_top, %ebx
	subl	%eax, %ebx /* global_stack_top - offset = stack_top */
	mov	%ebx, %esp

	/* Write canary to the bottom of the stack */
	movl	stack_size, %eax
	subl	%eax, %ebx /* %ebx(stack_top) - size = %ebx(stack_bottom) */
	movl	%ebx, (%ebx)
#if ENV_X86_64
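	/* The canary is read as a 64-bit value in long mode; zero the upper half. */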
	movl	$0, 4(%ebx)
#endif

	/* Create stack frame by pushing a NULL stack base pointer */
	pushl	$0x0
	mov	%esp, %ebp

	/* Allocate locals (fxsave, efer_backup) */
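	/* -0x4(%ebp): fxsave area pointer, -0x8/-0xc(%ebp): IA32_EFER backup (x86_64). */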
	subl	$0xc, %esp

	/* calculate fxsave location */
	mov	fxsave_area, %edi
	test	%edi, %edi
	jz	2f
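	/* Point %edi at this CPU's slot within the fxsave area. */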
	movl	fxsave_area_size, %eax
	mul	%ecx
	add	%eax, %edi

2:
	/* Save location of fxsave area. */
	mov	%edi, -4(%ebp)
	test	%edi, %edi
	jz	1f

	/* Enable sse instructions. */
	mov	%cr4, %eax
	orl	$(CR4_OSFXSR | CR4_OSXMMEXCPT), %eax
	mov	%eax, %cr4

	/* Save FP state. */
	fxsave	(%edi)

1:
	/* Align stack to 16 bytes. Another 32 bytes are pushed below. */
	andl	$0xfffffff0, %esp

#ifdef __x86_64__
	mov	%ecx, %edi
	/* Backup IA32_EFER. Preserves ebx. */
	movl	$(IA32_EFER), %ecx
	rdmsr
	movl	%eax, -0x8(%ebp)
	movl	%edx, -0xc(%ebp)

	/* entry64.inc preserves ebx, esi, edi */
#include <cpu/x86/64bit/entry64.inc>
	mov	%edi, %ecx

#endif

	/* Call into the C-based SMM handler with the platform parameters.
	 * Equivalent to:
	 *   struct arg = { c_handler_arg, cpu_num, smm_runtime, canary };
	 *   c_handler(&arg)
	 */
#ifdef __x86_64__
	push	%rbx /* uintptr_t *canary */
	push	$(smm_runtime)
	push	%rcx /* size_t cpu */
	push	c_handler_arg /* void *arg */

	mov	%rsp, %rdi	/* *arg */

	movl	c_handler, %eax
	call	*%rax

	/*
	 * The only reason to go back to protected mode is that RSM doesn't
	 * restore MSRs, and IA32_EFER was modified by entering long mode.
	 * Drop to protected mode to safely operate on the IA32_EFER MSR.
	 */

	/* Disable long mode. */
	#include <cpu/x86/64bit/exit32.inc>

	/* Restore IA32_EFER as RSM doesn't restore MSRs. */
	movl	$(IA32_EFER), %ecx
	movl	-0x8(%ebp), %eax
	movl	-0xc(%ebp), %edx
	wrmsr

#else
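	/* Three pad dwords keep the eight 4-byte pushes 16-byte aligned. */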
	push	$0x0 /* Padding */
	push	$0x0 /* Padding */
	push	$0x0 /* Padding */
	push	%ebx /* uintptr_t *canary */
	push	$(smm_runtime)
	push	%ecx /* size_t cpu */
	push	c_handler_arg /* void *arg */
	push	%esp /* smm_module_params *arg (allocated on stack). */
	mov	c_handler, %eax
	call	*%eax
#endif
	/* Retrieve fxsave location. */
	mov	-4(%ebp), %edi
	test	%edi, %edi
	jz	1f

	/* Restore FP state. */
	fxrstor	(%edi)

1:
	/* Exit from System Management Mode. */
	rsm