summaryrefslogtreecommitdiff
path: root/src/cpu/x86/smm/smm_module_loader.c
blob: fc1e1b3062297761897a1350ec9b3cafc8cec54c (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
/* SPDX-License-Identifier: GPL-2.0-only */

#include <string.h>
#include <acpi/acpi_gnvs.h>
#include <rmodule.h>
#include <cpu/x86/smm.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <security/intel/stm/SmmStm.h>

#define FXSAVE_SIZE 512

/* FXSAVE area during relocation. While it may not be strictly needed the
   SMM stub code relies on the FXSAVE area being non-zero to enable SSE
   instructions within SMM mode. The fxsave/fxrstor instructions require
   the save area to be 16-byte aligned, hence the explicit attribute. */
static uint8_t fxsave_area_relocation[CONFIG_MAX_CPUS][FXSAVE_SIZE]
__attribute__((aligned(16)));

/*
 * Components that make up the SMRAM:
 * 1. Save state - the total save state memory used
 * 2. Stack - stacks for the CPUs in the SMM handler
 * 3. Stub - SMM stub code for calling into handler
 * 4. Handler - C-based SMM handler.
 *
 * The components are assumed to consist of one consecutive region.
 */

/* These parameters are used by the SMM stub code. A pointer to the params
 * is also passed to the C-base handler. The layout is shared with the
 * assembly stub, so the struct is packed and all fields are fixed-width. */
struct smm_stub_params {
	u32 stack_size;		/* Size of each per-CPU stack. */
	u32 stack_top;		/* Address of the top of the stack region. */
	u32 c_handler;		/* Entry point of the C-based SMM handler. */
	u32 c_handler_arg;	/* Argument passed to the C-based handler. */
	u32 fxsave_area;	/* Base of the per-CPU FXSAVE areas (0 if unused). */
	u32 fxsave_area_size;	/* Size of one FXSAVE area. */
	struct smm_runtime runtime;	/* Runtime state exposed to the handler. */
} __packed;

/*
 * The stub is the entry point that sets up protected mode and stacks for each
 * CPU. It then calls into the SMM handler module. It is encoded as an rmodule.
 */
extern unsigned char _binary_smmstub_start[];

/* Per CPU minimum stack size. */
#define SMM_MINIMUM_STACK_SIZE 32

/*
 * The smm_entry_ins consists of 3 bytes. It is used when staggering SMRAM entry
 * addresses across CPUs.
 *
 * 0xe9 <16-bit relative target> ; jmp <relative-offset>
 */
struct smm_entry_ins {
	char jmp_rel;	/* Opcode byte 0xe9 (near jmp). NOTE(review): storing
			   0xe9 in a plain (possibly signed) char relies on
			   implementation-defined conversion; fine on the x86
			   toolchains this targets. */
	uint16_t rel16;	/* 16-bit relative displacement to the jmp target. */
} __packed;

/*
 * Place the entry instructions for num entries beginning at entry_start with
 * a given stride. The entry_start is the highest entry point's address. All
 * other entry points are stride size below the previous.
 */
static void smm_place_jmp_instructions(void *entry_start, size_t stride,
		size_t num, void *jmp_target)
{
	size_t i;
	char *cur;
	struct smm_entry_ins entry = { .jmp_rel = 0xe9 };

	/* Each entry point has an IP value of 0x8000. The SMBASE for each
	 * CPU is different so the effective address of the entry instruction
	 * is different. Therefore, the relative displacement for each entry
	 * instruction needs to be updated to reflect the current effective
	 * IP. Additionally, the IP result from the jmp instruction is
	 * calculated using the next instruction's address so the size of
	 * the jmp instruction needs to be taken into account. */
	cur = entry_start;
	for (i = 0; i < num; i++) {
		uint32_t disp = (uintptr_t)jmp_target;

		disp -= sizeof(entry) + (uintptr_t)cur;
		printk(BIOS_DEBUG,
		       "SMM Module: placing jmp sequence at %p rel16 0x%04x\n",
		       cur, disp);
		entry.rel16 = disp;
		memcpy(cur, &entry, sizeof(entry));
		cur -= stride;
	}
}

/* Place stacks in base -> base + size region, but ensure the stacks don't
 * overlap the staggered entry points. */
/* Place stacks in the base -> base + size region, but ensure the stacks don't
 * overlap the staggered entry points. Returns the top of the stack region,
 * or NULL when the request cannot be satisfied.
 *
 * If the caller already provided a stack (params->stack_top != NULL) that
 * stack is honored and nothing is placed here. */
static void *smm_stub_place_stacks(char *base, size_t size,
				   struct smm_loader_params *params)
{
	size_t total_stack_size;

	if (params->stack_top != NULL)
		return params->stack_top;

	/* There has to be at least one stack user. Validate before using the
	 * count as a divisor/multiplier below. */
	if (params->num_concurrent_stacks < 1)
		return NULL;

	/* If stack space is requested assume the space lives in the lower
	 * half of SMRAM. Checking the per-CPU size against size / count both
	 * rejects a total that cannot fit and guards the multiplication
	 * below against size_t overflow. */
	if (params->per_cpu_stack_size > size / params->num_concurrent_stacks)
		return NULL;

	total_stack_size = params->per_cpu_stack_size *
			   params->num_concurrent_stacks;

	/* Stacks extend down to SMBASE */
	return &base[total_stack_size];
}

/* Place the staggered entry points for each CPU. The entry points are
 * staggered by the per CPU SMM save state size extending down from
 * SMM_ENTRY_OFFSET. */
/* Place the staggered entry points for each CPU. The entry points are
 * staggered by the per CPU SMM save state size extending down from
 * SMM_ENTRY_OFFSET. */
static void smm_stub_place_staggered_entry_points(char *base,
	const struct smm_loader_params *params, const struct rmodule *smm_stub)
{
	const size_t stub_entry_offset = rmodule_entry_offset(smm_stub);
	size_t num_entries = params->num_concurrent_save_states;
	char *entry = base + SMM_ENTRY_OFFSET;

	/* No jmp instructions are required when there is a single entry
	 * point and the stub itself sits at the SMM entry point. */
	if (num_entries <= 1 && stub_entry_offset == 0)
		return;

	/* When the stub's entry is at offset 0 the highest entry point is
	 * the stub itself and needs no jump sequence; start one save state
	 * size lower and place one fewer jmp. */
	if (stub_entry_offset == 0) {
		entry -= params->per_cpu_save_state_size;
		num_entries--;
	}

	smm_place_jmp_instructions(entry, params->per_cpu_save_state_size,
				   num_entries, rmodule_entry(smm_stub));
}

/*
 * The stub setup code assumes it is completely contained within the
 * default SMRAM size (0x10000). There are potentially 3 regions to place
 * within the default SMRAM size:
 * 1. Save state areas
 * 2. Stub code
 * 3. Stack areas
 *
 * The save state and stack areas are treated as contiguous for the number of
 * concurrent areas requested. The save state always lives at the top of SMRAM
 * space, and the entry point is at offset 0x8000.
 */
/* Carve up the default SMRAM region (SMM_DEFAULT_SIZE) for the save states,
 * the SMM stub and the stacks, load the stub rmodule and fill in its
 * parameter block.
 *
 * smbase:      base of the SMRAM region the stub is entered from.
 * smm_size:    total SMRAM size recorded in the stub runtime.
 * params:      loader parameters; params->runtime is set on success.
 * fxsave_area: per-CPU FXSAVE storage handed to the stub (may be NULL).
 *
 * Returns 0 on success, -1 on any sizing/parsing failure. The sequence of
 * size checks below is order-dependent: `size` is progressively reduced as
 * each component is accounted for. */
static int smm_module_setup_stub(void *smbase, size_t smm_size,
				 struct smm_loader_params *params,
				 void *fxsave_area)
{
	size_t total_save_state_size;
	size_t smm_stub_size;
	size_t stub_entry_offset;
	char *smm_stub_loc;
	void *stacks_top;
	size_t size;
	char *base;
	size_t i;
	struct smm_stub_params *stub_params;
	struct rmodule smm_stub;

	base = smbase;
	size = SMM_DEFAULT_SIZE;

	/* The number of concurrent stacks cannot exceed CONFIG_MAX_CPUS. */
	if (params->num_concurrent_stacks > CONFIG_MAX_CPUS)
		return -1;

	/* Fail if can't parse the smm stub rmodule. */
	if (rmodule_parse(&_binary_smmstub_start, &smm_stub))
		return -1;

	/* Adjust remaining size to account for save state. */
	total_save_state_size = params->per_cpu_save_state_size *
				params->num_concurrent_save_states;
	if (total_save_state_size > size)
		return -1;
	size -= total_save_state_size;

	/* The save state size encroached over the first SMM entry point. */
	if (size <= SMM_ENTRY_OFFSET)
		return -1;

	/* Need a minimum stack size and alignment. */
	if (params->per_cpu_stack_size <= SMM_MINIMUM_STACK_SIZE ||
	    (params->per_cpu_stack_size & 3) != 0)
		return -1;

	smm_stub_loc = NULL;
	smm_stub_size = rmodule_memory_size(&smm_stub);
	stub_entry_offset = rmodule_entry_offset(&smm_stub);

	/* Assume the stub is always small enough to live within upper half of
	 * SMRAM region after the save state space has been allocated. */
	smm_stub_loc = &base[SMM_ENTRY_OFFSET];

	/* Adjust for jmp instruction sequence. A non-zero entry offset means
	 * a jmp must be placed at the entry point itself, so the stub is
	 * shifted up to make room for it. */
	if (stub_entry_offset != 0) {
		size_t entry_sequence_size = sizeof(struct smm_entry_ins);
		/* Align up to 16 bytes. */
		entry_sequence_size = ALIGN_UP(entry_sequence_size, 16);
		smm_stub_loc += entry_sequence_size;
		smm_stub_size += entry_sequence_size;
	}

	/* Stub is too big to fit. */
	if (smm_stub_size > (size - SMM_ENTRY_OFFSET))
		return -1;

	/* The stacks, if requested, live in the lower half of SMRAM space. */
	size = SMM_ENTRY_OFFSET;

	/* Ensure stacks don't encroach onto staggered SMM
	 * entry points. The staggered entry points extend
	 * below SMM_ENTRY_OFFSET by the number of concurrent
	 * save states - 1 and save state size. */
	if (params->num_concurrent_save_states > 1) {
		size -= total_save_state_size;
		size += params->per_cpu_save_state_size;
	}

	/* Place the stacks in the lower half of SMRAM. */
	stacks_top = smm_stub_place_stacks(base, size, params);
	if (stacks_top == NULL)
		return -1;

	/* Load the stub. */
	if (rmodule_load(smm_stub_loc, &smm_stub))
		return -1;

	/* Place staggered entry points. */
	smm_stub_place_staggered_entry_points(base, params, &smm_stub);

	/* Setup the parameters for the stub code. */
	stub_params = rmodule_parameters(&smm_stub);
	stub_params->stack_top = (uintptr_t)stacks_top;
	stub_params->stack_size = params->per_cpu_stack_size;
	stub_params->c_handler = (uintptr_t)params->handler;
	stub_params->c_handler_arg = (uintptr_t)params->handler_arg;
	stub_params->fxsave_area = (uintptr_t)fxsave_area;
	stub_params->fxsave_area_size = FXSAVE_SIZE;
	stub_params->runtime.smbase = (uintptr_t)smbase;
	stub_params->runtime.smm_size = smm_size;
	stub_params->runtime.save_state_size = params->per_cpu_save_state_size;
	stub_params->runtime.num_cpus = params->num_concurrent_stacks;
	stub_params->runtime.gnvs_ptr = (uintptr_t)acpi_get_gnvs();

	/* Initialize the APIC id to CPU number table to be 1:1 */
	for (i = 0; i < params->num_concurrent_stacks; i++)
		stub_params->runtime.apic_id_to_cpu[i] = i;

	/* Allow the initiator to manipulate SMM stub parameters. */
	params->runtime = &stub_params->runtime;

	printk(BIOS_DEBUG, "SMM Module: stub loaded at %p. Will call %p(%p)\n",
	       smm_stub_loc, params->handler, params->handler_arg);

	return 0;
}

/*
 * smm_setup_relocation_handler assumes the callback is already loaded in
 * memory. i.e. Another SMM module isn't chained to the stub. The other
 * assumption is that the stub will be entered from the default SMRAM
 * location: 0x30000 -> 0x40000.
 */
int smm_setup_relocation_handler(struct smm_loader_params *params)
{
	void *smram = (void *)SMM_DEFAULT_BASE;

	/* Relocation requires a handler to call, and since every CPU enters
	 * at the default SMBASE of 0x30000 there can only be one concurrent
	 * save state during relocation. */
	if (params->handler == NULL ||
	    params->num_concurrent_save_states > 1)
		return -1;

	/* The relocation handler always runs on a stack; when the caller
	 * left the count at zero, default the number of concurrent stack
	 * users to CONFIG_MAX_CPUS. */
	if (params->num_concurrent_stacks == 0)
		params->num_concurrent_stacks = CONFIG_MAX_CPUS;

	return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE,
				     params, fxsave_area_relocation);
}

/* The SMM module is placed within the provided region in the following
 * manner:
 * +-----------------+ <- smram + size
 * | BIOS resource   |
 * | list (STM)      |
 * +-----------------+ <- smram + size - CONFIG_BIOS_RESOURCE_LIST_SIZE
 * |    stacks       |
 * +-----------------+ <- .. - total_stack_size
 * |  fxsave area    |
 * +-----------------+ <- .. - total_stack_size - fxsave_size
 * |      ...        |
 * +-----------------+ <- smram + handler_size + SMM_DEFAULT_SIZE
 * |    handler      |
 * +-----------------+ <- smram + SMM_DEFAULT_SIZE
 * |    stub code    |
 * +-----------------+ <- smram
 *
 * It should be noted that this algorithm will not work for
 * SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
 * expects a region large enough to encompass the handler and stacks
 * as well as the SMM_DEFAULT_SIZE.
 */
/* Load the SMM module and its stub into the provided SMRAM region per the
 * layout documented above.
 *
 * smram:  base of the SMRAM region.
 * size:   size of the region; must exceed SMM_DEFAULT_SIZE.
 * params: loader parameters; stack_top, handler and handler_arg are set
 *         here before delegating to smm_module_setup_stub().
 *
 * Returns 0 on success, -1 on parse/sizing/load failure. */
int smm_load_module(void *smram, size_t size, struct smm_loader_params *params)
{
	struct rmodule smm_mod;
	size_t total_stack_size;
	size_t handler_size;
	size_t module_alignment;
	size_t alignment_size;
	size_t fxsave_size;
	void *fxsave_area;
	size_t total_size;
	char *base;

	if (size <= SMM_DEFAULT_SIZE)
		return -1;

	/* Fail if can't parse the smm rmodule. */
	if (rmodule_parse(&_binary_smm_start, &smm_mod))
		return -1;

	/* Fill SMRAM with a recognizable pattern to aid debugging. */
	if (CONFIG(DEBUG_SMI))
		memset(smram, 0xcd, size);

	total_stack_size = params->per_cpu_stack_size *
			   params->num_concurrent_stacks;

	/* Stacks start at the top of the region. */
	base = smram;
	base += size;

	/* Reserve space for the STM BIOS resource list and MSEG at the very
	 * top when the STM is enabled. */
	if (CONFIG(STM))
		base -= CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE;

	params->stack_top = base;

	/* SMM module starts at offset SMM_DEFAULT_SIZE with the load alignment
	 * taken into account. */
	base = smram;
	base += SMM_DEFAULT_SIZE;
	handler_size = rmodule_memory_size(&smm_mod);
	module_alignment = rmodule_load_alignment(&smm_mod);
	alignment_size = module_alignment -
				((uintptr_t)base % module_alignment);
	if (alignment_size != module_alignment) {
		handler_size += alignment_size;
		base += alignment_size;
	}

	if (CONFIG(SSE)) {
		fxsave_size = FXSAVE_SIZE * params->num_concurrent_stacks;
		/* FXSAVE area lives directly below all the stacks. Do the
		 * arithmetic through a char pointer: arithmetic on void * is
		 * a GNU extension and invalid ISO C. */
		fxsave_area = (char *)params->stack_top -
			      (total_stack_size + fxsave_size);
	} else {
		fxsave_size = 0;
		fxsave_area = NULL;
	}

	/* Does the required amount of memory exceed the SMRAM region size? */
	total_size = total_stack_size + handler_size;
	total_size += fxsave_size + SMM_DEFAULT_SIZE;

	if (total_size > size)
		return -1;

	if (rmodule_load(base, &smm_mod))
		return -1;

	params->handler = rmodule_entry(&smm_mod);
	params->handler_arg = rmodule_parameters(&smm_mod);

	return smm_module_setup_stub(smram, size, params, fxsave_area);
}