author    Aaron Durbin <adurbin@chromium.org>     2013-01-03 17:38:47 -0600
committer Ronald G. Minnich <rminnich@gmail.com>  2013-03-14 05:01:50 +0100
commit    50a34648cdc7fc55e1fa75d51ece608c0e27245a (patch)
tree      4c0853f9500e4b01007b6c24caebbea1007ff9a0 /src/cpu/x86
parent    5ca4f4119bf00a1ec64358f3e6b41d696b1dc123 (diff)
download  coreboot-50a34648cdc7fc55e1fa75d51ece608c0e27245a.tar.xz
x86: SMM Module Support
Add support for SMM modules by leveraging the RMODULE lib. This allows
for easier dynamic SMM handler placement. The SMM module support
consists of a common stub which puts the executing CPU into protected
mode and calls into a pre-defined handler. This stub can then be used
for SMM relocation as well as for the real SMM handler. For relocation,
one can call back into coreboot ramstage code to perform the relocation
in C code.

The handler is essentially a copy of smihandler.c, but it drops the
TSEG differences. It also doesn't rely on the SMM revision, as the CPU
code should know what processor it is running on. Ideally the
CONFIG_SMM_TSEG option could be removed once the existing users of that
option have transitioned away from tseg_relocate() and
smi_get_tseg_base().

The generic SMI callbacks are no longer marked as weak in their
declarations so that there aren't unlinked references. The handler has
default implementations of the generic SMI callbacks which are marked
as weak. If an external compilation module provides a strong symbol,
the linker will use that instead of the weak one. Additionally, the
parameters to the generic callbacks are dropped as they don't seem to
be used directly; the SMM runtime can provide the necessary support if
needed.

Change-Id: I1e2fed71a40b2eb03197697d29e9c4b246e3b25e
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: http://review.coreboot.org/2693
Tested-by: build bot (Jenkins)
Reviewed-by: Ronald G. Minnich <rminnich@gmail.com>
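To make the weak-default scheme above concrete: a board can override any
of the generic callbacks simply by linking a strong symbol with the same
signature into the SMM module. A minimal sketch, assuming a hypothetical
board file; the APMC command value 0xe1 is made up for illustration:

/* Hypothetical board-level override. Because this is a strong symbol,
 * the linker prefers it over the weak default in smm_module_handler.c. */
#include <cpu/x86/smm.h>

int mainboard_smi_apmc(u8 data)
{
	if (data == 0xe1)	/* made-up, board-specific APMC command */
		return 1;	/* handled */
	return 0;		/* not handled */
}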
Diffstat (limited to 'src/cpu/x86')
-rw-r--r--  src/cpu/x86/Kconfig                    18
-rw-r--r--  src/cpu/x86/smm/Makefile.inc           46
-rw-r--r--  src/cpu/x86/smm/smm_module_handler.c  171
-rw-r--r--  src/cpu/x86/smm/smm_module_header.c    24
-rw-r--r--  src/cpu/x86/smm/smm_module_loader.c   371
-rw-r--r--  src/cpu/x86/smm/smm_stub.S            145
6 files changed, 775 insertions, 0 deletions
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index ae3241e8b3..62d78b5436 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -66,3 +66,21 @@ config SMM_TSEG
config SMM_TSEG_SIZE
hex
default 0
+
+config SMM_MODULES
+ bool
+ default n
+ depends on HAVE_SMI_HANDLER
+ select RELOCATABLE_MODULES
+ help
+ If SMM_MODULES is selected then the SMM handlers are built as
+ modules: an SMM stub along with an SMM loader/relocator. All the
+ handlers are written in C, with the stub being the only assembly.
+
+config SMM_MODULE_HEAP_SIZE
+ hex
+ default 0x4000
+ depends on SMM_MODULES
+ help
+ This option determines the size of the heap within the SMM handler
+ modules.
diff --git a/src/cpu/x86/smm/Makefile.inc b/src/cpu/x86/smm/Makefile.inc
index 405cf891ad..ee4dbeaae0 100644
--- a/src/cpu/x86/smm/Makefile.inc
+++ b/src/cpu/x86/smm/Makefile.inc
@@ -17,6 +17,51 @@
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
+ifeq ($(CONFIG_SMM_MODULES),y)
+
+smmstub-y += smm_stub.S
+smmstub-y += smm_module_header.c
+
+smm-y += smiutil.c
+smm-y += smm_module_header.c
+smm-y += smm_module_handler.c
+
+ramstage-y += smm_module_loader.c
+
+ramstage-srcs += $(obj)/cpu/x86/smm/smm
+ramstage-srcs += $(obj)/cpu/x86/smm/smmstub
+
+# SMM Stub Module. The stub is used as a trampoline for relocation and normal
+# SMM handling.
+$(obj)/cpu/x86/smm/smmstub.o: $$(smmstub-objs)
+ $(CC) $(LDFLAGS) -nostdlib -r -o $@ $^
+
+# Link the SMM stub module with a 0-byte heap.
+$(eval $(call rmodule_link,$(obj)/cpu/x86/smm/smmstub.elf, $(obj)/cpu/x86/smm/smmstub.o, 0))
+
+$(obj)/cpu/x86/smm/smmstub: $(obj)/cpu/x86/smm/smmstub.elf
+ $(OBJCOPY) -O binary $< $@
+
+$(obj)/cpu/x86/smm/smmstub.ramstage.o: $(obj)/cpu/x86/smm/smmstub
+ @printf " OBJCOPY $(subst $(obj)/,,$(@))\n"
+ cd $(dir $@); $(OBJCOPY) -I binary $(notdir $<) -O elf32-i386 -B i386 $(notdir $@)
+
+# C-based SMM handler.
+
+$(obj)/cpu/x86/smm/smm.o: $$(smm-objs)
+ $(CC) $(LDFLAGS) -nostdlib -r -o $@ $^
+
+$(eval $(call rmodule_link,$(obj)/cpu/x86/smm/smm.elf, $(obj)/cpu/x86/smm/smm.o, $(CONFIG_SMM_MODULE_HEAP_SIZE)))
+
+$(obj)/cpu/x86/smm/smm: $(obj)/cpu/x86/smm/smm.elf
+ $(OBJCOPY) -O binary $< $@
+
+$(obj)/cpu/x86/smm/smm.ramstage.o: $(obj)/cpu/x86/smm/smm
+ @printf " OBJCOPY $(subst $(obj)/,,$(@))\n"
+ cd $(dir $@); $(OBJCOPY) -I binary $(notdir $<) -O elf32-i386 -B i386 $(notdir $@)
+
+else # CONFIG_SMM_MODULES
+
ramstage-$(CONFIG_HAVE_SMI_HANDLER) += smmrelocate.S
ifeq ($(CONFIG_HAVE_SMI_HANDLER),y)
ramstage-srcs += $(obj)/cpu/x86/smm/smm_wrap
@@ -50,3 +95,4 @@ $(obj)/cpu/x86/smm/smm_wrap.ramstage.o: $(obj)/cpu/x86/smm/smm_wrap
@printf " OBJCOPY $(subst $(obj)/,,$(@))\n"
cd $(obj)/cpu/x86/smm; $(OBJCOPY) -I binary smm -O elf32-i386 -B i386 smm_wrap.ramstage.o
+endif # CONFIG_SMM_MODULES
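For orientation: the $(OBJCOPY) -I binary step wraps the flat smm and
smmstub binaries into ramstage objects whose symbol names are derived
from the input file names (standard objcopy behavior). These are the
symbols the loader declares later in this patch; shown here only as a
sketch of that convention:

/* Produced by objcopy -I binary; consumed by smm_module_loader.c,
 * which hands them to rmodule_parse(). */
extern unsigned char _binary_smm_start[];
extern unsigned char _binary_smmstub_start[];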
diff --git a/src/cpu/x86/smm/smm_module_handler.c b/src/cpu/x86/smm/smm_module_handler.c
new file mode 100644
index 0000000000..67802d6431
--- /dev/null
+++ b/src/cpu/x86/smm/smm_module_handler.c
@@ -0,0 +1,171 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 ChromeOS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <arch/io.h>
+#include <arch/romcc_io.h>
+#include <console/console.h>
+#include <cpu/x86/smm.h>
+
+typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;
+
+/* SMI multiprocessing semaphore */
+static volatile
+smi_semaphore smi_handler_status __attribute__ ((aligned (4))) = SMI_UNLOCKED;
+
+static int smi_obtain_lock(void)
+{
+ u8 ret = SMI_LOCKED;
+
+ asm volatile (
+ "movb %2, %%al\n"
+ "xchgb %%al, %1\n"
+ "movb %%al, %0\n"
+ : "=g" (ret), "=m" (smi_handler_status)
+ : "g" (SMI_LOCKED)
+ : "eax"
+ );
+
+ return (ret == SMI_UNLOCKED);
+}
+
+static void smi_release_lock(void)
+{
+ asm volatile (
+ "movb %1, %%al\n"
+ "xchgb %%al, %0\n"
+ : "=m" (smi_handler_status)
+ : "g" (SMI_UNLOCKED)
+ : "eax"
+ );
+}
+
+void io_trap_handler(int smif)
+{
+ /* If a handler function handled a given IO trap, it
+ * shall return a non-zero value
+ */
+ printk(BIOS_DEBUG, "SMI function trap 0x%x: ", smif);
+
+ if (southbridge_io_trap_handler(smif))
+ return;
+
+ if (mainboard_io_trap_handler(smif))
+ return;
+
+ printk(BIOS_DEBUG, "Unknown function\n");
+}
+
+/**
+ * @brief Set the EOS bit
+ */
+static void smi_set_eos(void)
+{
+ southbridge_smi_set_eos();
+}
+
+
+static u32 pci_orig;
+
+/**
+ * @brief Backup PCI address to make sure we do not mess up the OS
+ */
+static void smi_backup_pci_address(void)
+{
+ pci_orig = inl(0xcf8);
+}
+
+/**
+ * @brief Restore PCI address previously backed up
+ */
+static void smi_restore_pci_address(void)
+{
+ outl(pci_orig, 0xcf8);
+}
+
+
+static const struct smm_runtime *smm_runtime;
+
+void *smm_get_save_state(int cpu)
+{
+ char *base;
+
+ /* This function assumes all save states start at the top of the
+ * default SMRAM size region and are staggered down by the save
+ * state size. */
+ base = (void *)smm_runtime->smbase;
+ base += SMM_DEFAULT_SIZE;
+ base -= (cpu + 1) * smm_runtime->save_state_size;
+
+ return base;
+}
+
+void smm_handler_start(void *arg, int cpu, const struct smm_runtime *runtime)
+{
+ /* Make sure to set the global runtime. It's OK to race as the value
+ * will be the same across CPUs as well as multiple SMIs. */
+ if (smm_runtime == NULL)
+ smm_runtime = runtime;
+
+ if (cpu >= CONFIG_MAX_CPUS) {
+ console_init();
+ printk(BIOS_CRIT,
+ "Invalid CPU number assigned in SMM stub: %d\n", cpu);
+ return;
+ }
+
+ /* Are we ok to execute the handler? */
+ if (!smi_obtain_lock()) {
+ /* For security reasons we don't release the other CPUs
+ * until the CPU with the lock is actually done */
+ while (smi_handler_status == SMI_LOCKED) {
+ asm volatile (
+ ".byte 0xf3, 0x90\n" /* PAUSE */
+ );
+ }
+ return;
+ }
+
+ smi_backup_pci_address();
+
+ console_init();
+
+ printk(BIOS_SPEW, "\nSMI# #%d\n", cpu);
+
+ cpu_smi_handler();
+ northbridge_smi_handler();
+ southbridge_smi_handler();
+
+ smi_restore_pci_address();
+
+ smi_release_lock();
+
+ /* De-assert SMI# signal to allow another SMI */
+ smi_set_eos();
+}
+
+/* Provide a default implementation for all weak handlers so that the
+ * relocation entries in the modules make sense. Without default
+ * implementations, the weak relocations without a symbol resolve to
+ * address 0, which is where the modules are linked at. */
+int __attribute__((weak)) mainboard_io_trap_handler(int smif) { return 0; }
+void __attribute__((weak)) cpu_smi_handler(void) {}
+void __attribute__((weak)) northbridge_smi_handler(void) {}
+void __attribute__((weak)) southbridge_smi_handler(void) {}
+void __attribute__((weak)) mainboard_smi_gpi(u16 gpi_sts) {}
+int __attribute__((weak)) mainboard_smi_apmc(u8 data) { return 0; }
+void __attribute__((weak)) mainboard_smi_sleep(u8 slp_typ) {}
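A small standalone sketch of the staggered save-state math that
smm_get_save_state() implements above. The SMBASE of 0x30000 and the
0x400 per-CPU save state size are illustrative values only (0x30000 and
the 0x10000 default SMRAM size do match the defaults this patch assumes
elsewhere):

#include <stdio.h>

int main(void)
{
	const unsigned int smbase = 0x30000;		/* default SMBASE */
	const unsigned int smm_default_size = 0x10000;	/* default SMRAM size */
	const unsigned int save_state_size = 0x400;	/* assumed per-CPU size */
	int cpu;

	/* Save states stagger down from smbase + smm_default_size,
	 * mirroring base -= (cpu + 1) * save_state_size above. */
	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu %d save state at 0x%x\n", cpu,
		       smbase + smm_default_size - (cpu + 1) * save_state_size);
	return 0;
}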
diff --git a/src/cpu/x86/smm/smm_module_header.c b/src/cpu/x86/smm/smm_module_header.c
new file mode 100644
index 0000000000..3ee654f6cd
--- /dev/null
+++ b/src/cpu/x86/smm/smm_module_header.c
@@ -0,0 +1,24 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 ChromeOS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <rmodule.h>
+
+extern char smm_handler_start[];
+
+DEFINE_RMODULE_HEADER(smm_module, smm_handler_start, RMODULE_TYPE_SMM);
diff --git a/src/cpu/x86/smm/smm_module_loader.c b/src/cpu/x86/smm/smm_module_loader.c
new file mode 100644
index 0000000000..5eb4c5a0a2
--- /dev/null
+++ b/src/cpu/x86/smm/smm_module_loader.c
@@ -0,0 +1,371 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2012 ChromeOS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+#include <rmodule.h>
+#include <cpu/x86/smm.h>
+#include <cpu/x86/cache.h>
+#include <console/console.h>
+
+/*
+ * Components that make up the SMRAM:
+ * 1. Save state - the total save state memory used
+ * 2. Stack - stacks for the CPUs in the SMM handler
+ * 3. Stub - SMM stub code for calling into handler
+ * 4. Handler - C-based SMM handler.
+ *
+ * The components are assumed to consist of one consecutive region.
+ */
+
+/* These parameters are used by the SMM stub code. A pointer to the params
+ * is also passed to the C-based handler. */
+struct smm_stub_params {
+ u32 stack_size;
+ u32 stack_top;
+ u32 c_handler;
+ u32 c_handler_arg;
+ struct smm_runtime runtime;
+} __attribute__ ((packed));
+
+/*
+ * The stub is the entry point that sets up protected mode and stacks for each
+ * cpu. It then calls into the SMM handler module. It is encoded as an rmodule.
+ */
+extern unsigned char _binary_smmstub_start[];
+
+/* This is the SMM handler that the stub calls. It is encoded as an rmodule. */
+extern unsigned char _binary_smm_start[];
+
+/* Per cpu minimum stack size. */
+#define SMM_MINIMUM_STACK_SIZE 32
+
+/*
+ * The smm_entry_ins consists of 3 bytes. It is used when staggering SMRAM entry
+ * addresses across CPUs.
+ *
+ * 0xe9 <16-bit relative target> ; jmp <relative-offset>
+ */
+struct smm_entry_ins {
+ char jmp_rel;
+ uint16_t rel16;
+} __attribute__ ((packed));
+
+/*
+ * Place the entry instructions for num entries beginning at entry_start with
+ * a given stride. The entry_start is the highest entry point's address; each
+ * subsequent entry point is stride bytes below the previous one.
+ */
+static void smm_place_jmp_instructions(void *entry_start, int stride, int num,
+ void *jmp_target)
+{
+ int i;
+ char *cur;
+ struct smm_entry_ins entry = { .jmp_rel = 0xe9 };
+
+ /* Each entry point has an IP value of 0x8000. The SMBASE for each
+ * cpu is different so the effective address of the entry instruction
+ * is different. Therefore, the relative displacement for each entry
+ * instruction needs to be updated to reflect the current effective
+ * IP. Additionally, the IP result from the jmp instruction is
+ * calculated using the next instruction's address so the size of
+ * the jmp instruction needs to be taken into account. */
+ cur = entry_start;
+ for (i = 0; i < num; i++) {
+ uint32_t disp = (uint32_t)jmp_target;
+
+ disp -= sizeof(entry) + (uint32_t)cur;
+ printk(BIOS_DEBUG,
+ "SMM Module: placing jmp sequence at %p rel16 0x%04x\n",
+ cur, disp);
+ entry.rel16 = disp;
+ memcpy(cur, &entry, sizeof(entry));
+ cur -= stride;
+ }
+}
+
+/* Place stacks in base -> base + size region, but ensure the stacks don't
+ * overlap the staggered entry points. */
+static void *smm_stub_place_stacks(char *base, int size,
+ struct smm_loader_params *params)
+{
+ int total_stack_size;
+ char *stacks_top;
+
+ if (params->stack_top != NULL)
+ return params->stack_top;
+
+ /* If stack space is requested assume the space lives in the lower
+ * half of SMRAM. */
+ total_stack_size = params->per_cpu_stack_size *
+ params->num_concurrent_stacks;
+
+ /* There has to be at least one stack user. */
+ if (params->num_concurrent_stacks < 1)
+ return NULL;
+
+ /* Total stack size cannot fit. */
+ if (total_stack_size > size)
+ return NULL;
+
+ /* Stacks extend down to SMBASE */
+ stacks_top = &base[total_stack_size];
+
+ return stacks_top;
+}
+
+/* Place the staggered entry points for each CPU. The entry points are
+ * staggered by the per cpu SMM save state size extending down from
+ * SMM_ENTRY_OFFSET. */
+static void smm_stub_place_staggered_entry_points(char *base,
+ const struct smm_loader_params *params, const struct rmodule *smm_stub)
+{
+ int stub_entry_offset;
+
+ stub_entry_offset = rmodule_entry_offset(smm_stub);
+
+ /* If there are staggered entry points, or the stub is not located
+ * at the SMM entry point, then jmp instructions need to be placed. */
+ if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
+ int num_entries;
+
+ base += SMM_ENTRY_OFFSET;
+ num_entries = params->num_concurrent_save_states;
+ /* Adjust beginning entry and number of entries down since
+ * the initial entry point doesn't need a jump sequence. */
+ if (stub_entry_offset == 0) {
+ base -= params->per_cpu_save_state_size;
+ num_entries--;
+ }
+ smm_place_jmp_instructions(base,
+ params->per_cpu_save_state_size,
+ num_entries,
+ rmodule_entry(smm_stub));
+ }
+}
+
+/*
+ * The stub setup code assumes it is completely contained within the
+ * default SMRAM size (0x10000). There are potentially 3 regions to place
+ * within the default SMRAM size:
+ * 1. Save state areas
+ * 2. Stub code
+ * 3. Stack areas
+ *
+ * The save state and stack areas are treated as contiguous for the number of
+ * concurrent areas requested. The save state always lives at the top of SMRAM
+ * space, and the entry point is at offset 0x8000.
+ */
+static int smm_module_setup_stub(void *smbase, struct smm_loader_params *params)
+{
+ int total_save_state_size;
+ int smm_stub_size;
+ int stub_entry_offset;
+ char *smm_stub_loc;
+ void *stacks_top;
+ int size;
+ char *base;
+ int i;
+ struct smm_stub_params *stub_params;
+ struct rmodule smm_stub;
+
+ base = smbase;
+ size = SMM_DEFAULT_SIZE;
+
+ /* The number of concurrent stacks cannot exceed CONFIG_MAX_CPUS. */
+ if (params->num_concurrent_stacks > CONFIG_MAX_CPUS)
+ return -1;
+
+ /* Fail if can't parse the smm stub rmodule. */
+ if (rmodule_parse(&_binary_smmstub_start, &smm_stub))
+ return -1;
+
+ /* Adjust remaining size to account for save state. */
+ total_save_state_size = params->per_cpu_save_state_size *
+ params->num_concurrent_save_states;
+ size -= total_save_state_size;
+
+ /* Fail if the save state area encroaches on the first SMM entry point. */
+ if (size <= SMM_ENTRY_OFFSET)
+ return -1;
+
+ /* Need a minimum stack size and alignment. */
+ if (params->per_cpu_stack_size <= SMM_MINIMUM_STACK_SIZE ||
+ (params->per_cpu_stack_size & 3) != 0)
+ return -1;
+
+ smm_stub_loc = NULL;
+ smm_stub_size = rmodule_memory_size(&smm_stub);
+ stub_entry_offset = rmodule_entry_offset(&smm_stub);
+
+ /* Assume the stub is always small enough to live within the upper
+ * half of the SMRAM region after the save state space has been
+ * allocated. */
+ smm_stub_loc = &base[SMM_ENTRY_OFFSET];
+
+ /* Adjust for jmp instruction sequence. */
+ if (stub_entry_offset != 0) {
+ int entry_sequence_size = sizeof(struct smm_entry_ins);
+ /* Align up to 16 bytes. */
+ entry_sequence_size += 15;
+ entry_sequence_size &= ~15;
+ smm_stub_loc += entry_sequence_size;
+ smm_stub_size += entry_sequence_size;
+ }
+
+ /* Stub is too big to fit. */
+ if (smm_stub_size > (size - SMM_ENTRY_OFFSET))
+ return -1;
+
+ /* The stacks, if requested, live in the lower half of SMRAM space. */
+ size = SMM_ENTRY_OFFSET;
+
+ /* Ensure the stacks don't encroach onto the staggered SMM entry
+ * points. The staggered entry points extend below SMM_ENTRY_OFFSET
+ * by (number of concurrent save states - 1) times the save state
+ * size. */
+ if (params->num_concurrent_save_states > 1) {
+ size -= total_save_state_size;
+ size += params->per_cpu_save_state_size;
+ }
+
+ /* Place the stacks in the lower half of SMRAM. */
+ stacks_top = smm_stub_place_stacks(base, size, params);
+ if (stacks_top == NULL)
+ return -1;
+
+ /* Load the stub. */
+ if (rmodule_load(smm_stub_loc, &smm_stub))
+ return -1;
+
+ /* Place staggered entry points. */
+ smm_stub_place_staggered_entry_points(base, params, &smm_stub);
+
+ /* Setup the parameters for the stub code. */
+ stub_params = rmodule_parameters(&smm_stub);
+ stub_params->stack_top = (u32)stacks_top;
+ stub_params->stack_size = params->per_cpu_stack_size;
+ stub_params->c_handler = (u32)params->handler;
+ stub_params->c_handler_arg = (u32)params->handler_arg;
+ stub_params->runtime.smbase = (u32)smbase;
+ stub_params->runtime.save_state_size = params->per_cpu_save_state_size;
+
+ /* Initialize the APIC id to cpu number table to be 1:1 */
+ for (i = 0; i < params->num_concurrent_stacks; i++)
+ stub_params->runtime.apic_id_to_cpu[i] = i;
+
+ /* Allow the initiator to manipulate SMM stub parameters. */
+ params->runtime = &stub_params->runtime;
+
+ printk(BIOS_DEBUG, "SMM Module: stub loaded at %p. Will call %p(%p)\n",
+ smm_stub_loc, params->handler, params->handler_arg);
+
+ return 0;
+}
+
+/*
+ * smm_setup_relocation_handler assumes the callback is already loaded in
+ * memory, i.e. another SMM module isn't chained to the stub. The other
+ * assumption is that the stub will be entered from the default SMRAM
+ * location: 0x30000 -> 0x40000.
+ */
+int smm_setup_relocation_handler(struct smm_loader_params *params)
+{
+ void *smram = (void *)SMM_DEFAULT_BASE;
+
+ /* There can't be more than 1 concurrent save state for the relocation
+ * handler because all CPUs default to 0x30000 as SMBASE. */
+ if (params->num_concurrent_save_states > 1)
+ return -1;
+
+ /* A handler has to be defined to call for relocation. */
+ if (params->handler == NULL)
+ return -1;
+
+ /* Since the relocation handler always uses a stack, adjust the
+ * number of concurrent stack users to be CONFIG_MAX_CPUS. */
+ if (params->num_concurrent_stacks == 0)
+ params->num_concurrent_stacks = CONFIG_MAX_CPUS;
+
+ return smm_module_setup_stub(smram, params);
+}
+
+/* The SMM module is placed within the provided region in the following
+ * manner:
+ * +-----------------+ <- smram + size
+ * |     stacks      |
+ * +-----------------+ <- smram + size - total_stack_size
+ * |       ...       |
+ * +-----------------+ <- smram + handler_size + SMM_DEFAULT_SIZE
+ * |     handler     |
+ * +-----------------+ <- smram + SMM_DEFAULT_SIZE
+ * |    stub code    |
+ * +-----------------+ <- smram
+ *
+ * It should be noted that this algorithm will not work for
+ * SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
+ * expects a region large enough to encompass the handler and stacks
+ * as well as the SMM_DEFAULT_SIZE.
+ */
+int smm_load_module(void *smram, int size, struct smm_loader_params *params)
+{
+ struct rmodule smm_mod;
+ int total_stack_size;
+ int handler_size;
+ int module_alignment;
+ int alignment_size;
+ char *base;
+
+ if (size <= SMM_DEFAULT_SIZE)
+ return -1;
+
+ /* Fail if can't parse the smm rmodule. */
+ if (rmodule_parse(&_binary_smm_start, &smm_mod))
+ return -1;
+
+ total_stack_size = params->per_cpu_stack_size *
+ params->num_concurrent_stacks;
+
+ /* Stacks start at the top of the region. */
+ base = smram;
+ base += size;
+ params->stack_top = base;
+
+ /* SMM module starts at offset SMM_DEFAULT_SIZE with the load alignment
+ * taken into account. */
+ base = smram;
+ base += SMM_DEFAULT_SIZE;
+ handler_size = rmodule_memory_size(&smm_mod);
+ module_alignment = rmodule_load_alignment(&smm_mod);
+ alignment_size = module_alignment - ((u32)base % module_alignment);
+ if (alignment_size != module_alignment) {
+ handler_size += alignment_size;
+ base += alignment_size;
+ }
+
+ /* Does the required amount of memory exceed the SMRAM region size? */
+ if ((total_stack_size + handler_size + SMM_DEFAULT_SIZE) > size)
+ return -1;
+
+ if (rmodule_load(base, &smm_mod))
+ return -1;
+
+ params->handler = rmodule_entry(&smm_mod);
+ params->handler_arg = rmodule_parameters(&smm_mod);
+
+ return smm_module_setup_stub(smram, params);
+}
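A hedged sketch of how a ramstage CPU driver might drive this loader,
assuming the declarations from <cpu/x86/smm.h> that this patch relies
on. The TSEG region, the 0x400 stack and save state sizes, and the
helper name are illustrative, not mandated by the patch:

#include <cpu/x86/smm.h>

/* Hypothetical caller: load the permanent SMM handler into a TSEG
 * region and let smm_load_module() place stacks, handler, and stub. */
static int install_permanent_handler(void *tseg_base, int tseg_size)
{
	struct smm_loader_params params = {
		.per_cpu_stack_size = 0x400,		/* assumed */
		.num_concurrent_stacks = CONFIG_MAX_CPUS,
		.per_cpu_save_state_size = 0x400,	/* assumed */
		.num_concurrent_save_states = 1,
	};

	/* Returns 0 on success; fills in handler, handler_arg, stack_top. */
	if (smm_load_module(tseg_base, tseg_size, &params))
		return -1;

	/* params.runtime now points at the stub's runtime area, so the
	 * caller may adjust the APIC id to cpu number mapping here. */
	return 0;
}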
diff --git a/src/cpu/x86/smm/smm_stub.S b/src/cpu/x86/smm/smm_stub.S
new file mode 100644
index 0000000000..07eb5dcb6d
--- /dev/null
+++ b/src/cpu/x86/smm/smm_stub.S
@@ -0,0 +1,145 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2012 ChromeOS Authors
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+/*
+ * The stub is a generic wrapper for bootstrapping a C-based SMM handler. Its
+ * primary purpose is to put the CPU into protected mode with a stack and call
+ * into the C handler.
+ *
+ * The stub_entry_params structure needs to correspond to the C structure
+ * found in smm.h.
+ */
+
+.code32
+.section ".module_parameters", "aw", @progbits
+stub_entry_params:
+stack_size:
+.long 0
+stack_top:
+.long 0
+c_handler:
+.long 0
+c_handler_arg:
+.long 0
+/* struct smm_runtime begins here. */
+smm_runtime:
+smbase:
+.long 0
+save_state_size:
+.long 0
+/* apic_to_cpu_num is a table mapping the default APIC id to the cpu
+ * number. If the APIC id is found at a given index, that index is the
+ * cpu's contiguous number. */
+apic_to_cpu_num:
+.fill CONFIG_MAX_CPUS,1,0xff
+/* end struct smm_runtime */
+
+.data
+/* Provide fallback stack to use when a valid cpu number cannot be found. */
+fallback_stack_bottom:
+.skip 128
+fallback_stack_top:
+
+.text
+.code16
+.global smm_handler_start
+smm_handler_start:
+ movl $(smm_relocate_gdt), %ebx
+ data32 lgdt (%ebx)
+
+ movl %cr0, %eax
+ andl $0x1FFAFFD1, %eax /* CD,NW,PG,AM,WP,NE,TS,EM,MP = 0 */
+ orl $0x1, %eax /* PE = 1 */
+ movl %eax, %cr0
+
+ /* Enable protected mode */
+ data32 ljmp $0x8, $smm_trampoline32
+
+.align 4
+smm_relocate_gdt:
+ /* The first GDT entry is used for the lgdt instruction. */
+ .word smm_relocate_gdt_end - smm_relocate_gdt - 1
+ .long smm_relocate_gdt
+ .word 0x0000
+
+ /* gdt selector 0x08, flat code segment */
+ .word 0xffff, 0x0000
+ .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */
+
+ /* gdt selector 0x10, flat data segment */
+ .word 0xffff, 0x0000
+ .byte 0x00, 0x93, 0xcf, 0x00
+smm_relocate_gdt_end:
+
+.align 4
+.code32
+.global smm_trampoline32
+smm_trampoline32:
+ /* Use flat data segment */
+ movw $0x10, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %ss
+ movw %ax, %fs
+ movw %ax, %gs
+
+ /* The CPU number is calculated by reading the initial APIC id. Since
+ * the OS can manipulate the APIC id, use the non-changing cpuid result
+ * for the APIC id (ebx[31:24]). A table is used to handle a
+ * discontiguous APIC id space. */
+ mov $1, %eax
+ cpuid
+ bswap %ebx /* Default APIC id in bl. */
+ mov $(apic_to_cpu_num), %eax
+ xor %ecx, %ecx
+
+1:
+ cmp (%eax, %ecx, 1), %bl
+ je 1f
+ inc %ecx
+ cmp $CONFIG_MAX_CPUS, %ecx
+ jne 1b
+ /* This is bad. A stack could not be found because a cpu number could
+ * not be assigned. Use the fallback stack and check for this
+ * condition in the C handler. */
+ movl $(fallback_stack_top), %esp
+ jmp 2f
+1:
+ movl stack_size, %eax
+ mul %ecx
+ movl stack_top, %edx
+ subl %eax, %edx
+ mov %edx, %esp
+
+2:
+ /* Call into the C-based SMM handler with the platform parameters.
+ * Arguments are pushed in cdecl order, so this is equivalent to:
+ * c_handler(c_handler_arg, cpu_num, &smm_runtime);
+ */
+ push $(smm_runtime)
+ push %ecx
+ push c_handler_arg
+ mov c_handler, %eax
+ call *%eax
+
+ /* Exit from SM mode. */
+ rsm
+