 Makefile.inc                         |   6 +-
 src/cpu/x86/Kconfig                  |  18 ++
 src/cpu/x86/smm/Makefile.inc         |  46 ++-
 src/cpu/x86/smm/smm_module_handler.c | 171 ++++++++
 src/cpu/x86/smm/smm_module_header.c  |  24 ++
 src/cpu/x86/smm/smm_module_loader.c  | 371 ++++++++++++++
 src/cpu/x86/smm/smm_stub.S           | 145 ++++++
 src/include/cpu/x86/smm.h            |  80 ++-
 8 files changed, 860 insertions(+), 1 deletion(-)
diff --git a/Makefile.inc b/Makefile.inc
index 158ac44a8a..3fb74e753a 100644
--- a/Makefile.inc
+++ b/Makefile.inc
@@ -60,7 +60,7 @@ subdirs-y += site-local
#######################################################################
# Add source classes and their build options
-classes-y := ramstage romstage bootblock smm cpu_microcode
+classes-y := ramstage romstage bootblock smm smmstub cpu_microcode
#######################################################################
# Helper functions for ramstage postprocess
@@ -112,13 +112,17 @@ endif
bootblock-c-ccopts:=-D__BOOT_BLOCK__ -D__PRE_RAM__
bootblock-S-ccopts:=-D__BOOT_BLOCK__ -D__PRE_RAM__
+smmstub-c-ccopts:=-D__SMM__
+smmstub-S-ccopts:=-D__SMM__
smm-c-ccopts:=-D__SMM__
smm-S-ccopts:=-D__SMM__
# SMM TSEG base is dynamic
+ifneq ($(CONFIG_SMM_MODULES),y)
ifeq ($(CONFIG_SMM_TSEG),y)
smm-c-ccopts += -fpic
endif
+endif
ramstage-c-deps:=$$(OPTION_TABLE_H)
romstage-c-deps:=$$(OPTION_TABLE_H)
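Both the smm class and the new smmstub class are compiled with -D__SMM__, so a single header can serve the ramstage loader side and the SMM modules themselves. Note also that -fpic is no longer forced for TSEG builds when SMM_MODULES is set, since relocatable modules are instead relocated when loaded. A minimal sketch of the __SMM__ gating pattern, mirroring what smm.h does later in this change:

    /* Built with -D__SMM__ for the smm/smmstub classes, without it for
     * ramstage. */
    #ifdef __SMM__
    /* In-handler API, e.g. the module entry point. */
    void smm_handler_start(void *arg, int cpu, const struct smm_runtime *runtime);
    #else
    /* Loader-side API used by ramstage. */
    int smm_load_module(void *smram, int size, struct smm_loader_params *params);
    #endif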
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index ae3241e8b3..62d78b5436 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -66,3 +66,21 @@ config SMM_TSEG
config SMM_TSEG_SIZE
hex
default 0
+
+config SMM_MODULES
+ bool
+ default n
+ depends on HAVE_SMI_HANDLER
+ select RELOCATABLE_MODULES
+ help
+ If SMM_MODULES is selected, the SMM handlers are built as relocatable
+ modules: an SMM stub along with an SMM loader/relocator. All of the
+ handlers are written in C, with the stub being the only assembly.
+
+config SMM_MODULE_HEAP_SIZE
+ hex
+ default 0x4000
+ depends on SMM_MODULES
+ help
+ This option determines the size of the heap within the SMM handler
+ modules.
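CONFIG_SMM_MODULE_HEAP_SIZE is handed to the rmodule link step below, which reserves that much space inside the module image for dynamic allocation. A sketch of a consumer, assuming the module heap backs coreboot's malloc() inside the handler (the buffer and its size here are hypothetical):

    #include <stdlib.h>

    static void *scratch;

    /* Lazily allocate a scratch buffer from the module's linked-in heap. */
    static void *get_smi_scratch(void)
    {
        if (scratch == NULL)
            scratch = malloc(256); /* hypothetical size */
        return scratch;
    }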
diff --git a/src/cpu/x86/smm/Makefile.inc b/src/cpu/x86/smm/Makefile.inc
index 405cf891ad..ee4dbeaae0 100644
--- a/src/cpu/x86/smm/Makefile.inc
+++ b/src/cpu/x86/smm/Makefile.inc
@@ -17,6 +17,51 @@
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
+ifeq ($(CONFIG_SMM_MODULES),y)
+
+smmstub-y += smm_stub.S
+smmstub-y += smm_module_header.c
+
+smm-y += smiutil.c
+smm-y += smm_module_header.c
+smm-y += smm_module_handler.c
+
+ramstage-y += smm_module_loader.c
+
+ramstage-srcs += $(obj)/cpu/x86/smm/smm
+ramstage-srcs += $(obj)/cpu/x86/smm/smmstub
+
+# SMM Stub Module. The stub is used as a trampoline for relocation and normal
+# SMM handling.
+$(obj)/cpu/x86/smm/smmstub.o: $$(smmstub-objs)
+ $(CC) $(LDFLAGS) -nostdlib -r -o $@ $^
+
+# Link the SMM stub module with a 0-byte heap.
+$(eval $(call rmodule_link,$(obj)/cpu/x86/smm/smmstub.elf, $(obj)/cpu/x86/smm/smmstub.o, 0))
+
+$(obj)/cpu/x86/smm/smmstub: $(obj)/cpu/x86/smm/smmstub.elf
+ $(OBJCOPY) -O binary $< $@
+
+$(obj)/cpu/x86/smm/smmstub.ramstage.o: $(obj)/cpu/x86/smm/smmstub
+ @printf " OBJCOPY $(subst $(obj)/,,$(@))\n"
+ cd $(dir $@); $(OBJCOPY) -I binary $(notdir $<) -O elf32-i386 -B i386 $(notdir $@)
+
+# C-based SMM handler.
+
+$(obj)/cpu/x86/smm/smm.o: $$(smm-objs)
+ $(CC) $(LDFLAGS) -nostdlib -r -o $@ $^
+
+$(eval $(call rmodule_link,$(obj)/cpu/x86/smm/smm.elf, $(obj)/cpu/x86/smm/smm.o, $(CONFIG_SMM_MODULE_HEAP_SIZE)))
+
+$(obj)/cpu/x86/smm/smm: $(obj)/cpu/x86/smm/smm.elf
+ $(OBJCOPY) -O binary $< $@
+
+$(obj)/cpu/x86/smm/smm.ramstage.o: $(obj)/cpu/x86/smm/smm
+ @printf " OBJCOPY $(subst $(obj)/,,$(@))\n"
+ cd $(dir $@); $(OBJCOPY) -I binary $(notdir $<) -O elf32-i386 -B i386 $(notdir $@)
+
+else # CONFIG_SMM_MODULES
+
ramstage-$(CONFIG_HAVE_SMI_HANDLER) += smmrelocate.S
ifeq ($(CONFIG_HAVE_SMI_HANDLER),y)
ramstage-srcs += $(obj)/cpu/x86/smm/smm_wrap
@@ -50,3 +95,4 @@ $(obj)/cpu/x86/smm/smm_wrap.ramstage.o: $(obj)/cpu/x86/smm/smm_wrap
@printf " OBJCOPY $(subst $(obj)/,,$(@))\n"
cd $(obj)/cpu/x86/smm; $(OBJCOPY) -I binary smm -O elf32-i386 -B i386 smm_wrap.ramstage.o
+endif # CONFIG_SMM_MODULES
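The objcopy -I binary recipes wrap the flat smm and smmstub binaries back into ELF objects so ramstage can link them. objcopy derives symbol names from the input file name, which is why each recipe cd's into the output directory first (keeping the name "smm" rather than a full path) and why the loader can declare the blobs directly:

    /* Symbols that "objcopy -I binary" emits for an input file named "smm".
     * smm_module_loader.c uses the first of these. */
    extern unsigned char _binary_smm_start[];
    extern unsigned char _binary_smm_end[];
    /* _size is an absolute symbol; its address is the value. */
    extern unsigned char _binary_smm_size[];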
diff --git a/src/cpu/x86/smm/smm_module_handler.c b/src/cpu/x86/smm/smm_module_handler.c
new file mode 100644
index 0000000000..67802d6431
--- /dev/null
+++ b/src/cpu/x86/smm/smm_module_handler.c
@@ -0,0 +1,171 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 ChromeOS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <arch/io.h>
+#include <arch/romcc_io.h>
+#include <console/console.h>
+#include <cpu/x86/smm.h>
+
+typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;
+
+/* SMI multiprocessing semaphore */
+static volatile
+smi_semaphore smi_handler_status __attribute__ ((aligned (4))) = SMI_UNLOCKED;
+
+static int smi_obtain_lock(void)
+{
+ u8 ret = SMI_LOCKED;
+
+ asm volatile (
+ "movb %2, %%al\n"
+ "xchgb %%al, %1\n"
+ "movb %%al, %0\n"
+ : "=g" (ret), "=m" (smi_handler_status)
+ : "g" (SMI_LOCKED)
+ : "eax"
+ );
+
+ return (ret == SMI_UNLOCKED);
+}
+
+static void smi_release_lock(void)
+{
+ asm volatile (
+ "movb %1, %%al\n"
+ "xchgb %%al, %0\n"
+ : "=m" (smi_handler_status)
+ : "g" (SMI_UNLOCKED)
+ : "eax"
+ );
+}
+
+void io_trap_handler(int smif)
+{
+ /* If a handler function handled a given IO trap, it
+ * shall return a non-zero value
+ */
+ printk(BIOS_DEBUG, "SMI function trap 0x%x: ", smif);
+
+ if (southbridge_io_trap_handler(smif))
+ return;
+
+ if (mainboard_io_trap_handler(smif))
+ return;
+
+ printk(BIOS_DEBUG, "Unknown function\n");
+}
+
+/**
+ * @brief Set the EOS bit
+ */
+static void smi_set_eos(void)
+{
+ southbridge_smi_set_eos();
+}
+
+
+static u32 pci_orig;
+
+/**
+ * @brief Back up the PCI address to make sure we do not mess up the OS
+ */
+static void smi_backup_pci_address(void)
+{
+ pci_orig = inl(0xcf8);
+}
+
+/**
+ * @brief Restore PCI address previously backed up
+ */
+static void smi_restore_pci_address(void)
+{
+ outl(pci_orig, 0xcf8);
+}
+
+
+static const struct smm_runtime *smm_runtime;
+
+void *smm_get_save_state(int cpu)
+{
+ char *base;
+
+ /* This function assumes all save states start at the top of the default
+ * SMRAM region (SMM_DEFAULT_SIZE) and are staggered down by the save
+ * state size. */
+ base = (void *)smm_runtime->smbase;
+ base += SMM_DEFAULT_SIZE;
+ base -= (cpu + 1) * smm_runtime->save_state_size;
+
+ return base;
+}
+
+void smm_handler_start(void *arg, int cpu, const struct smm_runtime *runtime)
+{
+ /* Make sure to set the global runtime. It's OK to race as the value
+ * will be the same across CPUs as well as multiple SMIs. */
+ if (smm_runtime == NULL)
+ smm_runtime = runtime;
+
+ if (cpu >= CONFIG_MAX_CPUS) {
+ console_init();
+ printk(BIOS_CRIT,
+ "Invalid CPU number assigned in SMM stub: %d\n", cpu);
+ return;
+ }
+
+ /* Are we ok to execute the handler? */
+ if (!smi_obtain_lock()) {
+ /* For security reasons we don't release the other CPUs
+ * until the CPU with the lock is actually done */
+ while (smi_handler_status == SMI_LOCKED) {
+ asm volatile (
+ ".byte 0xf3, 0x90\n" /* PAUSE */
+ );
+ }
+ return;
+ }
+
+ smi_backup_pci_address();
+
+ console_init();
+
+ printk(BIOS_SPEW, "\nSMI# #%d\n", cpu);
+
+ cpu_smi_handler();
+ northbridge_smi_handler();
+ southbridge_smi_handler();
+
+ smi_restore_pci_address();
+
+ smi_release_lock();
+
+ /* De-assert SMI# signal to allow another SMI */
+ smi_set_eos();
+}
+
+/* Provide a default implementation for all weak handlers so that the
+ * relocation entries in the modules make sense. Without default
+ * implementations the weak relocations w/o a symbol have a 0 address, which
+ * is where the modules are linked. */
+int __attribute__((weak)) mainboard_io_trap_handler(int smif) { return 0; }
+void __attribute__((weak)) cpu_smi_handler(void) {}
+void __attribute__((weak)) northbridge_smi_handler(void) {}
+void __attribute__((weak)) southbridge_smi_handler(void) {}
+void __attribute__((weak)) mainboard_smi_gpi(u16 gpi_sts) {}
+int __attribute__((weak)) mainboard_smi_apmc(u8 data) { return 0; }
+void __attribute__((weak)) mainboard_smi_sleep(u8 slp_typ) {}
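The weak definitions above let a board or chipset override only the hooks it needs. A sketch of a board-level override of mainboard_io_trap_handler(), with a hypothetical SMIF value:

    #include <cpu/x86/smm.h>

    int mainboard_io_trap_handler(int smif)
    {
        switch (smif) {
        case 0x99: /* hypothetical board-specific function */
            /* ... perform the trapped operation ... */
            return 1; /* non-zero: handled, suppresses "Unknown function" */
        }
        return 0; /* not handled */
    }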
diff --git a/src/cpu/x86/smm/smm_module_header.c b/src/cpu/x86/smm/smm_module_header.c
new file mode 100644
index 0000000000..3ee654f6cd
--- /dev/null
+++ b/src/cpu/x86/smm/smm_module_header.c
@@ -0,0 +1,24 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 ChromeOS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <rmodule.h>
+
+extern char smm_handler_start[];
+
+DEFINE_RMODULE_HEADER(smm_module, smm_handler_start, RMODULE_TYPE_SMM);
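DEFINE_RMODULE_HEADER records the metadata the ramstage loader needs to parse, relocate, and enter the module: its type and its entry point. Conceptually the macro emits something like the following into a section the rmodule parser can find; the field names are illustrative, not the actual rmodule.h layout:

    /* Illustrative sketch only; see rmodule.h for the real structure. */
    struct rmodule_header_sketch {
        uint16_t magic;    /* identifies a valid rmodule */
        uint16_t type;     /* RMODULE_TYPE_SMM for this module */
        uint32_t entry;    /* where to jump: smm_handler_start */
        /* ... payload, relocation and bss bookkeeping ... */
    };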
diff --git a/src/cpu/x86/smm/smm_module_loader.c b/src/cpu/x86/smm/smm_module_loader.c
new file mode 100644
index 0000000000..5eb4c5a0a2
--- /dev/null
+++ b/src/cpu/x86/smm/smm_module_loader.c
@@ -0,0 +1,371 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2012 ChromeOS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+#include <rmodule.h>
+#include <cpu/x86/smm.h>
+#include <cpu/x86/cache.h>
+#include <console/console.h>
+
+/*
+ * Components that make up the SMRAM:
+ * 1. Save state - the total save state memory used
+ * 2. Stack - stacks for the CPUs in the SMM handler
+ * 3. Stub - SMM stub code for calling into handler
+ * 4. Handler - C-based SMM handler.
+ *
+ * The components are assumed to occupy one consecutive region.
+ */
+
+/* These parameters are used by the SMM stub code. A pointer to the params
+ * is also passed to the C-based handler. */
+struct smm_stub_params {
+ u32 stack_size;
+ u32 stack_top;
+ u32 c_handler;
+ u32 c_handler_arg;
+ struct smm_runtime runtime;
+} __attribute__ ((packed));
+
+/*
+ * The stub is the entry point that sets up protected mode and stacks for each
+ * cpu. It then calls into the SMM handler module. It is encoded as an rmodule.
+ */
+extern unsigned char _binary_smmstub_start[];
+
+/* This is the SMM handler that the stub calls. It is encoded as an rmodule. */
+extern unsigned char _binary_smm_start[];
+
+/* Per cpu minimum stack size. */
+#define SMM_MINIMUM_STACK_SIZE 32
+
+/*
+ * The smm_entry_ins consists of 3 bytes. It is used when staggering SMRAM entry
+ * addresses across CPUs.
+ *
+ * 0xe9 <16-bit relative target> ; jmp <relative-offset>
+ */
+struct smm_entry_ins {
+ char jmp_rel;
+ uint16_t rel16;
+} __attribute__ ((packed));
+
+/*
+ * Place the entry instructions for num entries beginning at entry_start with
+ * a given stride. The entry_start is the highest entry point's address. All
+ * other entry points are stride size below the previous.
+ */
+static void smm_place_jmp_instructions(void *entry_start, int stride, int num,
+ void *jmp_target)
+{
+ int i;
+ char *cur;
+ struct smm_entry_ins entry = { .jmp_rel = 0xe9 };
+
+ /* Each entry point has an IP value of 0x8000. The SMBASE for each
+ * cpu is different so the effective address of the entry instruction
+ * is different. Therefore, the relative displacement for each entry
+ * instruction needs to be updated to reflect the current effective
+ * IP. Additionally, the IP result from the jmp instruction is
+ * calculated using the next instruction's address so the size of
+ * the jmp instruction needs to be taken into account. */
+ cur = entry_start;
+ for (i = 0; i < num; i++) {
+ uint32_t disp = (uint32_t)jmp_target;
+
+ disp -= sizeof(entry) + (uint32_t)cur;
+ printk(BIOS_DEBUG,
+ "SMM Module: placing jmp sequence at %p rel16 0x%04x\n",
+ cur, disp);
+ entry.rel16 = disp;
+ memcpy(cur, &entry, sizeof(entry));
+ cur -= stride;
+ }
+}
+
+/* Place stacks in base -> base + size region, but ensure the stacks don't
+ * overlap the staggered entry points. */
+static void *smm_stub_place_stacks(char *base, int size,
+ struct smm_loader_params *params)
+{
+ int total_stack_size;
+ char *stacks_top;
+
+ if (params->stack_top != NULL)
+ return params->stack_top;
+
+ /* If stack space is requested, assume the space lives in the lower
+ * half of SMRAM. */
+ total_stack_size = params->per_cpu_stack_size *
+ params->num_concurrent_stacks;
+
+ /* There has to be at least one stack user. */
+ if (params->num_concurrent_stacks < 1)
+ return NULL;
+
+ /* Total stack size cannot fit. */
+ if (total_stack_size > size)
+ return NULL;
+
+ /* Stacks extend down to SMBASE */
+ stacks_top = &base[total_stack_size];
+
+ return stacks_top;
+}
+
+/* Place the staggered entry points for each CPU. The entry points are
+ * staggered by the per cpu SMM save state size extending down from
+ * SMM_ENTRY_OFFSET. */
+static void smm_stub_place_staggered_entry_points(char *base,
+ const struct smm_loader_params *params, const struct rmodule *smm_stub)
+{
+ int stub_entry_offset;
+
+ stub_entry_offset = rmodule_entry_offset(smm_stub);
+
+ /* If there are staggered entry points or the stub is not located
+ * at the SMM entry point then jmp instructions need to be placed. */
+ if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
+ int num_entries;
+
+ base += SMM_ENTRY_OFFSET;
+ num_entries = params->num_concurrent_save_states;
+ /* Adjust beginning entry and number of entries down since
+ * the initial entry point doesn't need a jump sequence. */
+ if (stub_entry_offset == 0) {
+ base -= params->per_cpu_save_state_size;
+ num_entries--;
+ }
+ smm_place_jmp_instructions(base,
+ params->per_cpu_save_state_size,
+ num_entries,
+ rmodule_entry(smm_stub));
+ }
+}
+
+/*
+ * The stub setup code assumes it is completely contained within the
+ * default SMRAM size (0x10000). There are potentially 3 regions to place
+ * within the default SMRAM size:
+ * 1. Save state areas
+ * 2. Stub code
+ * 3. Stack areas
+ *
+ * The save state and stack areas are treated as contiguous for the number of
+ * concurrent areas requested. The save state always lives at the top of SMRAM
+ * space, and the entry point is at offset 0x8000.
+ */
+static int smm_module_setup_stub(void *smbase, struct smm_loader_params *params)
+{
+ int total_save_state_size;
+ int smm_stub_size;
+ int stub_entry_offset;
+ char *smm_stub_loc;
+ void *stacks_top;
+ int size;
+ char *base;
+ int i;
+ struct smm_stub_params *stub_params;
+ struct rmodule smm_stub;
+
+ base = smbase;
+ size = SMM_DEFAULT_SIZE;
+
+ /* The number of concurrent stacks cannot exceed CONFIG_MAX_CPUS. */
+ if (params->num_concurrent_stacks > CONFIG_MAX_CPUS)
+ return -1;
+
+ /* Fail if can't parse the smm stub rmodule. */
+ if (rmodule_parse(&_binary_smmstub_start, &smm_stub))
+ return -1;
+
+ /* Adjust remaining size to account for save state. */
+ total_save_state_size = params->per_cpu_save_state_size *
+ params->num_concurrent_save_states;
+ size -= total_save_state_size;
+
+ /* The save state size encroached over the first SMM entry point. */
+ if (size <= SMM_ENTRY_OFFSET)
+ return -1;
+
+ /* Need a minimum stack size and alignment. */
+ if (params->per_cpu_stack_size <= SMM_MINIMUM_STACK_SIZE ||
+ (params->per_cpu_stack_size & 3) != 0)
+ return -1;
+
+ smm_stub_loc = NULL;
+ smm_stub_size = rmodule_memory_size(&smm_stub);
+ stub_entry_offset = rmodule_entry_offset(&smm_stub);
+
+ /* Assume the stub is always small enough to live within the upper half of
+ * the SMRAM region after the save state space has been allocated. */
+ smm_stub_loc = &base[SMM_ENTRY_OFFSET];
+
+ /* Adjust for jmp instruction sequence. */
+ if (stub_entry_offset != 0) {
+ int entry_sequence_size = sizeof(struct smm_entry_ins);
+ /* Align up to 16 bytes. */
+ entry_sequence_size += 15;
+ entry_sequence_size &= ~15;
+ smm_stub_loc += entry_sequence_size;
+ smm_stub_size += entry_sequence_size;
+ }
+
+ /* Stub is too big to fit. */
+ if (smm_stub_size > (size - SMM_ENTRY_OFFSET))
+ return -1;
+
+ /* The stacks, if requested, live in the lower half of SMRAM space. */
+ size = SMM_ENTRY_OFFSET;
+
+ /* Ensure the stacks don't encroach onto the staggered SMM
+ * entry points. The staggered entry points extend
+ * below SMM_ENTRY_OFFSET by (number of concurrent
+ * save states - 1) * (save state size). */
+ if (params->num_concurrent_save_states > 1) {
+ size -= total_save_state_size;
+ size += params->per_cpu_save_state_size;
+ }
+
+ /* Place the stacks in the lower half of SMRAM. */
+ stacks_top = smm_stub_place_stacks(base, size, params);
+ if (stacks_top == NULL)
+ return -1;
+
+ /* Load the stub. */
+ if (rmodule_load(smm_stub_loc, &smm_stub))
+ return -1;
+
+ /* Place staggered entry points. */
+ smm_stub_place_staggered_entry_points(base, params, &smm_stub);
+
+ /* Setup the parameters for the stub code. */
+ stub_params = rmodule_parameters(&smm_stub);
+ stub_params->stack_top = (u32)stacks_top;
+ stub_params->stack_size = params->per_cpu_stack_size;
+ stub_params->c_handler = (u32)params->handler;
+ stub_params->c_handler_arg = (u32)params->handler_arg;
+ stub_params->runtime.smbase = (u32)smbase;
+ stub_params->runtime.save_state_size = params->per_cpu_save_state_size;
+
+ /* Initialize the APIC id to cpu number table to be 1:1 */
+ for (i = 0; i < params->num_concurrent_stacks; i++)
+ stub_params->runtime.apic_id_to_cpu[i] = i;
+
+ /* Allow the initiator to manipulate SMM stub parameters. */
+ params->runtime = &stub_params->runtime;
+
+ printk(BIOS_DEBUG, "SMM Module: stub loaded at %p. Will call %p(%p)\n",
+ smm_stub_loc, params->handler, params->handler_arg);
+
+ return 0;
+}
+
+/*
+ * smm_setup_relocation_handler assumes the callback is already loaded in
+ * memory. i.e. Another SMM module isn't chained to the stub. The other
+ * assumption is that the stub will be entered from the default SMRAM
+ * location: 0x30000 -> 0x40000.
+ */
+int smm_setup_relocation_handler(struct smm_loader_params *params)
+{
+ void *smram = (void *)SMM_DEFAULT_BASE;
+
+ /* There can't be more than 1 concurrent save state for the relocation
+ * handler because all CPUs default to 0x30000 as SMBASE. */
+ if (params->num_concurrent_save_states > 1)
+ return -1;
+
+ /* A handler has to be defined to call for relocation. */
+ if (params->handler == NULL)
+ return -1;
+
+ /* Since the relocation handler always uses a stack, adjust the number
+ * of concurrent stack users to be CONFIG_MAX_CPUS. */
+ if (params->num_concurrent_stacks == 0)
+ params->num_concurrent_stacks = CONFIG_MAX_CPUS;
+
+ return smm_module_setup_stub(smram, params);
+}
+
+/* The SMM module is placed within the provided region in the following
+ * manner:
+ * +-----------------+ <- smram + size
+ * | stacks |
+ * +-----------------+ <- smram + size - total_stack_size
+ * | ... |
+ * +-----------------+ <- smram + handler_size + SMM_DEFAULT_SIZE
+ * | handler |
+ * +-----------------+ <- smram + SMM_DEFAULT_SIZE
+ * | stub code |
+ * +-----------------+ <- smram
+ *
+ * It should be noted that this algorithm will not work for
+ * SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
+ * expects a region large enough to encompass the handler and stacks
+ * as well as the SMM_DEFAULT_SIZE.
+ */
+int smm_load_module(void *smram, int size, struct smm_loader_params *params)
+{
+ struct rmodule smm_mod;
+ int total_stack_size;
+ int handler_size;
+ int module_alignment;
+ int alignment_size;
+ char *base;
+
+ if (size <= SMM_DEFAULT_SIZE)
+ return -1;
+
+ /* Fail if can't parse the smm rmodule. */
+ if (rmodule_parse(&_binary_smm_start, &smm_mod))
+ return -1;
+
+ total_stack_size = params->per_cpu_stack_size *
+ params->num_concurrent_stacks;
+
+ /* Stacks start at the top of the region. */
+ base = smram;
+ base += size;
+ params->stack_top = base;
+
+ /* SMM module starts at offset SMM_DEFAULT_SIZE with the load alignment
+ * taken into account. */
+ base = smram;
+ base += SMM_DEFAULT_SIZE;
+ handler_size = rmodule_memory_size(&smm_mod);
+ module_alignment = rmodule_load_alignment(&smm_mod);
+ alignment_size = module_alignment - ((u32)base % module_alignment);
+ if (alignment_size != module_alignment) {
+ handler_size += alignment_size;
+ base += alignment_size;
+ }
+
+ /* Does the required amount of memory exceed the SMRAM region size? */
+ if ((total_stack_size + handler_size + SMM_DEFAULT_SIZE) > size)
+ return -1;
+
+ if (rmodule_load(base, &smm_mod))
+ return -1;
+
+ params->handler = rmodule_entry(&smm_mod);
+ params->handler_arg = rmodule_parameters(&smm_mod);
+
+ return smm_module_setup_stub(smram, params);
+}
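A usage sketch for the permanent-handler path, with placeholder geometry; a real caller passes its chip-specific TSEG base/size and save state size:

    #include <console/console.h>
    #include <cpu/x86/smm.h>

    static void install_smm_handler(void *tseg_base, int tseg_size, int cpus)
    {
        struct smm_loader_params params = {
            .per_cpu_stack_size = 1024,      /* placeholder */
            .num_concurrent_stacks = cpus,
            .per_cpu_save_state_size = 1024, /* placeholder, chip-specific */
            .num_concurrent_save_states = 1, /* SMBASEs already staggered */
        };

        if (smm_load_module(tseg_base, tseg_size, &params) < 0)
            printk(BIOS_ERR, "SMM module loading failed\n");
    }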
diff --git a/src/cpu/x86/smm/smm_stub.S b/src/cpu/x86/smm/smm_stub.S
new file mode 100644
index 0000000000..07eb5dcb6d
--- /dev/null
+++ b/src/cpu/x86/smm/smm_stub.S
@@ -0,0 +1,145 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2012 ChromeOS Authors
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+/*
+ * The stub is a generic wrapper for bootstrapping a C-based SMM handler. Its
+ * primary purpose is to put the CPU into protected mode with a stack and call
+ * into the C handler.
+ *
+ * The stub_entry_params structure needs to correspond to the C structure
+ * found in smm.h.
+ */
+
+.code32
+.section ".module_parameters", "aw", @progbits
+stub_entry_params:
+stack_size:
+.long 0
+stack_top:
+.long 0
+c_handler:
+.long 0
+c_handler_arg:
+.long 0
+/* struct smm_runtime begins here. */
+smm_runtime:
+smbase:
+.long 0
+save_state_size:
+.long 0
+/* apic_to_cpu_num is a table mapping the default APIC id to cpu num. If the
+ * APIC id is found at a given index, that index is the contiguous cpu
+ * number. */
+apic_to_cpu_num:
+.fill CONFIG_MAX_CPUS,1,0xff
+/* end struct smm_runtime */
+
+.data
+/* Provide a fallback stack to use when a valid cpu number cannot be found. */
+fallback_stack_bottom:
+.skip 128
+fallback_stack_top:
+
+.text
+.code16
+.global smm_handler_start
+smm_handler_start:
+ movl $(smm_relocate_gdt), %ebx
+ data32 lgdt (%ebx)
+
+ movl %cr0, %eax
+ andl $0x1FFAFFD1, %eax /* CD,NW,PG,AM,WP,NE,TS,EM,MP = 0 */
+ orl $0x1, %eax /* PE = 1 */
+ movl %eax, %cr0
+
+ /* Enable protected mode */
+ data32 ljmp $0x8, $smm_trampoline32
+
+.align 4
+smm_relocate_gdt:
+ /* The first GDT entry is used for the lgdt instruction. */
+ .word smm_relocate_gdt_end - smm_relocate_gdt - 1
+ .long smm_relocate_gdt
+ .word 0x0000
+
+ /* gdt selector 0x08, flat code segment */
+ .word 0xffff, 0x0000
+ .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */
+
+ /* gdt selector 0x10, flat data segment */
+ .word 0xffff, 0x0000
+ .byte 0x00, 0x93, 0xcf, 0x00
+smm_relocate_gdt_end:
+
+.align 4
+.code32
+.global smm_trampoline32
+smm_trampoline32:
+ /* Use flat data segment */
+ movw $0x10, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %ss
+ movw %ax, %fs
+ movw %ax, %gs
+
+ /* The CPU number is calculated by reading the initial APIC id. Since
+ * the OS can manipulate the APIC id, use the non-changing cpuid result
+ * for the APIC id (ebx[31:24]). A table is used to handle a
+ * discontiguous APIC id space. */
+ mov $1, %eax
+ cpuid
+ bswap %ebx /* Default APIC id in bl. */
+ mov $(apic_to_cpu_num), %eax
+ xor %ecx, %ecx
+
+1:
+ cmp (%eax, %ecx, 1), %bl
+ je 1f
+ inc %ecx
+ cmp $CONFIG_MAX_CPUS, %ecx
+ jne 1b
+ /* This is bad. A stack cannot be assigned because a cpu number could
+ * not be determined. Use the fallback stack and check this condition
+ * in the C handler. */
+ movl $(fallback_stack_top), %esp
+ jmp 2f
+1:
+ movl stack_size, %eax
+ mul %ecx
+ movl stack_top, %edx
+ subl %eax, %edx
+ mov %edx, %esp
+
+2:
+ /* Call into the c-based SMM relocation function with the platform
+ * parameters. Equivalent to:
+ * c_handler(c_handler_params, cpu_num, smm_runtime);
+ */
+ push $(smm_runtime)
+ push %ecx
+ push c_handler_arg
+ mov c_handler, %eax
+ call *%eax
+
+ /* Exit from SM mode. */
+ rsm
+
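The per-cpu stack selection done in assembly above (the mul/sub on stack_size and stack_top) amounts to carving contiguous stacks downward from stack_top. Restated as a C sketch:

    /* cpu 0 gets [stack_top - stack_size, stack_top), cpu 1 the next
     * stack_size bytes below that, and so on. */
    static inline u32 stack_for_cpu(u32 stack_top, u32 stack_size, u32 cpu_num)
    {
        return stack_top - cpu_num * stack_size;
    }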
diff --git a/src/include/cpu/x86/smm.h b/src/include/cpu/x86/smm.h
index 00a8c7a218..302873f893 100644
--- a/src/include/cpu/x86/smm.h
+++ b/src/include/cpu/x86/smm.h
@@ -379,6 +379,14 @@ int __attribute__((weak)) mainboard_io_trap_handler(int smif);
void southbridge_smi_set_eos(void);
+#if CONFIG_SMM_MODULES
+void cpu_smi_handler(void);
+void northbridge_smi_handler(void);
+void southbridge_smi_handler(void);
+void mainboard_smi_gpi(u16 gpi_sts);
+int mainboard_smi_apmc(u8 data);
+void mainboard_smi_sleep(u8 slp_typ);
+#else
void __attribute__((weak)) cpu_smi_handler(unsigned int node, smm_state_save_area_t *state_save);
void __attribute__((weak)) northbridge_smi_handler(unsigned int node, smm_state_save_area_t *state_save);
void __attribute__((weak)) southbridge_smi_handler(unsigned int node, smm_state_save_area_t *state_save);
@@ -386,10 +394,14 @@ void __attribute__((weak)) southbridge_smi_handler(unsigned int node, smm_state_
void __attribute__((weak)) mainboard_smi_gpi(u16 gpi_sts);
int __attribute__((weak)) mainboard_smi_apmc(u8 data);
void __attribute__((weak)) mainboard_smi_sleep(u8 slp_typ);
+#endif /* CONFIG_SMM_MODULES */
#if !CONFIG_SMM_TSEG
void smi_release_lock(void);
#define tseg_relocate(ptr)
+#elif CONFIG_SMM_MODULES
+#define tseg_relocate(ptr)
+#define smi_get_tseg_base() 0
#else
/* Return address of TSEG base */
u32 smi_get_tseg_base(void);
@@ -400,4 +412,72 @@ void tseg_relocate(void **ptr);
/* Get PMBASE address */
u16 smm_get_pmbase(void);
+#if CONFIG_SMM_MODULES
+
+struct smm_runtime {
+ u32 smbase;
+ u32 save_state_size;
+ /* The apic_id_to_cpu provides a mapping from APIC id to cpu number.
+ * The cpu number is the index into the array whose value matches the
+ * default APIC id. The stub loader
+ * initializes this array with a 1:1 mapping. If the APIC ids are not
+ * contiguous like the 1:1 mapping it is up to the caller of the stub
+ * loader to adjust this mapping. */
+ u8 apic_id_to_cpu[CONFIG_MAX_CPUS];
+} __attribute__ ((packed));
+
+typedef void (*smm_handler_t)(void *arg, int cpu,
+ const struct smm_runtime *runtime);
+
+#ifdef __SMM__
+/* SMM Runtime helpers. */
+
+/* Entry point for SMM modules. */
+void smm_handler_start(void *arg, int cpu, const struct smm_runtime *runtime);
+
+/* Retrieve SMM save state for a given CPU. WARNING: This does not take into
+ * account CPUs which are configured to not save their state to RAM. */
+void *smm_get_save_state(int cpu);
+
+#else
+/* SMM Module Loading API */
+
+/* The smm_loader_params structure provides direction to the SMM loader:
+ * - stack_top - optional external stack provided to loader. It must be at
+ * least per_cpu_stack_size * num_concurrent_stacks in size.
+ * - per_cpu_stack_size - stack size per cpu for smm modules.
+ * - num_concurrent_stacks - number of concurrent cpus in the handler needing
+ *                           a stack; optional for setting up the relocation
+ *                           handler.
+ * - per_cpu_save_state_size - the smm save state size per cpu
+ * - num_concurrent_save_states - number of concurrent cpus needing save state
+ * space
+ * - handler - optional handler to call. Only used during SMM relocation setup.
+ * - handler_arg - optional argument to handler for SMM relocation setup. For
+ * loading the SMM module, the handler_arg is filled in with
+ * the address of the module's parameters (if present).
+ * - runtime - this field is a result only. The SMM runtime location is filled
+ * into this field so the code doing the loading can manipulate the
+ * runtime's assumptions. e.g. updating the apic id to cpu map to
+ * handle sparse apic id space.
+ */
+struct smm_loader_params {
+ void *stack_top;
+ int per_cpu_stack_size;
+ int num_concurrent_stacks;
+
+ int per_cpu_save_state_size;
+ int num_concurrent_save_states;
+
+ smm_handler_t handler;
+ void *handler_arg;
+
+ struct smm_runtime *runtime;
+};
+
+/* Both of these return 0 on success, < 0 on failure. */
+int smm_setup_relocation_handler(struct smm_loader_params *params);
+int smm_load_module(void *smram, int size, struct smm_loader_params *params);
+#endif /* __SMM__ */
+#endif /* CONFIG_SMM_MODULES */
+
#endif
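How the two loader entry points fit together, as a hedged sketch: relocation first uses the default SMRAM window with a caller-supplied handler, then the permanent module is loaded into TSEG via smm_load_module() as shown earlier. The save state layout that the relocation handler would rewrite is chip-specific and not shown:

    #include <cpu/x86/smm.h>

    static void relocation_handler(void *arg, int cpu,
                                   const struct smm_runtime *runtime)
    {
        /* Chip-specific: locate this cpu's save state within the default
         * SMRAM window and write its new, staggered SMBASE. */
    }

    static int setup_relocation(int cpus)
    {
        struct smm_loader_params params = {
            .per_cpu_save_state_size = 1024, /* placeholder */
            .num_concurrent_save_states = 1, /* all cpus enter at 0x30000 */
            .num_concurrent_stacks = cpus,
            .handler = relocation_handler,
        };
        return smm_setup_relocation_handler(&params);
    }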