From 269563a423f9291e84b5a93859a3e17767cf27a0 Mon Sep 17 00:00:00 2001
From: Stefan Reinauer
Date: Mon, 19 Jan 2009 21:20:22 +0000
Subject: First shot at factoring SMM code into generic parts and southbridge
 specific parts.

This should help to reduce the code duplication for Rudolf's K8/VIA SMM
implementation...

Signed-off-by: Stefan Reinauer
Acked-by: Joseph Smith

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@3870 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
---
 src/cpu/amd/model_10xxx/Config.lb |   1 +
 src/cpu/amd/model_fxx/Config.lb   |   1 +
 src/cpu/amd/model_gx1/Config.lb   |   1 +
 src/cpu/amd/model_gx2/Config.lb   |   3 +-
 src/cpu/amd/model_lx/Config.lb    |   3 +-
 src/cpu/intel/ep80579/Config.lb   |   1 +
 src/cpu/intel/model_69x/Config.lb |   1 +
 src/cpu/intel/model_6dx/Config.lb |   1 +
 src/cpu/intel/model_6ex/Config.lb |   1 +
 src/cpu/intel/model_6fx/Config.lb |   1 +
 src/cpu/intel/model_6xx/Config.lb |   1 +
 src/cpu/intel/model_f0x/Config.lb |   1 +
 src/cpu/intel/model_f1x/Config.lb |   1 +
 src/cpu/intel/model_f2x/Config.lb |   1 +
 src/cpu/intel/model_f3x/Config.lb |   1 +
 src/cpu/intel/model_f4x/Config.lb |   1 +
 src/cpu/via/model_c3/Config.lb    |   1 +
 src/cpu/via/model_c7/Config.lb    |   1 +
 src/cpu/x86/smm/Config.lb         |  47 +++++++++
 src/cpu/x86/smm/smihandler.c      | 203 ++++++++++++++++++++++++++++++++++++++
 src/cpu/x86/smm/smm.ld            |  53 ++++++++++
 src/cpu/x86/smm/smmhandler.S      | 201 +++++++++++++++++++++++++++++++++++++
 src/cpu/x86/smm/smmrelocate.S     | 168 +++++++++++++++++++++++++++++++
 23 files changed, 692 insertions(+), 2 deletions(-)
 create mode 100644 src/cpu/x86/smm/Config.lb
 create mode 100644 src/cpu/x86/smm/smihandler.c
 create mode 100644 src/cpu/x86/smm/smm.ld
 create mode 100644 src/cpu/x86/smm/smmhandler.S
 create mode 100644 src/cpu/x86/smm/smmrelocate.S

(limited to 'src/cpu')

diff --git a/src/cpu/amd/model_10xxx/Config.lb b/src/cpu/amd/model_10xxx/Config.lb
index 94aa01afae..47303be0e8 100644
--- a/src/cpu/amd/model_10xxx/Config.lb
+++ b/src/cpu/amd/model_10xxx/Config.lb
@@ -31,6 +31,7 @@ dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
 dir /cpu/x86/pae
+dir /cpu/x86/smm
 dir /cpu/amd/mtrr
 dir /cpu/amd/quadcore
 dir /cpu/amd/microcode
diff --git a/src/cpu/amd/model_fxx/Config.lb b/src/cpu/amd/model_fxx/Config.lb
index 0adfa28d08..60463e3e62 100644
--- a/src/cpu/amd/model_fxx/Config.lb
+++ b/src/cpu/amd/model_fxx/Config.lb
@@ -12,6 +12,7 @@ dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
 dir /cpu/x86/pae
+dir /cpu/x86/smm
 dir /cpu/amd/mtrr
 dir /cpu/amd/dualcore
 dir /cpu/amd/microcode
diff --git a/src/cpu/amd/model_gx1/Config.lb b/src/cpu/amd/model_gx1/Config.lb
index bf76bd97f0..4a79a30529 100644
--- a/src/cpu/amd/model_gx1/Config.lb
+++ b/src/cpu/amd/model_gx1/Config.lb
@@ -3,4 +3,5 @@ dir /cpu/x86/fpu
 dir /cpu/x86/mmx
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 driver model_gx1_init.o
diff --git a/src/cpu/amd/model_gx2/Config.lb b/src/cpu/amd/model_gx2/Config.lb
index e05141c5d9..8294c4521d 100644
--- a/src/cpu/amd/model_gx2/Config.lb
+++ b/src/cpu/amd/model_gx2/Config.lb
@@ -3,6 +3,7 @@ dir /cpu/x86/fpu
 dir /cpu/x86/mmx
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 driver model_gx2_init.o
 object cpubug.o
-object vsmsetup.o
\ No newline at end of file
+object vsmsetup.o
diff --git a/src/cpu/amd/model_lx/Config.lb b/src/cpu/amd/model_lx/Config.lb
index 43c6254061..42e90e436d 100644
--- a/src/cpu/amd/model_lx/Config.lb
+++ b/src/cpu/amd/model_lx/Config.lb
@@ -3,6 +3,7 @@ dir /cpu/x86/fpu
 dir /cpu/x86/mmx
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 driver model_lx_init.o
 object cpubug.o
-object vsmsetup.o
\ No newline at end of file
+object vsmsetup.o
diff --git a/src/cpu/intel/ep80579/Config.lb b/src/cpu/intel/ep80579/Config.lb
index 4bf4ee3921..626af76dc9 100644
--- a/src/cpu/intel/ep80579/Config.lb
+++ b/src/cpu/intel/ep80579/Config.lb
@@ -26,5 +26,6 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 driver ep80579_init.o
diff --git a/src/cpu/intel/model_69x/Config.lb b/src/cpu/intel/model_69x/Config.lb
index 03a4fd70fa..46909d27f7 100644
--- a/src/cpu/intel/model_69x/Config.lb
+++ b/src/cpu/intel/model_69x/Config.lb
@@ -5,5 +5,6 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 driver model_69x_init.o
diff --git a/src/cpu/intel/model_6dx/Config.lb b/src/cpu/intel/model_6dx/Config.lb
index 46656a005d..15746148e7 100644
--- a/src/cpu/intel/model_6dx/Config.lb
+++ b/src/cpu/intel/model_6dx/Config.lb
@@ -5,5 +5,6 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 driver model_6dx_init.o
diff --git a/src/cpu/intel/model_6ex/Config.lb b/src/cpu/intel/model_6ex/Config.lb
index f45d70f9ef..a9c4537372 100644
--- a/src/cpu/intel/model_6ex/Config.lb
+++ b/src/cpu/intel/model_6ex/Config.lb
@@ -8,6 +8,7 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 dir /cpu/intel/hyperthreading
 driver model_6ex_init.o
diff --git a/src/cpu/intel/model_6fx/Config.lb b/src/cpu/intel/model_6fx/Config.lb
index 549022d993..74f8be6168 100644
--- a/src/cpu/intel/model_6fx/Config.lb
+++ b/src/cpu/intel/model_6fx/Config.lb
@@ -8,6 +8,7 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 dir /cpu/intel/hyperthreading
 driver model_6fx_init.o
diff --git a/src/cpu/intel/model_6xx/Config.lb b/src/cpu/intel/model_6xx/Config.lb
index 5b1fde1ba5..de33edd64e 100644
--- a/src/cpu/intel/model_6xx/Config.lb
+++ b/src/cpu/intel/model_6xx/Config.lb
@@ -5,5 +5,6 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 driver model_6xx_init.o
diff --git a/src/cpu/intel/model_f0x/Config.lb b/src/cpu/intel/model_f0x/Config.lb
index 2458c81c14..6afad2ad40 100644
--- a/src/cpu/intel/model_f0x/Config.lb
+++ b/src/cpu/intel/model_f0x/Config.lb
@@ -7,5 +7,6 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 driver model_f0x_init.o
diff --git a/src/cpu/intel/model_f1x/Config.lb b/src/cpu/intel/model_f1x/Config.lb
index 5387d455f1..b6ae5085e1 100644
--- a/src/cpu/intel/model_f1x/Config.lb
+++ b/src/cpu/intel/model_f1x/Config.lb
@@ -7,5 +7,6 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 driver model_f1x_init.o
diff --git a/src/cpu/intel/model_f2x/Config.lb b/src/cpu/intel/model_f2x/Config.lb
index 3cf5062931..314205f716 100644
--- a/src/cpu/intel/model_f2x/Config.lb
+++ b/src/cpu/intel/model_f2x/Config.lb
@@ -7,6 +7,7 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 dir /cpu/intel/hyperthreading
 driver model_f2x_init.o
diff --git a/src/cpu/intel/model_f3x/Config.lb b/src/cpu/intel/model_f3x/Config.lb
index 175ff58919..0f80cd853b 100644
--- a/src/cpu/intel/model_f3x/Config.lb
+++ b/src/cpu/intel/model_f3x/Config.lb
@@ -7,6 +7,7 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 dir /cpu/intel/hyperthreading
 driver model_f3x_init.o
diff --git a/src/cpu/intel/model_f4x/Config.lb b/src/cpu/intel/model_f4x/Config.lb
index 66f6ceb00f..cef79889e5 100644
--- a/src/cpu/intel/model_f4x/Config.lb
+++ b/src/cpu/intel/model_f4x/Config.lb
@@ -7,6 +7,7 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 dir /cpu/intel/hyperthreading
 driver model_f4x_init.o
diff --git a/src/cpu/via/model_c3/Config.lb b/src/cpu/via/model_c3/Config.lb
index 174d3ccc16..de7d9142af 100644
--- a/src/cpu/via/model_c3/Config.lb
+++ b/src/cpu/via/model_c3/Config.lb
@@ -26,5 +26,6 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 driver model_c3_init.o
diff --git a/src/cpu/via/model_c7/Config.lb b/src/cpu/via/model_c7/Config.lb
index 6bc6001908..d956832162 100644
--- a/src/cpu/via/model_c7/Config.lb
+++ b/src/cpu/via/model_c7/Config.lb
@@ -26,5 +26,6 @@ dir /cpu/x86/mmx
 dir /cpu/x86/sse
 dir /cpu/x86/lapic
 dir /cpu/x86/cache
+dir /cpu/x86/smm
 dir /cpu/intel/microcode
 driver model_c7_init.o
diff --git a/src/cpu/x86/smm/Config.lb b/src/cpu/x86/smm/Config.lb
new file mode 100644
index 0000000000..8edbabdf3d
--- /dev/null
+++ b/src/cpu/x86/smm/Config.lb
@@ -0,0 +1,47 @@
+##
+## This file is part of the coreboot project.
+##
+## Copyright (C) 2008 coresystems GmbH
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+##
+
+uses HAVE_SMI_HANDLER
+
+if HAVE_SMI_HANDLER
+        object smmrelocate.S
+
+        smmobject smmhandler.S
+        smmobject smihandler.o
+
+        makerule smm.o
+                depends "$(SMM-OBJECTS) printk.o vtxprintf.o $(LIBGCC_FILE_NAME)"
+                action "$(CC) $(DISTRO_LFLAGS) -nostdlib -r -o $@ $(SMM-OBJECTS) printk.o vtxprintf.o $(LIBGCC_FILE_NAME)"
+        end
+
+        makerule smm
+                depends "smm.o $(TOP)/src/cpu/x86/smm/smm.ld ldoptions"
+                action "$(CC) $(DISTRO_LFLAGS) -nostdlib -nostartfiles -static -o smm.elf -T $(TOP)/src/cpu/x86/smm/smm.ld smm.o"
+                action "$(CROSS_COMPILE)nm -n smm.elf | sort > smm.map"
+                action "$(OBJCOPY) -O binary smm.elf smm"
+        end
+
+        makerule smm_bin.c
+                depends "smm"
+                action "(echo 'unsigned char smm[] = {'; od -vtx1 smm | sed -e 's,^[0-9]* *,,' -e 's:[0-9a-f][0-9a-f] :0x&,:g' -e 's:[0-9a-f][0-9a-f]$$:0x&,:'; echo '}; unsigned int smm_len = '; wc -c smm |awk '{print $$1;}' ; echo ';') > smm_bin.c"
+        end
+
+        object ./smm_bin.o
+end
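The smm_bin.c rule above wraps the linked and objcopy'd SMM binary into a C array, so that the code which installs the handler can copy it into the ASEG and knows its length. Roughly, the generated file has this shape (illustrative only; the actual byte values and the length come from the od/sed/wc pipeline above):

    /* Sketch of the generated smm_bin.c -- not the real build output. */
    unsigned char smm[] = {
            0x00, 0x00, 0x00, 0x00, /* ... one 0xNN entry per byte of the `smm' binary ... */
    };
    unsigned int smm_len = 4; /* real value: output of `wc -c smm' */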
diff --git a/src/cpu/x86/smm/smihandler.c b/src/cpu/x86/smm/smihandler.c
new file mode 100644
index 0000000000..0dc8926653
--- /dev/null
+++ b/src/cpu/x86/smm/smihandler.c
@@ -0,0 +1,203 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2008 coresystems GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+void southbridge_smi_set_eos(void);
+
+#define DEBUG_SMI
+
+typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;
+
+/* SMI multiprocessing semaphore */
+static volatile smi_semaphore smi_handler_status = SMI_UNLOCKED;
+
+static int smi_obtain_lock(void)
+{
+        u8 ret = SMI_LOCKED;
+
+        asm volatile (
+                "movb %2, %%al\n"
+                "xchgb %%al, %1\n"
+                "movb %%al, %0\n"
+                : "=g" (ret), "=m" (smi_handler_status)
+                : "g" (SMI_LOCKED)
+                : "eax"
+        );
+
+        return (ret == SMI_UNLOCKED);
+}
+
+static void smi_release_lock(void)
+{
+        asm volatile (
+                "movb %1, %%al\n"
+                "xchgb %%al, %0\n"
+                : "=m" (smi_handler_status)
+                : "g" (SMI_UNLOCKED)
+                : "eax"
+        );
+}
+
+#define LAPIC_ID 0xfee00020
+static inline __attribute__((always_inline)) unsigned long nodeid(void)
+{
+        return (*((volatile unsigned long *)(LAPIC_ID)) >> 24);
+}
+
+/* ********************* smi_util ************************* */
+
+/* Data */
+#define UART_RBR 0x00
+#define UART_TBR 0x00
+
+/* Control */
+#define UART_IER 0x01
+#define UART_IIR 0x02
+#define UART_FCR 0x02
+#define UART_LCR 0x03
+#define UART_MCR 0x04
+#define UART_DLL 0x00
+#define UART_DLM 0x01
+
+/* Status */
+#define UART_LSR 0x05
+#define UART_MSR 0x06
+#define UART_SCR 0x07
+
+static int uart_can_tx_byte(void)
+{
+        return inb(TTYS0_BASE + UART_LSR) & 0x20;
+}
+
+static void uart_wait_to_tx_byte(void)
+{
+        while(!uart_can_tx_byte())
+        ;
+}
+
+static void uart_wait_until_sent(void)
+{
+        while(!(inb(TTYS0_BASE + UART_LSR) & 0x40))
+        ;
+}
+
+static void uart_tx_byte(unsigned char data)
+{
+        uart_wait_to_tx_byte();
+        outb(data, TTYS0_BASE + UART_TBR);
+        /* Make certain the data clears the fifos */
+        uart_wait_until_sent();
+}
+
+void console_tx_flush(void)
+{
+        uart_wait_to_tx_byte();
+}
+
+void console_tx_byte(unsigned char byte)
+{
+        if (byte == '\n')
+                uart_tx_byte('\r');
+        uart_tx_byte(byte);
+}
+
+/* ********************* smi_util ************************* */
+
+
+void io_trap_handler(int smif)
+{
+        southbridge_io_trap_handler(smif);
+}
+
+/**
+ * @brief Set the EOS bit
+ */
+static void smi_set_eos(void)
+{
+        southbridge_smi_set_eos();
+}
+
+/**
+ * @brief Interrupt handler for SMI#
+ *
+ * @param smm_revision revision of the smm state save map
+ */
+
+void smi_handler(u32 smm_revision)
+{
+        u8 reg8;
+        u16 pmctrl;
+        u16 pm1_sts;
+        u32 smi_sts, gpe0_sts, tco_sts;
+        unsigned int node;
+        smm_state_save_area_t state_save;
+
+        /* Are we ok to execute the handler? */
+        if (!smi_obtain_lock())
+                return;
+
+        node=nodeid();
+
+#ifdef DEBUG_SMI
+        console_loglevel = DEFAULT_CONSOLE_LOGLEVEL;
+#else
+        console_loglevel = 1;
+#endif
+
+        printk_debug("\nSMI# #%d\n", node);
+
+        switch (smm_revision) {
+        case 0x00030007:
+                state_save.type = LEGACY;
+                state_save.legacy_state_save = (legacy_smm_state_save_area_t *)
+                        (0xa8000 + 0x7e00 - (node * 0x400));
+                break;
+        case 0x00030100:
+                state_save.type = EM64T;
+                state_save.em64t_state_save = (em64t_smm_state_save_area_t *)
+                        (0xa8000 + 0x7d00 - (node * 0x400));
+                break;
+        case 0x00030064:
+                state_save.type = AMD64;
+                state_save.amd64_state_save = (amd64_smm_state_save_area_t *)
+                        (0xa8000 + 0x7e00 - (node * 0x400));
+                break;
+        default:
+                printk_debug("smm_revision: 0x%08x\n", smm_revision);
+                printk_debug("SMI# not supported on your CPU\n");
+                /* Don't release lock, so no further SMI will happen,
+                 * if we don't handle it anyways.
+                 */
+                return;
+        }
+
+        southbridge_smi_handler(node, &state_save);
+
+        smi_release_lock();
+
+        /* De-assert SMI# signal to allow another SMI */
+        smi_set_eos();
+}
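smi_obtain_lock() and smi_release_lock() above implement a plain test-and-set spinlock with xchgb: every core that enters SMM races for the semaphore, exactly one wins and runs the C handler, and the losers return immediately. The same logic written with a GCC builtin instead of inline assembly, as an equivalent sketch only (not what the file uses; the 0/1 encoding here is hypothetical and inverted relative to the SMI_LOCKED/SMI_UNLOCKED enum):

    /* Equivalent formulation of the SMI semaphore using GCC atomics. */
    static volatile unsigned char smi_lock; /* 0 = free, 1 = taken */

    static int try_obtain_lock(void)
    {
            /* Atomically store 1 and get the previous value; 0 means we took the lock. */
            return __sync_lock_test_and_set(&smi_lock, 1) == 0;
    }

    static void release_lock(void)
    {
            __sync_lock_release(&smi_lock); /* atomically store 0 again */
    }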
diff --git a/src/cpu/x86/smm/smm.ld b/src/cpu/x86/smm/smm.ld
new file mode 100644
index 0000000000..1b25c2d2f8
--- /dev/null
+++ b/src/cpu/x86/smm/smm.ld
@@ -0,0 +1,53 @@
+
+/* Maximum number of CPUs/cores */
+CPUS = 4;
+
+SECTIONS
+{
+        /* This is the actual SMM handler.
+         *
+         * We just put code, rodata, data and bss all in a row.
+         */
+        . = 0xa0000;
+        .handler (.): {
+                /* Assembler stub */
+                *(.handler)
+
+                /* C code of the SMM handler */
+                *(.text);
+                *(.text.*);
+
+                /* C read-only data of the SMM handler */
+                . = ALIGN(16);
+                *(.rodata)
+                *(.rodata.*)
+
+                /* C read-write data of the SMM handler */
+                . = ALIGN(4);
+                *(.data)
+
+                /* C uninitialized data of the SMM handler */
+                . = ALIGN(4);
+                *(.bss)
+                *(.sbss)
+
+                /* What is this? */
+                *(COMMON)
+                . = ALIGN(4);
+        }
+
+        /* We are using the ASEG interleaved to stuff the SMM handlers
+         * for all CPU cores in there. The jump table redirects the execution
+         * to the actual SMM handler
+         */
+        . = 0xa8000 - (( CPUS - 1) * 0x400);
+        .jumptable : {
+                *(.jumptable)
+        }
+
+        /DISCARD/ : {
+                *(.comment)
+                *(.note)
+                *(.note.*)
+        }
+}
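The 0xa8000 - ((CPUS - 1) * 0x400) expression places the .jumptable so that core 0's entry lands exactly at 0xa8000 and each further core sits 0x400 bytes below the previous one. With the CPUS = 4 defined above that is 0xa7400, the boundary shown in the smmhandler.S memory map below. A trivial stand-alone check of that arithmetic (not part of the patch):

    #include <assert.h>

    #define CPUS 4 /* must match smm.ld */

    int main(void)
    {
            unsigned long jumptable = 0xa8000 - (CPUS - 1) * 0x400;
            assert(jumptable == 0xa7400); /* matches the 0xa7400 line in the layout diagram */
            return 0;
    }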
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
new file mode 100644
index 0000000000..f1bc550365
--- /dev/null
+++ b/src/cpu/x86/smm/smmhandler.S
@@ -0,0 +1,201 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2008 coresystems GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+/* NOTE: This handler assumes the SMM window goes from 0xa0000
+ * to 0xaffff. In fact, at least on Intel Core CPUs (i945 chipset)
+ * the SMM window is 128K big, covering 0xa0000 to 0xbffff.
+ * So there is a lot of potential for growth in here. Let's stick
+ * to 64k if we can though.
+ */
+
+/*
+ * +--------------------------------+ 0xaffff
+ * | Save State Map Node 0          |
+ * | Save State Map Node 1          |
+ * | Save State Map Node 2          |
+ * | Save State Map Node 3          |
+ * | ...                            |
+ * +--------------------------------+ 0xaf000
+ * |                                |
+ * |                                |
+ * |                                |
+ * +--------------------------------+ 0xa8400
+ * | SMM Entry Node 0 (+ stack)     |
+ * +--------------------------------+ 0xa8000
+ * | SMM Entry Node 1 (+ stack)     |
+ * | SMM Entry Node 2 (+ stack)     |
+ * | SMM Entry Node 3 (+ stack)     |
+ * | ...                            |
+ * +--------------------------------+ 0xa7400
+ * |                                |
+ * | SMM Handler                    |
+ * |                                |
+ * +--------------------------------+ 0xa0000
+ *
+ */
+
+#include
+
+#define LAPIC_ID 0xfee00020
+
+/* SMM_HANDLER_OFFSET is the 16bit offset within the ASEG
+ * at which smm_handler_start lives. At the moment the handler
+ * lives right at 0xa0000, so the offset is 0.
+ */
+
+#define SMM_HANDLER_OFFSET 0x0000
+
+/* initially SMM is some sort of real mode. Let gcc know
+ * how to treat the SMM handler stub
+ */
+
+.section ".handler", "a", @progbits
+
+.code16
+
+/**
+ * SMM code to enable protected mode and jump to the
+ * C-written function void smi_handler(u32 smm_revision)
+ *
+ * All the bad magic is not all that bad after all.
+ */
+smm_handler_start:
+        movw $(smm_gdtptr16 - smm_handler_start + SMM_HANDLER_OFFSET), %bx
+        data32 lgdt %cs:(%bx)
+
+        movl %cr0, %eax
+        andl $0x7FFAFFD1, %eax /* PG,AM,WP,NE,TS,EM,MP = 0 */
+        orl $0x60000001, %eax /* CD, NW, PE = 1 */
+        movl %eax, %cr0
+
+        /* Enable protected mode */
+        data32 ljmp $0x08, $1f
+
+.code32
+1:
+        /* Use flat data segment */
+        movw $0x10, %ax
+        movw %ax, %ds
+        movw %ax, %es
+        movw %ax, %ss
+        movw %ax, %fs
+        movw %ax, %gs
+
+        /* Get this CPU's LAPIC ID */
+        movl $LAPIC_ID, %esi
+        movl (%esi), %ecx
+        shr $24, %ecx
+
+        /* calculate stack offset by multiplying the APIC ID
+         * by 1024 (0x400), and save that offset in ebp.
+         */
+        shl $10, %ecx
+        movl %ecx, %ebp
+
+        /* We put the stack for each core right above
+         * its SMM entry point. Core 0 starts at 0xa8000,
+         * we spare 0x10 bytes for the jump to be sure.
+         */
+        movl $0xa8010, %eax
+        subl %ecx, %eax /* subtract offset, see above */
+        movl %eax, %ebx /* Save bottom of stack in ebx */
+
+#define SMM_STACK_SIZE (0x400 - 0x10)
+        /* clear stack */
+        cld
+        movl %eax, %edi
+        movl $(SMM_STACK_SIZE >> 2), %ecx
+        xorl %eax, %eax
+        rep stosl
+
+        /* set new stack */
+        addl $SMM_STACK_SIZE, %ebx
+        movl %ebx, %esp
+
+        /* Get SMM revision */
+        movl $0xa8000 + 0x7efc, %ebx /* core 0 address */
+        subl %ebp, %ebx /* subtract core X offset */
+        movl (%ebx), %eax
+        pushl %eax
+
+        /* Call 32bit C handler */
+        call smi_handler
+
+        /* To return, just do rsm. It will "clean up" protected mode */
+        rsm
+
+.code16
+
+.align 4, 0xff
+
+smm_gdtptr16:
+        .word smm_gdt_end - smm_gdt - 1
+        .long smm_gdt - smm_handler_start + 0xa0000 + SMM_HANDLER_OFFSET
+
+.code32
+
+smm_gdt:
+        /* The first GDT entry can not be used. Keep it zero */
+        .long 0x00000000, 0x00000000
+
+        /* gdt selector 0x08, flat code segment */
+        .word 0xffff, 0x0000
+        .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */
+
+        /* gdt selector 0x10, flat data segment */
+        .word 0xffff, 0x0000
+        .byte 0x00, 0x93, 0xcf, 0x00
+
+smm_gdt_end:
+
+
+.section ".jumptable", "a", @progbits
+
+/* This is the SMM jump table. All cores use the same SMM handler
+ * for simplicity. But SMM Entry needs to be different due to the
+ * save state area. The jump table makes sure all CPUs jump into the
+ * real handler on SMM entry.
+ */
+
+/* This code currently supports up to 4 CPU cores. If more than 4 CPU cores
+ * shall be used, below table has to be updated, as well as smm.ld
+ */
+
+/* GNU AS/LD will always generate code that assumes CS is 0xa000. In reality
+ * CS will be set to SMM_BASE[19:4] though. Knowing that the smm handler is the
+ * first thing in the ASEG, we do a far jump here, to set CS to 0xa000.
+ */
+
+.code16
+jumptable:
+        /* core 3 */
+        ljmp $0xa000, $SMM_HANDLER_OFFSET
+.align 1024, 0x00
+        /* core 2 */
+        ljmp $0xa000, $SMM_HANDLER_OFFSET
+.align 1024, 0x00
+        /* core 1 */
+        ljmp $0xa000, $SMM_HANDLER_OFFSET
+.align 1024, 0x00
+        /* core 0 */
+        ljmp $0xa000, $SMM_HANDLER_OFFSET
+.align 1024, 0x00
+
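The entry stub above derives everything from the LAPIC ID read at 0xfee00020: the per-core entry point sits lapicid * 0x400 below 0xa8000, and the (0x400 - 0x10)-byte stack for that core is carved out right above its own entry point, with 0x10 bytes reserved for the far jump itself. The same address arithmetic rendered in C, as a hedged sketch (the struct and function names are invented for illustration):

    /* Per-core addresses as computed by the smmhandler.S stub (sketch). */
    #define SMM_STACK_SIZE (0x400 - 0x10)

    struct smm_core_layout {
            unsigned long entry;        /* location of this core's jump table entry */
            unsigned long stack_bottom; /* lowest address of the cleared stack area */
            unsigned long stack_top;    /* initial %esp loaded by the stub          */
    };

    static struct smm_core_layout smm_layout(unsigned int lapicid)
    {
            unsigned long offset = (unsigned long)lapicid << 10; /* lapicid * 0x400 */
            struct smm_core_layout l;

            l.entry        = 0xa8000 - offset;
            l.stack_bottom = 0xa8010 - offset;                /* 0x10 bytes spared for the ljmp */
            l.stack_top    = l.stack_bottom + SMM_STACK_SIZE; /* 0xa8400 for core 0 */
            return l;
    }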
diff --git a/src/cpu/x86/smm/smmrelocate.S b/src/cpu/x86/smm/smmrelocate.S
new file mode 100644
index 0000000000..2a7bfc23c7
--- /dev/null
+++ b/src/cpu/x86/smm/smmrelocate.S
@@ -0,0 +1,168 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2008 coresystems GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include
+#include "../../../../src/northbridge/intel/i945/ich7.h"
+
+#undef DEBUG_SMM_RELOCATION
+//#define DEBUG_SMM_RELOCATION
+
+#define LAPIC_ID 0xfee00020
+
+.global smm_relocation_start
+.global smm_relocation_end
+
+/* initially SMM is some sort of real mode. */
+.code16
+
+/**
+ * This trampoline code relocates SMBASE to 0xa0000 - ( lapicid * 0x400 )
+ *
+ * Why 0x400? It is a safe value to cover the save state area per CPU. On
+ * current AMD CPUs this area is _documented_ to be 0x200 bytes. On Intel
+ * Core 2 CPUs the _documented_ parts of the save state area is 48 bytes
+ * bigger, effectively sizing our data structures 0x300 bytes.
+ *
+ * LAPICID     SMBASE          SMM Entry       SAVE STATE
+ *    0        0xa0000         0xa8000         0xafd00
+ *    1        0x9fc00         0xa7c00         0xaf900
+ *    2        0x9f800         0xa7800         0xaf500
+ *    3        0x9f400         0xa7400         0xaf100
+ *    4        0x9f000         0xa7000         0xaed00
+ *    5        0x9ec00         0xa6c00         0xae900
+ *    6        0x9e800         0xa6800         0xae500
+ *    7        0x9e400         0xa6400         0xae100
+ *    8        0x9e000         0xa6000         0xadd00
+ *    9        0x9dc00         0xa5c00         0xad900
+ *   10        0x9d800         0xa5800         0xad500
+ *   11        0x9d400         0xa5400         0xad100
+ *   12        0x9d000         0xa5000         0xacd00
+ *   13        0x9cc00         0xa4c00         0xac900
+ *   14        0x9c800         0xa4800         0xac500
+ *   15        0x9c400         0xa4400         0xac100
+ *    .           .               .               .
+ *    .           .               .               .
+ *    .           .               .               .
+ *   31        0x98400         0xa0400         0xa8100
+ *
+ * With 32 cores, the SMM handler would need to fit between
+ * 0xa0000-0xa0400 and the stub plus stack would need to go
+ * at 0xa8000-0xa8100 (example for core 0). That is not enough.
+ *
+ * This means we're basically limited to 16 cpu cores before
+ * we need to use the TSEG/HSEG for the actual SMM handler plus stack.
+ * When we exceed 32 cores, we also need to put SMBASE to TSEG/HSEG.
+ *
+ * If we figure out the documented values above are safe to use,
+ * we could pack the structure above even more, so we could use the
+ * scheme to pack save state areas for 63 AMD CPUs or 58 Intel CPUs
+ * in the ASEG.
+ *
+ * Note: Some versions of Pentium M need their SMBASE aligned to 32k.
+ * On those the above only works for up to 2 cores. But for now we only
+ * care fore Core (2) Duo/Solo
+ *
+ */
+
+smm_relocation_start:
+        /* Check revision to see if AMD64 style SMM_BASE
+         *   Intel Core Solo/Duo:  0x30007
+         *   Intel Core2 Solo/Duo: 0x30100
+         *   AMD64:                0x3XX64
+         * This check does not make much sense, unless someone ports
+         * SMI handling to AMD64 CPUs.
+         */
+
+        mov $0x38000 + 0x7efc, %ebx
+        addr32 mov (%ebx), %al
+        cmp $0x64, %al
+        je 1f
+
+        mov $0x38000 + 0x7ef8, %ebx
+        jmp smm_relocate
+1:
+        mov $0x38000 + 0x7f00, %ebx
+
+smm_relocate:
+        /* Get this CPU's LAPIC ID */
+        movl $LAPIC_ID, %esi
+        addr32 movl (%esi), %ecx
+        shr $24, %ecx
+
+        /* calculate offset by multiplying the
+         * apic ID by 1024 (0x400)
+         */
+        movl %ecx, %edx
+        shl $10, %edx
+
+        movl $0xa0000, %eax
+        subl %edx, %eax /* subtract offset, see above */
+
+        addr32 movl %eax, (%ebx)
+
+
+        /* The next section of code is hardware specific */
+
+        /* Clear SMI status */
+        movw $(DEFAULT_PMBASE + 0x34), %dx
+        inw %dx, %ax
+        outw %ax, %dx
+
+        /* Clear PM1 status */
+        movw $(DEFAULT_PMBASE + 0x00), %dx
+        inw %dx, %ax
+        outw %ax, %dx
+
+        /* Set EOS bit so other SMIs can occur */
+        movw $(DEFAULT_PMBASE + 0x30), %dx
+        inl %dx, %eax
+        orl $(1 << 1), %eax
+        outl %eax, %dx
+
+        /* End of hardware specific section. */
+#ifdef DEBUG_SMM_RELOCATION
+        /* print [SMM-x] so we can determine if CPUx went to SMM */
+        movw $TTYS0_BASE, %dx
+        mov $'[', %al
+        outb %al, %dx
+        mov $'S', %al
+        outb %al, %dx
+        mov $'M', %al
+        outb %al, %dx
+        outb %al, %dx
+        movb $'-', %al
+        outb %al, %dx
+        /* calculate ascii of cpu number. More than 9 cores? -> FIXME */
+        movb %cl, %al
+        addb $'0', %al
+        outb %al, %dx
+        mov $']', %al
+        outb %al, %dx
+        mov $'\r', %al
+        outb %al, %dx
+        mov $'\n', %al
+        outb %al, %dx
+#endif
+
+        /* That's it. return */
+        rsm
+smm_relocation_end:
+
--
cgit v1.2.3
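The whole relocation in smmrelocate.S boils down to one formula, which also generates the table in its comment block: SMBASE = 0xa0000 - lapicid * 0x400, the CPU then enters SMM at SMBASE + 0x8000, and the SAVE STATE column corresponds to SMBASE + 0xfd00. A small stand-alone sketch that reproduces the first table rows (illustrative, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned int lapicid;

            for (lapicid = 0; lapicid < 4; lapicid++) {
                    unsigned long smbase = 0xa0000 - lapicid * 0x400;
                    printf("%2u  0x%05lx  0x%05lx  0x%05lx\n", lapicid,
                           smbase,           /* SMBASE                  */
                           smbase + 0x8000,  /* SMM entry point         */
                           smbase + 0xfd00); /* SAVE STATE column above */
            }
            return 0;
    }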