author     Vladimir Serbinenko <phcoder@gmail.com>    2015-05-28 21:09:31 +0200
committer  Vladimir Serbinenko <phcoder@gmail.com>    2015-05-28 22:07:58 +0200
commit     44cbe10f592965d7611d8624f94f2952d42eba35 (patch)
tree       4da983e52c73cc30653c18946fe6afdd014cb0f7 /src/cpu
parent     beb45020ace2b2d3f6cf4b88f9e7218c33a6225b (diff)
download   coreboot-44cbe10f592965d7611d8624f94f2952d42eba35.tar.xz
smm: Merge configs SMM_MODULES and SMM_TSEG

SMM_TSEG now implies SMM_MODULES, and SMM_MODULES can't be used without
SMM_TSEG. Remove some newly dead code while at it.

Change-Id: I2e1818245170b1e0abbd853bedf856cec83b92f2
Signed-off-by: Vladimir Serbinenko <phcoder@gmail.com>
Reviewed-on: http://review.coreboot.org/10355
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
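For illustration, the net effect on a TSEG-using CPU is that its Kconfig drops the
second select; a rough before/after sketch, taking haswell as the example from the
hunks below:

    Before (src/cpu/intel/haswell/Kconfig):
            select SMM_TSEG
            select SMM_MODULES

    After:
            select SMM_TSEG    # SMM_TSEG itself now selects RELOCATABLE_MODULES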
Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/intel/fsp_model_206ax/Kconfig  |   1
-rw-r--r--  src/cpu/intel/haswell/Kconfig          |   1
-rw-r--r--  src/cpu/intel/model_2065x/Kconfig      |   1
-rw-r--r--  src/cpu/intel/model_206ax/Kconfig      |   1
-rw-r--r--  src/cpu/x86/Kconfig                    |  11
-rw-r--r--  src/cpu/x86/smm/Makefile.inc           |  14
-rw-r--r--  src/cpu/x86/smm/smihandler.c           |  11
-rw-r--r--  src/cpu/x86/smm/smmhandler_tseg.S      | 318
8 files changed, 4 insertions, 354 deletions
diff --git a/src/cpu/intel/fsp_model_206ax/Kconfig b/src/cpu/intel/fsp_model_206ax/Kconfig
index b1a5e0910f..3280f77d8e 100644
--- a/src/cpu/intel/fsp_model_206ax/Kconfig
+++ b/src/cpu/intel/fsp_model_206ax/Kconfig
@@ -37,7 +37,6 @@ config CPU_SPECIFIC_OPTIONS
select SSE2
select UDELAY_LAPIC
select SMM_TSEG
- select SMM_MODULES
select SUPPORT_CPU_UCODE_IN_CBFS if HAVE_FSP_BIN
select PARALLEL_CPU_INIT
select TSC_SYNC_MFENCE
diff --git a/src/cpu/intel/haswell/Kconfig b/src/cpu/intel/haswell/Kconfig
index f2848078f3..779f1d6bab 100644
--- a/src/cpu/intel/haswell/Kconfig
+++ b/src/cpu/intel/haswell/Kconfig
@@ -19,7 +19,6 @@ config CPU_SPECIFIC_OPTIONS
select UDELAY_TSC
select TSC_CONSTANT_RATE
select SMM_TSEG
- select SMM_MODULES
select RELOCATABLE_MODULES
select RELOCATABLE_RAMSTAGE
select SUPPORT_CPU_UCODE_IN_CBFS
diff --git a/src/cpu/intel/model_2065x/Kconfig b/src/cpu/intel/model_2065x/Kconfig
index 946825b24a..4a85f09c0d 100644
--- a/src/cpu/intel/model_2065x/Kconfig
+++ b/src/cpu/intel/model_2065x/Kconfig
@@ -15,7 +15,6 @@ config CPU_SPECIFIC_OPTIONS
select UDELAY_TSC
select TSC_CONSTANT_RATE
select SMM_TSEG
- select SMM_MODULES
select SUPPORT_CPU_UCODE_IN_CBFS
select PARALLEL_CPU_INIT
#select AP_IN_SIPI_WAIT
diff --git a/src/cpu/intel/model_206ax/Kconfig b/src/cpu/intel/model_206ax/Kconfig
index 1ecc9116e6..490849bee7 100644
--- a/src/cpu/intel/model_206ax/Kconfig
+++ b/src/cpu/intel/model_206ax/Kconfig
@@ -16,7 +16,6 @@ config CPU_SPECIFIC_OPTIONS
select SSE2
select UDELAY_LAPIC
select SMM_TSEG
- select SMM_MODULES
select SUPPORT_CPU_UCODE_IN_CBFS
select PARALLEL_CPU_INIT
#select AP_IN_SIPI_WAIT
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index 21a0fac79c..9e5f6155fe 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -79,21 +79,12 @@ config LOGICAL_CPUS
config SMM_TSEG
bool
default n
-
-config SMM_MODULES
- bool
- default n
- depends on HAVE_SMI_HANDLER
select RELOCATABLE_MODULES
- help
- If SMM_MODULES is selected then SMM handlers are built as modules.
- A SMM stub along with a SMM loader/relocator. All the handlers are
- written in C with stub being the only assembly.
config SMM_MODULE_HEAP_SIZE
hex
default 0x4000
- depends on SMM_MODULES
+ depends on SMM_TSEG
help
This option determines the size of the heap within the SMM handler
modules.
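Reconstructed from the hunk above, the merged option in src/cpu/x86/Kconfig should
now read roughly as follows (lines not visible in the hunk are omitted):

    config SMM_TSEG
            bool
            default n
            select RELOCATABLE_MODULES

    config SMM_MODULE_HEAP_SIZE
            hex
            default 0x4000
            depends on SMM_TSEG
            help
              This option determines the size of the heap within the SMM handler
              modules.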
diff --git a/src/cpu/x86/smm/Makefile.inc b/src/cpu/x86/smm/Makefile.inc
index 9f8f1d2760..60b0388386 100644
--- a/src/cpu/x86/smm/Makefile.inc
+++ b/src/cpu/x86/smm/Makefile.inc
@@ -34,7 +34,7 @@ smm-c-deps:=$$(OPTION_TABLE_H)
$(obj)/cpu/x86/smm/smm.o: $$(smm-objs) $(COMPILER_RT_smm)
$(LD_smm) -nostdlib -r -o $@ $(COMPILER_RT_FLAGS_smm) --whole-archive --start-group $(smm-objs) --no-whole-archive $(COMPILER_RT_smm) --end-group
-ifeq ($(CONFIG_SMM_MODULES),y)
+ifeq ($(CONFIG_SMM_TSEG),y)
smmstub-y += smm_stub.S
@@ -79,7 +79,7 @@ else
cd $(dir $@); $(OBJCOPY_smm) -I binary $(notdir $<) -O elf64-x86_64 -B x86_64 $(notdir $@)
endif
-else # CONFIG_SMM_MODULES
+else # CONFIG_SMM_TSEG
$(obj)/cpu/x86/smm/smm_wrap: $(obj)/cpu/x86/smm/smm.o $(src)/cpu/x86/smm/$(SMM_LDSCRIPT)
$(LD_smm) $(SMM_LDFLAGS) -nostdlib -nostartfiles --gc-sections -static -o $(obj)/cpu/x86/smm/smm.elf -T $(src)/cpu/x86/smm/$(SMM_LDSCRIPT) $(obj)/cpu/x86/smm/smm.o
@@ -101,19 +101,11 @@ ifeq ($(CONFIG_HAVE_SMI_HANDLER),y)
ramstage-srcs += $(obj)/cpu/x86/smm/smm_wrap.manual
endif
-# Use TSEG specific entry point and linker script
-ifeq ($(CONFIG_SMM_TSEG),y)
-smm-y += smmhandler_tseg.S
-smm-c-ccopts += -fpic
-SMM_LDFLAGS := -pie
-SMM_LDSCRIPT := smm_tseg.ld
-else
smm-y += smmhandler.S
SMM_LDFLAGS :=
SMM_LDSCRIPT := smm.ld
-endif
smm-y += smihandler.c
-endif # CONFIG_SMM_MODULES
+endif # CONFIG_SMM_TSEG
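Pieced together from the three hunks above, the resulting conditional structure of
src/cpu/x86/smm/Makefile.inc is roughly the following sketch; the build rules inside
each branch are elided here:

    ifeq ($(CONFIG_SMM_TSEG),y)
    smmstub-y += smm_stub.S
    # ... module/stub build and objcopy rules ...
    else # CONFIG_SMM_TSEG
    # legacy ASEG handler, linked with smm.ld
    # ... smm_wrap link rules ...
    smm-y += smmhandler.S
    SMM_LDFLAGS :=
    SMM_LDSCRIPT := smm.ld
    smm-y += smihandler.c
    endif # CONFIG_SMM_TSEG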
diff --git a/src/cpu/x86/smm/smihandler.c b/src/cpu/x86/smm/smihandler.c
index 2387f6e8b7..9c282c3d56 100644
--- a/src/cpu/x86/smm/smihandler.c
+++ b/src/cpu/x86/smm/smihandler.c
@@ -29,7 +29,6 @@
static int do_driver_init = 1;
-#if !CONFIG_SMM_TSEG /* TSEG handler locks in assembly */
typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;
/* SMI multiprocessing semaphore */
@@ -61,7 +60,6 @@ void smi_release_lock(void)
: "eax"
);
}
-#endif
#define LAPIC_ID 0xfee00020
static inline __attribute__((always_inline)) unsigned long nodeid(void)
@@ -129,12 +127,6 @@ void smi_handler(u32 smm_revision)
smm_state_save_area_t state_save;
u32 smm_base = 0xa0000; /* ASEG */
-#if CONFIG_SMM_TSEG
- /* Update global variable TSEG base */
- if (!smi_get_tseg_base())
- return;
- smm_base = smi_get_tseg_base();
-#else
/* Are we ok to execute the handler? */
if (!smi_obtain_lock()) {
/* For security reasons we don't release the other CPUs
@@ -147,7 +139,6 @@ void smi_handler(u32 smm_revision)
}
return;
}
-#endif
smi_backup_pci_address();
@@ -204,9 +195,7 @@ void smi_handler(u32 smm_revision)
smi_restore_pci_address();
-#if !CONFIG_SMM_TSEG
smi_release_lock();
-#endif
/* De-assert SMI# signal to allow another SMI */
smi_set_eos();
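With the TSEG special case removed, every SMI entry in this legacy handler now goes
through the xchg-based semaphore. smi_obtain_lock() itself is not visible in the hunk;
as a hedged sketch, its shape mirrors the smi_release_lock() shown above, spinning on
the smi_handler_status semaphore declared with the typedef earlier in the file:

    /* Sketch only: reconstructed to mirror smi_release_lock(); the actual
     * definition lives earlier in smihandler.c and is not part of this hunk. */
    static int smi_obtain_lock(void)
    {
            u8 ret = SMI_LOCKED;

            asm volatile (
                    "movb %2, %%al\n"       /* load SMI_LOCKED into %al */
                    "xchgb %%al, %1\n"      /* atomically swap with the semaphore */
                    "movb %%al, %0\n"       /* %al now holds the previous value */
                    : "=g" (ret), "=m" (smi_handler_status)
                    : "g" (SMI_LOCKED)
                    : "eax"
            );

            /* Lock acquired only if the semaphore was previously unlocked. */
            return (ret == SMI_UNLOCKED);
    }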
diff --git a/src/cpu/x86/smm/smmhandler_tseg.S b/src/cpu/x86/smm/smmhandler_tseg.S
deleted file mode 100644
index 90dd932756..0000000000
--- a/src/cpu/x86/smm/smmhandler_tseg.S
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright (C) 2008 coresystems GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of
- * the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc.
- */
-
-/*
- * +--------------------------------+
- * | SMM Handler C Code |
- * +--------------------------------+ 0x14000
- * | SMM Handler Heap |
- * +--------------------------------+ 0x10000
- * | Save State Map Node 0 |
- * | Save State Map Node 1 |
- * | Save State Map Node 2 |
- * | Save State Map Node 3 |
- * | ... |
- * +--------------------------------+ 0xf000
- * | |
- * | |
- * | EARLY DATA (lock, vectors) |
- * +--------------------------------+ 0x8400
- * | SMM Entry Node 0 (+ stack) |
- * +--------------------------------+ 0x8000
- * | SMM Entry Node 1 (+ stack) |
- * | SMM Entry Node 2 (+ stack) |
- * | SMM Entry Node 3 (+ stack) |
- * | ... |
- * +--------------------------------+ 0x7400
- * | |
- * | SMM Handler Assembly Stub |
- * | |
- * +--------------------------------+ TSEG
- *
- */
-
-#define LAPIC_ID 0xfee00020
-#define SMM_STACK_SIZE (0x400 - 0x10)
-
-/* Values for the xchg lock */
-#define SMI_LOCKED 0
-#define SMI_UNLOCKED 1
-
-#define __PRE_RAM__
-#if CONFIG_NORTHBRIDGE_INTEL_SANDYBRIDGE || CONFIG_NORTHBRIDGE_INTEL_IVYBRIDGE || CONFIG_NORTHBRIDGE_INTEL_IVYBRIDGE_NATIVE || CONFIG_NORTHBRIDGE_INTEL_SANDYBRIDGE_NATIVE
-#include <northbridge/intel/sandybridge/sandybridge.h>
-#define TSEG_BAR (DEFAULT_PCIEXBAR | TSEG)
-#elif CONFIG_NORTHBRIDGE_INTEL_NEHALEM
-#include <northbridge/intel/nehalem/nehalem.h>
-#define TSEG_BAR (DEFAULT_PCIEXBAR | TSEG)
-#elif CONFIG_NORTHBRIDGE_INTEL_HASWELL
-#include <northbridge/intel/haswell/haswell.h>
-#define TSEG_BAR (DEFAULT_PCIEXBAR | TSEG)
-#else
-#if CONFIG_NORTHBRIDGE_INTEL_FSP_SANDYBRIDGE || CONFIG_NORTHBRIDGE_INTEL_FSP_IVYBRIDGE
-#include <northbridge/intel/fsp_sandybridge/northbridge.h>
-#define TSEG_BAR (DEFAULT_PCIEXBAR | TSEG)
-#else
-#error "Northbridge must define TSEG_BAR."
-#endif
-#endif
-
-
-
-/* initially SMM is some sort of real mode. Let gcc know
- * how to treat the SMM handler stub
- */
-
-.section ".handler", "a", @progbits
-
-.code16
-
-/**
- * SMM code to enable protected mode and jump to the
- * C-written function void smi_handler(u32 smm_revision)
- *
- * All the bad magic is not all that bad after all.
- */
-.global smm_handler_start
-smm_handler_start:
- movl $(TSEG_BAR), %eax /* Get TSEG base from PCIE */
- addr32 movl (%eax), %edx /* Save TSEG_BAR in %edx */
- andl $~1, %edx /* Remove lock bit */
-
- /* Obtain lock */
- movl %edx, %ebx
- addl $(smm_lock), %ebx
- movw $SMI_LOCKED, %ax
- addr32 xchg %ax, (%ebx)
- cmpw $SMI_UNLOCKED, %ax
-
- /* Proceed if we got the lock */
- je smm_check_prot_vector
-
- /* If we did not get the lock, wait for release */
-wait_for_unlock:
- pause
- addr32 movw (%ebx), %ax
- cmpw $SMI_LOCKED, %ax
- je wait_for_unlock
- rsm
-
-smm_check_prot_vector:
- /* See if we need to adjust protected vector */
- movl %edx, %eax
- addl $(smm_prot_vector), %eax
- addr32 movl (%eax), %ebx
- cmpl $(smm_prot_start), %ebx
- jne smm_check_gdt_vector
-
- /* Adjust vector with TSEG offset */
- addl %edx, %ebx
- addr32 movl %ebx, (%eax)
-
-smm_check_gdt_vector:
- /* See if we need to adjust GDT vector */
- movl %edx, %eax
- addl $(smm_gdt_vector + 2), %eax
- addr32 movl (%eax), %ebx
- cmpl $(smm_gdt - smm_handler_start), %ebx
- jne smm_load_gdt
-
- /* Adjust vector with TSEG offset */
- addl %edx, %ebx
- addr32 movl %ebx, (%eax)
-
-smm_load_gdt:
- movl $(smm_gdt_vector), %ebx
- addl %edx, %ebx /* TSEG base in %edx */
- data32 lgdt (%ebx)
-
- movl %cr0, %eax
- andl $0x1FFAFFD1, %eax /* CD,NW,PG,AM,WP,NE,TS,EM,MP = 0 */
- orl $0x1, %eax /* PE = 1 */
- movl %eax, %cr0
-
- /* Enable protected mode */
- movl $(smm_prot_vector), %eax
- addl %edx, %eax
- data32 ljmp *(%eax)
-
-.code32
-smm_prot_start:
- /* Use flat data segment */
- movw $0x10, %ax
- movw %ax, %ds
- movw %ax, %es
- movw %ax, %ss
- movw %ax, %fs
- movw %ax, %gs
-
- /* Get this CPU's LAPIC ID */
- movl $LAPIC_ID, %esi
- movl (%esi), %ecx
- shr $24, %ecx
-
- /* calculate stack offset by multiplying the APIC ID
- * by 1024 (0x400), and save that offset in ebp.
- */
- shl $10, %ecx
- movl %ecx, %ebp
-
- /* We put the stack for each core right above
- * its SMM entry point. Core 0 starts at SMM_BASE + 0x8000,
- * we spare 0x10 bytes for the jump to be sure.
- */
- movl $0x8010, %eax /* core 0 address */
- addl %edx, %eax /* addjust for TSEG */
- subl %ecx, %eax /* subtract offset, see above */
- movl %eax, %ebx /* Save bottom of stack in ebx */
-
- /* clear stack */
- cld
- movl %eax, %edi
- movl $(SMM_STACK_SIZE >> 2), %ecx
- xorl %eax, %eax
- rep stosl
-
- /* set new stack */
- addl $SMM_STACK_SIZE, %ebx
- movl %ebx, %esp
-
- /* Get SMM revision */
- movl $0xfefc, %ebx /* core 0 address */
- addl %edx, %ebx /* addjust for TSEG */
- subl %ebp, %ebx /* subtract core X offset */
- movl (%ebx), %eax
- pushl %eax
-
- /* Call 32bit C handler */
- call smi_handler
-
- /* Release lock */
- movl $(TSEG_BAR), %eax /* Get TSEG base from PCIE */
- movl (%eax), %ebx /* Save TSEG_BAR in %ebx */
- andl $~1, %ebx /* Remove lock bit */
- addl $(smm_lock), %ebx
- movw $SMI_UNLOCKED, %ax
- xchg %ax, (%ebx)
-
- /* To return, just do rsm. It will "clean up" protected mode */
- rsm
-
-smm_gdt:
- /* The first GDT entry can not be used. Keep it zero */
- .long 0x00000000, 0x00000000
-
- /* gdt selector 0x08, flat code segment */
- .word 0xffff, 0x0000
- .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */
-
- /* gdt selector 0x10, flat data segment */
- .word 0xffff, 0x0000
- .byte 0x00, 0x93, 0xcf, 0x00
-
-smm_gdt_end:
-
-.section ".earlydata", "a", @progbits
-
-.code16
-
-.align 4, 0xff
-
-smm_lock:
- .word SMI_UNLOCKED
-
-.align 4, 0xff
-
-smm_prot_vector:
- .long smm_prot_start
- .short 8
-
-.align 4, 0xff
-
-smm_gdt_vector:
- .word smm_gdt_end - smm_gdt - 1
- .long smm_gdt - smm_handler_start
-
-.section ".jumptable", "a", @progbits
-
-/* This is the SMM jump table. All cores use the same SMM handler
- * for simplicity. But SMM Entry needs to be different due to the
- * save state area. The jump table makes sure all CPUs jump into the
- * real handler on SMM entry.
- */
-
-/* This code currently supports up to 16 CPU cores. If more than 16 CPU cores
- * shall be used, below table has to be updated, as well as smm_tseg.ld
- */
-
-/* When using TSEG do a relative jump and fix up the CS later since we
- * do not know what our TSEG base is yet.
- */
-
-.code16
-jumptable:
- /* core 15 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 14 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 13 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 12 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 11 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 10 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 9 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 8 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 7 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 6 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 5 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 4 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 3 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 2 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 1 */
- jmp smm_handler_start
-.align 1024, 0x00
- /* core 0 */
- jmp smm_handler_start
-.align 1024, 0x00