author     Damien Zammit <damien@zamaudio.com>             2015-11-28 21:27:05 +1100
committer  Stefan Reinauer <stefan.reinauer@coreboot.org>  2015-12-02 00:38:45 +0100
commit     149c4c5d0191f1728a66ec986c3eae698cbf87cb
tree       919fd67a4480497e6b1cd0ea8a0b99fdc58706cb
parent     003d15cab43fe34f1916d6f3877f2a6f2b8f6e25
x86/smm: Initialize SMM on some CPUs one-by-one
We currently race in SMM init on the Atom 230 (and potentially other CPUs). At least on the 230, this leads to a hang on RSM, likely because both hyperthreads mess around with SMBASE and other SMM state variables in parallel, without coordination. The same behaviour occurs with the Atom D5xx.

Change it so that first the APs are spun up and sent to sleep, then the BSP initializes SMM, and then every CPU does so, one after another. Only do this when SERIALIZED_SMM_INITIALIZATION is set. Set the flag for Atom CPUs.

Change-Id: I1ae864e37546298ea222e81349c27cf774ed251f
Signed-off-by: Patrick Georgi <patrick@georgi-clan.de>
Signed-off-by: Damien Zammit <damien@zamaudio.com>
Reviewed-on: https://review.coreboot.org/6311
Tested-by: build bot (Jenkins)
Tested-by: BSI firmware lab <coreboot-labor@bsi.bund.de>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
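In outline, the serialized path this change adds to initialize_cpus() behaves as sketched below. This is a condensed illustration of the hunks that follow, not additional code: the function names are the ones used in the patch, and the Kconfig guards and CONFIG_SMP conditionals are omitted for brevity.

/* Sketch: SMM init order with SERIALIZED_SMM_INITIALIZATION enabled. */
void initialize_cpus(struct bus *cpu_bus)
{
	struct cpu_info *info = cpu_info();

	/* 1. Bring up all APs; each runs cpu_initialize() and then sleeps. */
	start_other_cpus(cpu_bus, info->cpu);
	wait_other_cpus_stop(cpu_bus);

	/* 2. With every AP asleep, set up SMM; per the comment in the last
	 *    hunk, this queues a pending SMI for each CPU. */
	smm_init();

	/* 3. Restart the APs one at a time; each handles its SMI (SMM
	 *    relocation) alone and goes back to sleep before the next
	 *    one is started. */
	smm_other_cpus(cpu_bus, info->cpu);
}

When the option is not selected, the flow is unchanged: smm_init() still runs before the APs are started, as in the second-to-last hunk.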
 src/arch/x86/cpu.c                 |  3 +
 src/cpu/intel/model_106cx/Kconfig  |  1 +
 src/cpu/x86/Kconfig                | 11 +
 src/cpu/x86/lapic/lapic_cpu_init.c | 47 +-
 4 files changed, 61 insertions(+), 1 deletion(-)
diff --git a/src/arch/x86/cpu.c b/src/arch/x86/cpu.c
index ceed0770e2..52b56812d5 100644
--- a/src/arch/x86/cpu.c
+++ b/src/arch/x86/cpu.c
@@ -234,6 +234,9 @@ void cpu_initialize(unsigned int index)
 		die("CPU: missing cpu device structure");
 	}
 
+	if (cpu->initialized)
+		return;
+
 	post_log_path(cpu);
 
 	/* Find what type of cpu we are dealing with */
diff --git a/src/cpu/intel/model_106cx/Kconfig b/src/cpu/intel/model_106cx/Kconfig
index 09acfd93f8..a005ba21da 100644
--- a/src/cpu/intel/model_106cx/Kconfig
+++ b/src/cpu/intel/model_106cx/Kconfig
@@ -11,6 +11,7 @@ config CPU_INTEL_MODEL_106CX
 	select AP_IN_SIPI_WAIT
 	select TSC_SYNC_MFENCE
 	select SUPPORT_CPU_UCODE_IN_CBFS
+	select SERIALIZED_SMM_INITIALIZATION
 
 if CPU_INTEL_MODEL_106CX
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index 131cbf24bb..94225a3807 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -96,6 +96,17 @@ config SMM_LAPIC_REMAP_MITIGATION
 	default y if NORTHBRIDGE_INTEL_NEHALEM
 	default n
 
+config SERIALIZED_SMM_INITIALIZATION
+	bool
+	default n
+	help
+	  On some CPUs, there is a race condition in SMM.
+	  This can occur when both hyperthreads change SMM state
+	  variables in parallel without coordination.
+	  Setting this option serializes the SMM initialization
+	  to avoid an ugly hang in the boot process at the cost
+	  of a slightly longer boot time.
+
 config X86_AMD_FIXED_MTRRS
 	bool
 	default n
diff --git a/src/cpu/x86/lapic/lapic_cpu_init.c b/src/cpu/x86/lapic/lapic_cpu_init.c
index ef150a50d7..bf63517384 100644
--- a/src/cpu/x86/lapic/lapic_cpu_init.c
+++ b/src/cpu/x86/lapic/lapic_cpu_init.c
@@ -458,6 +458,39 @@ static void start_other_cpus(struct bus *cpu_bus, struct device *bsp_cpu)
 }
 
+static void smm_other_cpus(struct bus *cpu_bus, device_t bsp_cpu)
+{
+	device_t cpu;
+	int pre_count = atomic_read(&active_cpus);
+
+	/* Loop through the cpus once to let them run through SMM relocator */
+
+	for(cpu = cpu_bus->children; cpu ; cpu = cpu->sibling) {
+		if (cpu->path.type != DEVICE_PATH_APIC) {
+			continue;
+		}
+
+		printk(BIOS_ERR, "considering CPU 0x%02x for SMM init\n",
+			cpu->path.apic.apic_id);
+
+		if (cpu == bsp_cpu)
+			continue;
+
+		if (!cpu->enabled) {
+			continue;
+		}
+
+		if (!start_cpu(cpu)) {
+			/* Record the error in cpu? */
+			printk(BIOS_ERR, "CPU 0x%02x would not start!\n",
+				cpu->path.apic.apic_id);
+		}
+
+		/* FIXME: endless loop */
+		while (atomic_read(&active_cpus) != pre_count) ;
+	}
+}
+
 static void wait_other_cpus_stop(struct bus *cpu_bus)
 {
 	struct device *cpu;
@@ -528,7 +561,8 @@ void initialize_cpus(struct bus *cpu_bus)
 #endif
 
 #if CONFIG_HAVE_SMI_HANDLER
-	smm_init();
+	if (!IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION))
+		smm_init();
 #endif
 
 #if CONFIG_SMP && CONFIG_MAX_CPUS > 1
@@ -547,4 +581,15 @@ void initialize_cpus(struct bus *cpu_bus)
 	/* Now wait the rest of the cpus stop*/
 	wait_other_cpus_stop(cpu_bus);
 #endif
+
+	if (IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION)) {
+		/* At this point, all APs are sleeping:
+		 * smm_init() will queue a pending SMI on all cpus
+		 * and smm_other_cpus() will start them one by one */
+		smm_init();
+#if CONFIG_SMP && CONFIG_MAX_CPUS > 1
+		last_cpu_index = 0;
+		smm_other_cpus(cpu_bus, info->cpu);
+#endif
+	}
 }
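The FIXME in smm_other_cpus() notes that the wait on active_cpus can spin forever if an AP never returns to sleep. As a rough sketch only (not part of this change), a bounded wait could report the failure instead of hanging; it assumes coreboot's udelay() from <delay.h> is available here, and pre_count and cpu are the variables from the loop above:

	/* Sketch: bound the wait for the AP to finish its SMM relocation run.
	 * The timeout value is arbitrary. */
	int timeout_us = 1000 * 1000;	/* give the AP up to one second */

	while (atomic_read(&active_cpus) != pre_count && timeout_us > 0) {
		udelay(10);
		timeout_us -= 10;
	}
	if (timeout_us <= 0)
		printk(BIOS_ERR, "CPU 0x%02x did not return from SMM init!\n",
			cpu->path.apic.apic_id);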