author     Aaron Durbin <adurbin@chromium.org>    2014-02-13 10:30:42 -0600
committer  Aaron Durbin <adurbin@google.com>      2014-02-16 20:42:41 +0100
commit     6a360048a1a4f8eaebbf9c4ec75fe4a9543421b2 (patch)
tree       7a8aac4c6a25c6a5a69d174a51faf8afc9824fdc /src/cpu/intel
parent     b4b9eb399ef2f5539afce6e43b49d4cf1613ae9e (diff)
download   coreboot-6a360048a1a4f8eaebbf9c4ec75fe4a9543421b2.tar.xz
haswell: backup the default SMM region on resume
Haswell CPUs need to use the default SMM region for relocating to the desired SMM location. Back up that memory on resume instead of reserving the default region. This makes the haswell support more forgiving to software which expects PC-compatible memory layouts.

Change-Id: I9ae74f1f14fe07ba9a0027260d6e65faa6ea2aed
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: http://review.coreboot.org/5217
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin@google.com>
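For context, the change relies on a backup/restore pair, backup_default_smm_area() and restore_default_smm_area(), enabled by the BACKUP_DEFAULT_SMM_REGION option selected below. The following is only a minimal sketch of what such helpers could look like, not coreboot's actual implementation: the heap buffer and the SMM_DEFAULT_BASE/SMM_DEFAULT_SIZE values shown (the legacy region at 0x30000) are assumptions made for illustration.

    /*
     * Illustrative sketch only -- not the coreboot implementation.
     * Assumes the default (legacy) SMM region starts at 0x30000 and is
     * 64 KiB, and that a simple heap buffer is an acceptable place to
     * stash the backup copy.
     */
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define SMM_DEFAULT_BASE 0x30000
    #define SMM_DEFAULT_SIZE 0x10000

    /* Copy the default SMM region to a scratch buffer before relocation. */
    void *backup_default_smm_area(void)
    {
            void *save_area = malloc(SMM_DEFAULT_SIZE);

            if (save_area != NULL)
                    memcpy(save_area, (void *)(uintptr_t)SMM_DEFAULT_BASE,
                           SMM_DEFAULT_SIZE);

            return save_area;
    }

    /* Put the saved contents back once SMM relocation is complete. */
    void restore_default_smm_area(void *save_area)
    {
            if (save_area == NULL)
                    return;

            memcpy((void *)(uintptr_t)SMM_DEFAULT_BASE, save_area,
                   SMM_DEFAULT_SIZE);
            free(save_area);
    }

In bsp_init_and_start_aps() the pattern is then straightforward, as the hunks below show: save the area before SMM relocation is set up, perform relocation, and restore the area afterwards.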
Diffstat (limited to 'src/cpu/intel')
-rw-r--r--  src/cpu/intel/haswell/Kconfig         1
-rw-r--r--  src/cpu/intel/haswell/haswell_init.c  8
2 files changed, 9 insertions(+), 0 deletions(-)
diff --git a/src/cpu/intel/haswell/Kconfig b/src/cpu/intel/haswell/Kconfig
index 25df4d15ce..b13d6861bd 100644
--- a/src/cpu/intel/haswell/Kconfig
+++ b/src/cpu/intel/haswell/Kconfig
@@ -6,6 +6,7 @@ if CPU_INTEL_HASWELL
config CPU_SPECIFIC_OPTIONS
def_bool y
+ select BACKUP_DEFAULT_SMM_REGION
select SMP
select SSE2
select UDELAY_TSC
diff --git a/src/cpu/intel/haswell/haswell_init.c b/src/cpu/intel/haswell/haswell_init.c
index 7c9beed6d8..dc6012bced 100644
--- a/src/cpu/intel/haswell/haswell_init.c
+++ b/src/cpu/intel/haswell/haswell_init.c
@@ -33,6 +33,7 @@
#include <cpu/intel/turbo.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/name.h>
+#include <cpu/x86/smm.h>
#include <delay.h>
#include <pc80/mc146818rtc.h>
#include <northbridge/intel/haswell/haswell.h>
@@ -753,6 +754,7 @@ void bsp_init_and_start_aps(struct bus *cpu_bus)
int max_cpus;
int num_aps;
const void *microcode_patch;
+ void *smm_save_area;
/* Perform any necessary BSP initialization before APs are brought up.
* This call also allows the BSP to prepare for any secondary effects
@@ -761,6 +763,9 @@ void bsp_init_and_start_aps(struct bus *cpu_bus)
microcode_patch = intel_microcode_find();
+ /* Save default SMM area before relocation occurs. */
+ smm_save_area = backup_default_smm_area();
+
/* This needs to be called after the mtrr setup so the BSP mtrrs
* can be mirrored by the APs. */
if (setup_ap_init(cpu_bus, &max_cpus, microcode_patch)) {
@@ -782,6 +787,9 @@ void bsp_init_and_start_aps(struct bus *cpu_bus)
/* After SMM relocation a 2nd microcode load is required. */
intel_microcode_load_unlocked(microcode_patch);
+ /* Restore the default SMM region. */
+ restore_default_smm_area(smm_save_area);
+
/* Enable ROM caching if option was selected. */
x86_mtrr_enable_rom_caching();
}