author    | Aaron Durbin <adurbin@chromium.org>      | 2013-01-14 14:54:41 -0600
committer | Ronald G. Minnich <rminnich@gmail.com>   | 2013-03-18 17:10:18 +0100
commit    | 7af20698f69bbf10c4f18aa4fcc35ae7cf8cb866 (patch)
tree      | 553435da18950a247e1061669f78d70453fe629d /src/cpu
parent    | 24614af9b85bc615b0d9af3f37fa393de039c9f8 (diff)
download  | coreboot-7af20698f69bbf10c4f18aa4fcc35ae7cf8cb866.tar.xz
haswell: enable caching before SMM initialization
The SMM handler resides in the TSEG region, which lies far above
CONFIG_RAM_TOP (the highest cacheable address before MTRRs are set
up). As a result, calling initialize_cpus() before performing MTRR
setup on the BSP causes the SMM handler to be copied using
uncacheable accesses.
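The fix, condensed from the diff below, is to invert that ordering on
the BSP: program MTRRs first, then let initialize_cpus() perform the
SMM setup through cacheable memory (the names come from the new
bsp_init_before_ap_bringup() and bsp_init_and_start_aps()):

    /* BSP: set up MTRRs so the copy into TSEG is cacheable. */
    x86_setup_fixed_mtrrs();
    x86_setup_var_mtrrs(cpuid_eax(0x80000008) & 0xff, 2);
    x86_mtrr_check();

    /* Only now bring up CPUs; the SMM handler copy done during
     * smm_init() goes through the cache. */
    initialize_cpus(cpu_bus);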
Improve the SMM handler setup path by performing MTRR setup on the
BSP before the call to initialize_cpus(). To do this, the
haswell_init() function was split into two paths: a BSP path and an
AP path.
Both paths call a common cpu_common_init() for the functionality they
share. The BSP path in haswell_init() then starts the APs using
intel_cores_init(). The AP path in haswell_init() loads microcode and
sets up MTRRs.
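In outline, the resulting dispatch in haswell_init() is (condensed
from the diff below):

    static void haswell_init(device_t cpu)
    {
        if (cpu->path.apic.apic_id == 0) {
            /* BSP: shared init, then start the other cores. */
            cpu_common_init(cpu);
            intel_cores_init(cpu);
        } else {
            /* AP: microcode load and cache/MTRR setup come first. */
            ap_init(cpu);
            cpu_common_init(cpu);
        }
    }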
This split will be leveraged in the future to bring up the APs in
parallel and to adhere to the Haswell MP initialization requirements.
Change-Id: Id8e17af149e68d708f3d4765e38b1c61f7ebb470
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: http://review.coreboot.org/2746
Tested-by: build bot (Jenkins)
Reviewed-by: Ronald G. Minnich <rminnich@gmail.com>
Diffstat (limited to 'src/cpu')
-rw-r--r-- | src/cpu/intel/haswell/haswell.h      |  2
-rw-r--r-- | src/cpu/intel/haswell/haswell_init.c | 72
2 files changed, 56 insertions, 18 deletions
diff --git a/src/cpu/intel/haswell/haswell.h b/src/cpu/intel/haswell/haswell.h
index 7ce868d30d..cb85078db4 100644
--- a/src/cpu/intel/haswell/haswell.h
+++ b/src/cpu/intel/haswell/haswell.h
@@ -108,6 +108,8 @@ void intel_cpu_haswell_finalize_smm(void);
 /* Configure power limits for turbo mode */
 void set_power_limits(u8 power_limit_1_time);
 int cpu_config_tdp_levels(void);
+struct bus;
+void bsp_init_and_start_aps(struct bus *cpu_bus);
 #endif
 
 #endif
diff --git a/src/cpu/intel/haswell/haswell_init.c b/src/cpu/intel/haswell/haswell_init.c
index 45c1a25bc0..9e62b31eb9 100644
--- a/src/cpu/intel/haswell/haswell_init.c
+++ b/src/cpu/intel/haswell/haswell_init.c
@@ -499,18 +499,9 @@ static void intel_cores_init(device_t cpu)
 	}
 }
 
-static void haswell_init(device_t cpu)
+static void bsp_init_before_ap_bringup(void)
 {
 	char processor_name[49];
-	struct cpuid_result cpuid_regs;
-
-	intel_update_microcode_from_cbfs();
-
-	/* Turn on caching if we haven't already */
-	x86_enable_cache();
-
-	/* Clear out pending MCEs */
-	configure_mca();
 
 	/* Print processor name */
 	fill_processor_name(processor_name);
@@ -523,19 +514,36 @@ static void haswell_init(device_t cpu)
 	set_ehci_debug(0);
 #endif
 
-	/* Setup MTRRs based on physical address size */
-	cpuid_regs = cpuid(0x80000008);
+	/* Setup MTRRs based on physical address size. */
 	x86_setup_fixed_mtrrs();
-	x86_setup_var_mtrrs(cpuid_regs.eax & 0xff, 2);
+	x86_setup_var_mtrrs(cpuid_eax(0x80000008) & 0xff, 2);
 	x86_mtrr_check();
 
-	/* Setup Page Attribute Tables (PAT) */
-	// TODO set up PAT
-
 #if CONFIG_USBDEBUG
 	set_ehci_debug(ehci_debug_addr);
 #endif
 
+	enable_lapic();
+}
+
+static void ap_init(device_t cpu)
+{
+	/* Microcode needs to be loaded before caching is enabled. */
+	intel_update_microcode_from_cbfs();
+
+	/* Turn on caching if we haven't already */
+	x86_enable_cache();
+	x86_setup_fixed_mtrrs();
+	x86_setup_var_mtrrs(cpuid_eax(0x80000008) & 0xff, 2);
+
+	enable_lapic();
+}
+
+static void cpu_common_init(device_t cpu)
+{
+	/* Clear out pending MCEs */
+	configure_mca();
+
 	/* Enable the local cpu apics */
 	enable_lapic_tpr();
 	setup_lapic();
@@ -560,9 +568,37 @@ static void haswell_init(device_t cpu)
 
 	/* Enable Turbo */
 	enable_turbo();
+}
+
+void bsp_init_and_start_aps(struct bus *cpu_bus)
+{
+	/* Perform any necessary BSP initialization before APs are brought up.
+	 * This call also allows the BSP to prepare for any secondary effects
+	 * from calling cpu_initialize() such as smm_init(). */
+	bsp_init_before_ap_bringup();
+
+	/*
+	 * This calls into the generic initialize_cpus() which attempts to
+	 * start APs on the APIC bus in the devicetree. No APs get started
+	 * because there is only the BSP and a placeholder (disabled) in the
+	 * devicetree. initialize_cpus() also does SMM initialization by way
+	 * of smm_init(). It will eventually call cpu_initialize(0) which calls
+	 * dev_ops->init(). For Haswell the dev_ops->init() starts up the APs
+	 * by way of intel_cores_init().
+	 */
+	initialize_cpus(cpu_bus);
+}
 
-	/* Start up extra cores */
-	intel_cores_init(cpu);
+static void haswell_init(device_t cpu)
+{
+	if (cpu->path.apic.apic_id == 0) {
+		cpu_common_init(cpu);
+		/* Start up extra cores */
+		intel_cores_init(cpu);
+	} else {
+		ap_init(cpu);
+		cpu_common_init(cpu);
+	}
 }
 
 static struct device_operations cpu_dev_ops = {
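For context, a platform's CPU-cluster init would hand its bus to the new
entry point rather than calling initialize_cpus() directly. A minimal
sketch, assuming a cpu_bus_init() hook and a device whose link_list points
at the APIC bus (both illustrative, not part of this change):

    #include <cpu/intel/haswell/haswell.h>  /* assumed include path */

    /* Hypothetical chipset-side CPU cluster init: delegate BSP setup
     * and AP bringup to the Haswell CPU code. */
    static void cpu_bus_init(device_t dev)
    {
        bsp_init_and_start_aps(dev->link_list);
    }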