author | Aaron Durbin <adurbin@chromium.org> | 2016-05-03 16:48:19 -0500
committer | Aaron Durbin <adurbin@chromium.org> | 2016-05-06 16:44:18 +0200
commit | 309b8571cf7ad3fae1930de1c2541f62c3d62655 (patch)
tree | a360fa02813de3ecb27c2272ffade2a5e5a1d5db /src/soc/intel/broadwell/smmrelocate.c
parent | e72b9d483fd807f94385933ca78e62b084e25536 (diff)
download | coreboot-309b8571cf7ad3fae1930de1c2541f62c3d62655.tar.xz
soc/intel/broadwell: convert to using common MP and SMM init
In order to reduce code duplication, use the common MP and SMM
initialization flow.
Change-Id: I74c81c5d18dff7a84bfedbe07f01e536c0f641fa
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: https://review.coreboot.org/14595
Tested-by: build bot (Jenkins)
Reviewed-by: Duncan Laurie <dlaurie@google.com>
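
For readers not familiar with the common flow this commit moves to: instead of broadwell building and installing its own SMM relocation module, the common MP/SMM initialization walks the CPUs, computes each CPU's staggered target SMBASE, and calls back into the SoC's smm_relocation_handler(). The sketch below is a hedged, standalone model of that division of labor in plain C, not coreboot code; common_smm_relocate_all(), SMRAM_BASE, SAVE_STATE_SIZE and NUM_CPUS are illustrative assumptions, while the callback signature mirrors the one introduced by this patch.

```c
#include <stdint.h>
#include <stdio.h>

/* Values chosen only for the sketch; the real numbers come from the SoC. */
#define SMRAM_BASE       0x7f000000u
#define SAVE_STATE_SIZE  0x400u   /* assumed em64t101 save state size */
#define NUM_CPUS         4

/* Per-SoC callback with the signature this patch introduces. */
static void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
				   uintptr_t staggered_smbase)
{
	/* The real handler rewrites the save state at curr_smbase and
	 * programs SMRR/EMRR; here we only print the staggered target. */
	printf("cpu %d: relocate SMBASE 0x%lx -> 0x%lx\n",
	       cpu, (unsigned long)curr_smbase,
	       (unsigned long)staggered_smbase);
}

/* Model of what the common MP/SMM init now does for every CPU:
 * stagger SMBASE downward by one save state size per CPU. */
static void common_smm_relocate_all(uintptr_t curr_smbase)
{
	for (int cpu = 0; cpu < NUM_CPUS; cpu++) {
		uintptr_t staggered = SMRAM_BASE - cpu * SAVE_STATE_SIZE;
		smm_relocation_handler(cpu, curr_smbase, staggered);
	}
}

int main(void)
{
	/* 0x30000 is the architectural default SMBASE after reset. */
	common_smm_relocate_all(0x30000);
	return 0;
}
```

Built with an ordinary host compiler this just prints the staggered targets; the point is that the per-CPU `smram_base - cpu * save_state_size` arithmetic, which the old broadwell code did by hand in update_save_state(), now lives in the common code.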
Diffstat (limited to 'src/soc/intel/broadwell/smmrelocate.c')
-rw-r--r-- | src/soc/intel/broadwell/smmrelocate.c | 149
1 file changed, 25 insertions(+), 124 deletions(-)
diff --git a/src/soc/intel/broadwell/smmrelocate.c b/src/soc/intel/broadwell/smmrelocate.c
index 18119f845d..0cc6399780 100644
--- a/src/soc/intel/broadwell/smmrelocate.c
+++ b/src/soc/intel/broadwell/smmrelocate.c
@@ -60,9 +60,9 @@ static inline void write_uncore_emrr(struct smm_relocation_params *relo_params)
 	wrmsr(UNCORE_EMRRphysMask_MSR, relo_params->uncore_emrr_mask);
 }
 
-static void update_save_state(int cpu,
-				struct smm_relocation_params *relo_params,
-				const struct smm_runtime *runtime)
+static void update_save_state(int cpu, uintptr_t curr_smbase,
+				uintptr_t staggered_smbase,
+				struct smm_relocation_params *relo_params)
 {
 	u32 smbase;
 	u32 iedbase;
@@ -70,7 +70,7 @@ static void update_save_state(int cpu,
 	/* The relocated handler runs with all CPUs concurrently. Therefore
 	 * stagger the entry points adjusting SMBASE downwards by save state
 	 * size * CPU num. */
-	smbase = relo_params->smram_base - cpu * runtime->save_state_size;
+	smbase = staggered_smbase;
 	iedbase = relo_params->ied_base;
 
 	printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
@@ -101,8 +101,8 @@ static void update_save_state(int cpu,
 	} else {
 		em64t101_smm_state_save_area_t *save_state;
 
-		save_state = (void *)(runtime->smbase + SMM_DEFAULT_SIZE -
-				runtime->save_state_size);
+		save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
+				sizeof(*save_state));
 
 		save_state->smbase = smbase;
 		save_state->iedbase = iedbase;
@@ -130,24 +130,11 @@ static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
 
 /* The relocation work is actually performed in SMM context, but the code
  * resides in the ramstage module. This occurs by trampolining from the default
  * SMRAM entry point to here. */
-static void asmlinkage cpu_smm_do_relocation(void *arg)
+void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
+				uintptr_t staggered_smbase)
 {
 	msr_t mtrr_cap;
-	struct smm_relocation_params *relo_params;
-	const struct smm_module_params *p;
-	const struct smm_runtime *runtime;
-	int cpu;
-
-	p = arg;
-	runtime = p->runtime;
-	relo_params = p->arg;
-	cpu = p->cpu;
-
-	if (cpu >= CONFIG_MAX_CPUS) {
-		printk(BIOS_CRIT,
-		       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
-		return;
-	}
+	struct smm_relocation_params *relo_params = &smm_reloc_params;
 
 	printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu);
@@ -176,7 +163,7 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
 	}
 
 	/* Make appropriate changes to the save state map. */
-	update_save_state(cpu, relo_params, runtime);
+	update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
 
 	/* Write EMRR and SMRR MSRs based on indicated support. */
 	mtrr_cap = rdmsr(MTRR_CAP_MSR);
@@ -259,49 +246,6 @@ static void fill_in_relocation_params(device_t dev,
 	params->uncore_emrr_mask.hi = (1 << (39 - 32)) - 1;
 }
 
-static void adjust_apic_id_map(struct smm_loader_params *smm_params)
-{
-	struct smm_runtime *runtime;
-	int i;
-
-	/* Adjust the APIC id map if HT is disabled. */
-	if (!ht_disabled)
-		return;
-
-	runtime = smm_params->runtime;
-
-	/* The APIC ids increment by 2 when HT is disabled. */
-	for (i = 0; i < CONFIG_MAX_CPUS; i++)
-		runtime->apic_id_to_cpu[i] = runtime->apic_id_to_cpu[i] * 2;
-}
-
-static int install_relocation_handler(int num_cpus,
-				struct smm_relocation_params *relo_params)
-{
-	/* The default SMM entry can happen in parallel or serially. If the
-	 * default SMM entry is done in parallel the BSP has already setup
-	 * the saving state to each CPU's MSRs. At least one save state size
-	 * is required for the initial SMM entry for the BSP to determine if
-	 * parallel SMM relocation is even feasible. Set the stack size to
-	 * the save state size, and call into the do_relocation handler. */
-	int save_state_size = sizeof(em64t101_smm_state_save_area_t);
-	struct smm_loader_params smm_params = {
-		.per_cpu_stack_size = save_state_size,
-		.num_concurrent_stacks = num_cpus,
-		.per_cpu_save_state_size = save_state_size,
-		.num_concurrent_save_states = 1,
-		.handler = (smm_handler_t)&cpu_smm_do_relocation,
-		.handler_arg = (void *)relo_params,
-	};
-
-	if (smm_setup_relocation_handler(&smm_params))
-		return -1;
-
-	adjust_apic_id_map(&smm_params);
-
-	return 0;
-}
-
 static void setup_ied_area(struct smm_relocation_params *params)
 {
 	char *ied_base;
@@ -321,35 +265,10 @@ static void setup_ied_area(struct smm_relocation_params *params)
 	memset(ied_base + (1 << 20), 0, (32 << 10));
 }
 
-static int install_permanent_handler(int num_cpus,
-					struct smm_relocation_params *relo_params)
-{
-	/* There are num_cpus concurrent stacks and num_cpus concurrent save
-	 * state areas. Lastly, set the stack size to the save state size. */
-	int save_state_size = sizeof(em64t101_smm_state_save_area_t);
-	struct smm_loader_params smm_params = {
-		.per_cpu_stack_size = save_state_size,
-		.num_concurrent_stacks = num_cpus,
-		.per_cpu_save_state_size = save_state_size,
-		.num_concurrent_save_states = num_cpus,
-	};
-
-	printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n",
-	       relo_params->smram_base);
-	if (smm_load_module((void *)relo_params->smram_base,
-			    relo_params->smram_size, &smm_params))
-		return -1;
-
-	adjust_apic_id_map(&smm_params);
-
-	return 0;
-}
-
-static int cpu_smm_setup(void)
+void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
+		size_t *smm_save_state_size)
 {
 	device_t dev = SA_DEV_ROOT;
-	int num_cpus;
-	msr_t msr;
 
 	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
@@ -357,50 +276,32 @@ static int cpu_smm_setup(void)
 
 	setup_ied_area(&smm_reloc_params);
 
-	msr = rdmsr(CORE_THREAD_COUNT_MSR);
-	num_cpus = msr.lo & 0xffff;
-	if (num_cpus > CONFIG_MAX_CPUS) {
-		printk(BIOS_CRIT,
-		       "Error: Hardware CPUs (%d) > MAX_CPUS (%d)\n",
-		       num_cpus, CONFIG_MAX_CPUS);
-	}
-
-	if (install_relocation_handler(num_cpus, &smm_reloc_params)) {
-		printk(BIOS_CRIT, "SMM Relocation handler install failed.\n");
-		return -1;
-	}
-
-	if (install_permanent_handler(num_cpus, &smm_reloc_params)) {
-		printk(BIOS_CRIT, "SMM Permanent handler install failed.\n");
-		return -1;
-	}
-
-	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
-	/* TODO(adurbin): Is this really needed? */
-	wbinvd();
-
-	return 0;
+	*perm_smbase = smm_reloc_params.smram_base;
+	*perm_smsize = smm_reloc_params.smram_size;
+	*smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
 }
 
-int smm_initialize(void)
+void smm_initialize(void)
 {
-	/* Return early if CPU SMM setup failed. */
-	if (cpu_smm_setup())
-		return -1;
-
 	/* Clear the SMM state in the southbridge. */
 	southbridge_smm_clear_state();
 
-	/* Run the relocation handler. */
+	/*
+	 * Run the relocation handler for on the BSP to check and set up
+	 * parallel SMM relocation.
+	 */
 	smm_initiate_relocation();
 
 	if (smm_reloc_params.smm_save_state_in_msrs) {
 		printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
 	}
-
-	return 0;
 }
 
+/* The default SMM entry can happen in parallel or serially. If the
+ * default SMM entry is done in parallel the BSP has already setup
+ * the saving state to each CPU's MSRs. At least one save state size
+ * is required for the initial SMM entry for the BSP to determine if
+ * parallel SMM relocation is even feasible. */
 void smm_relocate(void)
 {
 	/*
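
One detail worth noting in the new update_save_state(): the handler no longer receives the loader's runtime->save_state_size, so it locates the current save state by stepping back sizeof(*save_state) from the top of the 64 KiB default SMRAM segment at curr_smbase. Below is a minimal, hypothetical model of that address arithmetic; the struct layout and the example SMBASE/IEDBASE values are placeholders, not the real em64t101 definitions from coreboot.

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder: the real em64t101_smm_state_save_area_t is defined by
 * coreboot; only smbase/iedbase at the end matter for this sketch. */
typedef struct {
	uint8_t  reserved[0x3f8];
	uint32_t smbase;
	uint32_t iedbase;
} fake_save_state_t;

#define SMM_DEFAULT_SIZE 0x10000u  /* 64 KiB default SMRAM segment */

int main(void)
{
	static uint8_t default_smram[SMM_DEFAULT_SIZE];
	uintptr_t curr_smbase = (uintptr_t)default_smram;

	/* Same arithmetic as the patched update_save_state(): the save
	 * state sits at the very top of the default segment. */
	fake_save_state_t *save_state =
		(void *)(curr_smbase + SMM_DEFAULT_SIZE - sizeof(*save_state));

	save_state->smbase = 0x7f000000u;   /* staggered target (example) */
	save_state->iedbase = 0x7f800000u;  /* IED base (example) */

	printf("save state at offset 0x%lx, new SMBASE 0x%x\n",
	       (unsigned long)((uintptr_t)save_state - curr_smbase),
	       (unsigned)save_state->smbase);
	return 0;
}
```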