Diffstat (limited to 'src/soc/intel/braswell/cpu.c')
-rw-r--r--  src/soc/intel/braswell/cpu.c  116
1 file changed, 43 insertions(+), 73 deletions(-)
diff --git a/src/soc/intel/braswell/cpu.c b/src/soc/intel/braswell/cpu.c
index 6b84c59711..3abec6b53c 100644
--- a/src/soc/intel/braswell/cpu.c
+++ b/src/soc/intel/braswell/cpu.c
@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2013 Google Inc.
+ * Copyright (C) 2015 Intel Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,7 +18,6 @@
* Foundation, Inc.
*/
-#include <stdlib.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/microcode.h>
@@ -28,64 +28,42 @@
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
-#include <reg_script.h>
-
-#include <soc/iosf.h>
+#include <soc/intel/common/memmap.h>
#include <soc/msr.h>
#include <soc/pattrs.h>
#include <soc/ramstage.h>
#include <soc/smm.h>
+#include <stdlib.h>
static void smm_relocate(void *unused);
static void enable_smis(void *unused);
+static void pre_smm_relocation(void *unused);
static struct mp_flight_record mp_steps[] = {
+ MP_FR_BLOCK_APS(pre_smm_relocation, NULL, pre_smm_relocation, NULL),
MP_FR_BLOCK_APS(smm_relocate, NULL, smm_relocate, NULL),
MP_FR_BLOCK_APS(mp_initialize_cpu, NULL, mp_initialize_cpu, NULL),
/* Wait for APs to finish initialization before proceeding. */
MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL),
};
-/* The APIC id space on Bay Trail is sparse. Each id is separated by 2. */
+/* The APIC id space is sparse. Each id is separated by 2. */
static int adjust_apic_id(int index, int apic_id)
{
return 2 * index;
}
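
[Editor's note, not part of the commit: for illustration, the sparse mapping this helper hands back to the MP init code works out as follows.]

/*
 * Illustrative only, assuming a 0-based logical CPU index:
 *   index:   0  1  2  3
 *   APIC ID: 0  2  4  6
 */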
-/* Package level MSRs */
-const struct reg_script package_msr_script[] = {
- /* Set Package TDP to ~7W */
- REG_MSR_WRITE(MSR_PKG_POWER_LIMIT, 0x3880fa),
- REG_MSR_RMW(MSR_PP1_POWER_LIMIT, ~(0x7f << 17), 0),
- REG_MSR_WRITE(MSR_PKG_TURBO_CFG1, 0x702),
- REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG1, 0x200b),
- REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG2, 0),
- REG_MSR_WRITE(MSR_CPU_THERM_CFG1, 0x00000305),
- REG_MSR_WRITE(MSR_CPU_THERM_CFG2, 0x0405500d),
- REG_MSR_WRITE(MSR_CPU_THERM_SENS_CFG, 0x27),
- REG_SCRIPT_END
-};
-/* Core level MSRs */
-const struct reg_script core_msr_script[] = {
- /* Dynamic L2 shrink enable and threshold, clear SINGLE_PCTL bit 11 */
- REG_MSR_RMW(MSR_PMG_CST_CONFIG_CONTROL, ~0x3f080f, 0xe0008),
- REG_MSR_RMW(MSR_POWER_MISC,
- ~(ENABLE_ULFM_AUTOCM_MASK | ENABLE_INDP_AUTOCM_MASK), 0),
- /* Disable C1E */
- REG_MSR_RMW(MSR_POWER_CTL, ~0x2, 0),
- REG_MSR_OR(MSR_POWER_MISC, 0x44),
- REG_SCRIPT_END
-};
-
-void baytrail_init_cpus(device_t dev)
+void soc_init_cpus(device_t dev)
{
struct bus *cpu_bus = dev->link_list;
const struct pattrs *pattrs = pattrs_get();
struct mp_params mp_params;
- uint32_t bsmrwac;
void *default_smm_area;
+ printk(BIOS_SPEW, "%s/%s ( %s )\n",
+ __FILE__, __func__, dev_name(dev));
+
/* Set up MTRRs based on physical address size. */
x86_setup_fixed_mtrrs();
x86_setup_var_mtrrs(pattrs->address_bits, 2);
@@ -100,52 +78,22 @@ void baytrail_init_cpus(device_t dev)
default_smm_area = backup_default_smm_area();
- /*
- * Configure the BUNIT to allow dirty cache line evictions in non-SMM
- * mode for the lines that were dirtied while in SMM mode. Otherwise
- * the writes would be silently dropped.
- */
- bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED;
- iosf_bunit_write(BUNIT_SMRWAC, bsmrwac);
-
- /* Set package MSRs */
- reg_script_run(package_msr_script);
-
- /* Enable Turbo Mode on BSP and siblings of the BSP's building block. */
enable_turbo();
- if (mp_init(cpu_bus, &mp_params)) {
+ if (mp_init(cpu_bus, &mp_params))
printk(BIOS_ERR, "MP initialization failure.\n");
- }
restore_default_smm_area(default_smm_area);
}
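
[Editor's note, not part of the commit: the context lines above skip the mp_params setup. A rough sketch of how that structure is typically filled, assuming the pre-PARALLEL_MP mp_init API that Bay Trail used (num_cpus, parallel_microcode_load, adjust_apic_id, flight_plan, num_records, microcode_pointer); verify the field names against cpu/x86/mp.h of the matching tree.]

	/* Sketch only -- field names are an assumption, not taken from this commit. */
	struct mp_params mp_params = {
		.num_cpus = pattrs->num_cpus,
		.parallel_microcode_load = 1,
		.adjust_apic_id = adjust_apic_id,
		.flight_plan = &mp_steps[0],
		.num_records = ARRAY_SIZE(mp_steps),
		.microcode_pointer = pattrs->microcode_patch,
	};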
-static void baytrail_core_init(device_t cpu)
-{
- printk(BIOS_DEBUG, "Init BayTrail core.\n");
-
- /* On bay trail the turbo disable bit is actually scoped at building
- * block level -- not package. For non-bsp cores that are within a
- * building block enable turbo. The cores within the BSP's building
- * block will just see it already enabled and move on. */
- if (lapicid())
- enable_turbo();
-
- /* Set core MSRs */
- reg_script_run(core_msr_script);
-
- /* Set this core to max frequency ratio */
- set_max_freq();
-}
static struct device_operations cpu_dev_ops = {
- .init = baytrail_core_init,
+ .init = NULL,
};
static struct cpu_device_id cpu_table[] = {
- { X86_VENDOR_INTEL, 0x30673 },
- { X86_VENDOR_INTEL, 0x30678 },
+ { X86_VENDOR_INTEL, 0x406C3 },
+ { X86_VENDOR_INTEL, 0x406C2 },
{ 0, 0 },
};
@@ -202,9 +150,11 @@ static void asmlinkage cpu_smm_do_relocation(void *arg)
smrr.hi = 0;
wrmsr(SMRRphysMask_MSR, smrr);
- /* The relocated handler runs with all CPUs concurrently. Therefore
+ /*
+ * The relocated handler runs with all CPUs concurrently. Therefore
* stagger the entry points adjusting SMBASE downwards by save state
- * size * CPU num. */
+ * size * CPU num.
+ */
smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + runtime->smbase);
smm_state->smbase = relo_attrs.smbase - cpu * runtime->save_state_size;
printk(BIOS_DEBUG, "New SMBASE 0x%08x\n", smm_state->smbase);
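
[Editor's note, not part of the commit: the staggering above is plain arithmetic. A worked sketch with assumed numbers (an SMBASE of 0x7F800000 and a 0x400-byte save state, neither taken from this commit):]

	/*
	 * Illustrative values only:
	 *   CPU 0: 0x7F800000 - 0 * 0x400 = 0x7F800000
	 *   CPU 1: 0x7F800000 - 1 * 0x400 = 0x7F7FFC00
	 *   CPU 2: 0x7F800000 - 2 * 0x400 = 0x7F7FF800
	 */
	static inline uint32_t staggered_smbase(uint32_t smbase,
						uint32_t save_state_size, int cpu)
	{
		return smbase - cpu * save_state_size;
	}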
@@ -232,8 +182,10 @@ static int install_relocation_handler(int num_cpus)
static int install_permanent_handler(int num_cpus)
{
- /* There are num_cpus concurrent stacks and num_cpus concurrent save
- * state areas. Lastly, set the stack size to the save state size. */
+ /*
+ * There are num_cpus concurrent stacks and num_cpus concurrent save
+ * state areas. Lastly, set the stack size to the save state size.
+ */
int save_state_size = sizeof(em64t100_smm_state_save_area_t);
struct smm_loader_params smm_params = {
.per_cpu_stack_size = save_state_size,
@@ -241,11 +193,15 @@ static int install_permanent_handler(int num_cpus)
.per_cpu_save_state_size = save_state_size,
.num_concurrent_save_states = num_cpus,
};
- const int tseg_size = smm_region_size() - CONFIG_SMM_RESERVED_SIZE;
+ void *smm_base;
+ size_t smm_size;
+ int tseg_size;
printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n",
relo_attrs.smbase);
+ smm_region(&smm_base, &smm_size);
+ tseg_size = smm_size - CONFIG_SMM_RESERVED_SIZE;
if (smm_load_module((void *)relo_attrs.smbase, tseg_size, &smm_params))
return -1;
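
[Editor's note, not part of the commit: a short usage sketch of the common helper this hunk switches to; the prototype, void smm_region(void **start, size_t *size) from soc/intel/common/memmap.h, is the one the hunk itself relies on.]

	void *tseg_base;
	size_t tseg_total;
	size_t handler_area;

	smm_region(&tseg_base, &tseg_total);
	/* Everything except the reserved tail is handed to smm_load_module(). */
	handler_area = tseg_total - CONFIG_SMM_RESERVED_SIZE;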
@@ -259,11 +215,14 @@ static int smm_load_handlers(void)
/* All range registers are aligned to 4KiB */
const uint32_t rmask = ~((1 << 12) - 1);
const struct pattrs *pattrs = pattrs_get();
+ void *smm_base;
+ size_t smm_size;
/* Initialize global tracking state. */
- relo_attrs.smbase = (uint32_t)smm_region_start();
+ smm_region(&smm_base, &smm_size);
+ relo_attrs.smbase = (uint32_t)smm_base;
relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
- relo_attrs.smrr_mask = ~(smm_region_size() - 1) & rmask;
+ relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
relo_attrs.smrr_mask |= MTRRphysMaskValid;
/* Install handlers. */
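
[Editor's note, not part of the commit: the SMRR programming above is mask arithmetic. A worked example with assumed numbers (an 8 MiB TSEG at 0x7F800000, illustrative only):]

	/*
	 * Assumed layout, not the platform's actual one:
	 *   smm_base  = 0x7F800000, smm_size = 8 MiB
	 *   smrr_base = 0x7F800000 | MTRR_TYPE_WRBACK (6)  = 0x7F800006
	 *   smrr_mask = ~(0x800000 - 1) & ~0xFFF           = 0xFF800000
	 *             |= MTRRphysMaskValid (bit 11)         -> 0xFF800800
	 * The SMRR then covers exactly the 8 MiB TSEG as write-back in SMM.
	 */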
@@ -283,6 +242,17 @@ static int smm_load_handlers(void)
return 0;
}
+static void pre_smm_relocation(void *unused)
+{
+ const struct pattrs *pattrs = pattrs_get();
+ msr_t msr_value;
+
+ /* Need to make sure that all cores have microcode loaded. */
+ msr_value = rdmsr(MSR_IA32_BIOS_SIGN_ID);
+ if (msr_value.hi == 0)
+ intel_microcode_load_unlocked(pattrs->microcode_patch);
+}
+
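
[Editor's note, not part of the commit: a hedged aside on the new check. On Intel CPUs the microcode update revision is reported in the upper half of IA32_BIOS_SIGN_ID (MSR 0x8B), so a zero high word is read here as "no update applied on this core". A minimal helper mirroring that test might look like:]

	static int microcode_loaded(void)
	{
		/* .hi holds the loaded update revision; 0 means none. */
		msr_t bios_sign = rdmsr(MSR_IA32_BIOS_SIGN_ID);
		return bios_sign.hi != 0;
	}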
static void smm_relocate(void *unused)
{
const struct pattrs *pattrs = pattrs_get();