Diffstat (limited to 'src/soc/intel/broadwell/cpu')
-rw-r--r--  src/soc/intel/broadwell/cpu/Makefile.inc  |  29
-rw-r--r--  src/soc/intel/broadwell/cpu/bootblock.c   |  65
-rw-r--r--  src/soc/intel/broadwell/cpu/cpu.c         | 642
-rw-r--r--  src/soc/intel/broadwell/cpu/romstage.c    |  32
-rw-r--r--  src/soc/intel/broadwell/cpu/smmrelocate.c | 264
-rw-r--r--  src/soc/intel/broadwell/cpu/tsc_freq.c    |  14
6 files changed, 1046 insertions, 0 deletions
diff --git a/src/soc/intel/broadwell/cpu/Makefile.inc b/src/soc/intel/broadwell/cpu/Makefile.inc
new file mode 100644
index 0000000000..e9f46376af
--- /dev/null
+++ b/src/soc/intel/broadwell/cpu/Makefile.inc
@@ -0,0 +1,29 @@
+subdirs-y += ../../../../cpu/x86/lapic
+subdirs-y += ../../../../cpu/x86/mtrr
+subdirs-y += ../../../../cpu/x86/smm
+subdirs-y += ../../../../cpu/x86/tsc
+subdirs-y += ../../../../cpu/intel/microcode
+subdirs-y += ../../../../cpu/intel/turbo
+subdirs-y += ../../../../cpu/intel/common
+
+bootblock-y += bootblock.c
+bootblock-y += ../../../../cpu/intel/car/bootblock.c
+bootblock-y += ../../../../cpu/intel/car/non-evict/cache_as_ram.S
+bootblock-y += ../../../../cpu/x86/early_reset.S
+
+romstage-y += romstage.c
+romstage-y += ../../../../cpu/intel/car/romstage.c
+
+postcar-y += ../../../../cpu/intel/car/non-evict/exit_car.S
+
+ramstage-y += cpu.c
+ramstage-y += smmrelocate.c
+
+bootblock-y += tsc_freq.c
+ramstage-y += tsc_freq.c
+romstage-y += tsc_freq.c
+smm-y += tsc_freq.c
+postcar-y += tsc_freq.c
+verstage-y += tsc_freq.c
+
+cpu_microcode_bins += 3rdparty/blobs/soc/intel/broadwell/microcode.bin
diff --git a/src/soc/intel/broadwell/cpu/bootblock.c b/src/soc/intel/broadwell/cpu/bootblock.c
new file mode 100644
index 0000000000..d6883c6436
--- /dev/null
+++ b/src/soc/intel/broadwell/cpu/bootblock.c
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <stdint.h>
+#include <arch/bootblock.h>
+#include <arch/io.h>
+#include <cpu/x86/msr.h>
+#include <halt.h>
+#include <soc/rcba.h>
+#include <soc/msr.h>
+#include <delay.h>
+
+static void set_flex_ratio_to_tdp_nominal(void)
+{
+ msr_t flex_ratio, msr;
+ u32 soft_reset;
+ u8 nominal_ratio;
+
+ /* Check for Flex Ratio support */
+ flex_ratio = rdmsr(MSR_FLEX_RATIO);
+ if (!(flex_ratio.lo & FLEX_RATIO_EN))
+ return;
+
+ /* Check for >0 configurable TDPs */
+ msr = rdmsr(MSR_PLATFORM_INFO);
+ if (((msr.hi >> 1) & 3) == 0)
+ return;
+
+ /* Use nominal TDP ratio for flex ratio */
+ msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
+ nominal_ratio = msr.lo & 0xff;
+
+ /* See if flex ratio is already set to nominal TDP ratio */
+ if (((flex_ratio.lo >> 8) & 0xff) == nominal_ratio)
+ return;
+
+ /* Set flex ratio to nominal TDP ratio */
+ flex_ratio.lo &= ~0xff00;
+ flex_ratio.lo |= nominal_ratio << 8;
+ flex_ratio.lo |= FLEX_RATIO_LOCK;
+ wrmsr(MSR_FLEX_RATIO, flex_ratio);
+
+ /* Set flex ratio in soft reset data register bits 11:6.
+ * RCBA region is enabled in southbridge bootblock */
+ soft_reset = RCBA32(SOFT_RESET_DATA);
+ soft_reset &= ~(0x3f << 6);
+ soft_reset |= (nominal_ratio & 0x3f) << 6;
+ RCBA32(SOFT_RESET_DATA) = soft_reset;
+
+ /* Set soft reset control to use register value */
+ RCBA32_OR(SOFT_RESET_CTRL, 1);
+
+ /* Delay before reset to avoid potential TPM lockout */
+ mdelay(30);
+
+ /* Issue warm reset, will be "CPU only" due to soft reset data */
+ outb(0x0, 0xcf9);
+ outb(0x6, 0xcf9);
+ halt();
+}
+
+void bootblock_early_cpu_init(void)
+{
+ /* Set flex ratio and reset if needed */
+ set_flex_ratio_to_tdp_nominal();
+}
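
Note: set_flex_ratio_to_tdp_nominal() above and cpu_config_tdp_levels() in cpu.c below open-code the same PLATFORM_INFO decode. A minimal sketch of that decode, assuming only the MSR layout described in the comments (the helper name is hypothetical, not part of the commit):

/* PLATFORM_INFO bits 34:33 give the number of configurable TDP
 * levels; (msr.hi >> 1) & 3 extracts them from the high dword. */
static inline int num_config_tdp_levels(msr_t platform_info)
{
	return (platform_info.hi >> (33 - 32)) & 3;
}
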
diff --git a/src/soc/intel/broadwell/cpu/cpu.c b/src/soc/intel/broadwell/cpu/cpu.c
new file mode 100644
index 0000000000..72efa3dc81
--- /dev/null
+++ b/src/soc/intel/broadwell/cpu/cpu.c
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <console/console.h>
+#include <device/device.h>
+#include <device/pci.h>
+#include <arch/cpu.h>
+#include <cpu/cpu.h>
+#include <cpu/x86/mtrr.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/lapic.h>
+#include <cpu/x86/mp.h>
+#include <cpu/intel/microcode.h>
+#include <cpu/intel/smm_reloc.h>
+#include <cpu/intel/speedstep.h>
+#include <cpu/intel/turbo.h>
+#include <cpu/x86/name.h>
+#include <delay.h>
+#include <soc/cpu.h>
+#include <soc/msr.h>
+#include <soc/pci_devs.h>
+#include <soc/ramstage.h>
+#include <soc/rcba.h>
+#include <soc/systemagent.h>
+#include <soc/intel/broadwell/chip.h>
+#include <cpu/intel/common/common.h>
+
+/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
+static const u8 power_limit_time_sec_to_msr[] = {
+ [0] = 0x00,
+ [1] = 0x0a,
+ [2] = 0x0b,
+ [3] = 0x4b,
+ [4] = 0x0c,
+ [5] = 0x2c,
+ [6] = 0x4c,
+ [7] = 0x6c,
+ [8] = 0x0d,
+ [10] = 0x2d,
+ [12] = 0x4d,
+ [14] = 0x6d,
+ [16] = 0x0e,
+ [20] = 0x2e,
+ [24] = 0x4e,
+ [28] = 0x6e,
+ [32] = 0x0f,
+ [40] = 0x2f,
+ [48] = 0x4f,
+ [56] = 0x6f,
+ [64] = 0x10,
+ [80] = 0x30,
+ [96] = 0x50,
+ [112] = 0x70,
+ [128] = 0x11,
+};
+
+/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
+static const u8 power_limit_time_msr_to_sec[] = {
+ [0x00] = 0,
+ [0x0a] = 1,
+ [0x0b] = 2,
+ [0x4b] = 3,
+ [0x0c] = 4,
+ [0x2c] = 5,
+ [0x4c] = 6,
+ [0x6c] = 7,
+ [0x0d] = 8,
+ [0x2d] = 10,
+ [0x4d] = 12,
+ [0x6d] = 14,
+ [0x0e] = 16,
+ [0x2e] = 20,
+ [0x4e] = 24,
+ [0x6e] = 28,
+ [0x0f] = 32,
+ [0x2f] = 40,
+ [0x4f] = 48,
+ [0x6f] = 56,
+ [0x10] = 64,
+ [0x30] = 80,
+ [0x50] = 96,
+ [0x70] = 112,
+ [0x11] = 128,
+};
+
+/* The core 100MHz BCLK is disabled in deeper c-states. One needs to calibrate
+ * the 100MHz BCLK against the 24MHz BCLK to restore the clocks properly
+ * when a core is woken up. */
+static int pcode_ready(void)
+{
+ int wait_count;
+ const int delay_step = 10;
+
+ wait_count = 0;
+ do {
+ if (!(MCHBAR32(BIOS_MAILBOX_INTERFACE) & MAILBOX_RUN_BUSY))
+ return 0;
+ wait_count += delay_step;
+ udelay(delay_step);
+ } while (wait_count < 1000);
+
+ return -1;
+}
+
+static void calibrate_24mhz_bclk(void)
+{
+ int err_code;
+
+ if (pcode_ready() < 0) {
+ printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
+ return;
+ }
+
+ /* A non-zero value initiates the PCODE calibration. */
+ MCHBAR32(BIOS_MAILBOX_DATA) = ~0;
+ MCHBAR32(BIOS_MAILBOX_INTERFACE) =
+ MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_FSM_MEASURE_INTVL;
+
+ if (pcode_ready() < 0) {
+ printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
+ return;
+ }
+
+ err_code = MCHBAR32(BIOS_MAILBOX_INTERFACE) & 0xff;
+
+ printk(BIOS_DEBUG, "PCODE: 24MHz BCLK calibration response: %d\n",
+ err_code);
+
+ /* Read the calibrated value. */
+ MCHBAR32(BIOS_MAILBOX_INTERFACE) =
+ MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_READ_CALIBRATION;
+
+ if (pcode_ready() < 0) {
+ printk(BIOS_ERR, "PCODE: mailbox timeout on read.\n");
+ return;
+ }
+
+ printk(BIOS_DEBUG, "PCODE: 24MHz BCLK calibration value: 0x%08x\n",
+ MCHBAR32(BIOS_MAILBOX_DATA));
+}
+
+static u32 pcode_mailbox_read(u32 command)
+{
+ if (pcode_ready() < 0) {
+ printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
+ return 0;
+ }
+
+ /* Send command and start transaction */
+ MCHBAR32(BIOS_MAILBOX_INTERFACE) = command | MAILBOX_RUN_BUSY;
+
+ if (pcode_ready() < 0) {
+ printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
+ return 0;
+ }
+
+ /* Read mailbox */
+ return MCHBAR32(BIOS_MAILBOX_DATA);
+}
+
+static int pcode_mailbox_write(u32 command, u32 data)
+{
+ if (pcode_ready() < 0) {
+ printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
+ return -1;
+ }
+
+ MCHBAR32(BIOS_MAILBOX_DATA) = data;
+
+ /* Send command and start transaction */
+ MCHBAR32(BIOS_MAILBOX_INTERFACE) = command | MAILBOX_RUN_BUSY;
+
+ if (pcode_ready() < 0) {
+ printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void initialize_vr_config(void)
+{
+ config_t *conf = config_of_soc();
+ msr_t msr;
+
+ printk(BIOS_DEBUG, "Initializing VR config.\n");
+
+ /* Configure VR_CURRENT_CONFIG. */
+ msr = rdmsr(MSR_VR_CURRENT_CONFIG);
+ /* Preserve bits 63 and 62. Bit 62 is PSI4 enable, but it is only valid
+ * on ULT systems. */
+ msr.hi &= 0xc0000000;
+ msr.hi |= (0x01 << (52 - 32)); /* PSI3 threshold - 1A. */
+ msr.hi |= (0x05 << (42 - 32)); /* PSI2 threshold - 5A. */
+ msr.hi |= (0x14 << (32 - 32)); /* PSI1 threshold - 20A. */
+ msr.hi |= (1 << (62 - 32)); /* Enable PSI4 */
+ /* Leave the max instantaneous current limit (12:0) at its default. */
+ wrmsr(MSR_VR_CURRENT_CONFIG, msr);
+
+ /* Configure VR_MISC_CONFIG MSR. */
+ msr = rdmsr(MSR_VR_MISC_CONFIG);
+ /* Set the IOUT_SLOPE scalar applied to dIout in U10.1.9 format. */
+ msr.hi &= ~(0x3ff << (40 - 32));
+ msr.hi |= (0x200 << (40 - 32)); /* 1.0 */
+ /* Set IOUT_OFFSET to 0. */
+ msr.hi &= ~0xff;
+ /* Set entry ramp rate to slow. */
+ msr.hi &= ~(1 << (51 - 32));
+ /* Enable decay mode on C-state entry. */
+ msr.hi |= (1 << (52 - 32));
+ /* Set the slow ramp rate */
+ msr.hi &= ~(0x3 << (53 - 32));
+ /* Configure the C-state exit ramp rate. */
+ if (conf->vr_slow_ramp_rate_enable) {
+ /* Configured slow ramp rate. */
+ msr.hi |= ((conf->vr_slow_ramp_rate_set & 0x3) << (53 - 32));
+ /* Set exit ramp rate to slow. */
+ msr.hi &= ~(1 << (50 - 32));
+ } else {
+ /* Fast ramp rate / 4. */
+ msr.hi |= (0x01 << (53 - 32));
+ /* Set exit ramp rate to fast. */
+ msr.hi |= (1 << (50 - 32));
+ }
+ /* Set MIN_VID (31:24) to allow CPU to have full control. */
+ msr.lo &= ~0xff000000;
+ msr.lo |= (conf->vr_cpu_min_vid & 0xff) << 24;
+ wrmsr(MSR_VR_MISC_CONFIG, msr);
+
+ /* Configure VR_MISC_CONFIG2 MSR. */
+ msr = rdmsr(MSR_VR_MISC_CONFIG2);
+ msr.lo &= ~0xffff;
+ /* Allow CPU to control minimum voltage completely (15:8) and
+ * set the fast ramp voltage in 10mV steps. */
+ if (cpu_family_model() == BROADWELL_FAMILY_ULT)
+ msr.lo |= 0x006a; /* 1.56V */
+ else
+ msr.lo |= 0x006f; /* 1.60V */
+ wrmsr(MSR_VR_MISC_CONFIG2, msr);
+
+ /* Set C9/C10 VCC Min */
+ pcode_mailbox_write(MAILBOX_BIOS_CMD_WRITE_C9C10_VOLTAGE, 0x1f1f);
+}
+
+static void configure_pch_power_sharing(void)
+{
+ u32 pch_power, pch_power_ext, pmsync, pmsync2;
+ int i;
+
+ /* Read PCH Power levels from PCODE */
+ pch_power = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER);
+ pch_power_ext = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER_EXT);
+
+ printk(BIOS_INFO, "PCH Power: PCODE Levels 0x%08x 0x%08x\n",
+ pch_power, pch_power_ext);
+
+ pmsync = RCBA32(PMSYNC_CONFIG);
+ pmsync2 = RCBA32(PMSYNC_CONFIG2);
+
+ /* Program PMSYNC_TPR_CONFIG PCH power limit values
+ * pmsync[0:4] = mailbox[0:5]
+ * pmsync[8:12] = mailbox[6:11]
+ * pmsync[16:20] = mailbox[12:17]
+ */
+ for (i = 0; i < 3; i++) {
+ u32 level = pch_power & 0x3f;
+ pch_power >>= 6;
+ pmsync &= ~(0x1f << (i * 8));
+ pmsync |= (level & 0x1f) << (i * 8);
+ }
+ RCBA32(PMSYNC_CONFIG) = pmsync;
+
+ /* Program PMSYNC_TPR_CONFIG2 Extended PCH power limit values
+ * pmsync2[0:4] = mailbox[23:18]
+ * pmsync2[8:12] = mailbox_ext[6:11]
+ * pmsync2[16:20] = mailbox_ext[12:17]
+ * pmsync2[24:28] = mailbox_ext[18:22]
+ */
+ pmsync2 &= ~0x1f;
+ pmsync2 |= pch_power & 0x1f;
+
+ for (i = 1; i < 4; i++) {
+ u32 level = pch_power_ext & 0x3f;
+ pch_power_ext >>= 6;
+ pmsync2 &= ~(0x1f << (i * 8));
+ pmsync2 |= (level & 0x1f) << (i * 8);
+ }
+ RCBA32(PMSYNC_CONFIG2) = pmsync2;
+}
+
+int cpu_config_tdp_levels(void)
+{
+ msr_t platform_info;
+
+ /* Bits 34:33 indicate how many levels supported */
+ platform_info = rdmsr(MSR_PLATFORM_INFO);
+ return (platform_info.hi >> 1) & 3;
+}
+
+/*
+ * Configure processor power limits if possible
+ * This must be done AFTER set of BIOS_RESET_CPL
+ */
+void set_power_limits(u8 power_limit_1_time)
+{
+ msr_t msr = rdmsr(MSR_PLATFORM_INFO);
+ msr_t limit;
+ unsigned int power_unit;
+ unsigned int tdp, min_power, max_power, max_time;
+ u8 power_limit_1_val;
+
+ if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
+ power_limit_1_time = ARRAY_SIZE(power_limit_time_sec_to_msr) - 1;
+
+ if (!(msr.lo & PLATFORM_INFO_SET_TDP))
+ return;
+
+ /* Get units */
+ msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
+ power_unit = 2 << ((msr.lo & 0xf) - 1);
+
+ /* Get power defaults for this SKU */
+ msr = rdmsr(MSR_PKG_POWER_SKU);
+ tdp = msr.lo & 0x7fff;
+ min_power = (msr.lo >> 16) & 0x7fff;
+ max_power = msr.hi & 0x7fff;
+ max_time = (msr.hi >> 16) & 0x7f;
+
+ printk(BIOS_DEBUG, "CPU TDP: %u Watts\n", tdp / power_unit);
+
+ if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
+ power_limit_1_time = power_limit_time_msr_to_sec[max_time];
+
+ if (min_power > 0 && tdp < min_power)
+ tdp = min_power;
+
+ if (max_power > 0 && tdp > max_power)
+ tdp = max_power;
+
+ power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];
+
+ /* Set long term power limit to TDP */
+ limit.lo = 0;
+ limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
+ limit.lo |= PKG_POWER_LIMIT_EN;
+ limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
+ PKG_POWER_LIMIT_TIME_SHIFT;
+
+ /* Set short term power limit to 1.25 * TDP */
+ limit.hi = 0;
+ limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
+ limit.hi |= PKG_POWER_LIMIT_EN;
+ /* Power limit 2 time is only programmable on server SKU */
+
+ wrmsr(MSR_PKG_POWER_LIMIT, limit);
+
+ /* Set power limit values in MCHBAR as well */
+ MCHBAR32(MCH_PKG_POWER_LIMIT_LO) = limit.lo;
+ MCHBAR32(MCH_PKG_POWER_LIMIT_HI) = limit.hi;
+
+ /* Set DDR RAPL power limit by copying from MMIO to MSR */
+ msr.lo = MCHBAR32(MCH_DDR_POWER_LIMIT_LO);
+ msr.hi = MCHBAR32(MCH_DDR_POWER_LIMIT_HI);
+ wrmsr(MSR_DDR_RAPL_LIMIT, msr);
+
+ /* Use nominal TDP values for CPUs with configurable TDP */
+ if (cpu_config_tdp_levels()) {
+ msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
+ limit.hi = 0;
+ limit.lo = msr.lo & 0xff;
+ wrmsr(MSR_TURBO_ACTIVATION_RATIO, limit);
+ }
+}
+
+static void configure_c_states(void)
+{
+ msr_t msr;
+
+ msr = rdmsr(MSR_PKG_CST_CONFIG_CONTROL);
+ msr.lo |= (1 << 31); // Timed MWAIT Enable
+ msr.lo |= (1 << 30); // Package c-state Undemotion Enable
+ msr.lo |= (1 << 29); // Package c-state Demotion Enable
+ msr.lo |= (1 << 28); // C1 Auto Undemotion Enable
+ msr.lo |= (1 << 27); // C3 Auto Undemotion Enable
+ msr.lo |= (1 << 26); // C1 Auto Demotion Enable
+ msr.lo |= (1 << 25); // C3 Auto Demotion Enable
+ msr.lo &= ~(1 << 10); // Disable IO MWAIT redirection
+ /* The deepest package c-state defaults to the factory-configured value. */
+ wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);
+
+ msr = rdmsr(MSR_MISC_PWR_MGMT);
+ msr.lo &= ~(1 << 0); // Enable P-state HW_ALL coordination
+ wrmsr(MSR_MISC_PWR_MGMT, msr);
+
+ msr = rdmsr(MSR_POWER_CTL);
+ msr.lo |= (1 << 18); // Enable Energy Perf Bias MSR 0x1b0
+ msr.lo |= (1 << 1); // C1E Enable
+ msr.lo |= (1 << 0); // Bi-directional PROCHOT#
+ wrmsr(MSR_POWER_CTL, msr);
+
+ /* C-state Interrupt Response Latency Control 0 - package C3 latency */
+ msr.hi = 0;
+ msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_0_LIMIT;
+ wrmsr(MSR_C_STATE_LATENCY_CONTROL_0, msr);
+
+ /* C-state Interrupt Response Latency Control 1 */
+ msr.hi = 0;
+ msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_1_LIMIT;
+ wrmsr(MSR_C_STATE_LATENCY_CONTROL_1, msr);
+
+ /* C-state Interrupt Response Latency Control 2 - package C6/C7 short */
+ msr.hi = 0;
+ msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_2_LIMIT;
+ wrmsr(MSR_C_STATE_LATENCY_CONTROL_2, msr);
+
+ /* C-state Interrupt Response Latency Control 3 - package C8 */
+ msr.hi = 0;
+ msr.lo = IRTL_VALID | IRTL_1024_NS |
+ C_STATE_LATENCY_CONTROL_3_LIMIT;
+ wrmsr(MSR_C_STATE_LATENCY_CONTROL_3, msr);
+
+ /* C-state Interrupt Response Latency Control 4 - package C9 */
+ msr.hi = 0;
+ msr.lo = IRTL_VALID | IRTL_1024_NS |
+ C_STATE_LATENCY_CONTROL_4_LIMIT;
+ wrmsr(MSR_C_STATE_LATENCY_CONTROL_4, msr);
+
+ /* C-state Interrupt Response Latency Control 5 - package C10 */
+ msr.hi = 0;
+ msr.lo = IRTL_VALID | IRTL_1024_NS |
+ C_STATE_LATENCY_CONTROL_5_LIMIT;
+ wrmsr(MSR_C_STATE_LATENCY_CONTROL_5, msr);
+}
+
+static void configure_thermal_target(void)
+{
+ config_t *conf;
+ struct device *lapic;
+ msr_t msr;
+
+ /* Find pointer to CPU configuration */
+ lapic = dev_find_lapic(SPEEDSTEP_APIC_MAGIC);
+ if (!lapic || !lapic->chip_info)
+ return;
+ conf = lapic->chip_info;
+
+ /* Set TCC activation offset if supported */
+ msr = rdmsr(MSR_PLATFORM_INFO);
+ if ((msr.lo & (1 << 30)) && conf->tcc_offset) {
+ msr = rdmsr(MSR_TEMPERATURE_TARGET);
+ msr.lo &= ~(0xf << 24); /* Bits 27:24 */
+ msr.lo |= (conf->tcc_offset & 0xf) << 24;
+ wrmsr(MSR_TEMPERATURE_TARGET, msr);
+ }
+}
+
+static void configure_misc(void)
+{
+ msr_t msr;
+
+ msr = rdmsr(IA32_MISC_ENABLE);
+ msr.lo |= (1 << 0); /* Fast String enable */
+ msr.lo |= (1 << 3); /* TM1/TM2/EMTTM enable */
+ msr.lo |= (1 << 16); /* Enhanced SpeedStep Enable */
+ wrmsr(IA32_MISC_ENABLE, msr);
+
+ /* Disable Thermal interrupts */
+ msr.lo = 0;
+ msr.hi = 0;
+ wrmsr(IA32_THERM_INTERRUPT, msr);
+
+ /* Enable package critical interrupt only */
+ msr.lo = 1 << 4;
+ msr.hi = 0;
+ wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);
+}
+
+static void set_max_ratio(void)
+{
+ msr_t msr, perf_ctl;
+
+ perf_ctl.hi = 0;
+
+ /* Check for configurable TDP option */
+ if (get_turbo_state() == TURBO_ENABLED) {
+ msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
+ perf_ctl.lo = (msr.lo & 0xff) << 8;
+ } else if (cpu_config_tdp_levels()) {
+ /* Set to nominal TDP ratio */
+ msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
+ perf_ctl.lo = (msr.lo & 0xff) << 8;
+ } else {
+ /* Platform Info bits 15:8 give max ratio */
+ msr = rdmsr(MSR_PLATFORM_INFO);
+ perf_ctl.lo = msr.lo & 0xff00;
+ }
+ wrmsr(IA32_PERF_CTL, perf_ctl);
+
+ printk(BIOS_DEBUG, "CPU: frequency set to %d\n",
+ ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
+}
+
+static void configure_mca(void)
+{
+ msr_t msr;
+ int i;
+ int num_banks;
+
+ msr = rdmsr(IA32_MCG_CAP);
+ num_banks = msr.lo & 0xff;
+ msr.lo = msr.hi = 0;
+ /* TODO(adurbin): This should only be done on a cold boot. Also, some
+ * of these banks are core vs package scope. For now every CPU clears
+ * every bank. */
+ for (i = 0; i < num_banks; i++)
+ wrmsr(IA32_MC0_STATUS + (i * 4), msr);
+}
+
+/* All CPUs including BSP will run the following function. */
+static void cpu_core_init(struct device *cpu)
+{
+ /* Clear out pending MCEs */
+ configure_mca();
+
+ /* Enable the local CPU APICs */
+ enable_lapic_tpr();
+ setup_lapic();
+
+ /* Set virtualization based on Kconfig option */
+ set_vmx_and_lock();
+
+ /* Configure C States */
+ configure_c_states();
+
+ /* Configure Enhanced SpeedStep and Thermal Sensors */
+ configure_misc();
+
+ /* Thermal throttle activation offset */
+ configure_thermal_target();
+
+ /* Enable Direct Cache Access */
+ configure_dca_cap();
+
+ /* Set energy policy */
+ set_energy_perf_bias(ENERGY_POLICY_NORMAL);
+
+ /* Enable Turbo */
+ enable_turbo();
+}
+
+/* MP initialization support. */
+static const void *microcode_patch;
+
+static void pre_mp_init(void)
+{
+ /* Setup MTRRs based on physical address size. */
+ x86_setup_mtrrs_with_detect();
+ x86_mtrr_check();
+
+ initialize_vr_config();
+ calibrate_24mhz_bclk();
+ configure_pch_power_sharing();
+}
+
+static int get_cpu_count(void)
+{
+ msr_t msr;
+ int num_threads;
+ int num_cores;
+
+ msr = rdmsr(MSR_CORE_THREAD_COUNT);
+ num_threads = (msr.lo >> 0) & 0xffff;
+ num_cores = (msr.lo >> 16) & 0xffff;
+ printk(BIOS_DEBUG, "CPU has %u cores, %u threads enabled.\n",
+ num_cores, num_threads);
+
+ return num_threads;
+}
+
+static void get_microcode_info(const void **microcode, int *parallel)
+{
+ microcode_patch = intel_microcode_find();
+ *microcode = microcode_patch;
+ *parallel = 1;
+}
+
+static void per_cpu_smm_trigger(void)
+{
+ /* Relocate the SMM handler. */
+ smm_relocate();
+
+ /* After SMM relocation a 2nd microcode load is required. */
+ intel_microcode_load_unlocked(microcode_patch);
+}
+
+static void post_mp_init(void)
+{
+ /* Set Max Ratio */
+ set_max_ratio();
+
+ /* Now that all APs have been relocated as well as the BSP let SMIs
+ * start flowing. */
+ global_smi_enable();
+
+ /* Lock down the SMRAM space. */
+ smm_lock();
+}
+
+static const struct mp_ops mp_ops = {
+ .pre_mp_init = pre_mp_init,
+ .get_cpu_count = get_cpu_count,
+ .get_smm_info = smm_info,
+ .get_microcode_info = get_microcode_info,
+ .pre_mp_smm_init = smm_initialize,
+ .per_cpu_smm_trigger = per_cpu_smm_trigger,
+ .relocation_handler = smm_relocation_handler,
+ .post_mp_init = post_mp_init,
+};
+
+void broadwell_init_cpus(struct device *dev)
+{
+ struct bus *cpu_bus = dev->link_list;
+
+ if (mp_init_with_smm(cpu_bus, &mp_ops))
+ printk(BIOS_ERR, "MP initialization failure.\n");
+}
+
+static struct device_operations cpu_dev_ops = {
+ .init = cpu_core_init,
+};
+
+static const struct cpu_device_id cpu_table[] = {
+ { X86_VENDOR_INTEL, CPUID_HASWELL_ULT },
+ { X86_VENDOR_INTEL, CPUID_BROADWELL_C0 },
+ { X86_VENDOR_INTEL, CPUID_BROADWELL_D0 },
+ { X86_VENDOR_INTEL, CPUID_BROADWELL_E0 },
+ { 0, 0 },
+};
+
+static const struct cpu_driver driver __cpu_driver = {
+ .ops = &cpu_dev_ops,
+ .id_table = cpu_table,
+};
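
The power_limit_time_sec_to_msr[] / power_limit_time_msr_to_sec[] tables above follow the RAPL time-window encoding: seconds = 2^bits[4:0] * (1 + bits[6:5]/4) * time_unit. A minimal sketch of the decode, assuming the 1/1024 s time unit that the table entries imply (the helper name is hypothetical, not part of the commit):

/* Decode a POWER_LIMIT_1_TIME value: 0x0a -> 1s, 0x4b -> 3s,
 * 0x70 -> 112s, matching the lookup tables above. */
static unsigned int power_limit_time_to_seconds(u8 val)
{
	unsigned int x = val & 0x1f;		/* exponent, bits 4:0 */
	unsigned int y = (val >> 5) & 0x3;	/* fraction, bits 6:5 */

	/* 2^x * (1 + y/4) time units, 1024 time units per second */
	return ((1u << x) * (4 + y)) / (4 * 1024);
}
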
diff --git a/src/soc/intel/broadwell/cpu/romstage.c b/src/soc/intel/broadwell/cpu/romstage.c
new file mode 100644
index 0000000000..c9f70a85d1
--- /dev/null
+++ b/src/soc/intel/broadwell/cpu/romstage.c
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <arch/cpu.h>
+#include <console/console.h>
+#include <cpu/x86/msr.h>
+#include <soc/cpu.h>
+#include <soc/msr.h>
+#include <soc/romstage.h>
+
+void set_max_freq(void)
+{
+ msr_t msr, perf_ctl, platform_info;
+
+ /* Check for configurable TDP option */
+ platform_info = rdmsr(MSR_PLATFORM_INFO);
+
+ if ((platform_info.hi >> 1) & 3) {
+ /* Set to nominal TDP ratio */
+ msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
+ perf_ctl.lo = (msr.lo & 0xff) << 8;
+ } else {
+ /* Platform Info bits 15:8 give max ratio */
+ msr = rdmsr(MSR_PLATFORM_INFO);
+ perf_ctl.lo = msr.lo & 0xff00;
+ }
+
+ perf_ctl.hi = 0;
+ wrmsr(IA32_PERF_CTL, perf_ctl);
+
+ printk(BIOS_DEBUG, "CPU: frequency set to %d MHz\n",
+ ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
+}
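
set_max_freq() here mirrors set_max_ratio() in cpu.c: both program the target ratio into bits 15:8 of IA32_PERF_CTL, and the resulting frequency is ratio * CPU_BCLK (100 MHz). A minimal sketch with an assumed ratio, not taken from the commit:

/* With a nominal TDP ratio of 0x1a (26), the request below asks
 * for 26 * 100 MHz = 2600 MHz. */
static msr_t perf_ctl_for_ratio(u8 ratio)
{
	msr_t perf_ctl;

	perf_ctl.lo = (u32)ratio << 8;	/* target ratio in bits 15:8 */
	perf_ctl.hi = 0;
	return perf_ctl;
}
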
diff --git a/src/soc/intel/broadwell/cpu/smmrelocate.c b/src/soc/intel/broadwell/cpu/smmrelocate.c
new file mode 100644
index 0000000000..3d4162abd0
--- /dev/null
+++ b/src/soc/intel/broadwell/cpu/smmrelocate.c
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <types.h>
+#include <string.h>
+#include <device/device.h>
+#include <device/pci.h>
+#include <device/pci_ops.h>
+#include <cpu/x86/mp.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/mtrr.h>
+#include <cpu/x86/smm.h>
+#include <cpu/intel/em64t101_save_state.h>
+#include <cpu/intel/smm_reloc.h>
+#include <console/console.h>
+#include <smp/node.h>
+#include <soc/cpu.h>
+#include <soc/msr.h>
+#include <soc/pci_devs.h>
+#include <soc/systemagent.h>
+
+static void update_save_state(int cpu, uintptr_t curr_smbase,
+ uintptr_t staggered_smbase,
+ struct smm_relocation_params *relo_params)
+{
+ u32 smbase;
+ u32 iedbase;
+
+ /* The relocated handler runs with all CPUs concurrently. Therefore
+ * stagger the entry points adjusting SMBASE downwards by save state
+ * size * CPU num. */
+ smbase = staggered_smbase;
+ iedbase = relo_params->ied_base;
+
+ printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
+ smbase, iedbase);
+
+ /* All threads need to set IEDBASE and SMBASE to the relocated
+ * handler region. However, the save state location depends on the
+ * smm_save_state_in_msrs field in the relocation parameters. If
+ * smm_save_state_in_msrs is non-zero then the CPUs are relocating
+ * the SMM handler in parallel, and each CPU's save state area is
+ * located in their respective MSR space. If smm_save_state_in_msrs
+ * is zero then the SMM relocation is happening serially so the
+ * save state is at the same default location for all CPUs. */
+ if (relo_params->smm_save_state_in_msrs) {
+ msr_t smbase_msr;
+ msr_t iedbase_msr;
+
+ smbase_msr.lo = smbase;
+ smbase_msr.hi = 0;
+
+ /* According to the BWG the IEDBASE MSR is in bits 63:32. It's
+ * not clear why it differs from the SMBASE MSR. */
+ iedbase_msr.lo = 0;
+ iedbase_msr.hi = iedbase;
+
+ wrmsr(SMBASE_MSR, smbase_msr);
+ wrmsr(IEDBASE_MSR, iedbase_msr);
+ } else {
+ em64t101_smm_state_save_area_t *save_state;
+
+ save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
+ sizeof(*save_state));
+
+ save_state->smbase = smbase;
+ save_state->iedbase = iedbase;
+ }
+}
+
+/* Returns 1 if SMM MSR save state was set. */
+static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
+{
+ msr_t smm_mca_cap;
+
+ smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
+ if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
+ msr_t smm_feature_control;
+
+ smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
+ smm_feature_control.hi = 0;
+ smm_feature_control.lo |= SMM_CPU_SAVE_EN;
+ wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
+ relo_params->smm_save_state_in_msrs = 1;
+ }
+ return relo_params->smm_save_state_in_msrs;
+}
+
+/* The relocation work is actually performed in SMM context, but the code
+ * resides in the ramstage module. This occurs by trampolining from the default
+ * SMRAM entry point to here. */
+void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
+ uintptr_t staggered_smbase)
+{
+ msr_t mtrr_cap;
+ struct smm_relocation_params *relo_params = &smm_reloc_params;
+
+ printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);
+
+ /* Determine if the processor supports saving state in MSRs. If so,
+ * enable it before the non-BSPs run so that SMM relocation can occur
+ * in parallel in the non-BSP CPUs. */
+ if (cpu == 0) {
+ /* If smm_save_state_in_msrs is 1 then that means this is the
+ * 2nd time through the relocation handler for the BSP.
+ * Parallel SMM handler relocation is taking place. However,
+ * it is desired to access other CPUs save state in the real
+ * SMM handler. Therefore, disable the SMM save state in MSRs
+ * feature. */
+ if (relo_params->smm_save_state_in_msrs) {
+ msr_t smm_feature_control;
+
+ smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
+ smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
+ wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
+ } else if (bsp_setup_msr_save_state(relo_params))
+ /* Just return from relocation handler if MSR save
+ * state is enabled. In that case the BSP will come
+ * back into the relocation handler to set up the new
+ * SMBASE as well as disabling SMM save state in MSRs. */
+ return;
+ }
+
+ /* Make appropriate changes to the save state map. */
+ update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
+
+ /* Write PRMRR and SMRR MSRs based on indicated support. */
+ mtrr_cap = rdmsr(MTRR_CAP_MSR);
+ if (mtrr_cap.lo & SMRR_SUPPORTED)
+ write_smrr(relo_params);
+
+ if (mtrr_cap.lo & PRMRR_SUPPORTED) {
+ write_prmrr(relo_params);
+ /* UNCORE_PRMRR msrs are package level. Therefore, only
+ * configure these MSRs on the BSP. */
+ if (cpu == 0)
+ write_uncore_prmrr(relo_params);
+ }
+}
+
+static void fill_in_relocation_params(struct smm_relocation_params *params)
+{
+ uintptr_t tseg_base;
+ size_t tseg_size;
+ u32 prmrr_base;
+ u32 prmrr_size;
+ int phys_bits;
+ /* All range registers are aligned to 4KiB */
+ const u32 rmask = ~((1 << 12) - 1);
+
+ /* Some of the range registers are dependent on the number of physical
+ * address bits supported. */
+ phys_bits = cpuid_eax(0x80000008) & 0xff;
+
+ /* The range bounded by the TSEGMB and BGSM registers encompasses the
+ * SMRAM range as well as the IED range. However, the SMRAM available
+ * to the handler is 4MiB since the IEDRAM lives at TSEGMB + 4MiB.
+ */
+ smm_region(&tseg_base, &tseg_size);
+
+ /* SMRR has 32-bits of valid address aligned to 4KiB. */
+ params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
+ params->smrr_base.hi = 0;
+ params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+ params->smrr_mask.hi = 0;
+
+ smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);
+
+ /* The PRMRR and UNCORE_PRMRR are at IEDBASE + 2MiB */
+ prmrr_base = (params->ied_base + (2 << 20)) & rmask;
+ prmrr_size = params->ied_size - (2 << 20);
+
+ /* PRMRR has 46 bits of valid address aligned to 4KiB. It's dependent
+ * on the number of physical address bits supported. */
+ params->prmrr_base.lo = prmrr_base | MTRR_TYPE_WRBACK;
+ params->prmrr_base.hi = 0;
+ params->prmrr_mask.lo = (~(prmrr_size - 1) & rmask)
+ | MTRR_PHYS_MASK_VALID;
+ params->prmrr_mask.hi = (1 << (phys_bits - 32)) - 1;
+
+ /* UNCORE_PRMRR has 39 bits of valid address aligned to 4KiB. */
+ params->uncore_prmrr_base.lo = prmrr_base;
+ params->uncore_prmrr_base.hi = 0;
+ params->uncore_prmrr_mask.lo = (~(prmrr_size - 1) & rmask) |
+ MTRR_PHYS_MASK_VALID;
+ params->uncore_prmrr_mask.hi = (1 << (39 - 32)) - 1;
+}
+
+static void setup_ied_area(struct smm_relocation_params *params)
+{
+ char *ied_base;
+
+ struct ied_header ied = {
+ .signature = "INTEL RSVD",
+ .size = params->ied_size,
+ .reserved = {0},
+ };
+
+ ied_base = (void *)params->ied_base;
+
+ /* Place IED header at IEDBASE. */
+ memcpy(ied_base, &ied, sizeof(ied));
+
+ /* Zero out 32KiB at IEDBASE + 1MiB */
+ memset(ied_base + (1 << 20), 0, (32 << 10));
+}
+
+void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
+ size_t *smm_save_state_size)
+{
+ printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
+
+ fill_in_relocation_params(&smm_reloc_params);
+
+ smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
+
+ setup_ied_area(&smm_reloc_params);
+
+ *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
+}
+
+void smm_initialize(void)
+{
+ /* Clear the SMM state in the southbridge. */
+ smm_southbridge_clear_state();
+
+ /*
+ * Run the relocation handler on the BSP to check and set up
+ * parallel SMM relocation.
+ */
+ smm_initiate_relocation();
+
+ if (smm_reloc_params.smm_save_state_in_msrs)
+ printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
+}
+
+/* The default SMM entry can happen in parallel or serially. If the
+ * default SMM entry is done in parallel the BSP has already set up
+ * the save state in each CPU's MSRs. At least one save state size
+ * is required for the initial SMM entry for the BSP to determine if
+ * parallel SMM relocation is even feasible. */
+void smm_relocate(void)
+{
+ /*
+ * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
+ * shall take place. Run the relocation handler a second time on the
+ * BSP to do the final move. For APs, a relocation handler always
+ * needs to be run.
+ */
+ if (smm_reloc_params.smm_save_state_in_msrs)
+ smm_initiate_relocation_parallel();
+ else if (!boot_cpu())
+ smm_initiate_relocation();
+}
+
+void smm_lock(void)
+{
+ struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT);
+ /* LOCK the SMM memory window and enable normal SMM.
+ * After running this function, only a full reset can
+ * make the SMM registers writable again.
+ */
+ printk(BIOS_DEBUG, "Locking SMM.\n");
+ pci_write_config8(sa_dev, SMRAM, D_LCK | G_SMRAME | C_BASE_SEG);
+}
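
fill_in_relocation_params() above builds the SMRR pair with standard variable-range register math: base | cache type, and an inverted size mask plus the valid bit. A worked sketch with an assumed 8 MiB TSEG at 0x7f800000 (both values are illustrative, not from the commit; MTRR_TYPE_WRBACK is 0x06 and MTRR_PHYS_MASK_VALID is bit 11):

static void smrr_math_example(void)
{
	const u32 rmask = ~((1u << 12) - 1);	/* 4 KiB alignment */
	const u32 tseg_base = 0x7f800000;	/* assumed */
	const u32 tseg_size = 8u << 20;		/* assumed 8 MiB */

	u32 base_lo = (tseg_base & rmask) | 0x06;	/* = 0x7f800006 */
	u32 mask_lo = (~(tseg_size - 1) & rmask) | (1u << 11);
						/* = 0xff800800 */
	(void)base_lo;
	(void)mask_lo;
}
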
diff --git a/src/soc/intel/broadwell/cpu/tsc_freq.c b/src/soc/intel/broadwell/cpu/tsc_freq.c
new file mode 100644
index 0000000000..4a8a34349f
--- /dev/null
+++ b/src/soc/intel/broadwell/cpu/tsc_freq.c
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <cpu/x86/msr.h>
+#include <cpu/x86/tsc.h>
+#include <soc/cpu.h>
+#include <soc/msr.h>
+
+unsigned long tsc_freq_mhz(void)
+{
+ msr_t platform_info;
+
+ platform_info = rdmsr(MSR_PLATFORM_INFO);
+ return CPU_BCLK * ((platform_info.lo >> 8) & 0xff);
+}
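
tsc_freq_mhz() reads the maximum non-turbo ratio from PLATFORM_INFO bits 15:8 and scales it by the 100 MHz CPU_BCLK. As a worked example with an assumed register value, not from the commit: a ratio of 0x1a in the low dword (platform_info.lo = 0x00001a00) gives 100 * 26 = 2600 MHz.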