Diffstat (limited to 'src/cpu/intel/haswell/romstage.c')
-rw-r--r-- src/cpu/intel/haswell/romstage.c | 117
1 file changed, 25 insertions, 92 deletions
diff --git a/src/cpu/intel/haswell/romstage.c b/src/cpu/intel/haswell/romstage.c
index 1c293d4647..ab9fd591be 100644
--- a/src/cpu/intel/haswell/romstage.c
+++ b/src/cpu/intel/haswell/romstage.c
@@ -50,100 +50,39 @@ static inline void reset_system(void)
halt();
}
-/* The cache-as-ram assembly file calls romstage_main() after setting up
- * cache-as-ram. romstage_main() will then call the mainboards's
- * mainboard_romstage_entry() function. That function then calls
- * romstage_common() below. The reason for the back and forth is to provide
- * common entry point from cache-as-ram while still allowing for code sharing.
- * Because we can't use global variables the stack is used for allocations --
- * thus the need to call back and forth. */
+#define ROMSTAGE_RAM_STACK_SIZE 0x5000
-
-static inline u32 *stack_push(u32 *stack, u32 value)
-{
- stack = &stack[-1];
- *stack = value;
- return stack;
-}
-
-/* setup_romstage_stack_after_car() determines the stack to use after
- * cache-as-ram is torn down as well as the MTRR settings to use. */
-static void *setup_romstage_stack_after_car(void)
+/* platform_enter_postcar() determines the stack to use after
+ * cache-as-ram is torn down as well as the MTRR settings to use,
+ * and continues execution in the postcar stage. */
+static void platform_enter_postcar(void)
{
- int num_mtrrs;
- u32 *slot;
- u32 mtrr_mask_upper;
- u32 top_of_ram;
-
- /* Top of stack needs to be aligned to a 4-byte boundary. */
- slot = (void *)romstage_ram_stack_top();
- num_mtrrs = 0;
-
- /* The upper bits of the MTRR mask need to set according to the number
- * of physical address bits. */
- mtrr_mask_upper = (1 << (cpu_phys_address_size() - 32)) - 1;
-
- /* The order for each MTRR is value then base with upper 32-bits of
- * each value coming before the lower 32-bits. The reasoning for
- * this ordering is to create a stack layout like the following:
- * +0: Number of MTRRs
- * +4: MTRR base 0 31:0
- * +8: MTRR base 0 63:32
- * +12: MTRR mask 0 31:0
- * +16: MTRR mask 0 63:32
- * +20: MTRR base 1 31:0
- * +24: MTRR base 1 63:32
- * +28: MTRR mask 1 31:0
- * +32: MTRR mask 1 63:32
- */
+ struct postcar_frame pcf;
+ uintptr_t top_of_ram;
+ if (postcar_frame_init(&pcf, ROMSTAGE_RAM_STACK_SIZE))
+ die("Unable to initialize postcar frame.\n");
/* Cache the ROM as WP just below 4GiB. */
- slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
- slot = stack_push(slot, ~(CACHE_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID);
- slot = stack_push(slot, 0); /* upper base */
- slot = stack_push(slot, ~(CACHE_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
- num_mtrrs++;
+ postcar_frame_add_mtrr(&pcf, CACHE_ROM_BASE, CACHE_ROM_SIZE,
+ MTRR_TYPE_WRPROT);
/* Cache RAM as WB from 0 -> CACHE_TMP_RAMTOP. */
- slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
- slot = stack_push(slot, ~(CACHE_TMP_RAMTOP - 1) | MTRR_PHYS_MASK_VALID);
- slot = stack_push(slot, 0); /* upper base */
- slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
- num_mtrrs++;
-
- top_of_ram = (uint32_t)cbmem_top();
- /* Cache 8MiB below the top of RAM. On haswell systems the top of
- * RAM under 4GiB is the start of the TSEG region. It is required to
- * be 8MiB aligned. Set this area as cacheable so it can be used later
- * for ramstage before setting up the entire RAM as cacheable. */
- slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
- slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
- slot = stack_push(slot, 0); /* upper base */
- slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
- num_mtrrs++;
-
- /* Cache 8MiB at the top of RAM. Top of RAM on haswell systems
- * is where the TSEG region resides. However, it is not restricted
- * to SMM mode until SMM has been relocated. By setting the region
- * to cacheable it provides faster access when relocating the SMM
- * handler as well as using the TSEG region for other purposes. */
- slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
- slot = stack_push(slot, ~((8 << 20) - 1) | MTRR_PHYS_MASK_VALID);
- slot = stack_push(slot, 0); /* upper base */
- slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
- num_mtrrs++;
-
- /* Save the number of MTRRs to setup. Return the stack location
- * pointing to the number of MTRRs. */
- slot = stack_push(slot, num_mtrrs);
-
- return slot;
+ postcar_frame_add_mtrr(&pcf, 0, CACHE_TMP_RAMTOP, MTRR_TYPE_WRBACK);
+
+ /* Cache at least 8 MiB below the top of RAM, and at most 8 MiB
+ * above the top of RAM. This satisfies the MTRR alignment
+ * requirement with different TSEG size configurations.
+ */
+ top_of_ram = ALIGN_DOWN((uintptr_t)cbmem_top(), 8*MiB);
+ postcar_frame_add_mtrr(&pcf, top_of_ram - 8*MiB, 16*MiB,
+ MTRR_TYPE_WRBACK);
+
+ run_postcar_phase(&pcf);
}
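
As a quick illustration of the alignment arithmetic in the comment above, here is a minimal, stand-alone sketch. The cbmem_top() value is hypothetical, and MiB/ALIGN_DOWN are redefined locally so the example compiles outside coreboot.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for coreboot's MiB and ALIGN_DOWN helpers. */
#define MiB 0x100000UL
#define ALIGN_DOWN(x, a) ((x) & ~((uintptr_t)(a) - 1))

int main(void)
{
	/* Hypothetical, unaligned top of low RAM (start of TSEG). */
	uintptr_t cbmem_top_example = 0x7fa00000;

	/* Round down to an 8 MiB boundary, as platform_enter_postcar() does. */
	uintptr_t top_of_ram = ALIGN_DOWN(cbmem_top_example, 8 * MiB);

	/* The write-back window then starts 8 MiB below that boundary and
	 * runs for 16 MiB, i.e. up to 8 MiB above it, so it covers TSEG
	 * while staying alignable to MTRR granularity. */
	printf("cbmem_top = 0x%lx\n", (unsigned long)cbmem_top_example);
	printf("WB window = 0x%lx .. 0x%lx\n",
	       (unsigned long)(top_of_ram - 8 * MiB),
	       (unsigned long)(top_of_ram + 8 * MiB));
	return 0;
}
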
asmlinkage void *romstage_main(unsigned long bist)
{
int i;
- void *romstage_stack_after_car;
const int num_guards = 4;
const u32 stack_guard = 0xdeadbeef;
u32 *stack_base = (void *)(CONFIG_DCACHE_RAM_BASE +
@@ -163,10 +102,10 @@ asmlinkage void *romstage_main(unsigned long bist)
printk(BIOS_DEBUG, "Smashed stack detected in romstage!\n");
}
- /* Get the stack to use after cache-as-ram is torn down. */
- romstage_stack_after_car = setup_romstage_stack_after_car();
+ platform_enter_postcar();
- return romstage_stack_after_car;
+ /* We do not return here */
+ return NULL;
}
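
For context on the unchanged romstage_main() code around this hunk: it writes a few guard words (0xdeadbeef) at the base of the cache-as-ram stack and checks them after the mainboard code has run. Below is a minimal sketch of that pattern, using a plain array as a stand-in for the CAR stack base and hypothetical helper names.

#include <stdint.h>
#include <stdio.h>

#define NUM_GUARDS 4
#define STACK_GUARD 0xdeadbeefu

/* Hypothetical stand-in for the memory at the base of the romstage stack. */
static uint32_t fake_stack_base[NUM_GUARDS];

static void place_guards(uint32_t *base)
{
	for (int i = 0; i < NUM_GUARDS; i++)
		base[i] = STACK_GUARD;
}

static int stack_was_smashed(const uint32_t *base)
{
	for (int i = 0; i < NUM_GUARDS; i++) {
		if (base[i] != STACK_GUARD)
			return 1;
	}
	return 0;
}

int main(void)
{
	place_guards(fake_stack_base);

	/* The deep romstage call chain would run here; if it overran the
	 * stack, the guard words at the stack base would be overwritten. */

	if (stack_was_smashed(fake_stack_base))
		printf("Smashed stack detected in romstage!\n");
	else
		printf("Stack guards intact.\n");
	return 0;
}
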
void romstage_common(const struct romstage_params *params)
@@ -248,9 +187,3 @@ void romstage_common(const struct romstage_params *params)
if (IS_ENABLED(CONFIG_TPM1) || IS_ENABLED(CONFIG_TPM2))
tpm_setup(wake_from_s3);
}
-
-asmlinkage void romstage_after_car(void)
-{
- /* Load the ramstage. */
- run_ramstage();
-}
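
For contrast with the new run_postcar_phase() flow, the removed setup_romstage_stack_after_car() handed its caller a raw stack image in the layout documented in its comment: the MTRR count first, then base/mask pairs with the low 32 bits of each value first. The sketch below shows how a consumer of that layout could walk it; the function name and the sample values are hypothetical, not coreboot's actual entry code.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical walker for the removed on-stack MTRR layout:
 *   +0:  number of MTRRs
 *   +4:  MTRR base 31:0
 *   +8:  MTRR base 63:32
 *   +12: MTRR mask 31:0
 *   +16: MTRR mask 63:32
 *   ...repeated for each MTRR...
 */
static void walk_old_mtrr_slots(const uint32_t *slot)
{
	uint32_t num_mtrrs = *slot++;

	for (uint32_t i = 0; i < num_mtrrs; i++) {
		uint64_t base = slot[0] | ((uint64_t)slot[1] << 32);
		uint64_t mask = slot[2] | ((uint64_t)slot[3] << 32);
		slot += 4;
		printf("MTRR %u: base 0x%llx mask 0x%llx\n", i,
		       (unsigned long long)base, (unsigned long long)mask);
	}
}

int main(void)
{
	/* One illustrative entry: WB from 0, mask for a 16 MiB range with
	 * the valid bit set, assuming 36 physical address bits. */
	const uint32_t slots[] = { 1, 0x00000006, 0, 0xff000800, 0xf };

	walk_old_mtrr_slots(slots);
	return 0;
}

With the postcar frame API, this hand-rolled encoding disappears: postcar_frame_add_mtrr() records each range in the frame, and the postcar stage programs the MTRRs and loads ramstage itself, which is why the romstage_after_car() hook above could be dropped.
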