From 2c3662710ae4f2ef063098c5dfb58dbe74fe121e Mon Sep 17 00:00:00 2001
From: Rudolf Marek
Date: Sat, 18 Dec 2010 23:30:59 +0000
Subject: SMM on AMD K8 Part 2/2

Signed-off-by: Rudolf Marek
Acked-by: Stefan Reinauer

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@6202 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
---
 src/cpu/amd/smm/smm_init.c | 48 +++++++++++++++++-------------------------------
 1 file changed, 17 insertions(+), 31 deletions(-)

diff --git a/src/cpu/amd/smm/smm_init.c b/src/cpu/amd/smm/smm_init.c
index d6f728bbba..ad1c112ed0 100644
--- a/src/cpu/amd/smm/smm_init.c
+++ b/src/cpu/amd/smm/smm_init.c
@@ -57,13 +57,13 @@ void smm_init(void)
 
 		smm_handler_copied = 1;
 
-		/* MTRR changes don't like an enabled cache */
-		disable_cache();
-
 		/* Back up MSRs for later restore */
 		syscfg_orig = rdmsr(SYSCFG_MSR);
 		mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
 
+		/* MTRR changes don't like an enabled cache */
+		disable_cache();
+
 		msr = syscfg_orig;
 		/* Allow changes to MTRR extended attributes */
 		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
@@ -78,60 +78,46 @@ void smm_init(void)
 		msr.lo = 0x18181818;
 		msr.hi = 0x18181818;
 		wrmsr(MTRRfix16K_A0000_MSR, msr);
-		enable_cache();
 
-		/* disable the extended features */
+		/* enable the extended features */
 		msr = syscfg_orig;
 		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
 		msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
 		wrmsr(SYSCFG_MSR, msr);
 
-		/* enable the SMM memory window */
-		// TODO does "Enable ASEG SMRAM Range" have to happen on
-		// every CPU core?
-		msr = rdmsr(SMM_MASK_MSR);
-		msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
-		msr.lo &= ~(1 << 2); // Open ASEG SMRAM Range
-		wrmsr(SMM_MASK_MSR, msr);
-
+		enable_cache();
 		/* copy the real SMM handler */
 		memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
 		wbinvd();
 
-		msr = rdmsr(SMM_MASK_MSR);
-		msr.lo |= ~(1 << 2); // Close ASEG SMRAM Range
-		wrmsr(SMM_MASK_MSR, msr);
-
-		/* Change SYSCFG so we can restore the MTRR */
-		msr = syscfg_orig;
-		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
-		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
-		wrmsr(SYSCFG_MSR, msr);
-
 		/* Restore MTRR */
 		disable_cache();
 
-		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
 
 		/* Restore SYSCFG */
 		wrmsr(SYSCFG_MSR, syscfg_orig);
+
+		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
 		enable_cache();
 	}
 
+	/* But set SMM base address on all CPUs/cores */
 	msr = rdmsr(SMM_BASE_MSR);
 	msr.lo = SMM_BASE - (lapicid() * 0x400);
 	wrmsr(SMM_BASE_MSR, msr);
-}
-
-void smm_lock(void)
-{
-	// TODO I think this should be running on each CPU
-	msr_t msr;
-	printk(BIOS_DEBUG, "Locking SMM.\n");
+	/* enable the SMM memory window */
+	msr = rdmsr(SMM_MASK_MSR);
+	msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
+	wrmsr(SMM_MASK_MSR, msr);
 
 	/* Set SMMLOCK to avoid exploits messing with SMM */
 	msr = rdmsr(HWCR_MSR);
 	msr.lo |= (1 << 0);
 	wrmsr(HWCR_MSR, msr);
 }
+
+void smm_lock(void)
+{
+	/* We lock SMM per CPU core */
+}
--
cgit v1.2.3
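
Note on the per-core SMBASE arithmetic above: the patch programs each core's SMM base as SMM_BASE - (lapicid() * 0x400), staggering the SMI entry points and save-state areas of the individual cores inside ASEG so they do not collide. The standalone sketch below only illustrates that layout; it assumes SMM_BASE is the ASEG base at 0xa0000 and substitutes a plain loop over APIC IDs for lapicid(), so it is an illustration, not code from the patch.

#include <stdio.h>
#include <stdint.h>

/* Assumed value for illustration: the ASEG SMM handler is copied to the
 * start of the A segment. */
#define SMM_BASE 0xa0000u

int main(void)
{
	/* Stand-in for lapicid(): iterate over a few APIC IDs. */
	for (uint32_t apic_id = 0; apic_id < 4; apic_id++) {
		/* Same formula as the patch: shift each core's SMBASE
		 * down by 0x400 per APIC ID. */
		uint32_t smbase = SMM_BASE - apic_id * 0x400;

		/* On x86, the SMI entry point is SMBASE + 0x8000 and the
		 * save-state area starts at SMBASE + 0xfe00, so a 0x400
		 * stride keeps both distinct for every core. */
		printf("APIC %u: SMBASE 0x%05x, entry 0x%05x, save state 0x%05x\n",
		       apic_id, smbase, smbase + 0x8000, smbase + 0xfe00);
	}
	return 0;
}

With four APIC IDs this prints SMBASE values 0xa0000, 0x9fc00, 0x9f800 and 0x9f400, each core's save-state area sitting 0x400 below the previous one.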