author     Rudolf Marek <r.marek@assembler.cz>      2010-12-18 23:30:59 +0000
committer  Stefan Reinauer <stepan@openbios.org>    2010-12-18 23:30:59 +0000
commit     2c3662710ae4f2ef063098c5dfb58dbe74fe121e (patch)
tree       788cfb8e3aa7ee01aa0a029c1c07735ae0f9b315 /src/cpu/amd
parent     cadc54583877db65f33d2db11088d5fae1b77b74 (diff)
download   coreboot-2c3662710ae4f2ef063098c5dfb58dbe74fe121e.tar.xz
SMM on AMD K8 Part 2/2
Signed-off-by: Rudolf Marek <r.marek@assembler.cz>
Acked-by: Stefan Reinauer <stepan@coresystems.de>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@6202 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
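This part of the series reorders smm_init() so the MSR backup happens before the cache is disabled, corrects the "disable the extended features" comment to "enable", and moves both the ASEG SMRAM enable and the SMMLOCK setting into the per-core tail of smm_init(); smm_lock() becomes a stub because each core now locks its own SMM configuration. The least obvious constant in the diff is the 0x18181818 written to MTRRfix16K_A0000_MSR. Below is a minimal sketch of what that pattern encodes, assuming the K8 fixed-MTRR extended attributes (bit 3 = WrDram, bit 4 = RdDram in each byte once SYSCFG.MtrrFixDramModEn is set); the macro and function names are invented for illustration and are not coreboot identifiers.

#include <stdint.h>

/* Sketch only: names here are illustrative, not coreboot's.  Each byte of
 * MTRRfix16K_A0000 describes one 16 KiB slice of 0xA0000-0xBFFFF.  With
 * SYSCFG.MtrrFixDramModEn set, bit 3 (WrDram) and bit 4 (RdDram) of every
 * byte steer writes/reads for that slice to DRAM instead of MMIO; the low
 * three bits keep the memory type (0 = UC here). */
#define FIX_MTRR_WRDRAM (1 << 3)
#define FIX_MTRR_RDDRAM (1 << 4)

static uint64_t aseg_to_dram_pattern(void)
{
	uint64_t pattern = 0;
	int i;

	/* Replicate 0x18 (RdDram | WrDram, type UC) across all eight slices. */
	for (i = 0; i < 8; i++)
		pattern |= (uint64_t)(FIX_MTRR_RDDRAM | FIX_MTRR_WRDRAM) << (8 * i);

	return pattern; /* 0x1818181818181818: the msr.hi/msr.lo pair in the diff below */
}

With that pattern in place, the SMM handler can be copied to 0xA0000 through the normal memory path, after which the original MTRR and SYSCFG values are restored.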
Diffstat (limited to 'src/cpu/amd')
-rw-r--r--   src/cpu/amd/smm/smm_init.c   48
1 file changed, 17 insertions, 31 deletions
diff --git a/src/cpu/amd/smm/smm_init.c b/src/cpu/amd/smm/smm_init.c
index d6f728bbba..ad1c112ed0 100644
--- a/src/cpu/amd/smm/smm_init.c
+++ b/src/cpu/amd/smm/smm_init.c
@@ -57,13 +57,13 @@ void smm_init(void)
 		smm_handler_copied = 1;
-		/* MTRR changes don't like an enabled cache */
-		disable_cache();
-
 		/* Back up MSRs for later restore */
 		syscfg_orig = rdmsr(SYSCFG_MSR);
 		mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
+		/* MTRR changes don't like an enabled cache */
+		disable_cache();
+
 		msr = syscfg_orig;
 		/* Allow changes to MTRR extended attributes */
 		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
@@ -78,60 +78,46 @@ void smm_init(void)
 		msr.lo = 0x18181818;
 		msr.hi = 0x18181818;
 		wrmsr(MTRRfix16K_A0000_MSR, msr);
-		enable_cache();
-		/* disable the extended features */
+		/* enable the extended features */
 		msr = syscfg_orig;
 		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
 		msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
 		wrmsr(SYSCFG_MSR, msr);
-		/* enable the SMM memory window */
-		// TODO does "Enable ASEG SMRAM Range" have to happen on
-		// every CPU core?
-		msr = rdmsr(SMM_MASK_MSR);
-		msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
-		msr.lo &= ~(1 << 2); // Open ASEG SMRAM Range
-		wrmsr(SMM_MASK_MSR, msr);
-
+		enable_cache();
 		/* copy the real SMM handler */
 		memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
 		wbinvd();
-		msr = rdmsr(SMM_MASK_MSR);
-		msr.lo |= ~(1 << 2); // Close ASEG SMRAM Range
-		wrmsr(SMM_MASK_MSR, msr);
-
-		/* Change SYSCFG so we can restore the MTRR */
-		msr = syscfg_orig;
-		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
-		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
-		wrmsr(SYSCFG_MSR, msr);
-
 		/* Restore MTRR */
 		disable_cache();
-		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
 		/* Restore SYSCFG */
 		wrmsr(SYSCFG_MSR, syscfg_orig);
+
+		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
 		enable_cache();
 	}
+
 	/* But set SMM base address on all CPUs/cores */
 	msr = rdmsr(SMM_BASE_MSR);
 	msr.lo = SMM_BASE - (lapicid() * 0x400);
 	wrmsr(SMM_BASE_MSR, msr);
-}
-
-void smm_lock(void)
-{
-	// TODO I think this should be running on each CPU
-	msr_t msr;
-	printk(BIOS_DEBUG, "Locking SMM.\n");
+	/* enable the SMM memory window */
+	msr = rdmsr(SMM_MASK_MSR);
+	msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
+	wrmsr(SMM_MASK_MSR, msr);
 	/* Set SMMLOCK to avoid exploits messing with SMM */
 	msr = rdmsr(HWCR_MSR);
 	msr.lo |= (1 << 0);
 	wrmsr(HWCR_MSR, msr);
 }
+
+void smm_lock(void)
+{
+	/* We lock SMM per CPU core */
+}
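The per-core tail of smm_init() is what replaces the old smm_lock() logic: every core programs its own SMM_BASE_MSR, enables the ASEG SMRAM range in SMM_MASK_MSR, and sets SMMLOCK (bit 0 of HWCR), which prevents further changes to that core's SMM configuration, so nothing is left for smm_lock() to do. The SMBASE value is staggered downward by 0x400 per local APIC ID. A small sketch of the resulting layout, assuming coreboot's K8 ASEG handler base of SMM_BASE = 0xA0000 and the architectural SMI entry point at SMBASE + 0x8000 with the save-state area at SMBASE + 0xFE00; the constants and helpers below are illustrative, not taken from the source.

#include <stdint.h>

/* Illustrative sketch, not coreboot code. */
#define SMM_BASE        0xA0000 /* ASEG handler base assumed by this series */
#define SMM_ENTRY_OFF   0x8000  /* architectural SMI entry offset from SMBASE */
#define SMM_SAVE_OFF    0xFE00  /* architectural SMM save-state area offset */
#define SMM_CORE_STRIDE 0x400   /* per-core stagger used in the diff above */

static uint32_t smbase_for_core(unsigned int lapic_id)
{
	/* Core 0 keeps 0xA0000; each further core is pushed down by 0x400 so
	 * that, per the handler layout this series assumes, its entry point
	 * and save-state area do not overlap another core's. */
	return SMM_BASE - lapic_id * SMM_CORE_STRIDE;
}

static uint32_t smi_entry_for_core(unsigned int lapic_id)
{
	return smbase_for_core(lapic_id) + SMM_ENTRY_OFF;
}

static uint32_t save_state_for_core(unsigned int lapic_id)
{
	return smbase_for_core(lapic_id) + SMM_SAVE_OFF;
}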